Chromium Code Reviews

Side by Side Diff: webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h

Issue 2964953002: Remove webrtc::VideoEncoderFactory (Closed)
Patch Set: Add dep to base:sequenced_task_checker (created 3 years, 5 months ago)
/*
 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_
#define WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_

#include <algorithm>
#include <map>
#include <memory>
#include <vector>

#include "webrtc/api/video/i420_buffer.h"
#include "webrtc/api/video/video_frame.h"
#include "webrtc/common_video/include/video_frame.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
#include "webrtc/modules/video_coding/codecs/vp8/simulcast_rate_allocator.h"
#include "webrtc/modules/video_coding/codecs/vp8/temporal_layers.h"
#include "webrtc/modules/video_coding/include/mock/mock_video_codec_interface.h"
#include "webrtc/modules/video_coding/include/video_coding_defines.h"
#include "webrtc/rtc_base/checks.h"
#include "webrtc/test/gtest.h"

using ::testing::_;
using ::testing::AllOf;
using ::testing::Field;
using ::testing::Return;

namespace webrtc {
namespace testing {

const int kDefaultWidth = 1280;
const int kDefaultHeight = 720;
const int kNumberOfSimulcastStreams = 3;
const int kColorY = 66;
const int kColorU = 22;
const int kColorV = 33;
const int kMaxBitrates[kNumberOfSimulcastStreams] = {150, 600, 1200};
const int kMinBitrates[kNumberOfSimulcastStreams] = {50, 150, 600};
const int kTargetBitrates[kNumberOfSimulcastStreams] = {100, 450, 1000};
const int kDefaultTemporalLayerProfile[3] = {3, 3, 3};

template <typename T>
void SetExpectedValues3(T value0, T value1, T value2, T* expected_values) {
  expected_values[0] = value0;
  expected_values[1] = value1;
  expected_values[2] = value2;
}

enum PlaneType {
  kYPlane = 0,
  kUPlane = 1,
  kVPlane = 2,
  kNumOfPlanes = 3,
};

class Vp8TestEncodedImageCallback : public EncodedImageCallback {
 public:
  Vp8TestEncodedImageCallback() : picture_id_(-1) {
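    // Note: the byte-wise fills below are intentional; 0xFF in every byte of
    // an int reads back as -1, and a zero fill reads back as false.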
    memset(temporal_layer_, -1, sizeof(temporal_layer_));
    memset(layer_sync_, false, sizeof(layer_sync_));
  }

  ~Vp8TestEncodedImageCallback() {
    delete[] encoded_key_frame_._buffer;
    delete[] encoded_frame_._buffer;
  }

  virtual Result OnEncodedImage(const EncodedImage& encoded_image,
                                const CodecSpecificInfo* codec_specific_info,
                                const RTPFragmentationHeader* fragmentation) {
    // Only store the base layer.
    if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
      if (encoded_image._frameType == kVideoFrameKey) {
        delete[] encoded_key_frame_._buffer;
        encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
        encoded_key_frame_._size = encoded_image._size;
        encoded_key_frame_._length = encoded_image._length;
        encoded_key_frame_._frameType = kVideoFrameKey;
        encoded_key_frame_._completeFrame = encoded_image._completeFrame;
        memcpy(encoded_key_frame_._buffer, encoded_image._buffer,
               encoded_image._length);
      } else {
        delete[] encoded_frame_._buffer;
        encoded_frame_._buffer = new uint8_t[encoded_image._size];
        encoded_frame_._size = encoded_image._size;
        encoded_frame_._length = encoded_image._length;
        memcpy(encoded_frame_._buffer, encoded_image._buffer,
               encoded_image._length);
      }
    }
    picture_id_ = codec_specific_info->codecSpecific.VP8.pictureId;
    layer_sync_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
        codec_specific_info->codecSpecific.VP8.layerSync;
    temporal_layer_[codec_specific_info->codecSpecific.VP8.simulcastIdx] =
        codec_specific_info->codecSpecific.VP8.temporalIdx;
    return Result(Result::OK, encoded_image._timeStamp);
  }
  void GetLastEncodedFrameInfo(int* picture_id,
                               int* temporal_layer,
                               bool* layer_sync,
                               int stream) {
    *picture_id = picture_id_;
    *temporal_layer = temporal_layer_[stream];
    *layer_sync = layer_sync_[stream];
  }
  void GetLastEncodedKeyFrame(EncodedImage* encoded_key_frame) {
    *encoded_key_frame = encoded_key_frame_;
  }
  void GetLastEncodedFrame(EncodedImage* encoded_frame) {
    *encoded_frame = encoded_frame_;
  }

 private:
  EncodedImage encoded_key_frame_;
  EncodedImage encoded_frame_;
  int picture_id_;
  int temporal_layer_[kNumberOfSimulcastStreams];
  bool layer_sync_[kNumberOfSimulcastStreams];
};

class Vp8TestDecodedImageCallback : public DecodedImageCallback {
 public:
  Vp8TestDecodedImageCallback() : decoded_frames_(0) {}
  int32_t Decoded(VideoFrame& decoded_image) override {
    rtc::scoped_refptr<I420BufferInterface> i420_buffer =
        decoded_image.video_frame_buffer()->ToI420();
    for (int i = 0; i < decoded_image.width(); ++i) {
      EXPECT_NEAR(kColorY, i420_buffer->DataY()[i], 1);
    }

    // TODO(mikhal): Verify the difference between U,V and the original.
    for (int i = 0; i < i420_buffer->ChromaWidth(); ++i) {
      EXPECT_NEAR(kColorU, i420_buffer->DataU()[i], 4);
      EXPECT_NEAR(kColorV, i420_buffer->DataV()[i], 4);
    }
    decoded_frames_++;
    return 0;
  }
  int32_t Decoded(VideoFrame& decoded_image, int64_t decode_time_ms) override {
    RTC_NOTREACHED();
    return -1;
  }
  void Decoded(VideoFrame& decoded_image,
               rtc::Optional<int32_t> decode_time_ms,
               rtc::Optional<uint8_t> qp) override {
    Decoded(decoded_image);
  }
  int DecodedFrames() { return decoded_frames_; }

 private:
  int decoded_frames_;
};

class TestVp8Simulcast : public ::testing::Test {
 public:
  TestVp8Simulcast(VP8Encoder* encoder, VP8Decoder* decoder)
      : encoder_(encoder), decoder_(decoder) {}

  static void SetPlane(uint8_t* data,
                       uint8_t value,
                       int width,
                       int height,
                       int stride) {
    for (int i = 0; i < height; i++, data += stride) {
      // Set only the visible width of each row to |value| and zero the
      // padding between width and stride; this makes it easier to distinguish
      // the image size from the allocated frame size (which accounts for
      // stride).
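      // For example, width 4 with stride 6 produces rows of the form
      // {v, v, v, v, 0, 0}.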
      memset(data, value, width);
      memset(data + width, 0, stride - width);
    }
  }

  // Fills in an I420Buffer from |plane_colors|.
  static void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer,
                          int plane_colors[kNumOfPlanes]) {
    SetPlane(buffer->MutableDataY(), plane_colors[0], buffer->width(),
             buffer->height(), buffer->StrideY());

    SetPlane(buffer->MutableDataU(), plane_colors[1], buffer->ChromaWidth(),
             buffer->ChromaHeight(), buffer->StrideU());

    SetPlane(buffer->MutableDataV(), plane_colors[2], buffer->ChromaWidth(),
             buffer->ChromaHeight(), buffer->StrideV());
  }

  static void DefaultSettings(VideoCodec* settings,
                              const int* temporal_layer_profile) {
    RTC_CHECK(settings);
    memset(settings, 0, sizeof(VideoCodec));
    strncpy(settings->plName, "VP8", 4);
    settings->codecType = kVideoCodecVP8;
    // 96 to 127 dynamic payload types for video codecs.
    settings->plType = 120;
    settings->startBitrate = 300;
    settings->minBitrate = 30;
    settings->maxBitrate = 0;
    settings->maxFramerate = 30;
    settings->width = kDefaultWidth;
    settings->height = kDefaultHeight;
    settings->numberOfSimulcastStreams = kNumberOfSimulcastStreams;
    ASSERT_EQ(3, kNumberOfSimulcastStreams);
    settings->timing_frame_thresholds = {kDefaultTimingFramesDelayMs,
                                         kDefaultOutlierFrameSizePercent};
    ConfigureStream(kDefaultWidth / 4, kDefaultHeight / 4, kMaxBitrates[0],
                    kMinBitrates[0], kTargetBitrates[0],
                    &settings->simulcastStream[0], temporal_layer_profile[0]);
    ConfigureStream(kDefaultWidth / 2, kDefaultHeight / 2, kMaxBitrates[1],
                    kMinBitrates[1], kTargetBitrates[1],
                    &settings->simulcastStream[1], temporal_layer_profile[1]);
    ConfigureStream(kDefaultWidth, kDefaultHeight, kMaxBitrates[2],
                    kMinBitrates[2], kTargetBitrates[2],
                    &settings->simulcastStream[2], temporal_layer_profile[2]);
    settings->VP8()->resilience = kResilientStream;
    settings->VP8()->denoisingOn = true;
    settings->VP8()->errorConcealmentOn = false;
    settings->VP8()->automaticResizeOn = false;
    settings->VP8()->frameDroppingOn = true;
    settings->VP8()->keyFrameInterval = 3000;
  }

  static void ConfigureStream(int width,
                              int height,
                              int max_bitrate,
                              int min_bitrate,
                              int target_bitrate,
                              SimulcastStream* stream,
                              int num_temporal_layers) {
    assert(stream);
    stream->width = width;
    stream->height = height;
    stream->maxBitrate = max_bitrate;
    stream->minBitrate = min_bitrate;
    stream->targetBitrate = target_bitrate;
    stream->numberOfTemporalLayers = num_temporal_layers;
    stream->qpMax = 45;
  }

 protected:
  void SetUp() override { SetUpCodec(kDefaultTemporalLayerProfile); }

  void TearDown() override {
    encoder_->Release();
    decoder_->Release();
  }

  void SetUpCodec(const int* temporal_layer_profile) {
    encoder_->RegisterEncodeCompleteCallback(&encoder_callback_);
    decoder_->RegisterDecodeCompleteCallback(&decoder_callback_);
    DefaultSettings(&settings_, temporal_layer_profile);
    SetUpRateAllocator();
    EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
    EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1));
    input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight);
    input_buffer_->InitializeData();
    input_frame_.reset(
        new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
  }

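  // Note: the SimulcastRateAllocator takes ownership of the
  // TemporalLayersFactory (via the unique_ptr), while |settings_| keeps a
  // non-owning pointer to the same instance.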
  void SetUpRateAllocator() {
    TemporalLayersFactory* tl_factory = new TemporalLayersFactory();
    rate_allocator_.reset(new SimulcastRateAllocator(
        settings_, std::unique_ptr<TemporalLayersFactory>(tl_factory)));
    settings_.VP8()->tl_factory = tl_factory;
  }

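  // Converts |bitrate_kbps| to bps and lets the rate allocator split it
  // across the simulcast streams and temporal layers before handing the
  // allocation to the encoder.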
  void SetRates(uint32_t bitrate_kbps, uint32_t fps) {
    encoder_->SetRateAllocation(
        rate_allocator_->GetAllocation(bitrate_kbps * 1000, fps), fps);
  }

  void ExpectStreams(FrameType frame_type, int expected_video_streams) {
    ASSERT_GE(expected_video_streams, 0);
    ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
    if (expected_video_streams >= 1) {
      EXPECT_CALL(
          encoder_callback_,
          OnEncodedImage(
              AllOf(Field(&EncodedImage::_frameType, frame_type),
                    Field(&EncodedImage::_encodedWidth, kDefaultWidth / 4),
                    Field(&EncodedImage::_encodedHeight, kDefaultHeight / 4)),
              _, _))
          .Times(1)
          .WillRepeatedly(Return(EncodedImageCallback::Result(
              EncodedImageCallback::Result::OK, 0)));
    }
    if (expected_video_streams >= 2) {
      EXPECT_CALL(
          encoder_callback_,
          OnEncodedImage(
              AllOf(Field(&EncodedImage::_frameType, frame_type),
                    Field(&EncodedImage::_encodedWidth, kDefaultWidth / 2),
                    Field(&EncodedImage::_encodedHeight, kDefaultHeight / 2)),
              _, _))
          .Times(1)
          .WillRepeatedly(Return(EncodedImageCallback::Result(
              EncodedImageCallback::Result::OK, 0)));
    }
    if (expected_video_streams >= 3) {
      EXPECT_CALL(
          encoder_callback_,
          OnEncodedImage(
              AllOf(Field(&EncodedImage::_frameType, frame_type),
                    Field(&EncodedImage::_encodedWidth, kDefaultWidth),
                    Field(&EncodedImage::_encodedHeight, kDefaultHeight)),
              _, _))
          .Times(1)
          .WillRepeatedly(Return(EncodedImageCallback::Result(
              EncodedImageCallback::Result::OK, 0)));
    }
  }

  void VerifyTemporalIdxAndSyncForAllSpatialLayers(
      Vp8TestEncodedImageCallback* encoder_callback,
      const int* expected_temporal_idx,
      const bool* expected_layer_sync,
      int num_spatial_layers) {
    int picture_id = -1;
    int temporal_layer = -1;
    bool layer_sync = false;
    for (int i = 0; i < num_spatial_layers; i++) {
      encoder_callback->GetLastEncodedFrameInfo(&picture_id, &temporal_layer,
                                                &layer_sync, i);
      EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
      EXPECT_EQ(expected_layer_sync[i], layer_sync);
    }
  }

  // We currently expect all active streams to generate a key frame even
  // though a key frame was only requested for some of them.
  void TestKeyFrameRequestsOnAllStreams() {
    SetRates(kMaxBitrates[2], 30);  // To get all three streams.
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
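    // Advance the timestamp by 3000 ticks of the 90 kHz RTP clock, i.e. one
    // frame interval at 30 fps; the same step is used throughout these tests.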
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    frame_types[0] = kVideoFrameKey;
    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
    frame_types[1] = kVideoFrameKey;
    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
    frame_types[2] = kVideoFrameKey;
    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
    ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
  }

  void TestPaddingAllStreams() {
    // We should always encode the base layer.
    SetRates(kMinBitrates[0] - 1, 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 1);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 1);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
  }

  void TestPaddingTwoStreams() {
    // We have just enough to get only the first stream and padding for two.
    SetRates(kMinBitrates[0], 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 1);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 1);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
  }

  void TestPaddingTwoStreamsOneMaxedOut() {
    // We are just below the limit for sending the second stream, so we should
    // get the first stream maxed out (at |maxBitrate|) and padding for two.
    SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 1);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 1);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
  }

  void TestPaddingOneStream() {
    // We have just enough to send two streams, so padding for one stream.
    SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 2);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 2);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
  }

  void TestPaddingOneStreamTwoMaxedOut() {
    // We are just below the limit for sending the third stream, so we should
    // get the first stream's rate maxed out at |targetBitrate| and the second
    // at |maxBitrate|.
    SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 2);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 2);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
  }

  void TestSendAllStreams() {
    // We have just enough to send all streams.
    SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 3);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 3);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
  }

  void TestDisablingStreams() {
    // We should get three media streams.
    SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    ExpectStreams(kVideoFrameKey, 3);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    ExpectStreams(kVideoFrameDelta, 3);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    // We should only get two streams and padding for one.
    SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
    ExpectStreams(kVideoFrameDelta, 2);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    // We should only get the first stream and padding for two.
    SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
    ExpectStreams(kVideoFrameDelta, 1);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    // We don't have enough bitrate for the thumbnail stream, but we should
    // get it anyway with the current configuration.
    SetRates(kTargetBitrates[0] - 1, 30);
    ExpectStreams(kVideoFrameDelta, 1);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    // We should only get two streams and padding for one.
    SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30);
    // We get a key frame because a new stream is being enabled.
    ExpectStreams(kVideoFrameKey, 2);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    // We should get all three streams.
    SetRates(kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30);
    // We get a key frame because a new stream is being enabled.
    ExpectStreams(kVideoFrameKey, 3);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
  }

  void SwitchingToOneStream(int width, int height) {
    // Disable all streams except the last and set the bitrate of the last to
    // 100 kbps. This verifies the way GTP switches to screenshare mode.
    settings_.VP8()->numberOfTemporalLayers = 1;
    settings_.maxBitrate = 100;
    settings_.startBitrate = 100;
    settings_.width = width;
    settings_.height = height;
    for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) {
      settings_.simulcastStream[i].maxBitrate = 0;
      settings_.simulcastStream[i].width = settings_.width;
      settings_.simulcastStream[i].height = settings_.height;
    }
    // Set the input image to the new resolution.
    input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
    input_buffer_->InitializeData();

    input_frame_.reset(
        new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));

    // The for loop above did not set the bitrate of the highest layer.
    settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1]
        .maxBitrate = 0;
    // The highest layer has to correspond to the non-simulcast resolution.
    settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width =
        settings_.width;
    settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height =
        settings_.height;
    SetUpRateAllocator();
    EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));

    // Encode one frame and verify.
    SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    EXPECT_CALL(
        encoder_callback_,
        OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
                             Field(&EncodedImage::_encodedWidth, width),
                             Field(&EncodedImage::_encodedHeight, height)),
                       _, _))
        .Times(1)
        .WillRepeatedly(Return(
            EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0)));
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));

    // Switch back.
    DefaultSettings(&settings_, kDefaultTemporalLayerProfile);
    // Start at the lowest bitrate needed to enable the base stream.
    settings_.startBitrate = kMinBitrates[0];
    SetUpRateAllocator();
    EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
    SetRates(settings_.startBitrate, 30);
    ExpectStreams(kVideoFrameKey, 1);
    // Resize |input_frame_| to the new resolution.
    input_buffer_ = I420Buffer::Create(settings_.width, settings_.height);
    input_buffer_->InitializeData();
    input_frame_.reset(
        new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types));
  }

  void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); }

  void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); }

  void TestSwitchingToOneSmallStream() { SwitchingToOneStream(4, 4); }

  // Test the layer pattern and sync flag for various spatial-temporal
  // patterns. 3-3-3 pattern: 3 temporal layers for all spatial streams, so
  // the same temporal_layer id and layer_sync flag are expected for all
  // streams.
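  // With three temporal layers the pattern repeats every four frames as
  // TL0, TL2, TL1, TL2; the six frames below cover one and a half periods,
  // with layer sync expected only on the first pass through each layer.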
  void TestSpatioTemporalLayers333PatternEncoder() {
    Vp8TestEncodedImageCallback encoder_callback;
    encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
    SetRates(kMaxBitrates[2], 30);  // To get all three streams.

    int expected_temporal_idx[3] = {-1, -1, -1};
    bool expected_layer_sync[3] = {false, false, false};

    // First frame: #0.
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
    SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

    // Next frame: #1.
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
    SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

    // Next frame: #2.
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx);
    SetExpectedValues3<bool>(true, true, true, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

    // Next frame: #3.
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
    SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

    // Next frame: #4.
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx);
    SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

    // Next frame: #5.
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx);
    SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
  }

  // Test the layer pattern and sync flag for various spatial-temporal
  // patterns. 3-2-1 pattern: 3 temporal layers for the lowest resolution,
  // 2 for the middle, and 1 temporal layer for the highest resolution.
  // For this profile, we expect the temporal index pattern to be:
  // 1st stream: 0, 2, 1, 2, ...
  // 2nd stream: 0, 1, 0, 1, ...
  // 3rd stream: -1, -1, -1, -1, ...
  // Regarding the 3rd stream, note that a stream/encoder with 1 temporal
  // layer should always have the temporal layer idx set to
  // kNoTemporalIdx = -1. Since CodecSpecificInfoVP8.temporalIdx is uint8_t,
  // this will wrap to 255.
  // TODO(marpan): Although this seems safe for now, we should fix this.
  void TestSpatioTemporalLayers321PatternEncoder() {
    int temporal_layer_profile[3] = {3, 2, 1};
    SetUpCodec(temporal_layer_profile);
    Vp8TestEncodedImageCallback encoder_callback;
    encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
    SetRates(kMaxBitrates[2], 30);  // To get all three streams.

    int expected_temporal_idx[3] = {-1, -1, -1};
    bool expected_layer_sync[3] = {false, false, false};

    // First frame: #0.
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
    SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

    // Next frame: #1.
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
    SetExpectedValues3<bool>(true, true, false, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

    // Next frame: #2.
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx);
    SetExpectedValues3<bool>(true, false, false, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

    // Next frame: #3.
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
    SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

    // Next frame: #4.
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx);
    SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);

    // Next frame: #5.
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));
    SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx);
    SetExpectedValues3<bool>(false, false, false, expected_layer_sync);
    VerifyTemporalIdxAndSyncForAllSpatialLayers(
        &encoder_callback, expected_temporal_idx, expected_layer_sync, 3);
  }

  void TestStrideEncodeDecode() {
    Vp8TestEncodedImageCallback encoder_callback;
    Vp8TestDecodedImageCallback decoder_callback;
    encoder_->RegisterEncodeCompleteCallback(&encoder_callback);
    decoder_->RegisterDecodeCompleteCallback(&decoder_callback);

    SetRates(kMaxBitrates[2], 30);  // To get all three streams.
    // Set up two (possibly) problematic use cases for stride:
    // 1. stride > width, 2. stride_uv != stride_y / 2.
    int stride_y = kDefaultWidth + 20;
    int stride_uv = ((kDefaultWidth + 1) / 2) + 5;
    input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y,
                                       stride_uv, stride_uv);
    input_frame_.reset(
        new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0));

    // Set color.
    int plane_offset[kNumOfPlanes];
    plane_offset[kYPlane] = kColorY;
    plane_offset[kUPlane] = kColorU;
    plane_offset[kVPlane] = kColorV;
    CreateImage(input_buffer_, plane_offset);

    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));

    // Change color.
    plane_offset[kYPlane] += 1;
    plane_offset[kUPlane] += 1;
    plane_offset[kVPlane] += 1;
    CreateImage(input_buffer_, plane_offset);
    input_frame_->set_timestamp(input_frame_->timestamp() + 3000);
    EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL));

    EncodedImage encoded_frame;
    // Only the first frame was encoded as a key frame, so that is the frame
    // retrieved here.
    encoder_callback.GetLastEncodedKeyFrame(&encoded_frame);
    EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL));
    encoder_callback.GetLastEncodedFrame(&encoded_frame);
    decoder_->Decode(encoded_frame, false, NULL);
    EXPECT_EQ(2, decoder_callback.DecodedFrames());
  }

  std::unique_ptr<VP8Encoder> encoder_;
  MockEncodedImageCallback encoder_callback_;
  std::unique_ptr<VP8Decoder> decoder_;
  MockDecodedImageCallback decoder_callback_;
  VideoCodec settings_;
  rtc::scoped_refptr<I420Buffer> input_buffer_;
  std::unique_ptr<VideoFrame> input_frame_;
  std::unique_ptr<SimulcastRateAllocator> rate_allocator_;
};

}  // namespace testing
}  // namespace webrtc

#endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_
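
Note: this header is intended to be #included from a codec-specific test file that subclasses TestVp8Simulcast and forwards to the test methods defined above. A minimal sketch of such a harness follows; it assumes the VP8Encoder::Create() and VP8Decoder::Create() factories declared in vp8.h (already included at the top of this header), and the fixture and test names shown are illustrative:

class TestVp8Impl : public TestVp8Simulcast {
 public:
  // The fixture owns the codec instances via the base class's unique_ptrs.
  TestVp8Impl()
      : TestVp8Simulcast(VP8Encoder::Create(), VP8Decoder::Create()) {}
};

TEST_F(TestVp8Impl, TestKeyFrameRequestsOnAllStreams) {
  TestVp8Simulcast::TestKeyFrameRequestsOnAllStreams();
}

TEST_F(TestVp8Impl, TestPaddingAllStreams) {
  TestVp8Simulcast::TestPaddingAllStreams();
}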