OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 218 matching lines...)
229 int stride) { | 229 int stride) { |
230 for (int i = 0; i < height; i++, data += stride) { | 230 for (int i = 0; i < height; i++, data += stride) { |
231 // Setting allocated area to zero - setting only image size to | 231 // Setting allocated area to zero - setting only image size to |
232 // requested values - will make it easier to distinguish between image | 232 // requested values - will make it easier to distinguish between image |
233 // size and frame size (accounting for stride). | 233 // size and frame size (accounting for stride). |
234 memset(data, value, width); | 234 memset(data, value, width); |
235 memset(data + width, 0, stride - width); | 235 memset(data + width, 0, stride - width); |
236 } | 236 } |
237 } | 237 } |
238 | 238 |
239 // Fills in an I420Buffer from |plane_colors|. | 239 // Fills in a VideoFrameBuffer from |plane_colors|. |
240 static void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer, | 240 static void CreateImage(const rtc::scoped_refptr<VideoFrameBuffer>& buffer, |
241 int plane_colors[kNumOfPlanes]) { | 241 int plane_colors[kNumOfPlanes]) { |
242 int width = buffer->width(); | 242 int width = buffer->width(); |
243 int height = buffer->height(); | 243 int height = buffer->height(); |
244 int chroma_width = (width + 1) / 2; | 244 int chroma_width = (width + 1) / 2; |
245 int chroma_height = (height + 1) / 2; | 245 int chroma_height = (height + 1) / 2; |
246 | 246 |
247 SetPlane(buffer->MutableDataY(), plane_colors[0], | 247 SetPlane(buffer->MutableDataY(), plane_colors[0], |
248 width, height, buffer->StrideY()); | 248 width, height, buffer->StrideY()); |
249 | 249 |
250 SetPlane(buffer->MutableDataU(), plane_colors[1], | 250 SetPlane(buffer->MutableDataU(), plane_colors[1], |
(...skipping 59 matching lines...)
310 protected: | 310 protected: |
311 virtual void SetUp() { SetUpCodec(kDefaultTemporalLayerProfile); } | 311 virtual void SetUp() { SetUpCodec(kDefaultTemporalLayerProfile); } |
312 | 312 |
313 virtual void SetUpCodec(const int* temporal_layer_profile) { | 313 virtual void SetUpCodec(const int* temporal_layer_profile) { |
314 encoder_->RegisterEncodeCompleteCallback(&encoder_callback_); | 314 encoder_->RegisterEncodeCompleteCallback(&encoder_callback_); |
315 decoder_->RegisterDecodeCompleteCallback(&decoder_callback_); | 315 decoder_->RegisterDecodeCompleteCallback(&decoder_callback_); |
316 DefaultSettings(&settings_, temporal_layer_profile); | 316 DefaultSettings(&settings_, temporal_layer_profile); |
317 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); | 317 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); |
318 EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1)); | 318 EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1)); |
319 int half_width = (kDefaultWidth + 1) / 2; | 319 int half_width = (kDefaultWidth + 1) / 2; |
320 input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, | 320 input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth, |
321 kDefaultWidth, half_width, half_width); | 321 half_width, half_width); |
322 input_buffer_->InitializeData(); | 322 memset(input_frame_.video_frame_buffer()->MutableDataY(), 0, |
323 input_frame_.reset( | 323 input_frame_.allocated_size(kYPlane)); |
324 new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0)); | 324 memset(input_frame_.video_frame_buffer()->MutableDataU(), 0, |
| 325 input_frame_.allocated_size(kUPlane)); |
| 326 memset(input_frame_.video_frame_buffer()->MutableDataV(), 0, |
| 327 input_frame_.allocated_size(kVPlane)); |
325 } | 328 } |
326 | 329 |
327 virtual void TearDown() { | 330 virtual void TearDown() { |
328 encoder_->Release(); | 331 encoder_->Release(); |
329 decoder_->Release(); | 332 decoder_->Release(); |
330 } | 333 } |
331 | 334 |
332 void ExpectStreams(FrameType frame_type, int expected_video_streams) { | 335 void ExpectStreams(FrameType frame_type, int expected_video_streams) { |
333 ASSERT_GE(expected_video_streams, 0); | 336 ASSERT_GE(expected_video_streams, 0); |
334 ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams); | 337 ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams); |
(...skipping 51 matching lines...)
386 } | 389 } |
387 } | 390 } |
388 | 391 |
389 // We currently expect all active streams to generate a key frame even though | 392 // We currently expect all active streams to generate a key frame even though |
390 // a key frame was only requested for some of them. | 393 // a key frame was only requested for some of them. |
391 void TestKeyFrameRequestsOnAllStreams() { | 394 void TestKeyFrameRequestsOnAllStreams() { |
392 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 395 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
393 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 396 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
394 kVideoFrameDelta); | 397 kVideoFrameDelta); |
395 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); | 398 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); |
396 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 399 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
397 | 400 |
398 ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams); | 401 ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams); |
399 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 402 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
400 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 403 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
401 | 404 |
402 frame_types[0] = kVideoFrameKey; | 405 frame_types[0] = kVideoFrameKey; |
403 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); | 406 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); |
404 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 407 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
405 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 408 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
406 | 409 |
407 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); | 410 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); |
408 frame_types[1] = kVideoFrameKey; | 411 frame_types[1] = kVideoFrameKey; |
409 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); | 412 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); |
410 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 413 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
411 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 414 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
412 | 415 |
413 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); | 416 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); |
414 frame_types[2] = kVideoFrameKey; | 417 frame_types[2] = kVideoFrameKey; |
415 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); | 418 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); |
416 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 419 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
417 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 420 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
418 | 421 |
419 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); | 422 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); |
420 ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams); | 423 ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams); |
421 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 424 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
422 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 425 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
423 } | 426 } |
424 | 427 |
425 void TestPaddingAllStreams() { | 428 void TestPaddingAllStreams() { |
426 // We should always encode the base layer. | 429 // We should always encode the base layer. |
427 encoder_->SetRates(kMinBitrates[0] - 1, 30); | 430 encoder_->SetRates(kMinBitrates[0] - 1, 30); |
428 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 431 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
429 kVideoFrameDelta); | 432 kVideoFrameDelta); |
430 ExpectStreams(kVideoFrameKey, 1); | 433 ExpectStreams(kVideoFrameKey, 1); |
431 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 434 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
432 | 435 |
433 ExpectStreams(kVideoFrameDelta, 1); | 436 ExpectStreams(kVideoFrameDelta, 1); |
434 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 437 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
435 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 438 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
436 } | 439 } |
437 | 440 |
438 void TestPaddingTwoStreams() { | 441 void TestPaddingTwoStreams() { |
439 // We have just enough to get only the first stream and padding for two. | 442 // We have just enough to get only the first stream and padding for two. |
440 encoder_->SetRates(kMinBitrates[0], 30); | 443 encoder_->SetRates(kMinBitrates[0], 30); |
441 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 444 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
442 kVideoFrameDelta); | 445 kVideoFrameDelta); |
443 ExpectStreams(kVideoFrameKey, 1); | 446 ExpectStreams(kVideoFrameKey, 1); |
444 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 447 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
445 | 448 |
446 ExpectStreams(kVideoFrameDelta, 1); | 449 ExpectStreams(kVideoFrameDelta, 1); |
447 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 450 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
448 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 451 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
449 } | 452 } |
450 | 453 |
451 void TestPaddingTwoStreamsOneMaxedOut() { | 454 void TestPaddingTwoStreamsOneMaxedOut() { |
452 // We are just below limit of sending second stream, so we should get | 455 // We are just below limit of sending second stream, so we should get |
453 // the first stream maxed out (at |maxBitrate|), and padding for two. | 456 // the first stream maxed out (at |maxBitrate|), and padding for two. |
454 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30); | 457 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30); |
455 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 458 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
456 kVideoFrameDelta); | 459 kVideoFrameDelta); |
457 ExpectStreams(kVideoFrameKey, 1); | 460 ExpectStreams(kVideoFrameKey, 1); |
458 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 461 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
459 | 462 |
460 ExpectStreams(kVideoFrameDelta, 1); | 463 ExpectStreams(kVideoFrameDelta, 1); |
461 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 464 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
462 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 465 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
463 } | 466 } |
464 | 467 |
465 void TestPaddingOneStream() { | 468 void TestPaddingOneStream() { |
466 // We have just enough to send two streams, so padding for one stream. | 469 // We have just enough to send two streams, so padding for one stream. |
467 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1], 30); | 470 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1], 30); |
468 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 471 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
469 kVideoFrameDelta); | 472 kVideoFrameDelta); |
470 ExpectStreams(kVideoFrameKey, 2); | 473 ExpectStreams(kVideoFrameKey, 2); |
471 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 474 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
472 | 475 |
473 ExpectStreams(kVideoFrameDelta, 2); | 476 ExpectStreams(kVideoFrameDelta, 2); |
474 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 477 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
475 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 478 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
476 } | 479 } |
477 | 480 |
478 void TestPaddingOneStreamTwoMaxedOut() { | 481 void TestPaddingOneStreamTwoMaxedOut() { |
479 // We are just below limit of sending third stream, so we should get | 482 // We are just below limit of sending third stream, so we should get |
480 // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|. | 483 // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|. |
481 encoder_->SetRates( | 484 encoder_->SetRates( |
482 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30); | 485 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30); |
483 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 486 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
484 kVideoFrameDelta); | 487 kVideoFrameDelta); |
485 ExpectStreams(kVideoFrameKey, 2); | 488 ExpectStreams(kVideoFrameKey, 2); |
486 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 489 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
487 | 490 |
488 ExpectStreams(kVideoFrameDelta, 2); | 491 ExpectStreams(kVideoFrameDelta, 2); |
489 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 492 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
490 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 493 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
491 } | 494 } |
492 | 495 |
493 void TestSendAllStreams() { | 496 void TestSendAllStreams() { |
494 // We have just enough to send all streams. | 497 // We have just enough to send all streams. |
495 encoder_->SetRates( | 498 encoder_->SetRates( |
496 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30); | 499 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30); |
497 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 500 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
498 kVideoFrameDelta); | 501 kVideoFrameDelta); |
499 ExpectStreams(kVideoFrameKey, 3); | 502 ExpectStreams(kVideoFrameKey, 3); |
500 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 503 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
501 | 504 |
502 ExpectStreams(kVideoFrameDelta, 3); | 505 ExpectStreams(kVideoFrameDelta, 3); |
503 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 506 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
504 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 507 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
505 } | 508 } |
506 | 509 |
507 void TestDisablingStreams() { | 510 void TestDisablingStreams() { |
508 // We should get three media streams. | 511 // We should get three media streams. |
509 encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30); | 512 encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30); |
510 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 513 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
511 kVideoFrameDelta); | 514 kVideoFrameDelta); |
512 ExpectStreams(kVideoFrameKey, 3); | 515 ExpectStreams(kVideoFrameKey, 3); |
513 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 516 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
514 | 517 |
515 ExpectStreams(kVideoFrameDelta, 3); | 518 ExpectStreams(kVideoFrameDelta, 3); |
516 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 519 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
517 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 520 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
518 | 521 |
519 // We should only get two streams and padding for one. | 522 // We should only get two streams and padding for one. |
520 encoder_->SetRates( | 523 encoder_->SetRates( |
521 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30); | 524 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30); |
522 ExpectStreams(kVideoFrameDelta, 2); | 525 ExpectStreams(kVideoFrameDelta, 2); |
523 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 526 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
524 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 527 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
525 | 528 |
526 // We should only get the first stream and padding for two. | 529 // We should only get the first stream and padding for two. |
527 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30); | 530 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30); |
528 ExpectStreams(kVideoFrameDelta, 1); | 531 ExpectStreams(kVideoFrameDelta, 1); |
529 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 532 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
530 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 533 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
531 | 534 |
532 // We don't have enough bitrate for the thumbnail stream, but we should get | 535 // We don't have enough bitrate for the thumbnail stream, but we should get |
533 // it anyway with current configuration. | 536 // it anyway with current configuration. |
534 encoder_->SetRates(kTargetBitrates[0] - 1, 30); | 537 encoder_->SetRates(kTargetBitrates[0] - 1, 30); |
535 ExpectStreams(kVideoFrameDelta, 1); | 538 ExpectStreams(kVideoFrameDelta, 1); |
536 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 539 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
537 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 540 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
538 | 541 |
539 // We should only get two streams and padding for one. | 542 // We should only get two streams and padding for one. |
540 encoder_->SetRates( | 543 encoder_->SetRates( |
541 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30); | 544 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30); |
542 // We get a key frame because a new stream is being enabled. | 545 // We get a key frame because a new stream is being enabled. |
543 ExpectStreams(kVideoFrameKey, 2); | 546 ExpectStreams(kVideoFrameKey, 2); |
544 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 547 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
545 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 548 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
546 | 549 |
547 // We should get all three streams. | 550 // We should get all three streams. |
548 encoder_->SetRates( | 551 encoder_->SetRates( |
549 kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30); | 552 kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30); |
550 // We get a key frame because a new stream is being enabled. | 553 // We get a key frame because a new stream is being enabled. |
551 ExpectStreams(kVideoFrameKey, 3); | 554 ExpectStreams(kVideoFrameKey, 3); |
552 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 555 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
553 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 556 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
554 } | 557 } |
555 | 558 |
556 void SwitchingToOneStream(int width, int height) { | 559 void SwitchingToOneStream(int width, int height) { |
557 // Disable all streams except the last and set the bitrate of the last to | 560 // Disable all streams except the last and set the bitrate of the last to |
558 // 100 kbps. This verifies the way GTP switches to screenshare mode. | 561 // 100 kbps. This verifies the way GTP switches to screenshare mode. |
559 settings_.codecSpecific.VP8.numberOfTemporalLayers = 1; | 562 settings_.codecSpecific.VP8.numberOfTemporalLayers = 1; |
560 settings_.maxBitrate = 100; | 563 settings_.maxBitrate = 100; |
561 settings_.startBitrate = 100; | 564 settings_.startBitrate = 100; |
562 settings_.width = width; | 565 settings_.width = width; |
563 settings_.height = height; | 566 settings_.height = height; |
564 for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) { | 567 for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) { |
565 settings_.simulcastStream[i].maxBitrate = 0; | 568 settings_.simulcastStream[i].maxBitrate = 0; |
566 settings_.simulcastStream[i].width = settings_.width; | 569 settings_.simulcastStream[i].width = settings_.width; |
567 settings_.simulcastStream[i].height = settings_.height; | 570 settings_.simulcastStream[i].height = settings_.height; |
568 } | 571 } |
569 // Setting input image to new resolution. | 572 // Setting input image to new resolution. |
570 int half_width = (settings_.width + 1) / 2; | 573 int half_width = (settings_.width + 1) / 2; |
571 input_buffer_ = I420Buffer::Create(settings_.width, settings_.height, | 574 input_frame_.CreateEmptyFrame(settings_.width, settings_.height, |
572 settings_.width, half_width, half_width); | 575 settings_.width, half_width, half_width); |
573 input_buffer_->InitializeData(); | 576 memset(input_frame_.video_frame_buffer()->MutableDataY(), 0, |
574 | 577 input_frame_.allocated_size(kYPlane)); |
575 input_frame_.reset( | 578 memset(input_frame_.video_frame_buffer()->MutableDataU(), 0, |
576 new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0)); | 579 input_frame_.allocated_size(kUPlane)); |
| 580 memset(input_frame_.video_frame_buffer()->MutableDataV(), 0, |
| 581 input_frame_.allocated_size(kVPlane)); |
577 | 582 |
578 // The for loop above did not set the bitrate of the highest layer. | 583 // The for loop above did not set the bitrate of the highest layer. |
579 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1] | 584 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1] |
580 .maxBitrate = 0; | 585 .maxBitrate = 0; |
581 // The highest layer has to correspond to the non-simulcast resolution. | 586 // The highest layer has to correspond to the non-simulcast resolution. |
582 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width = | 587 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width = |
583 settings_.width; | 588 settings_.width; |
584 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height = | 589 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height = |
585 settings_.height; | 590 settings_.height; |
586 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); | 591 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); |
587 | 592 |
588 // Encode one frame and verify. | 593 // Encode one frame and verify. |
589 encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30); | 594 encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30); |
590 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 595 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
591 kVideoFrameDelta); | 596 kVideoFrameDelta); |
592 EXPECT_CALL( | 597 EXPECT_CALL( |
593 encoder_callback_, | 598 encoder_callback_, |
594 OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey), | 599 OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey), |
595 Field(&EncodedImage::_encodedWidth, width), | 600 Field(&EncodedImage::_encodedWidth, width), |
596 Field(&EncodedImage::_encodedHeight, height)), | 601 Field(&EncodedImage::_encodedHeight, height)), |
597 _, _)) | 602 _, _)) |
598 .Times(1) | 603 .Times(1) |
599 .WillRepeatedly(Return( | 604 .WillRepeatedly(Return( |
600 EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0))); | 605 EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0))); |
601 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 606 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
602 | 607 |
603 // Switch back. | 608 // Switch back. |
604 DefaultSettings(&settings_, kDefaultTemporalLayerProfile); | 609 DefaultSettings(&settings_, kDefaultTemporalLayerProfile); |
605 // Start at the lowest bitrate for enabling base stream. | 610 // Start at the lowest bitrate for enabling base stream. |
606 settings_.startBitrate = kMinBitrates[0]; | 611 settings_.startBitrate = kMinBitrates[0]; |
607 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); | 612 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); |
608 encoder_->SetRates(settings_.startBitrate, 30); | 613 encoder_->SetRates(settings_.startBitrate, 30); |
609 ExpectStreams(kVideoFrameKey, 1); | 614 ExpectStreams(kVideoFrameKey, 1); |
610 // Resize |input_frame_| to the new resolution. | 615 // Resize |input_frame_| to the new resolution. |
611 half_width = (settings_.width + 1) / 2; | 616 half_width = (settings_.width + 1) / 2; |
612 input_buffer_ = I420Buffer::Create(settings_.width, settings_.height, | 617 input_frame_.CreateEmptyFrame(settings_.width, settings_.height, |
613 settings_.width, half_width, half_width); | 618 settings_.width, half_width, half_width); |
614 input_buffer_->InitializeData(); | 619 memset(input_frame_.video_frame_buffer()->MutableDataY(), 0, |
615 input_frame_.reset( | 620 input_frame_.allocated_size(kYPlane)); |
616 new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0)); | 621 memset(input_frame_.video_frame_buffer()->MutableDataU(), 0, |
617 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); | 622 input_frame_.allocated_size(kUPlane)); |
| 623 memset(input_frame_.video_frame_buffer()->MutableDataV(), 0, |
| 624 input_frame_.allocated_size(kVPlane)); |
| 625 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); |
618 } | 626 } |
619 | 627 |
620 void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); } | 628 void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); } |
621 | 629 |
622 void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); } | 630 void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); } |
623 | 631 |
624 void TestSwitchingToOneSmallStream() { SwitchingToOneStream(4, 4); } | 632 void TestSwitchingToOneSmallStream() { SwitchingToOneStream(4, 4); } |
625 | 633 |
626 void TestRPSIEncoder() { | 634 void TestRPSIEncoder() { |
627 Vp8TestEncodedImageCallback encoder_callback; | 635 Vp8TestEncodedImageCallback encoder_callback; |
628 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); | 636 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); |
629 | 637 |
630 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 638 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
631 | 639 |
632 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 640 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
633 int picture_id = -1; | 641 int picture_id = -1; |
634 int temporal_layer = -1; | 642 int temporal_layer = -1; |
635 bool layer_sync = false; | 643 bool layer_sync = false; |
636 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 644 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
637 &layer_sync, 0); | 645 &layer_sync, 0); |
638 EXPECT_EQ(0, temporal_layer); | 646 EXPECT_EQ(0, temporal_layer); |
639 EXPECT_TRUE(layer_sync); | 647 EXPECT_TRUE(layer_sync); |
640 int key_frame_picture_id = picture_id; | 648 int key_frame_picture_id = picture_id; |
641 | 649 |
642 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 650 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
643 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 651 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
644 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 652 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
645 &layer_sync, 0); | 653 &layer_sync, 0); |
646 EXPECT_EQ(2, temporal_layer); | 654 EXPECT_EQ(2, temporal_layer); |
647 EXPECT_TRUE(layer_sync); | 655 EXPECT_TRUE(layer_sync); |
648 | 656 |
649 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 657 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
650 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 658 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
651 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 659 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
652 &layer_sync, 0); | 660 &layer_sync, 0); |
653 EXPECT_EQ(1, temporal_layer); | 661 EXPECT_EQ(1, temporal_layer); |
654 EXPECT_TRUE(layer_sync); | 662 EXPECT_TRUE(layer_sync); |
655 | 663 |
656 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 664 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
657 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 665 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
658 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 666 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
659 &layer_sync, 0); | 667 &layer_sync, 0); |
660 EXPECT_EQ(2, temporal_layer); | 668 EXPECT_EQ(2, temporal_layer); |
661 EXPECT_FALSE(layer_sync); | 669 EXPECT_FALSE(layer_sync); |
662 | 670 |
663 CodecSpecificInfo codec_specific; | 671 CodecSpecificInfo codec_specific; |
664 codec_specific.codecType = kVideoCodecVP8; | 672 codec_specific.codecType = kVideoCodecVP8; |
665 codec_specific.codecSpecific.VP8.hasReceivedRPSI = true; | 673 codec_specific.codecSpecific.VP8.hasReceivedRPSI = true; |
666 | 674 |
667 // Must match last key frame to trigger. | 675 // Must match last key frame to trigger. |
668 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id; | 676 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id; |
669 | 677 |
670 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 678 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
671 EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL)); | 679 EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL)); |
672 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 680 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
673 &layer_sync, 0); | 681 &layer_sync, 0); |
674 | 682 |
675 EXPECT_EQ(0, temporal_layer); | 683 EXPECT_EQ(0, temporal_layer); |
676 EXPECT_TRUE(layer_sync); | 684 EXPECT_TRUE(layer_sync); |
677 | 685 |
678 // Must match last key frame to trigger, test bad id. | 686 // Must match last key frame to trigger, test bad id. |
679 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id + 17; | 687 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id + 17; |
680 | 688 |
681 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 689 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
682 EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL)); | 690 EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL)); |
683 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 691 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
684 &layer_sync, 0); | 692 &layer_sync, 0); |
685 | 693 |
686 EXPECT_EQ(2, temporal_layer); | 694 EXPECT_EQ(2, temporal_layer); |
687 // The previous frame was a base layer sync (since it was a frame that | 695 // The previous frame was a base layer sync (since it was a frame that |
688 // only predicts from key frame and hence resets the temporal pattern), | 696 // only predicts from key frame and hence resets the temporal pattern), |
689 // so this frame (the next one) must have |layer_sync| set to true. | 697 // so this frame (the next one) must have |layer_sync| set to true. |
690 EXPECT_TRUE(layer_sync); | 698 EXPECT_TRUE(layer_sync); |
691 } | 699 } |
692 | 700 |
693 void TestRPSIEncodeDecode() { | 701 void TestRPSIEncodeDecode() { |
694 Vp8TestEncodedImageCallback encoder_callback; | 702 Vp8TestEncodedImageCallback encoder_callback; |
695 Vp8TestDecodedImageCallback decoder_callback; | 703 Vp8TestDecodedImageCallback decoder_callback; |
696 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); | 704 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); |
697 decoder_->RegisterDecodeCompleteCallback(&decoder_callback); | 705 decoder_->RegisterDecodeCompleteCallback(&decoder_callback); |
698 | 706 |
699 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 707 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
700 | 708 |
701 // Set color. | 709 // Set color. |
702 int plane_offset[kNumOfPlanes]; | 710 int plane_offset[kNumOfPlanes]; |
703 plane_offset[kYPlane] = kColorY; | 711 plane_offset[kYPlane] = kColorY; |
704 plane_offset[kUPlane] = kColorU; | 712 plane_offset[kUPlane] = kColorU; |
705 plane_offset[kVPlane] = kColorV; | 713 plane_offset[kVPlane] = kColorV; |
706 CreateImage(input_buffer_, plane_offset); | 714 CreateImage(input_frame_.video_frame_buffer(), plane_offset); |
707 | 715 |
708 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 716 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
709 int picture_id = -1; | 717 int picture_id = -1; |
710 int temporal_layer = -1; | 718 int temporal_layer = -1; |
711 bool layer_sync = false; | 719 bool layer_sync = false; |
712 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 720 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
713 &layer_sync, 0); | 721 &layer_sync, 0); |
714 EXPECT_EQ(0, temporal_layer); | 722 EXPECT_EQ(0, temporal_layer); |
715 EXPECT_TRUE(layer_sync); | 723 EXPECT_TRUE(layer_sync); |
716 int key_frame_picture_id = picture_id; | 724 int key_frame_picture_id = picture_id; |
717 | 725 |
718 // Change color. | 726 // Change color. |
719 plane_offset[kYPlane] += 1; | 727 plane_offset[kYPlane] += 1; |
720 plane_offset[kUPlane] += 1; | 728 plane_offset[kUPlane] += 1; |
721 plane_offset[kVPlane] += 1; | 729 plane_offset[kVPlane] += 1; |
722 CreateImage(input_buffer_, plane_offset); | 730 CreateImage(input_frame_.video_frame_buffer(), plane_offset); |
723 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 731 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
724 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 732 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
725 | 733 |
726 // Change color. | 734 // Change color. |
727 plane_offset[kYPlane] += 1; | 735 plane_offset[kYPlane] += 1; |
728 plane_offset[kUPlane] += 1; | 736 plane_offset[kUPlane] += 1; |
729 plane_offset[kVPlane] += 1; | 737 plane_offset[kVPlane] += 1; |
730 CreateImage(input_buffer_, plane_offset); | 738 CreateImage(input_frame_.video_frame_buffer(), plane_offset); |
731 | 739 |
732 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 740 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
733 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 741 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
734 | 742 |
735 // Change color. | 743 // Change color. |
736 plane_offset[kYPlane] += 1; | 744 plane_offset[kYPlane] += 1; |
737 plane_offset[kUPlane] += 1; | 745 plane_offset[kUPlane] += 1; |
738 plane_offset[kVPlane] += 1; | 746 plane_offset[kVPlane] += 1; |
739 CreateImage(input_buffer_, plane_offset); | 747 CreateImage(input_frame_.video_frame_buffer(), plane_offset); |
740 | 748 |
741 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 749 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
742 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 750 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
743 | 751 |
744 CodecSpecificInfo codec_specific; | 752 CodecSpecificInfo codec_specific; |
745 codec_specific.codecType = kVideoCodecVP8; | 753 codec_specific.codecType = kVideoCodecVP8; |
746 codec_specific.codecSpecific.VP8.hasReceivedRPSI = true; | 754 codec_specific.codecSpecific.VP8.hasReceivedRPSI = true; |
747 // Must match last key frame to trigger. | 755 // Must match last key frame to trigger. |
748 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id; | 756 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id; |
749 | 757 |
750 // Change color back to original. | 758 // Change color back to original. |
751 plane_offset[kYPlane] = kColorY; | 759 plane_offset[kYPlane] = kColorY; |
752 plane_offset[kUPlane] = kColorU; | 760 plane_offset[kUPlane] = kColorU; |
753 plane_offset[kVPlane] = kColorV; | 761 plane_offset[kVPlane] = kColorV; |
754 CreateImage(input_buffer_, plane_offset); | 762 CreateImage(input_frame_.video_frame_buffer(), plane_offset); |
755 | 763 |
756 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 764 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
757 EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL)); | 765 EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL)); |
758 | 766 |
759 EncodedImage encoded_frame; | 767 EncodedImage encoded_frame; |
760 encoder_callback.GetLastEncodedKeyFrame(&encoded_frame); | 768 encoder_callback.GetLastEncodedKeyFrame(&encoded_frame); |
761 decoder_->Decode(encoded_frame, false, NULL); | 769 decoder_->Decode(encoded_frame, false, NULL); |
762 encoder_callback.GetLastEncodedFrame(&encoded_frame); | 770 encoder_callback.GetLastEncodedFrame(&encoded_frame); |
763 decoder_->Decode(encoded_frame, false, NULL); | 771 decoder_->Decode(encoded_frame, false, NULL); |
764 EXPECT_EQ(2, decoder_callback.DecodedFrames()); | 772 EXPECT_EQ(2, decoder_callback.DecodedFrames()); |
765 } | 773 } |
766 | 774 |
767 // Test the layer pattern and sync flag for various spatial-temporal patterns. | 775 // Test the layer pattern and sync flag for various spatial-temporal patterns. |
768 // 3-3-3 pattern: 3 temporal layers for all spatial streams, so same | 776 // 3-3-3 pattern: 3 temporal layers for all spatial streams, so same |
769 // temporal_layer id and layer_sync is expected for all streams. | 777 // temporal_layer id and layer_sync is expected for all streams. |
770 void TestSaptioTemporalLayers333PatternEncoder() { | 778 void TestSaptioTemporalLayers333PatternEncoder() { |
771 Vp8TestEncodedImageCallback encoder_callback; | 779 Vp8TestEncodedImageCallback encoder_callback; |
772 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); | 780 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); |
773 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 781 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
774 | 782 |
775 int expected_temporal_idx[3] = {-1, -1, -1}; | 783 int expected_temporal_idx[3] = {-1, -1, -1}; |
776 bool expected_layer_sync[3] = {false, false, false}; | 784 bool expected_layer_sync[3] = {false, false, false}; |
777 | 785 |
778 // First frame: #0. | 786 // First frame: #0. |
779 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 787 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
780 SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx); | 788 SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx); |
781 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); | 789 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); |
782 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 790 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
783 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 791 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
784 | 792 |
785 // Next frame: #1. | 793 // Next frame: #1. |
786 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 794 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
787 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 795 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
788 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); | 796 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); |
789 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); | 797 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); |
790 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 798 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
791 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 799 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
792 | 800 |
793 // Next frame: #2. | 801 // Next frame: #2. |
794 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 802 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
795 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 803 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
796 SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx); | 804 SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx); |
797 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); | 805 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); |
798 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 806 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
799 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 807 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
800 | 808 |
801 // Next frame: #3. | 809 // Next frame: #3. |
802 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 810 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
803 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 811 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
804 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); | 812 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); |
805 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 813 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
806 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 814 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
807 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 815 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
808 | 816 |
809 // Next frame: #4. | 817 // Next frame: #4. |
810 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 818 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
811 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 819 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
812 SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx); | 820 SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx); |
813 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 821 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
814 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 822 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
815 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 823 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
816 | 824 |
817 // Next frame: #5. | 825 // Next frame: #5. |
818 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 826 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
819 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 827 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
820 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); | 828 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); |
821 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 829 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
822 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 830 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
823 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 831 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
824 } | 832 } |
825 | 833 |
826 // Test the layer pattern and sync flag for various spatial-temporal patterns. | 834 // Test the layer pattern and sync flag for various spatial-temporal patterns. |
827 // 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and | 835 // 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and |
828 // 1 temporal layer for highest resolution. | 836 // 1 temporal layer for highest resolution. |
829 // For this profile, we expect the temporal index pattern to be: | 837 // For this profile, we expect the temporal index pattern to be: |
830 // 1st stream: 0, 2, 1, 2, .... | 838 // 1st stream: 0, 2, 1, 2, .... |
831 // 2nd stream: 0, 1, 0, 1, ... | 839 // 2nd stream: 0, 1, 0, 1, ... |
832 // 3rd stream: -1, -1, -1, -1, .... | 840 // 3rd stream: -1, -1, -1, -1, .... |
833 // Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer | 841 // Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer |
834 // should always have temporal layer idx set to kNoTemporalIdx = -1. | 842 // should always have temporal layer idx set to kNoTemporalIdx = -1. |
835 // Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255. | 843 // Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255. |
836 // TODO(marpan): Although this seems safe for now, we should fix this. | 844 // TODO(marpan): Although this seems safe for now, we should fix this. |
837 void TestSpatioTemporalLayers321PatternEncoder() { | 845 void TestSpatioTemporalLayers321PatternEncoder() { |
838 int temporal_layer_profile[3] = {3, 2, 1}; | 846 int temporal_layer_profile[3] = {3, 2, 1}; |
839 SetUpCodec(temporal_layer_profile); | 847 SetUpCodec(temporal_layer_profile); |
840 Vp8TestEncodedImageCallback encoder_callback; | 848 Vp8TestEncodedImageCallback encoder_callback; |
841 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); | 849 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); |
842 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 850 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
843 | 851 |
844 int expected_temporal_idx[3] = {-1, -1, -1}; | 852 int expected_temporal_idx[3] = {-1, -1, -1}; |
845 bool expected_layer_sync[3] = {false, false, false}; | 853 bool expected_layer_sync[3] = {false, false, false}; |
846 | 854 |
847 // First frame: #0. | 855 // First frame: #0. |
848 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 856 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
849 SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx); | 857 SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx); |
850 SetExpectedValues3<bool>(true, true, false, expected_layer_sync); | 858 SetExpectedValues3<bool>(true, true, false, expected_layer_sync); |
851 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 859 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
852 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 860 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
853 | 861 |
854 // Next frame: #1. | 862 // Next frame: #1. |
855 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 863 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
856 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 864 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
857 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); | 865 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); |
858 SetExpectedValues3<bool>(true, true, false, expected_layer_sync); | 866 SetExpectedValues3<bool>(true, true, false, expected_layer_sync); |
859 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 867 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
860 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 868 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
861 | 869 |
862 // Next frame: #2. | 870 // Next frame: #2. |
863 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 871 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
864 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 872 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
865 SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx); | 873 SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx); |
866 SetExpectedValues3<bool>(true, false, false, expected_layer_sync); | 874 SetExpectedValues3<bool>(true, false, false, expected_layer_sync); |
867 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 875 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
868 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 876 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
869 | 877 |
870 // Next frame: #3. | 878 // Next frame: #3. |
871 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 879 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
872 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 880 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
873 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); | 881 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); |
874 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 882 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
875 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 883 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
876 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 884 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
877 | 885 |
878 // Next frame: #4. | 886 // Next frame: #4. |
879 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 887 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
880 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 888 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
881 SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx); | 889 SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx); |
882 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 890 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
883 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 891 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
884 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 892 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
885 | 893 |
886 // Next frame: #5. | 894 // Next frame: #5. |
887 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 895 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
888 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 896 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
889 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); | 897 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); |
890 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 898 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
891 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 899 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
892 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 900 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
893 } | 901 } |
894 | 902 |
895 void TestStrideEncodeDecode() { | 903 void TestStrideEncodeDecode() { |
896 Vp8TestEncodedImageCallback encoder_callback; | 904 Vp8TestEncodedImageCallback encoder_callback; |
897 Vp8TestDecodedImageCallback decoder_callback; | 905 Vp8TestDecodedImageCallback decoder_callback; |
898 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); | 906 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); |
899 decoder_->RegisterDecodeCompleteCallback(&decoder_callback); | 907 decoder_->RegisterDecodeCompleteCallback(&decoder_callback); |
900 | 908 |
901 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 909 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
902 // Setting two (possibly) problematic use cases for stride: | 910 // Setting two (possibly) problematic use cases for stride: |
903 // 1. stride > width 2. stride_y != stride_uv/2 | 911 // 1. stride > width 2. stride_y != stride_uv/2 |
904 int stride_y = kDefaultWidth + 20; | 912 int stride_y = kDefaultWidth + 20; |
905 int stride_uv = ((kDefaultWidth + 1) / 2) + 5; | 913 int stride_uv = ((kDefaultWidth + 1) / 2) + 5; |
906 input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y, | 914 input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, stride_y, |
907 stride_uv, stride_uv); | 915 stride_uv, stride_uv); |
908 input_frame_.reset( | |
909 new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0)); | |
910 | |
911 // Set color. | 916 // Set color. |
912 int plane_offset[kNumOfPlanes]; | 917 int plane_offset[kNumOfPlanes]; |
913 plane_offset[kYPlane] = kColorY; | 918 plane_offset[kYPlane] = kColorY; |
914 plane_offset[kUPlane] = kColorU; | 919 plane_offset[kUPlane] = kColorU; |
915 plane_offset[kVPlane] = kColorV; | 920 plane_offset[kVPlane] = kColorV; |
916 CreateImage(input_buffer_, plane_offset); | 921 CreateImage(input_frame_.video_frame_buffer(), plane_offset); |
917 | 922 |
918 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 923 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
919 | 924 |
920 // Change color. | 925 // Change color. |
921 plane_offset[kYPlane] += 1; | 926 plane_offset[kYPlane] += 1; |
922 plane_offset[kUPlane] += 1; | 927 plane_offset[kUPlane] += 1; |
923 plane_offset[kVPlane] += 1; | 928 plane_offset[kVPlane] += 1; |
924 CreateImage(input_buffer_, plane_offset); | 929 CreateImage(input_frame_.video_frame_buffer(), plane_offset); |
925 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); | 930 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); |
926 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); | 931 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); |
927 | 932 |
928 EncodedImage encoded_frame; | 933 EncodedImage encoded_frame; |
929 // Only encoding one frame - so will be a key frame. | 934 // Only encoding one frame - so will be a key frame. |
930 encoder_callback.GetLastEncodedKeyFrame(&encoded_frame); | 935 encoder_callback.GetLastEncodedKeyFrame(&encoded_frame); |
931 EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL)); | 936 EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL)); |
932 encoder_callback.GetLastEncodedFrame(&encoded_frame); | 937 encoder_callback.GetLastEncodedFrame(&encoded_frame); |
933 decoder_->Decode(encoded_frame, false, NULL); | 938 decoder_->Decode(encoded_frame, false, NULL); |
934 EXPECT_EQ(2, decoder_callback.DecodedFrames()); | 939 EXPECT_EQ(2, decoder_callback.DecodedFrames()); |
935 } | 940 } |
936 | 941 |
(...skipping 19 matching lines...)
956 } | 961 } |
957 ++stream; | 962 ++stream; |
958 } | 963 } |
959 } | 964 } |
960 | 965 |
961 std::unique_ptr<VP8Encoder> encoder_; | 966 std::unique_ptr<VP8Encoder> encoder_; |
962 MockEncodedImageCallback encoder_callback_; | 967 MockEncodedImageCallback encoder_callback_; |
963 std::unique_ptr<VP8Decoder> decoder_; | 968 std::unique_ptr<VP8Decoder> decoder_; |
964 MockDecodedImageCallback decoder_callback_; | 969 MockDecodedImageCallback decoder_callback_; |
965 VideoCodec settings_; | 970 VideoCodec settings_; |
966 rtc::scoped_refptr<I420Buffer> input_buffer_; | 971 VideoFrame input_frame_; |
967 std::unique_ptr<VideoFrame> input_frame_; | |
968 }; | 972 }; |
969 | 973 |
970 } // namespace testing | 974 } // namespace testing |
971 } // namespace webrtc | 975 } // namespace webrtc |
972 | 976 |
973 #endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_ | 977 #endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_ |