OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 218 matching lines...)
229 int stride) { | 229 int stride) { |
230 for (int i = 0; i < height; i++, data += stride) { | 230 for (int i = 0; i < height; i++, data += stride) { |
231 // Set only the image area to |value|, zeroing the rest of the | 231 // Set only the image area to |value|, zeroing the rest of the |
232 // allocated area; this makes it easier to distinguish the image | 232 // allocated area; this makes it easier to distinguish the image |
233 // size from the frame size (which accounts for stride). | 233 // size from the frame size (which accounts for stride). |
234 memset(data, value, width); | 234 memset(data, value, width); |
235 memset(data + width, 0, stride - width); | 235 memset(data + width, 0, stride - width); |
236 } | 236 } |
237 } | 237 } |
238 | 238 |
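For reference, a minimal sketch of the layout SetPlane produces, assuming its elided leading parameters are (uint8_t* data, int value, int width, int height) ahead of the stride shown above; the sizes are illustrative only:

    // Hypothetical 4x2 plane with stride 6: SetPlane writes |value| into
    // the 4 image bytes of each row and zeroes the 2 padding bytes.
    uint8_t data[2 * 6];
    SetPlane(data, 0x80, /*width=*/4, /*height=*/2, /*stride=*/6);
    // Each row now reads: 80 80 80 80 00 00.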
239 // Fills in a VideoFrameBuffer from |plane_colors|. | 239 // Fills in an I420Buffer from |plane_colors|. |
240 static void CreateImage(const rtc::scoped_refptr<VideoFrameBuffer>& buffer, | 240 static void CreateImage(const rtc::scoped_refptr<I420Buffer>& buffer, |
241 int plane_colors[kNumOfPlanes]) { | 241 int plane_colors[kNumOfPlanes]) { |
242 int width = buffer->width(); | 242 int width = buffer->width(); |
243 int height = buffer->height(); | 243 int height = buffer->height(); |
244 int chroma_width = (width + 1) / 2; | 244 int chroma_width = (width + 1) / 2; |
245 int chroma_height = (height + 1) / 2; | 245 int chroma_height = (height + 1) / 2; |
246 | 246 |
247 SetPlane(buffer->MutableDataY(), plane_colors[0], | 247 SetPlane(buffer->MutableDataY(), plane_colors[0], |
248 width, height, buffer->StrideY()); | 248 width, height, buffer->StrideY()); |
249 | 249 |
250 SetPlane(buffer->MutableDataU(), plane_colors[1], | 250 SetPlane(buffer->MutableDataU(), plane_colors[1], |
(...skipping 59 matching lines...)
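A side note on the (width + 1) / 2 rounding above: it is what keeps the chroma planes well-formed for odd resolutions, a case TestSwitchingToOneOddStream exercises later in this file. Illustrative values:

    // For the 1023x769 frame used by TestSwitchingToOneOddStream:
    int width = 1023, height = 769;
    int chroma_width = (width + 1) / 2;    // 512 (plain width / 2 would give 511)
    int chroma_height = (height + 1) / 2;  // 385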
310 protected: | 310 protected: |
311 virtual void SetUp() { SetUpCodec(kDefaultTemporalLayerProfile); } | 311 virtual void SetUp() { SetUpCodec(kDefaultTemporalLayerProfile); } |
312 | 312 |
313 virtual void SetUpCodec(const int* temporal_layer_profile) { | 313 virtual void SetUpCodec(const int* temporal_layer_profile) { |
314 encoder_->RegisterEncodeCompleteCallback(&encoder_callback_); | 314 encoder_->RegisterEncodeCompleteCallback(&encoder_callback_); |
315 decoder_->RegisterDecodeCompleteCallback(&decoder_callback_); | 315 decoder_->RegisterDecodeCompleteCallback(&decoder_callback_); |
316 DefaultSettings(&settings_, temporal_layer_profile); | 316 DefaultSettings(&settings_, temporal_layer_profile); |
317 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); | 317 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); |
318 EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1)); | 318 EXPECT_EQ(0, decoder_->InitDecode(&settings_, 1)); |
319 int half_width = (kDefaultWidth + 1) / 2; | 319 int half_width = (kDefaultWidth + 1) / 2; |
320 input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, kDefaultWidth, | 320 input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, |
321 half_width, half_width); | 321 kDefaultWidth, half_width, half_width); |
322 memset(input_frame_.video_frame_buffer()->MutableDataY(), 0, | 322 input_buffer_->InitializeData(); |
323 input_frame_.allocated_size(kYPlane)); | 323 input_frame_.reset( |
324 memset(input_frame_.video_frame_buffer()->MutableDataU(), 0, | 324 new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0)); |
325 input_frame_.allocated_size(kUPlane)); | |
326 memset(input_frame_.video_frame_buffer()->MutableDataV(), 0, | |
327 input_frame_.allocated_size(kVPlane)); | |
328 } | 325 } |
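Condensed, the allocation pattern this change moves to looks as follows; w, h and the strides stand in for the concrete values at each call site:

    // Old: the frame allocated its own planes, zeroed one memset at a time:
    //   input_frame_.CreateEmptyFrame(w, h, stride_y, stride_u, stride_v);
    //   memset(input_frame_.video_frame_buffer()->MutableDataY(), 0,
    //          input_frame_.allocated_size(kYPlane));  // ...and U, V.
    // New: create the buffer explicitly, zero it once, then wrap it.
    rtc::scoped_refptr<I420Buffer> buffer =
        I420Buffer::Create(w, h, stride_y, stride_u, stride_v);
    buffer->InitializeData();  // Zeroes all three planes in one call.
    std::unique_ptr<VideoFrame> frame(
        new VideoFrame(buffer, /*timestamp=*/0, /*render_time_ms=*/0,
                       webrtc::kVideoRotation_0));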
329 | 326 |
330 virtual void TearDown() { | 327 virtual void TearDown() { |
331 encoder_->Release(); | 328 encoder_->Release(); |
332 decoder_->Release(); | 329 decoder_->Release(); |
333 } | 330 } |
334 | 331 |
335 void ExpectStreams(FrameType frame_type, int expected_video_streams) { | 332 void ExpectStreams(FrameType frame_type, int expected_video_streams) { |
336 ASSERT_GE(expected_video_streams, 0); | 333 ASSERT_GE(expected_video_streams, 0); |
337 ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams); | 334 ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams); |
(...skipping 51 matching lines...)
389 } | 386 } |
390 } | 387 } |
391 | 388 |
392 // We currently expect all active streams to generate a key frame even though | 389 // We currently expect all active streams to generate a key frame even though |
393 // a key frame was only requested for some of them. | 390 // a key frame was only requested for some of them. |
394 void TestKeyFrameRequestsOnAllStreams() { | 391 void TestKeyFrameRequestsOnAllStreams() { |
395 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 392 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
396 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 393 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
397 kVideoFrameDelta); | 394 kVideoFrameDelta); |
398 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); | 395 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); |
399 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 396 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
400 | 397 |
401 ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams); | 398 ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams); |
402 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 399 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
403 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 400 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
404 | 401 |
405 frame_types[0] = kVideoFrameKey; | 402 frame_types[0] = kVideoFrameKey; |
406 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); | 403 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); |
407 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 404 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
408 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 405 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
409 | 406 |
410 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); | 407 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); |
411 frame_types[1] = kVideoFrameKey; | 408 frame_types[1] = kVideoFrameKey; |
412 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); | 409 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); |
413 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 410 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
414 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 411 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
415 | 412 |
416 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); | 413 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); |
417 frame_types[2] = kVideoFrameKey; | 414 frame_types[2] = kVideoFrameKey; |
418 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); | 415 ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams); |
419 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 416 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
420 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 417 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
421 | 418 |
422 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); | 419 std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta); |
423 ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams); | 420 ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams); |
424 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 421 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
425 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 422 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
426 } | 423 } |
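The request mechanism under test is the frame_types argument to Encode(): one entry per simulcast stream. Per the comment above, a single key-frame entry is expected to key-frame every active stream. In miniature:

    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
                                       kVideoFrameDelta);
    frame_types[1] = kVideoFrameKey;  // Request a key frame on stream 1 only.
    // Expected: key frames on all active streams, not just stream 1.
    encoder_->Encode(*input_frame_, NULL, &frame_types);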
427 | 424 |
428 void TestPaddingAllStreams() { | 425 void TestPaddingAllStreams() { |
429 // We should always encode the base layer. | 426 // We should always encode the base layer. |
430 encoder_->SetRates(kMinBitrates[0] - 1, 30); | 427 encoder_->SetRates(kMinBitrates[0] - 1, 30); |
431 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 428 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
432 kVideoFrameDelta); | 429 kVideoFrameDelta); |
433 ExpectStreams(kVideoFrameKey, 1); | 430 ExpectStreams(kVideoFrameKey, 1); |
434 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 431 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
435 | 432 |
436 ExpectStreams(kVideoFrameDelta, 1); | 433 ExpectStreams(kVideoFrameDelta, 1); |
437 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 434 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
438 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 435 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
439 } | 436 } |
440 | 437 |
441 void TestPaddingTwoStreams() { | 438 void TestPaddingTwoStreams() { |
442 // We have just enough to get only the first stream and padding for two. | 439 // We have just enough to get only the first stream and padding for two. |
443 encoder_->SetRates(kMinBitrates[0], 30); | 440 encoder_->SetRates(kMinBitrates[0], 30); |
444 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 441 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
445 kVideoFrameDelta); | 442 kVideoFrameDelta); |
446 ExpectStreams(kVideoFrameKey, 1); | 443 ExpectStreams(kVideoFrameKey, 1); |
447 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 444 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
448 | 445 |
449 ExpectStreams(kVideoFrameDelta, 1); | 446 ExpectStreams(kVideoFrameDelta, 1); |
450 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 447 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
451 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 448 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
452 } | 449 } |
453 | 450 |
454 void TestPaddingTwoStreamsOneMaxedOut() { | 451 void TestPaddingTwoStreamsOneMaxedOut() { |
455 // We are just below the limit for sending the second stream, so we | 452 // We are just below the limit for sending the second stream, so we |
456 // should get the first stream maxed out (at |maxBitrate|) and padding for two. | 453 // should get the first stream maxed out (at |maxBitrate|) and padding for two. |
457 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30); | 454 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30); |
458 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 455 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
459 kVideoFrameDelta); | 456 kVideoFrameDelta); |
460 ExpectStreams(kVideoFrameKey, 1); | 457 ExpectStreams(kVideoFrameKey, 1); |
461 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 458 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
462 | 459 |
463 ExpectStreams(kVideoFrameDelta, 1); | 460 ExpectStreams(kVideoFrameDelta, 1); |
464 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 461 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
465 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 462 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
466 } | 463 } |
467 | 464 |
468 void TestPaddingOneStream() { | 465 void TestPaddingOneStream() { |
469 // We have just enough to send two streams, so we get padding for one. | 466 // We have just enough to send two streams, so we get padding for one. |
470 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1], 30); | 467 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1], 30); |
471 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 468 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
472 kVideoFrameDelta); | 469 kVideoFrameDelta); |
473 ExpectStreams(kVideoFrameKey, 2); | 470 ExpectStreams(kVideoFrameKey, 2); |
474 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 471 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
475 | 472 |
476 ExpectStreams(kVideoFrameDelta, 2); | 473 ExpectStreams(kVideoFrameDelta, 2); |
477 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 474 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
478 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 475 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
479 } | 476 } |
480 | 477 |
481 void TestPaddingOneStreamTwoMaxedOut() { | 478 void TestPaddingOneStreamTwoMaxedOut() { |
482 // We are just below the limit for sending the third stream, so we should | 479 // We are just below the limit for sending the third stream, so we should |
483 // get the first stream maxed out at |targetBitrate| and the second at |maxBitrate|. | 480 // get the first stream maxed out at |targetBitrate| and the second at |maxBitrate|. |
484 encoder_->SetRates( | 481 encoder_->SetRates( |
485 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30); | 482 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] - 1, 30); |
486 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 483 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
487 kVideoFrameDelta); | 484 kVideoFrameDelta); |
488 ExpectStreams(kVideoFrameKey, 2); | 485 ExpectStreams(kVideoFrameKey, 2); |
489 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 486 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
490 | 487 |
491 ExpectStreams(kVideoFrameDelta, 2); | 488 ExpectStreams(kVideoFrameDelta, 2); |
492 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 489 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
493 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 490 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
494 } | 491 } |
495 | 492 |
496 void TestSendAllStreams() { | 493 void TestSendAllStreams() { |
497 // We have just enough to send all streams. | 494 // We have just enough to send all streams. |
498 encoder_->SetRates( | 495 encoder_->SetRates( |
499 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30); | 496 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2], 30); |
500 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 497 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
501 kVideoFrameDelta); | 498 kVideoFrameDelta); |
502 ExpectStreams(kVideoFrameKey, 3); | 499 ExpectStreams(kVideoFrameKey, 3); |
503 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 500 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
504 | 501 |
505 ExpectStreams(kVideoFrameDelta, 3); | 502 ExpectStreams(kVideoFrameDelta, 3); |
506 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 503 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
507 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 504 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
508 } | 505 } |
509 | 506 |
510 void TestDisablingStreams() { | 507 void TestDisablingStreams() { |
511 // We should get three media streams. | 508 // We should get three media streams. |
512 encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30); | 509 encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] + kMaxBitrates[2], 30); |
513 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 510 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
514 kVideoFrameDelta); | 511 kVideoFrameDelta); |
515 ExpectStreams(kVideoFrameKey, 3); | 512 ExpectStreams(kVideoFrameKey, 3); |
516 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 513 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
517 | 514 |
518 ExpectStreams(kVideoFrameDelta, 3); | 515 ExpectStreams(kVideoFrameDelta, 3); |
519 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 516 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
520 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 517 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
521 | 518 |
522 // We should only get two streams and padding for one. | 519 // We should only get two streams and padding for one. |
523 encoder_->SetRates( | 520 encoder_->SetRates( |
524 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30); | 521 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30); |
525 ExpectStreams(kVideoFrameDelta, 2); | 522 ExpectStreams(kVideoFrameDelta, 2); |
526 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 523 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
527 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 524 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
528 | 525 |
529 // We should only get the first stream and padding for two. | 526 // We should only get the first stream and padding for two. |
530 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30); | 527 encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30); |
531 ExpectStreams(kVideoFrameDelta, 1); | 528 ExpectStreams(kVideoFrameDelta, 1); |
532 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 529 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
533 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 530 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
534 | 531 |
535 // We don't have enough bitrate for the thumbnail stream, but we should get | 532 // We don't have enough bitrate for the thumbnail stream, but we should get |
536 // it anyway with the current configuration. | 533 // it anyway with the current configuration. |
537 encoder_->SetRates(kTargetBitrates[0] - 1, 30); | 534 encoder_->SetRates(kTargetBitrates[0] - 1, 30); |
538 ExpectStreams(kVideoFrameDelta, 1); | 535 ExpectStreams(kVideoFrameDelta, 1); |
539 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 536 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
540 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 537 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
541 | 538 |
542 // We should only get two streams and padding for one. | 539 // We should only get two streams and padding for one. |
543 encoder_->SetRates( | 540 encoder_->SetRates( |
544 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30); | 541 kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2] / 2, 30); |
545 // We get a key frame because a new stream is being enabled. | 542 // We get a key frame because a new stream is being enabled. |
546 ExpectStreams(kVideoFrameKey, 2); | 543 ExpectStreams(kVideoFrameKey, 2); |
547 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 544 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
548 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 545 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
549 | 546 |
550 // We should get all three streams. | 547 // We should get all three streams. |
551 encoder_->SetRates( | 548 encoder_->SetRates( |
552 kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30); | 549 kTargetBitrates[0] + kTargetBitrates[1] + kTargetBitrates[2], 30); |
553 // We get a key frame because a new stream is being enabled. | 550 // We get a key frame because a new stream is being enabled. |
554 ExpectStreams(kVideoFrameKey, 3); | 551 ExpectStreams(kVideoFrameKey, 3); |
555 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 552 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
556 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 553 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
557 } | 554 } |
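The transitions above follow a simple threshold rule; a hypothetical helper (not part of the test) expressing the stream counts asserted by the SetRates() calls:

    // Sketch: streams sent for a given bitrate (kbps), per the test above.
    int ExpectedStreams(int bitrate_kbps) {
      if (bitrate_kbps >=
          kTargetBitrates[0] + kTargetBitrates[1] + kMinBitrates[2])
        return 3;
      if (bitrate_kbps >= kTargetBitrates[0] + kMinBitrates[1])
        return 2;
      return 1;  // The base layer is always encoded.
    }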
558 | 555 |
559 void SwitchingToOneStream(int width, int height) { | 556 void SwitchingToOneStream(int width, int height) { |
560 // Disable all streams except the last and set the bitrate of the last to | 557 // Disable all streams except the last and set the bitrate of the last to |
561 // 100 kbps. This verifies the way GTP switches to screenshare mode. | 558 // 100 kbps. This verifies the way GTP switches to screenshare mode. |
562 settings_.codecSpecific.VP8.numberOfTemporalLayers = 1; | 559 settings_.codecSpecific.VP8.numberOfTemporalLayers = 1; |
563 settings_.maxBitrate = 100; | 560 settings_.maxBitrate = 100; |
564 settings_.startBitrate = 100; | 561 settings_.startBitrate = 100; |
565 settings_.width = width; | 562 settings_.width = width; |
566 settings_.height = height; | 563 settings_.height = height; |
567 for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) { | 564 for (int i = 0; i < settings_.numberOfSimulcastStreams - 1; ++i) { |
568 settings_.simulcastStream[i].maxBitrate = 0; | 565 settings_.simulcastStream[i].maxBitrate = 0; |
569 settings_.simulcastStream[i].width = settings_.width; | 566 settings_.simulcastStream[i].width = settings_.width; |
570 settings_.simulcastStream[i].height = settings_.height; | 567 settings_.simulcastStream[i].height = settings_.height; |
571 } | 568 } |
572 // Setting input image to new resolution. | 569 // Setting input image to new resolution. |
573 int half_width = (settings_.width + 1) / 2; | 570 int half_width = (settings_.width + 1) / 2; |
574 input_frame_.CreateEmptyFrame(settings_.width, settings_.height, | 571 input_buffer_ = I420Buffer::Create(settings_.width, settings_.height, |
575 settings_.width, half_width, half_width); | 572 settings_.width, half_width, half_width); |
576 memset(input_frame_.video_frame_buffer()->MutableDataY(), 0, | 573 input_buffer_->InitializeData(); |
577 input_frame_.allocated_size(kYPlane)); | 574 |
578 memset(input_frame_.video_frame_buffer()->MutableDataU(), 0, | 575 input_frame_.reset( |
579 input_frame_.allocated_size(kUPlane)); | 576 new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0)); |
580 memset(input_frame_.video_frame_buffer()->MutableDataV(), 0, | |
581 input_frame_.allocated_size(kVPlane)); | |
582 | 577 |
583 // The for loop above did not set the bitrate of the highest layer. | 578 // The for loop above did not set the bitrate of the highest layer. |
584 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1] | 579 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1] |
585 .maxBitrate = 0; | 580 .maxBitrate = 0; |
586 // The highest layer has to correspond to the non-simulcast resolution. | 581 // The highest layer has to correspond to the non-simulcast resolution. |
587 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width = | 582 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].width = |
588 settings_.width; | 583 settings_.width; |
589 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height = | 584 settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].height = |
590 settings_.height; | 585 settings_.height; |
591 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); | 586 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); |
592 | 587 |
593 // Encode one frame and verify. | 588 // Encode one frame and verify. |
594 encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30); | 589 encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30); |
595 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, | 590 std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, |
596 kVideoFrameDelta); | 591 kVideoFrameDelta); |
597 EXPECT_CALL( | 592 EXPECT_CALL( |
598 encoder_callback_, | 593 encoder_callback_, |
599 OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey), | 594 OnEncodedImage(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey), |
600 Field(&EncodedImage::_encodedWidth, width), | 595 Field(&EncodedImage::_encodedWidth, width), |
601 Field(&EncodedImage::_encodedHeight, height)), | 596 Field(&EncodedImage::_encodedHeight, height)), |
602 _, _)) | 597 _, _)) |
603 .Times(1) | 598 .Times(1) |
604 .WillRepeatedly(Return( | 599 .WillRepeatedly(Return( |
605 EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0))); | 600 EncodedImageCallback::Result(EncodedImageCallback::Result::OK, 0))); |
606 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | 601 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
607 | 602 |
608 // Switch back. | 603 // Switch back. |
609 DefaultSettings(&settings_, kDefaultTemporalLayerProfile); | 604 DefaultSettings(&settings_, kDefaultTemporalLayerProfile); |
610 // Start at the lowest bitrate for enabling base stream. | 605 // Start at the lowest bitrate for enabling base stream. |
611 settings_.startBitrate = kMinBitrates[0]; | 606 settings_.startBitrate = kMinBitrates[0]; |
612 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); | 607 EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200)); |
613 encoder_->SetRates(settings_.startBitrate, 30); | 608 encoder_->SetRates(settings_.startBitrate, 30); |
614 ExpectStreams(kVideoFrameKey, 1); | 609 ExpectStreams(kVideoFrameKey, 1); |
615 // Resize |input_frame_| to the new resolution. | 610 // Resize |input_frame_| to the new resolution. |
616 half_width = (settings_.width + 1) / 2; | 611 half_width = (settings_.width + 1) / 2; |
617 input_frame_.CreateEmptyFrame(settings_.width, settings_.height, | 612 input_buffer_ = I420Buffer::Create(settings_.width, settings_.height, |
618 settings_.width, half_width, half_width); | 613 settings_.width, half_width, half_width); |
619 memset(input_frame_.video_frame_buffer()->MutableDataY(), 0, | 614 input_buffer_->InitializeData(); |
620 input_frame_.allocated_size(kYPlane)); | 615 input_frame_.reset( |
621 memset(input_frame_.video_frame_buffer()->MutableDataU(), 0, | 616 new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0)); |
622 input_frame_.allocated_size(kUPlane)); | 617 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, &frame_types)); |
623 memset(input_frame_.video_frame_buffer()->MutableDataV(), 0, | |
624 input_frame_.allocated_size(kVPlane)); | |
625 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types)); | |
626 } | 618 } |
627 | 619 |
628 void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); } | 620 void TestSwitchingToOneStream() { SwitchingToOneStream(1024, 768); } |
629 | 621 |
630 void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); } | 622 void TestSwitchingToOneOddStream() { SwitchingToOneStream(1023, 769); } |
631 | 623 |
632 void TestSwitchingToOneSmallStream() { SwitchingToOneStream(4, 4); } | 624 void TestSwitchingToOneSmallStream() { SwitchingToOneStream(4, 4); } |
633 | 625 |
634 void TestRPSIEncoder() { | 626 void TestRPSIEncoder() { |
635 Vp8TestEncodedImageCallback encoder_callback; | 627 Vp8TestEncodedImageCallback encoder_callback; |
636 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); | 628 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); |
637 | 629 |
638 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 630 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
639 | 631 |
640 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 632 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
641 int picture_id = -1; | 633 int picture_id = -1; |
642 int temporal_layer = -1; | 634 int temporal_layer = -1; |
643 bool layer_sync = false; | 635 bool layer_sync = false; |
644 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 636 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
645 &layer_sync, 0); | 637 &layer_sync, 0); |
646 EXPECT_EQ(0, temporal_layer); | 638 EXPECT_EQ(0, temporal_layer); |
647 EXPECT_TRUE(layer_sync); | 639 EXPECT_TRUE(layer_sync); |
648 int key_frame_picture_id = picture_id; | 640 int key_frame_picture_id = picture_id; |
649 | 641 |
650 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 642 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
651 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 643 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
652 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 644 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
653 &layer_sync, 0); | 645 &layer_sync, 0); |
654 EXPECT_EQ(2, temporal_layer); | 646 EXPECT_EQ(2, temporal_layer); |
655 EXPECT_TRUE(layer_sync); | 647 EXPECT_TRUE(layer_sync); |
656 | 648 |
657 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 649 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
658 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 650 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
659 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 651 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
660 &layer_sync, 0); | 652 &layer_sync, 0); |
661 EXPECT_EQ(1, temporal_layer); | 653 EXPECT_EQ(1, temporal_layer); |
662 EXPECT_TRUE(layer_sync); | 654 EXPECT_TRUE(layer_sync); |
663 | 655 |
664 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 656 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
665 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 657 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
666 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 658 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
667 &layer_sync, 0); | 659 &layer_sync, 0); |
668 EXPECT_EQ(2, temporal_layer); | 660 EXPECT_EQ(2, temporal_layer); |
669 EXPECT_FALSE(layer_sync); | 661 EXPECT_FALSE(layer_sync); |
670 | 662 |
671 CodecSpecificInfo codec_specific; | 663 CodecSpecificInfo codec_specific; |
672 codec_specific.codecType = kVideoCodecVP8; | 664 codec_specific.codecType = kVideoCodecVP8; |
673 codec_specific.codecSpecific.VP8.hasReceivedRPSI = true; | 665 codec_specific.codecSpecific.VP8.hasReceivedRPSI = true; |
674 | 666 |
675 // Must match last key frame to trigger. | 667 // Must match last key frame to trigger. |
676 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id; | 668 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id; |
677 | 669 |
678 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 670 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
679 EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL)); | 671 EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL)); |
680 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 672 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
681 &layer_sync, 0); | 673 &layer_sync, 0); |
682 | 674 |
683 EXPECT_EQ(0, temporal_layer); | 675 EXPECT_EQ(0, temporal_layer); |
684 EXPECT_TRUE(layer_sync); | 676 EXPECT_TRUE(layer_sync); |
685 | 677 |
686 // Must match last key frame to trigger, test bad id. | 678 // Must match last key frame to trigger, test bad id. |
687 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id + 17; | 679 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id + 17; |
688 | 680 |
689 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 681 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
690 EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL)); | 682 EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL)); |
691 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 683 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
692 &layer_sync, 0); | 684 &layer_sync, 0); |
693 | 685 |
694 EXPECT_EQ(2, temporal_layer); | 686 EXPECT_EQ(2, temporal_layer); |
695 // The previous frame was a base layer sync (since it was a frame that | 687 // The previous frame was a base layer sync (since it was a frame that |
696 // only predicts from key frame and hence resets the temporal pattern), | 688 // only predicts from key frame and hence resets the temporal pattern), |
697 // so this frame (the next one) must have |layer_sync| set to true. | 689 // so this frame (the next one) must have |layer_sync| set to true. |
698 EXPECT_TRUE(layer_sync); | 690 EXPECT_TRUE(layer_sync); |
699 } | 691 } |
700 | 692 |
701 void TestRPSIEncodeDecode() { | 693 void TestRPSIEncodeDecode() { |
702 Vp8TestEncodedImageCallback encoder_callback; | 694 Vp8TestEncodedImageCallback encoder_callback; |
703 Vp8TestDecodedImageCallback decoder_callback; | 695 Vp8TestDecodedImageCallback decoder_callback; |
704 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); | 696 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); |
705 decoder_->RegisterDecodeCompleteCallback(&decoder_callback); | 697 decoder_->RegisterDecodeCompleteCallback(&decoder_callback); |
706 | 698 |
707 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 699 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
708 | 700 |
709 // Set color. | 701 // Set color. |
710 int plane_offset[kNumOfPlanes]; | 702 int plane_offset[kNumOfPlanes]; |
711 plane_offset[kYPlane] = kColorY; | 703 plane_offset[kYPlane] = kColorY; |
712 plane_offset[kUPlane] = kColorU; | 704 plane_offset[kUPlane] = kColorU; |
713 plane_offset[kVPlane] = kColorV; | 705 plane_offset[kVPlane] = kColorV; |
714 CreateImage(input_frame_.video_frame_buffer(), plane_offset); | 706 CreateImage(input_buffer_, plane_offset); |
715 | 707 |
716 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 708 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
717 int picture_id = -1; | 709 int picture_id = -1; |
718 int temporal_layer = -1; | 710 int temporal_layer = -1; |
719 bool layer_sync = false; | 711 bool layer_sync = false; |
720 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, | 712 encoder_callback.GetLastEncodedFrameInfo(&picture_id, &temporal_layer, |
721 &layer_sync, 0); | 713 &layer_sync, 0); |
722 EXPECT_EQ(0, temporal_layer); | 714 EXPECT_EQ(0, temporal_layer); |
723 EXPECT_TRUE(layer_sync); | 715 EXPECT_TRUE(layer_sync); |
724 int key_frame_picture_id = picture_id; | 716 int key_frame_picture_id = picture_id; |
725 | 717 |
726 // Change color. | 718 // Change color. |
727 plane_offset[kYPlane] += 1; | 719 plane_offset[kYPlane] += 1; |
728 plane_offset[kUPlane] += 1; | 720 plane_offset[kUPlane] += 1; |
729 plane_offset[kVPlane] += 1; | 721 plane_offset[kVPlane] += 1; |
730 CreateImage(input_frame_.video_frame_buffer(), plane_offset); | 722 CreateImage(input_buffer_, plane_offset); |
731 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 723 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
732 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 724 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
733 | 725 |
734 // Change color. | 726 // Change color. |
735 plane_offset[kYPlane] += 1; | 727 plane_offset[kYPlane] += 1; |
736 plane_offset[kUPlane] += 1; | 728 plane_offset[kUPlane] += 1; |
737 plane_offset[kVPlane] += 1; | 729 plane_offset[kVPlane] += 1; |
738 CreateImage(input_frame_.video_frame_buffer(), plane_offset); | 730 CreateImage(input_buffer_, plane_offset); |
739 | 731 |
740 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 732 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
741 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 733 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
742 | 734 |
743 // Change color. | 735 // Change color. |
744 plane_offset[kYPlane] += 1; | 736 plane_offset[kYPlane] += 1; |
745 plane_offset[kUPlane] += 1; | 737 plane_offset[kUPlane] += 1; |
746 plane_offset[kVPlane] += 1; | 738 plane_offset[kVPlane] += 1; |
747 CreateImage(input_frame_.video_frame_buffer(), plane_offset); | 739 CreateImage(input_buffer_, plane_offset); |
748 | 740 |
749 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 741 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
750 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 742 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
751 | 743 |
752 CodecSpecificInfo codec_specific; | 744 CodecSpecificInfo codec_specific; |
753 codec_specific.codecType = kVideoCodecVP8; | 745 codec_specific.codecType = kVideoCodecVP8; |
754 codec_specific.codecSpecific.VP8.hasReceivedRPSI = true; | 746 codec_specific.codecSpecific.VP8.hasReceivedRPSI = true; |
755 // Must match last key frame to trigger. | 747 // Must match last key frame to trigger. |
756 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id; | 748 codec_specific.codecSpecific.VP8.pictureIdRPSI = key_frame_picture_id; |
757 | 749 |
758 // Change color back to original. | 750 // Change color back to original. |
759 plane_offset[kYPlane] = kColorY; | 751 plane_offset[kYPlane] = kColorY; |
760 plane_offset[kUPlane] = kColorU; | 752 plane_offset[kUPlane] = kColorU; |
761 plane_offset[kVPlane] = kColorV; | 753 plane_offset[kVPlane] = kColorV; |
762 CreateImage(input_frame_.video_frame_buffer(), plane_offset); | 754 CreateImage(input_buffer_, plane_offset); |
763 | 755 |
764 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 756 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
765 EXPECT_EQ(0, encoder_->Encode(input_frame_, &codec_specific, NULL)); | 757 EXPECT_EQ(0, encoder_->Encode(*input_frame_, &codec_specific, NULL)); |
766 | 758 |
767 EncodedImage encoded_frame; | 759 EncodedImage encoded_frame; |
768 encoder_callback.GetLastEncodedKeyFrame(&encoded_frame); | 760 encoder_callback.GetLastEncodedKeyFrame(&encoded_frame); |
769 decoder_->Decode(encoded_frame, false, NULL); | 761 decoder_->Decode(encoded_frame, false, NULL); |
770 encoder_callback.GetLastEncodedFrame(&encoded_frame); | 762 encoder_callback.GetLastEncodedFrame(&encoded_frame); |
771 decoder_->Decode(encoded_frame, false, NULL); | 763 decoder_->Decode(encoded_frame, false, NULL); |
772 EXPECT_EQ(2, decoder_callback.DecodedFrames()); | 764 EXPECT_EQ(2, decoder_callback.DecodedFrames()); |
773 } | 765 } |
774 | 766 |
775 // Test the layer pattern and sync flag for various spatial-temporal patterns. | 767 // Test the layer pattern and sync flag for various spatial-temporal patterns. |
776 // 3-3-3 pattern: 3 temporal layers for all spatial streams, so the same | 768 // 3-3-3 pattern: 3 temporal layers for all spatial streams, so the same |
777 // temporal_layer id and layer_sync are expected for all streams. | 769 // temporal_layer id and layer_sync are expected for all streams. |
778 void TestSaptioTemporalLayers333PatternEncoder() { | 770 void TestSaptioTemporalLayers333PatternEncoder() { |
779 Vp8TestEncodedImageCallback encoder_callback; | 771 Vp8TestEncodedImageCallback encoder_callback; |
780 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); | 772 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); |
781 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 773 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
782 | 774 |
783 int expected_temporal_idx[3] = {-1, -1, -1}; | 775 int expected_temporal_idx[3] = {-1, -1, -1}; |
784 bool expected_layer_sync[3] = {false, false, false}; | 776 bool expected_layer_sync[3] = {false, false, false}; |
785 | 777 |
786 // First frame: #0. | 778 // First frame: #0. |
787 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 779 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
788 SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx); | 780 SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx); |
789 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); | 781 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); |
790 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 782 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
791 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 783 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
792 | 784 |
793 // Next frame: #1. | 785 // Next frame: #1. |
794 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 786 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
795 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 787 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
796 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); | 788 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); |
797 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); | 789 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); |
798 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 790 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
799 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 791 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
800 | 792 |
801 // Next frame: #2. | 793 // Next frame: #2. |
802 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 794 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
803 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 795 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
804 SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx); | 796 SetExpectedValues3<int>(1, 1, 1, expected_temporal_idx); |
805 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); | 797 SetExpectedValues3<bool>(true, true, true, expected_layer_sync); |
806 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 798 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
807 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 799 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
808 | 800 |
809 // Next frame: #3. | 801 // Next frame: #3. |
810 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 802 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
811 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 803 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
812 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); | 804 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); |
813 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 805 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
814 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 806 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
815 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 807 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
816 | 808 |
817 // Next frame: #4. | 809 // Next frame: #4. |
818 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 810 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
819 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 811 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
820 SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx); | 812 SetExpectedValues3<int>(0, 0, 0, expected_temporal_idx); |
821 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 813 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
822 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 814 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
823 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 815 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
824 | 816 |
825 // Next frame: #5. | 817 // Next frame: #5. |
826 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 818 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
827 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 819 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
828 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); | 820 SetExpectedValues3<int>(2, 2, 2, expected_temporal_idx); |
829 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 821 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
830 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 822 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
831 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 823 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
832 } | 824 } |
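The expectations above follow the usual 3-temporal-layer VP8 pattern with period four; a hypothetical helper capturing the sequence asserted for frames #0..#5:

    // Frames map to temporal layers 0, 2, 1, 2, 0, 2, ... on every
    // spatial stream in the 3-3-3 profile.
    int ExpectedTemporalIdx333(int frame_number) {
      static const int kPattern[4] = {0, 2, 1, 2};
      return kPattern[frame_number % 4];
    }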
833 | 825 |
834 // Test the layer pattern and sync flag for various spatial-temporal patterns. | 826 // Test the layer pattern and sync flag for various spatial-temporal patterns. |
835 // 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and | 827 // 3-2-1 pattern: 3 temporal layers for lowest resolution, 2 for middle, and |
836 // 1 temporal layer for highest resolution. | 828 // 1 temporal layer for highest resolution. |
837 // For this profile, we expect the temporal index pattern to be: | 829 // For this profile, we expect the temporal index pattern to be: |
838 // 1st stream: 0, 2, 1, 2, .... | 830 // 1st stream: 0, 2, 1, 2, .... |
839 // 2nd stream: 0, 1, 0, 1, ... | 831 // 2nd stream: 0, 1, 0, 1, ... |
840 // 3rd stream: -1, -1, -1, -1, .... | 832 // 3rd stream: -1, -1, -1, -1, .... |
841 // Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer | 833 // Regarding the 3rd stream, note that a stream/encoder with 1 temporal layer |
842 // should always have temporal layer idx set to kNoTemporalIdx = -1. | 834 // should always have temporal layer idx set to kNoTemporalIdx = -1. |
843 // Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255. | 835 // Since CodecSpecificInfoVP8.temporalIdx is uint8_t, this will wrap to 255. |
844 // TODO(marpan): Although this seems safe for now, we should fix this. | 836 // TODO(marpan): Although this seems safe for now, we should fix this. |
845 void TestSpatioTemporalLayers321PatternEncoder() { | 837 void TestSpatioTemporalLayers321PatternEncoder() { |
846 int temporal_layer_profile[3] = {3, 2, 1}; | 838 int temporal_layer_profile[3] = {3, 2, 1}; |
847 SetUpCodec(temporal_layer_profile); | 839 SetUpCodec(temporal_layer_profile); |
848 Vp8TestEncodedImageCallback encoder_callback; | 840 Vp8TestEncodedImageCallback encoder_callback; |
849 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); | 841 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); |
850 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 842 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
851 | 843 |
852 int expected_temporal_idx[3] = {-1, -1, -1}; | 844 int expected_temporal_idx[3] = {-1, -1, -1}; |
853 bool expected_layer_sync[3] = {false, false, false}; | 845 bool expected_layer_sync[3] = {false, false, false}; |
854 | 846 |
855 // First frame: #0. | 847 // First frame: #0. |
856 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 848 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
857 SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx); | 849 SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx); |
858 SetExpectedValues3<bool>(true, true, false, expected_layer_sync); | 850 SetExpectedValues3<bool>(true, true, false, expected_layer_sync); |
859 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 851 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
860 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 852 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
861 | 853 |
862 // Next frame: #1. | 854 // Next frame: #1. |
863 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 855 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
864 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 856 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
865 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); | 857 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); |
866 SetExpectedValues3<bool>(true, true, false, expected_layer_sync); | 858 SetExpectedValues3<bool>(true, true, false, expected_layer_sync); |
867 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 859 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
868 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 860 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
869 | 861 |
870 // Next frame: #2. | 862 // Next frame: #2. |
871 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 863 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
872 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 864 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
873 SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx); | 865 SetExpectedValues3<int>(1, 0, 255, expected_temporal_idx); |
874 SetExpectedValues3<bool>(true, false, false, expected_layer_sync); | 866 SetExpectedValues3<bool>(true, false, false, expected_layer_sync); |
875 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 867 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
876 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 868 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
877 | 869 |
878 // Next frame: #3. | 870 // Next frame: #3. |
879 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 871 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
880 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 872 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
881 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); | 873 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); |
882 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 874 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
883 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 875 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
884 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 876 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
885 | 877 |
886 // Next frame: #4. | 878 // Next frame: #4. |
887 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 879 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
888 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 880 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
889 SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx); | 881 SetExpectedValues3<int>(0, 0, 255, expected_temporal_idx); |
890 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 882 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
891 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 883 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
892 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 884 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
893 | 885 |
894 // Next frame: #5. | 886 // Next frame: #5. |
895 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 887 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
896 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 888 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
897 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); | 889 SetExpectedValues3<int>(2, 1, 255, expected_temporal_idx); |
898 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); | 890 SetExpectedValues3<bool>(false, false, false, expected_layer_sync); |
899 VerifyTemporalIdxAndSyncForAllSpatialLayers( | 891 VerifyTemporalIdxAndSyncForAllSpatialLayers( |
900 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); | 892 &encoder_callback, expected_temporal_idx, expected_layer_sync, 3); |
901 } | 893 } |
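The 255 in the third-stream expectations above is the unsigned wrap described in the comment preceding this test; as a self-contained check:

    #include <cstdint>
    static_assert(static_cast<uint8_t>(-1) == 255,
                  "kNoTemporalIdx (-1) stored in a uint8_t reads back as 255");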
902 | 894 |
903 void TestStrideEncodeDecode() { | 895 void TestStrideEncodeDecode() { |
904 Vp8TestEncodedImageCallback encoder_callback; | 896 Vp8TestEncodedImageCallback encoder_callback; |
905 Vp8TestDecodedImageCallback decoder_callback; | 897 Vp8TestDecodedImageCallback decoder_callback; |
906 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); | 898 encoder_->RegisterEncodeCompleteCallback(&encoder_callback); |
907 decoder_->RegisterDecodeCompleteCallback(&decoder_callback); | 899 decoder_->RegisterDecodeCompleteCallback(&decoder_callback); |
908 | 900 |
909 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. | 901 encoder_->SetRates(kMaxBitrates[2], 30); // To get all three streams. |
910 // Setting two (possibly) problematic use cases for stride: | 902 // Setting two (possibly) problematic use cases for stride: |
911 // 1. stride > width 2. stride_uv != stride_y/2 | 903 // 1. stride > width 2. stride_uv != stride_y/2 |
912 int stride_y = kDefaultWidth + 20; | 904 int stride_y = kDefaultWidth + 20; |
913 int stride_uv = ((kDefaultWidth + 1) / 2) + 5; | 905 int stride_uv = ((kDefaultWidth + 1) / 2) + 5; |
914 input_frame_.CreateEmptyFrame(kDefaultWidth, kDefaultHeight, stride_y, | 906 input_buffer_ = I420Buffer::Create(kDefaultWidth, kDefaultHeight, stride_y, |
915 stride_uv, stride_uv); | 907 stride_uv, stride_uv); |
| 908 input_frame_.reset( |
| 909 new VideoFrame(input_buffer_, 0, 0, webrtc::kVideoRotation_0)); |
| 910 |
916 // Set color. | 911 // Set color. |
917 int plane_offset[kNumOfPlanes]; | 912 int plane_offset[kNumOfPlanes]; |
918 plane_offset[kYPlane] = kColorY; | 913 plane_offset[kYPlane] = kColorY; |
919 plane_offset[kUPlane] = kColorU; | 914 plane_offset[kUPlane] = kColorU; |
920 plane_offset[kVPlane] = kColorV; | 915 plane_offset[kVPlane] = kColorV; |
921 CreateImage(input_frame_.video_frame_buffer(), plane_offset); | 916 CreateImage(input_buffer_, plane_offset); |
922 | 917 |
923 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 918 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
924 | 919 |
925 // Change color. | 920 // Change color. |
926 plane_offset[kYPlane] += 1; | 921 plane_offset[kYPlane] += 1; |
927 plane_offset[kUPlane] += 1; | 922 plane_offset[kUPlane] += 1; |
928 plane_offset[kVPlane] += 1; | 923 plane_offset[kVPlane] += 1; |
929 CreateImage(input_frame_.video_frame_buffer(), plane_offset); | 924 CreateImage(input_buffer_, plane_offset); |
930 input_frame_.set_timestamp(input_frame_.timestamp() + 3000); | 925 input_frame_->set_timestamp(input_frame_->timestamp() + 3000); |
931 EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, NULL)); | 926 EXPECT_EQ(0, encoder_->Encode(*input_frame_, NULL, NULL)); |
932 | 927 |
933 EncodedImage encoded_frame; | 928 EncodedImage encoded_frame; |
934 // Only encoding one frame - so will be a key frame. | 929 // Only encoding one frame - so will be a key frame. |
935 encoder_callback.GetLastEncodedKeyFrame(&encoded_frame); | 930 encoder_callback.GetLastEncodedKeyFrame(&encoded_frame); |
936 EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL)); | 931 EXPECT_EQ(0, decoder_->Decode(encoded_frame, false, NULL)); |
937 encoder_callback.GetLastEncodedFrame(&encoded_frame); | 932 encoder_callback.GetLastEncodedFrame(&encoded_frame); |
938 decoder_->Decode(encoded_frame, false, NULL); | 933 decoder_->Decode(encoded_frame, false, NULL); |
939 EXPECT_EQ(2, decoder_callback.DecodedFrames()); | 934 EXPECT_EQ(2, decoder_callback.DecodedFrames()); |
940 } | 935 } |
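For concreteness, assuming kDefaultWidth is 1280 (its actual value is defined in an elided part of this file), the two stride conditions work out to:

    int width = 1280;                     // assumed kDefaultWidth, illustration only
    int stride_y = width + 20;            // 1300 > width            (case 1)
    int stride_uv = (width + 1) / 2 + 5;  // 645 != 1300 / 2 = 650   (case 2)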
941 | 936 |
(...skipping 19 matching lines...)
961 } | 956 } |
962 ++stream; | 957 ++stream; |
963 } | 958 } |
964 } | 959 } |
965 | 960 |
966 std::unique_ptr<VP8Encoder> encoder_; | 961 std::unique_ptr<VP8Encoder> encoder_; |
967 MockEncodedImageCallback encoder_callback_; | 962 MockEncodedImageCallback encoder_callback_; |
968 std::unique_ptr<VP8Decoder> decoder_; | 963 std::unique_ptr<VP8Decoder> decoder_; |
969 MockDecodedImageCallback decoder_callback_; | 964 MockDecodedImageCallback decoder_callback_; |
970 VideoCodec settings_; | 965 VideoCodec settings_; |
971 VideoFrame input_frame_; | 966 rtc::scoped_refptr<I420Buffer> input_buffer_; |
| 967 std::unique_ptr<VideoFrame> input_frame_; |
972 }; | 968 }; |
973 | 969 |
974 } // namespace testing | 970 } // namespace testing |
975 } // namespace webrtc | 971 } // namespace webrtc |
976 | 972 |
977 #endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_ | 973 #endif // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_ |