Chromium Code Reviews
Diff: webrtc/modules/video_coding/codecs/vp8/simulcast_unittest.h

Issue 1415693002: Remove VideoFrameType aliases for FrameType. (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: rebase (created 5 years, 2 months ago)
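
For readers skimming the change: throughout this test, call sites switch from the old VideoFrameType alias names (kKeyFrame, kDeltaFrame) to the FrameType enumerators (kVideoFrameKey, kVideoFrameDelta); the re-wrapped std::vector<FrameType> initializers are only line-length reflow caused by the longer names. A minimal standalone sketch of that kind of alias removal follows; the enum here is a stand-in for illustration, not the real webrtc::FrameType definition.

#include <vector>

// Stand-in enum; the real webrtc::FrameType has additional enumerators.
enum FrameType {
  kVideoFrameKey,
  kVideoFrameDelta,
};

// Legacy aliases that a CL like this one deletes; once they are gone,
// call sites must name the FrameType enumerators directly.
// const FrameType kKeyFrame = kVideoFrameKey;      // removed
// const FrameType kDeltaFrame = kVideoFrameDelta;  // removed

int main() {
  // Updated call-site pattern, mirroring the test code in the diff below.
  std::vector<FrameType> frame_types(3, kVideoFrameDelta);
  frame_types[0] = kVideoFrameKey;
  return frame_types[0] == kVideoFrameKey ? 0 : 1;
}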
 /*
  *  Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
  *
  *  Use of this source code is governed by a BSD-style license
  *  that can be found in the LICENSE file in the root of the source
  *  tree. An additional intellectual property rights grant can be found
  *  in the file PATENTS.  All contributing project authors may
  *  be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 52 matching lines...)
   ~Vp8TestEncodedImageCallback() {
     delete [] encoded_key_frame_._buffer;
     delete [] encoded_frame_._buffer;
   }

   virtual int32_t Encoded(const EncodedImage& encoded_image,
                           const CodecSpecificInfo* codec_specific_info,
                           const RTPFragmentationHeader* fragmentation) {
     // Only store the base layer.
     if (codec_specific_info->codecSpecific.VP8.simulcastIdx == 0) {
-      if (encoded_image._frameType == kKeyFrame) {
+      if (encoded_image._frameType == kVideoFrameKey) {
         delete [] encoded_key_frame_._buffer;
         encoded_key_frame_._buffer = new uint8_t[encoded_image._size];
         encoded_key_frame_._size = encoded_image._size;
         encoded_key_frame_._length = encoded_image._length;
-        encoded_key_frame_._frameType = kKeyFrame;
+        encoded_key_frame_._frameType = kVideoFrameKey;
         encoded_key_frame_._completeFrame = encoded_image._completeFrame;
         memcpy(encoded_key_frame_._buffer,
                encoded_image._buffer,
                encoded_image._length);
       } else {
         delete [] encoded_frame_._buffer;
         encoded_frame_._buffer = new uint8_t[encoded_image._size];
         encoded_frame_._size = encoded_image._size;
         encoded_frame_._length = encoded_image._length;
         memcpy(encoded_frame_._buffer,
(...skipping 293 matching lines...)
                                                 &layer_sync, i);
       EXPECT_EQ(expected_temporal_idx[i], temporal_layer);
       EXPECT_EQ(expected_layer_sync[i], layer_sync);
     }
   }

   // We currently expect all active streams to generate a key frame even though
   // a key frame was only requested for some of them.
   void TestKeyFrameRequestsOnAllStreams() {
     encoder_->SetRates(kMaxBitrates[2], 30);  // To get all three streams.
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    ExpectStreams(kDeltaFrame, kNumberOfSimulcastStreams);
+    ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    frame_types[0] = kKeyFrame;
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    frame_types[0] = kVideoFrameKey;
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    std::fill(frame_types.begin(), frame_types.end(), kDeltaFrame);
-    frame_types[1] = kKeyFrame;
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+    frame_types[1] = kVideoFrameKey;
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    std::fill(frame_types.begin(), frame_types.end(), kDeltaFrame);
-    frame_types[2] = kKeyFrame;
-    ExpectStreams(kKeyFrame, kNumberOfSimulcastStreams);
+    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+    frame_types[2] = kVideoFrameKey;
+    ExpectStreams(kVideoFrameKey, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    std::fill(frame_types.begin(), frame_types.end(), kDeltaFrame);
-    ExpectStreams(kDeltaFrame, kNumberOfSimulcastStreams);
+    std::fill(frame_types.begin(), frame_types.end(), kVideoFrameDelta);
+    ExpectStreams(kVideoFrameDelta, kNumberOfSimulcastStreams);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestPaddingAllStreams() {
     // We should always encode the base layer.
     encoder_->SetRates(kMinBitrates[0] - 1, 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 1);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestPaddingTwoStreams() {
     // We have just enough to get only the first stream and padding for two.
     encoder_->SetRates(kMinBitrates[0], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 1);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestPaddingTwoStreamsOneMaxedOut() {
     // We are just below limit of sending second stream, so we should get
     // the first stream maxed out (at |maxBitrate|), and padding for two.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] - 1, 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 1);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 1);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestPaddingOneStream() {
     // We have just enough to send two streams, so padding for one stream.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 2);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 2);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    ExpectStreams(kDeltaFrame, 2);
+    ExpectStreams(kVideoFrameDelta, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestPaddingOneStreamTwoMaxedOut() {
     // We are just below limit of sending third stream, so we should get
     // first stream's rate maxed out at |targetBitrate|, second at |maxBitrate|.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] - 1, 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 2);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 2);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    ExpectStreams(kDeltaFrame, 2);
+    ExpectStreams(kVideoFrameDelta, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestSendAllStreams() {
     // We have just enough to send all streams.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 3);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 3);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    ExpectStreams(kDeltaFrame, 3);
+    ExpectStreams(kVideoFrameDelta, 3);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void TestDisablingStreams() {
     // We should get three media streams.
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1] +
                        kMaxBitrates[2], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    ExpectStreams(kKeyFrame, 3);
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    ExpectStreams(kVideoFrameKey, 3);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

-    ExpectStreams(kDeltaFrame, 3);
+    ExpectStreams(kVideoFrameDelta, 3);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // We should only get two streams and padding for one.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] / 2, 30);
-    ExpectStreams(kDeltaFrame, 2);
+    ExpectStreams(kVideoFrameDelta, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // We should only get the first stream and padding for two.
     encoder_->SetRates(kTargetBitrates[0] + kMinBitrates[1] / 2, 30);
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // We don't have enough bitrate for the thumbnail stream, but we should get
     // it anyway with current configuration.
     encoder_->SetRates(kTargetBitrates[0] - 1, 30);
-    ExpectStreams(kDeltaFrame, 1);
+    ExpectStreams(kVideoFrameDelta, 1);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // We should only get two streams and padding for one.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kMinBitrates[2] / 2, 30);
     // We get a key frame because a new stream is being enabled.
-    ExpectStreams(kKeyFrame, 2);
+    ExpectStreams(kVideoFrameKey, 2);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // We should get all three streams.
     encoder_->SetRates(kTargetBitrates[0] + kTargetBitrates[1] +
                        kTargetBitrates[2], 30);
     // We get a key frame because a new stream is being enabled.
-    ExpectStreams(kKeyFrame, 3);
+    ExpectStreams(kVideoFrameKey, 3);
     input_frame_.set_timestamp(input_frame_.timestamp() + 3000);
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));
   }

   void SwitchingToOneStream(int width, int height) {
     // Disable all streams except the last and set the bitrate of the last to
     // 100 kbps. This verifies the way GTP switches to screenshare mode.
     settings_.codecSpecific.VP8.numberOfTemporalLayers = 1;
     settings_.maxBitrate = 100;
     settings_.startBitrate = 100;
(...skipping 20 matching lines...)
         maxBitrate = 0;
     // The highest layer has to correspond to the non-simulcast resolution.
     settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
         width = settings_.width;
     settings_.simulcastStream[settings_.numberOfSimulcastStreams - 1].
         height = settings_.height;
     EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));

     // Encode one frame and verify.
     encoder_->SetRates(kMaxBitrates[0] + kMaxBitrates[1], 30);
-    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams, kDeltaFrame);
-    EXPECT_CALL(encoder_callback_, Encoded(
-        AllOf(Field(&EncodedImage::_frameType, kKeyFrame),
-              Field(&EncodedImage::_encodedWidth, width),
-              Field(&EncodedImage::_encodedHeight, height)), _, _))
+    std::vector<FrameType> frame_types(kNumberOfSimulcastStreams,
+                                       kVideoFrameDelta);
+    EXPECT_CALL(encoder_callback_,
+                Encoded(AllOf(Field(&EncodedImage::_frameType, kVideoFrameKey),
+                              Field(&EncodedImage::_encodedWidth, width),
+                              Field(&EncodedImage::_encodedHeight, height)),
+                        _, _))
         .Times(1)
         .WillRepeatedly(Return(0));
     EXPECT_EQ(0, encoder_->Encode(input_frame_, NULL, &frame_types));

     // Switch back.
     DefaultSettings(&settings_, kDefaultTemporalLayerProfile);
     // Start at the lowest bitrate for enabling base stream.
     settings_.startBitrate = kMinBitrates[0];
     EXPECT_EQ(0, encoder_->InitEncode(&settings_, 1, 1200));
     encoder_->SetRates(settings_.startBitrate, 30);
-    ExpectStreams(kKeyFrame, 1);
+    ExpectStreams(kVideoFrameKey, 1);
     // Resize |input_frame_| to the new resolution.
     half_width = (settings_.width + 1) / 2;
     input_frame_.CreateEmptyFrame(settings_.width, settings_.height,
                                   settings_.width, half_width, half_width);
     memset(input_frame_.buffer(kYPlane), 0,
            input_frame_.allocated_size(kYPlane));
     memset(input_frame_.buffer(kUPlane), 0,
            input_frame_.allocated_size(kUPlane));
     memset(input_frame_.buffer(kVPlane), 0,
            input_frame_.allocated_size(kVPlane));
(...skipping 371 matching lines...)
   rtc::scoped_ptr<VP8Decoder> decoder_;
   MockDecodedImageCallback decoder_callback_;
   VideoCodec settings_;
   VideoFrame input_frame_;
 };

 }  // namespace testing
 }  // namespace webrtc

 #endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_VP8_SIMULCAST_UNITTEST_H_
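
The ExpectStreams(frame_type, expected_video_streams) helper used throughout the tests is not visible in the expanded hunks; it is presumably defined in one of the collapsed regions. Judging from the EXPECT_CALL in SwitchingToOneStream, it sets gmock expectations on encoder_callback_. A hedged sketch of that idea, written as a member of the same test fixture, follows; the body is an illustrative simplification, not the file's actual implementation, which likely also matches per-stream resolutions.

  // Hypothetical sketch: expect |expected_video_streams| encoded images of
  // the given frame type to reach the callback mock. The real helper in the
  // collapsed lines presumably adds width/height matchers per simulcast layer.
  void ExpectStreams(FrameType frame_type, int expected_video_streams) {
    ASSERT_GE(expected_video_streams, 0);
    ASSERT_LE(expected_video_streams, kNumberOfSimulcastStreams);
    EXPECT_CALL(encoder_callback_,
                Encoded(Field(&EncodedImage::_frameType, frame_type), _, _))
        .Times(expected_video_streams)
        .WillRepeatedly(Return(0));
  }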