Chromium Code Reviews

Side by Side Diff: webrtc/media/base/videoframe_unittest.h

Issue 1921493004: Revert of Delete cricket::VideoFrame methods GetYPlane and GetYPitch. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 4 years, 7 months ago
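
For orientation: this revert switches the test helpers in videoframe_unittest.h back from the webrtc::VideoFrameBuffer accessors (the OLD column below) to the cricket::VideoFrame plane getters that the original CL had deleted (the NEW column). A minimal sketch of the two access patterns, assembled only from calls that appear in this diff and shown purely for illustration:

    // Base revision (OLD column): plane access goes through the frame buffer.
    int stride_y = frame->video_frame_buffer()->StrideY();
    const uint8_t* data_y = frame->video_frame_buffer()->DataY();
    uint8_t* mutable_y = frame->video_frame_buffer()->MutableDataY();

    // With this revert applied (NEW column): the direct getters are back.
    int pitch_y = frame->GetYPitch();
    uint8_t* plane_y = frame->GetYPlane();  // U/V variants: GetUPlane()/GetVPlane(), GetUPitch()/GetVPitch().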
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 249 matching lines...)
260 uint32_t height, 260 uint32_t height,
261 T* frame) { 261 T* frame) {
262 int y1_pos, y2_pos, u_pos, v_pos; 262 int y1_pos, y2_pos, u_pos, v_pos;
263 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) { 263 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) {
264 return false; 264 return false;
265 } 265 }
266 266
267 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); 267 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer());
268 int awidth = (width + 1) & ~1; 268 int awidth = (width + 1) & ~1;
269 frame->InitToBlack(width, height, 0); 269 frame->InitToBlack(width, height, 0);
270 int stride_y = frame->video_frame_buffer()->StrideY(); 270 int stride_y = frame->GetYPitch();
271 int stride_u = frame->video_frame_buffer()->StrideU(); 271 int stride_u = frame->GetUPitch();
272 int stride_v = frame->video_frame_buffer()->StrideV(); 272 int stride_v = frame->GetVPitch();
273 uint8_t* plane_y = frame->video_frame_buffer()->MutableDataY();
274 uint8_t* plane_u = frame->video_frame_buffer()->MutableDataU();
275 uint8_t* plane_v = frame->video_frame_buffer()->MutableDataV();
276 for (uint32_t y = 0; y < height; ++y) { 273 for (uint32_t y = 0; y < height; ++y) {
277 for (uint32_t x = 0; x < width; x += 2) { 274 for (uint32_t x = 0; x < width; x += 2) {
278 const uint8_t* quad1 = start + (y * awidth + x) * 2; 275 const uint8_t* quad1 = start + (y * awidth + x) * 2;
279 plane_y[stride_y * y + x] = quad1[y1_pos]; 276 frame->GetYPlane()[stride_y * y + x] = quad1[y1_pos];
280 if ((x + 1) < width) { 277 if ((x + 1) < width) {
281 plane_y[stride_y * y + x + 1] = quad1[y2_pos]; 278 frame->GetYPlane()[stride_y * y + x + 1] = quad1[y2_pos];
282 } 279 }
283 if ((y & 1) == 0) { 280 if ((y & 1) == 0) {
284 const uint8_t* quad2 = quad1 + awidth * 2; 281 const uint8_t* quad2 = quad1 + awidth * 2;
285 if ((y + 1) >= height) { 282 if ((y + 1) >= height) {
286 quad2 = quad1; 283 quad2 = quad1;
287 } 284 }
288 plane_u[stride_u * (y / 2) + x / 2] = 285 frame->GetUPlane()[stride_u * (y / 2) + x / 2] =
289 (quad1[u_pos] + quad2[u_pos] + 1) / 2; 286 (quad1[u_pos] + quad2[u_pos] + 1) / 2;
290 plane_v[stride_v * (y / 2) + x / 2] = 287 frame->GetVPlane()[stride_v * (y / 2) + x / 2] =
291 (quad1[v_pos] + quad2[v_pos] + 1) / 2; 288 (quad1[v_pos] + quad2[v_pos] + 1) / 2;
292 } 289 }
293 } 290 }
294 } 291 }
295 return true; 292 return true;
296 } 293 }
297 294
298 // Convert RGB to 420. 295 // Convert RGB to 420.
299 // A negative height inverts the image. 296 // A negative height inverts the image.
300 bool ConvertRgb(const rtc::MemoryStream* ms, 297 bool ConvertRgb(const rtc::MemoryStream* ms,
301 uint32_t fourcc, 298 uint32_t fourcc,
302 int32_t width, 299 int32_t width,
303 int32_t height, 300 int32_t height,
304 T* frame) { 301 T* frame) {
305 int r_pos, g_pos, b_pos, bytes; 302 int r_pos, g_pos, b_pos, bytes;
306 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) { 303 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) {
307 return false; 304 return false;
308 } 305 }
309 int pitch = width * bytes; 306 int pitch = width * bytes;
310 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); 307 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer());
311 if (height < 0) { 308 if (height < 0) {
312 height = -height; 309 height = -height;
313 start = start + pitch * (height - 1); 310 start = start + pitch * (height - 1);
314 pitch = -pitch; 311 pitch = -pitch;
315 } 312 }
316 frame->InitToBlack(width, height, 0); 313 frame->InitToBlack(width, height, 0);
317 int stride_y = frame->video_frame_buffer()->StrideY(); 314 int stride_y = frame->GetYPitch();
318 int stride_u = frame->video_frame_buffer()->StrideU(); 315 int stride_u = frame->GetUPitch();
319 int stride_v = frame->video_frame_buffer()->StrideV(); 316 int stride_v = frame->GetVPitch();
320 uint8_t* plane_y = frame->video_frame_buffer()->MutableDataY();
321 uint8_t* plane_u = frame->video_frame_buffer()->MutableDataU();
322 uint8_t* plane_v = frame->video_frame_buffer()->MutableDataV();
323 for (int32_t y = 0; y < height; y += 2) { 317 for (int32_t y = 0; y < height; y += 2) {
324 for (int32_t x = 0; x < width; x += 2) { 318 for (int32_t x = 0; x < width; x += 2) {
325 const uint8_t* rgb[4]; 319 const uint8_t* rgb[4];
326 uint8_t yuv[4][3]; 320 uint8_t yuv[4][3];
327 rgb[0] = start + y * pitch + x * bytes; 321 rgb[0] = start + y * pitch + x * bytes;
328 rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0); 322 rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0);
329 rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0); 323 rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0);
330 rgb[3] = rgb[2] + ((x + 1) < width ? bytes : 0); 324 rgb[3] = rgb[2] + ((x + 1) < width ? bytes : 0);
331 for (size_t i = 0; i < 4; ++i) { 325 for (size_t i = 0; i < 4; ++i) {
332 ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos], 326 ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos],
333 &yuv[i][0], &yuv[i][1], &yuv[i][2]); 327 &yuv[i][0], &yuv[i][1], &yuv[i][2]);
334 } 328 }
335 plane_y[stride_y * y + x] = yuv[0][0]; 329 frame->GetYPlane()[stride_y * y + x] = yuv[0][0];
336 if ((x + 1) < width) { 330 if ((x + 1) < width) {
337 plane_y[stride_y * y + x + 1] = yuv[1][0]; 331 frame->GetYPlane()[stride_y * y + x + 1] = yuv[1][0];
338 } 332 }
339 if ((y + 1) < height) { 333 if ((y + 1) < height) {
340 plane_y[stride_y * (y + 1) + x] = yuv[2][0]; 334 frame->GetYPlane()[stride_y * (y + 1) + x] = yuv[2][0];
341 if ((x + 1) < width) { 335 if ((x + 1) < width) {
342 plane_y[stride_y * (y + 1) + x + 1] = yuv[3][0]; 336 frame->GetYPlane()[stride_y * (y + 1) + x + 1] = yuv[3][0];
343 } 337 }
344 } 338 }
345 plane_u[stride_u * (y / 2) + x / 2] = 339 frame->GetUPlane()[stride_u * (y / 2) + x / 2] =
346 (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4; 340 (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4;
347 plane_v[stride_v * (y / 2) + x / 2] = 341 frame->GetVPlane()[stride_v * (y / 2) + x / 2] =
348 (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4; 342 (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4;
349 } 343 }
350 } 344 }
351 return true; 345 return true;
352 } 346 }
353 347
354 // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia. 348 // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia.
355 void ConvertRgbPixel(uint8_t r, 349 void ConvertRgbPixel(uint8_t r,
356 uint8_t g, 350 uint8_t g,
357 uint8_t b, 351 uint8_t b,
(...skipping 36 matching lines...)
394 } else if (fourcc == cricket::FOURCC_ARGB) { 388 } else if (fourcc == cricket::FOURCC_ARGB) {
395 *r_pos = 2; *g_pos = 1; *b_pos = 0; *bytes = 4; // BGRA in memory. 389 *r_pos = 2; *g_pos = 1; *b_pos = 0; *bytes = 4; // BGRA in memory.
396 } else { 390 } else {
397 return false; 391 return false;
398 } 392 }
399 return true; 393 return true;
400 } 394 }
401 395
402 // Comparison functions for testing. 396 // Comparison functions for testing.
403 static bool IsNull(const cricket::VideoFrame& frame) { 397 static bool IsNull(const cricket::VideoFrame& frame) {
404 return !frame.video_frame_buffer(); 398 return !frame.GetYPlane();
405 } 399 }
406 400
407 static bool IsSize(const cricket::VideoFrame& frame, 401 static bool IsSize(const cricket::VideoFrame& frame,
408 int width, 402 int width,
409 int height) { 403 int height) {
410 return !IsNull(frame) && frame.video_frame_buffer()->StrideY() >= width && 404 return !IsNull(frame) && frame.GetYPitch() >= width &&
411 frame.video_frame_buffer()->StrideU() >= width / 2 && 405 frame.GetUPitch() >= width / 2 &&
412 frame.video_frame_buffer()->StrideV() >= width / 2 && 406 frame.GetVPitch() >= width / 2 &&
413 frame.width() == width && frame.height() == height; 407 frame.width() == width && frame.height() == height;
414 } 408 }
415 409
416 static bool IsPlaneEqual(const std::string& name, 410 static bool IsPlaneEqual(const std::string& name,
417 const uint8_t* plane1, 411 const uint8_t* plane1,
418 uint32_t pitch1, 412 uint32_t pitch1,
419 const uint8_t* plane2, 413 const uint8_t* plane2,
420 uint32_t pitch2, 414 uint32_t pitch2,
421 uint32_t width, 415 uint32_t width,
422 uint32_t height, 416 uint32_t height,
(...skipping 20 matching lines...)
443 int width, 437 int width,
444 int height, 438 int height,
445 int64_t time_stamp, 439 int64_t time_stamp,
446 const uint8_t* y, 440 const uint8_t* y,
447 uint32_t ypitch, 441 uint32_t ypitch,
448 const uint8_t* u, 442 const uint8_t* u,
449 uint32_t upitch, 443 uint32_t upitch,
450 const uint8_t* v, 444 const uint8_t* v,
451 uint32_t vpitch, 445 uint32_t vpitch,
452 int max_error) { 446 int max_error) {
453 return IsSize(frame, width, height) && frame.GetTimeStamp() == time_stamp && 447 return IsSize(frame, width, height) &&
454 IsPlaneEqual("y", frame.video_frame_buffer()->DataY(), 448 frame.GetTimeStamp() == time_stamp &&
455 frame.video_frame_buffer()->StrideY(), y, ypitch, 449 IsPlaneEqual("y", frame.GetYPlane(), frame.GetYPitch(), y, ypitch,
456 static_cast<uint32_t>(width), 450 static_cast<uint32_t>(width),
457 static_cast<uint32_t>(height), max_error) && 451 static_cast<uint32_t>(height), max_error) &&
458 IsPlaneEqual("u", frame.video_frame_buffer()->DataU(), 452 IsPlaneEqual("u", frame.GetUPlane(), frame.GetUPitch(), u, upitch,
459 frame.video_frame_buffer()->StrideU(), u, upitch,
460 static_cast<uint32_t>((width + 1) / 2), 453 static_cast<uint32_t>((width + 1) / 2),
461 static_cast<uint32_t>((height + 1) / 2), max_error) && 454 static_cast<uint32_t>((height + 1) / 2), max_error) &&
462 IsPlaneEqual("v", frame.video_frame_buffer()->DataV(), 455 IsPlaneEqual("v", frame.GetVPlane(), frame.GetVPitch(), v, vpitch,
463 frame.video_frame_buffer()->StrideV(), v, vpitch,
464 static_cast<uint32_t>((width + 1) / 2), 456 static_cast<uint32_t>((width + 1) / 2),
465 static_cast<uint32_t>((height + 1) / 2), max_error); 457 static_cast<uint32_t>((height + 1) / 2), max_error);
466 } 458 }
467 459
468 static bool IsEqual(const cricket::VideoFrame& frame1, 460 static bool IsEqual(const cricket::VideoFrame& frame1,
469 const cricket::VideoFrame& frame2, 461 const cricket::VideoFrame& frame2,
470 int max_error) { 462 int max_error) {
471 return IsEqual(frame1, 463 return IsEqual(frame1,
472 frame2.width(), frame2.height(), 464 frame2.width(), frame2.height(),
473 frame2.GetTimeStamp(), 465 frame2.GetTimeStamp(),
474 frame2.video_frame_buffer()->DataY(), 466 frame2.GetYPlane(), frame2.GetYPitch(),
475 frame2.video_frame_buffer()->StrideY(), 467 frame2.GetUPlane(), frame2.GetUPitch(),
476 frame2.video_frame_buffer()->DataU(), 468 frame2.GetVPlane(), frame2.GetVPitch(),
477 frame2.video_frame_buffer()->StrideU(),
478 frame2.video_frame_buffer()->DataV(),
479 frame2.video_frame_buffer()->StrideV(),
480 max_error); 469 max_error);
481 } 470 }
482 471
483 static bool IsEqualWithCrop(const cricket::VideoFrame& frame1, 472 static bool IsEqualWithCrop(const cricket::VideoFrame& frame1,
484 const cricket::VideoFrame& frame2, 473 const cricket::VideoFrame& frame2,
485 int hcrop, int vcrop, int max_error) { 474 int hcrop, int vcrop, int max_error) {
486 return frame1.width() <= frame2.width() && 475 return frame1.width() <= frame2.width() &&
487 frame1.height() <= frame2.height() && 476 frame1.height() <= frame2.height() &&
488 IsEqual(frame1, 477 IsEqual(frame1,
489 frame2.width() - hcrop * 2, 478 frame2.width() - hcrop * 2,
490 frame2.height() - vcrop * 2, 479 frame2.height() - vcrop * 2,
491 frame2.GetTimeStamp(), 480 frame2.GetTimeStamp(),
492 frame2.video_frame_buffer()->DataY() 481 frame2.GetYPlane() + vcrop * frame2.GetYPitch()
493 + vcrop * frame2.video_frame_buffer()->StrideY()
494 + hcrop, 482 + hcrop,
495 frame2.video_frame_buffer()->StrideY(), 483 frame2.GetYPitch(),
496 frame2.video_frame_buffer()->DataU() 484 frame2.GetUPlane() + vcrop * frame2.GetUPitch() / 2
497 + vcrop * frame2.video_frame_buffer()->StrideU() / 2
498 + hcrop / 2, 485 + hcrop / 2,
499 frame2.video_frame_buffer()->StrideU(), 486 frame2.GetUPitch(),
500 frame2.video_frame_buffer()->DataV() 487 frame2.GetVPlane() + vcrop * frame2.GetVPitch() / 2
501 + vcrop * frame2.video_frame_buffer()->StrideV() / 2
502 + hcrop / 2, 488 + hcrop / 2,
503 frame2.video_frame_buffer()->StrideV(), 489 frame2.GetVPitch(),
504 max_error); 490 max_error);
505 } 491 }
506 492
507 static bool IsBlack(const cricket::VideoFrame& frame) { 493 static bool IsBlack(const cricket::VideoFrame& frame) {
508 return !IsNull(frame) && 494 return !IsNull(frame) &&
509 *frame.video_frame_buffer()->DataY() == 16 && 495 *frame.GetYPlane() == 16 &&
510 *frame.video_frame_buffer()->DataU() == 128 && 496 *frame.GetUPlane() == 128 &&
511 *frame.video_frame_buffer()->DataV() == 128; 497 *frame.GetVPlane() == 128;
512 } 498 }
513 499
514 //////////////////////// 500 ////////////////////////
515 // Construction tests // 501 // Construction tests //
516 //////////////////////// 502 ////////////////////////
517 503
518 // Test constructing an image from a I420 buffer. 504 // Test constructing an image from a I420 buffer.
519 void ConstructI420() { 505 void ConstructI420() {
520 T frame; 506 T frame;
521 EXPECT_TRUE(IsNull(frame)); 507 EXPECT_TRUE(IsNull(frame));
(...skipping 26 matching lines...)
548 534
549 // Test constructing an image from a I422 buffer. 535 // Test constructing an image from a I422 buffer.
550 void ConstructI422() { 536 void ConstructI422() {
551 T frame1, frame2; 537 T frame1, frame2;
552 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 538 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
553 size_t buf_size = kWidth * kHeight * 2; 539 size_t buf_size = kWidth * kHeight * 2;
554 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]); 540 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]);
555 uint8_t* y = ALIGNP(buf.get(), kAlignment); 541 uint8_t* y = ALIGNP(buf.get(), kAlignment);
556 uint8_t* u = y + kWidth * kHeight; 542 uint8_t* u = y + kWidth * kHeight;
557 uint8_t* v = u + (kWidth / 2) * kHeight; 543 uint8_t* v = u + (kWidth / 2) * kHeight;
558 EXPECT_EQ(0, libyuv::I420ToI422(frame1.video_frame_buffer()->DataY(), 544 EXPECT_EQ(0, libyuv::I420ToI422(frame1.GetYPlane(), frame1.GetYPitch(),
559 frame1.video_frame_buffer()->StrideY(), 545 frame1.GetUPlane(), frame1.GetUPitch(),
560 frame1.video_frame_buffer()->DataU(), 546 frame1.GetVPlane(), frame1.GetVPitch(),
561 frame1.video_frame_buffer()->StrideU(),
562 frame1.video_frame_buffer()->DataV(),
563 frame1.video_frame_buffer()->StrideV(),
564 y, kWidth, 547 y, kWidth,
565 u, kWidth / 2, 548 u, kWidth / 2,
566 v, kWidth / 2, 549 v, kWidth / 2,
567 kWidth, kHeight)); 550 kWidth, kHeight));
568 EXPECT_TRUE(LoadFrame(y, buf_size, cricket::FOURCC_I422, 551 EXPECT_TRUE(LoadFrame(y, buf_size, cricket::FOURCC_I422,
569 kWidth, kHeight, &frame2)); 552 kWidth, kHeight, &frame2));
570 EXPECT_TRUE(IsEqual(frame1, frame2, 1)); 553 EXPECT_TRUE(IsEqual(frame1, frame2, 1));
571 } 554 }
572 555
573 // Test constructing an image from a YUY2 buffer. 556 // Test constructing an image from a YUY2 buffer.
574 void ConstructYuy2() { 557 void ConstructYuy2() {
575 T frame1, frame2; 558 T frame1, frame2;
576 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 559 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
577 size_t buf_size = kWidth * kHeight * 2; 560 size_t buf_size = kWidth * kHeight * 2;
578 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]); 561 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]);
579 uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment); 562 uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment);
580 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.video_frame_buffer()->DataY(), 563 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.GetYPlane(), frame1.GetYPitch(),
581 frame1.video_frame_buffer()->StrideY(), 564 frame1.GetUPlane(), frame1.GetUPitch(),
582 frame1.video_frame_buffer()->DataU(), 565 frame1.GetVPlane(), frame1.GetVPitch(),
583 frame1.video_frame_buffer()->StrideU(),
584 frame1.video_frame_buffer()->DataV(),
585 frame1.video_frame_buffer()->StrideV(),
586 yuy2, kWidth * 2, 566 yuy2, kWidth * 2,
587 kWidth, kHeight)); 567 kWidth, kHeight));
588 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, 568 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2,
589 kWidth, kHeight, &frame2)); 569 kWidth, kHeight, &frame2));
590 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); 570 EXPECT_TRUE(IsEqual(frame1, frame2, 0));
591 } 571 }
592 572
593 // Test constructing an image from a YUY2 buffer with buffer unaligned. 573 // Test constructing an image from a YUY2 buffer with buffer unaligned.
594 void ConstructYuy2Unaligned() { 574 void ConstructYuy2Unaligned() {
595 T frame1, frame2; 575 T frame1, frame2;
596 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 576 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
597 size_t buf_size = kWidth * kHeight * 2; 577 size_t buf_size = kWidth * kHeight * 2;
598 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment + 1]); 578 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment + 1]);
599 uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment) + 1; 579 uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment) + 1;
600 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.video_frame_buffer()->DataY(), 580 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.GetYPlane(), frame1.GetYPitch(),
601 frame1.video_frame_buffer()->StrideY(), 581 frame1.GetUPlane(), frame1.GetUPitch(),
602 frame1.video_frame_buffer()->DataU(), 582 frame1.GetVPlane(), frame1.GetVPitch(),
603 frame1.video_frame_buffer()->StrideU(),
604 frame1.video_frame_buffer()->DataV(),
605 frame1.video_frame_buffer()->StrideV(),
606 yuy2, kWidth * 2, 583 yuy2, kWidth * 2,
607 kWidth, kHeight)); 584 kWidth, kHeight));
608 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, 585 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2,
609 kWidth, kHeight, &frame2)); 586 kWidth, kHeight, &frame2));
610 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); 587 EXPECT_TRUE(IsEqual(frame1, frame2, 0));
611 } 588 }
612 589
613 // Test constructing an image from a wide YUY2 buffer. 590 // Test constructing an image from a wide YUY2 buffer.
614 // Normal is 1280x720. Wide is 12800x72 591 // Normal is 1280x720. Wide is 12800x72
615 void ConstructYuy2Wide() { 592 void ConstructYuy2Wide() {
(...skipping 192 matching lines...)
808 ASSERT_TRUE(ms.get() != NULL); \ 785 ASSERT_TRUE(ms.get() != NULL); \
809 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, \ 786 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, \
810 -kHeight, kWidth, kHeight, \ 787 -kHeight, kWidth, kHeight, \
811 webrtc::kVideoRotation_180, &frame1)); \ 788 webrtc::kVideoRotation_180, &frame1)); \
812 size_t data_size; \ 789 size_t data_size; \
813 bool ret = ms->GetSize(&data_size); \ 790 bool ret = ms->GetSize(&data_size); \
814 EXPECT_TRUE(ret); \ 791 EXPECT_TRUE(ret); \
815 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ 792 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \
816 kHeight, \ 793 kHeight, \
817 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ 794 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \
818 data_size, 0, webrtc::kVideoRotation_0)); \ 795 data_size, 0, webrtc::kVideoRotation_0)); \
819 int width_rotate = frame1.width(); \ 796 int width_rotate = frame1.width(); \
820 int height_rotate = frame1.height(); \ 797 int height_rotate = frame1.height(); \
821 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \ 798 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \
822 libyuv::I420Mirror(frame2.video_frame_buffer()->DataY(), \ 799 libyuv::I420Mirror( \
823 frame2.video_frame_buffer()->StrideY(), \ 800 frame2.GetYPlane(), frame2.GetYPitch(), frame2.GetUPlane(), \
824 frame2.video_frame_buffer()->DataU(), \ 801 frame2.GetUPitch(), frame2.GetVPlane(), frame2.GetVPitch(), \
825 frame2.video_frame_buffer()->StrideU(), \ 802 frame3.GetYPlane(), frame3.GetYPitch(), frame3.GetUPlane(), \
826 frame2.video_frame_buffer()->DataV(), \ 803 frame3.GetUPitch(), frame3.GetVPlane(), frame3.GetVPitch(), kWidth, \
827 frame2.video_frame_buffer()->StrideV(), \ 804 kHeight); \
828 frame3.video_frame_buffer()->MutableDataY(), \
829 frame3.video_frame_buffer()->StrideY(), \
830 frame3.video_frame_buffer()->MutableDataU(), \
831 frame3.video_frame_buffer()->StrideU(), \
832 frame3.video_frame_buffer()->MutableDataV(), \
833 frame3.video_frame_buffer()->StrideV(), \
834 kWidth, kHeight); \
835 EXPECT_TRUE(IsEqual(frame1, frame3, 0)); \ 805 EXPECT_TRUE(IsEqual(frame1, frame3, 0)); \
836 } 806 }
837 807
838 TEST_MIRROR(I420, 420) 808 TEST_MIRROR(I420, 420)
839 809
840 // Macro to help test different rotations 810 // Macro to help test different rotations
841 #define TEST_ROTATE(FOURCC, BPP, ROTATE) \ 811 #define TEST_ROTATE(FOURCC, BPP, ROTATE) \
842 void Construct##FOURCC##Rotate##ROTATE() { \ 812 void Construct##FOURCC##Rotate##ROTATE() { \
843 T frame1, frame2, frame3; \ 813 T frame1, frame2, frame3; \
844 std::unique_ptr<rtc::MemoryStream> ms( \ 814 std::unique_ptr<rtc::MemoryStream> ms( \
845 CreateYuvSample(kWidth, kHeight, BPP)); \ 815 CreateYuvSample(kWidth, kHeight, BPP)); \
846 ASSERT_TRUE(ms.get() != NULL); \ 816 ASSERT_TRUE(ms.get() != NULL); \
847 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, kHeight, \ 817 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, kHeight, \
848 kWidth, kHeight, webrtc::kVideoRotation_##ROTATE, \ 818 kWidth, kHeight, webrtc::kVideoRotation_##ROTATE, \
849 &frame1)); \ 819 &frame1)); \
850 size_t data_size; \ 820 size_t data_size; \
851 bool ret = ms->GetSize(&data_size); \ 821 bool ret = ms->GetSize(&data_size); \
852 EXPECT_TRUE(ret); \ 822 EXPECT_TRUE(ret); \
853 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ 823 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \
854 kHeight, \ 824 kHeight, \
855 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ 825 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \
856 data_size, 0, webrtc::kVideoRotation_0)); \ 826 data_size, 0, webrtc::kVideoRotation_0)); \
857 int width_rotate = frame1.width(); \ 827 int width_rotate = frame1.width(); \
858 int height_rotate = frame1.height(); \ 828 int height_rotate = frame1.height(); \
859 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \ 829 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \
860 libyuv::I420Rotate(frame2.video_frame_buffer()->DataY(), \ 830 libyuv::I420Rotate( \
861 frame2.video_frame_buffer()->StrideY(), \ 831 frame2.GetYPlane(), frame2.GetYPitch(), frame2.GetUPlane(), \
862 frame2.video_frame_buffer()->DataU(), \ 832 frame2.GetUPitch(), frame2.GetVPlane(), frame2.GetVPitch(), \
863 frame2.video_frame_buffer()->StrideU(), \ 833 frame3.GetYPlane(), frame3.GetYPitch(), frame3.GetUPlane(), \
864 frame2.video_frame_buffer()->DataV(), \ 834 frame3.GetUPitch(), frame3.GetVPlane(), frame3.GetVPitch(), kWidth, \
865 frame2.video_frame_buffer()->StrideV(), \ 835 kHeight, libyuv::kRotate##ROTATE); \
866 frame3.video_frame_buffer()->MutableDataY(), \
867 frame3.video_frame_buffer()->StrideY(), \
868 frame3.video_frame_buffer()->MutableDataU(), \
869 frame3.video_frame_buffer()->StrideU(), \
870 frame3.video_frame_buffer()->MutableDataV(), \
871 frame3.video_frame_buffer()->StrideV(), \
872 kWidth, kHeight, libyuv::kRotate##ROTATE); \
873 EXPECT_TRUE(IsEqual(frame1, frame3, 0)); \ 836 EXPECT_TRUE(IsEqual(frame1, frame3, 0)); \
874 } 837 }
875 838
876 // Test constructing an image with rotation. 839 // Test constructing an image with rotation.
877 TEST_ROTATE(I420, 12, 0) 840 TEST_ROTATE(I420, 12, 0)
878 TEST_ROTATE(I420, 12, 90) 841 TEST_ROTATE(I420, 12, 90)
879 TEST_ROTATE(I420, 12, 180) 842 TEST_ROTATE(I420, 12, 180)
880 TEST_ROTATE(I420, 12, 270) 843 TEST_ROTATE(I420, 12, 270)
881 TEST_ROTATE(YV12, 12, 0) 844 TEST_ROTATE(YV12, 12, 0)
882 TEST_ROTATE(YV12, 12, 90) 845 TEST_ROTATE(YV12, 12, 90)
(...skipping 99 matching lines...)
982 T frame; 945 T frame;
983 uint8_t pixels5x5[5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2]; 946 uint8_t pixels5x5[5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2];
984 memset(pixels5x5, 1, 5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2); 947 memset(pixels5x5, 1, 5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2);
985 for (int i = 0; i < repeat_; ++i) { 948 for (int i = 0; i < repeat_; ++i) {
986 EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 5, 5, 5, 5, pixels5x5, 949 EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 5, 5, 5, 5, pixels5x5,
987 sizeof(pixels5x5), 0, 950 sizeof(pixels5x5), 0,
988 webrtc::kVideoRotation_0)); 951 webrtc::kVideoRotation_0));
989 } 952 }
990 EXPECT_EQ(5, frame.width()); 953 EXPECT_EQ(5, frame.width());
991 EXPECT_EQ(5, frame.height()); 954 EXPECT_EQ(5, frame.height());
992 EXPECT_EQ(5, frame.video_frame_buffer()->StrideY()); 955 EXPECT_EQ(5, frame.GetYPitch());
993 EXPECT_EQ(3, frame.video_frame_buffer()->StrideU()); 956 EXPECT_EQ(3, frame.GetUPitch());
994 EXPECT_EQ(3, frame.video_frame_buffer()->StrideV()); 957 EXPECT_EQ(3, frame.GetVPitch());
995 } 958 }
996 959
997 // Test 1 pixel edge case image ARGB buffer. 960 // Test 1 pixel edge case image ARGB buffer.
998 void ConstructARGB1Pixel() { 961 void ConstructARGB1Pixel() {
999 T frame; 962 T frame;
1000 uint8_t pixel[4] = {64, 128, 192, 255}; 963 uint8_t pixel[4] = {64, 128, 192, 255};
1001 for (int i = 0; i < repeat_; ++i) { 964 for (int i = 0; i < repeat_; ++i) {
1002 EXPECT_TRUE(frame.Init(cricket::FOURCC_ARGB, 1, 1, 1, 1, pixel, 965 EXPECT_TRUE(frame.Init(cricket::FOURCC_ARGB, 1, 1, 1, 1, pixel,
1003 sizeof(pixel), 0, 966 sizeof(pixel), 0,
1004 webrtc::kVideoRotation_0)); 967 webrtc::kVideoRotation_0));
(...skipping 146 matching lines...)
1151 EXPECT_TRUE(IsEqual(frame1, frame2, 32)); 1114 EXPECT_TRUE(IsEqual(frame1, frame2, 32));
1152 } 1115 }
1153 1116
1154 // Test constructing an image from an I400 MJPG buffer. 1117 // Test constructing an image from an I400 MJPG buffer.
 1155 // TODO(fbarchard): Stronger compare on chroma. Compare against a grey image. 1118 // TODO(fbarchard): Stronger compare on chroma. Compare against a grey image.
1156 void ConstructMjpgI400() { 1119 void ConstructMjpgI400() {
1157 T frame1, frame2; 1120 T frame1, frame2;
1158 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1121 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1159 ASSERT_TRUE(LoadFrame(kJpeg400Filename, 1122 ASSERT_TRUE(LoadFrame(kJpeg400Filename,
1160 cricket::FOURCC_MJPG, kWidth, kHeight, &frame2)); 1123 cricket::FOURCC_MJPG, kWidth, kHeight, &frame2));
1161 EXPECT_TRUE(IsPlaneEqual("y", frame1.video_frame_buffer()->DataY(), 1124 EXPECT_TRUE(IsPlaneEqual("y", frame1.GetYPlane(), frame1.GetYPitch(),
1162 frame1.video_frame_buffer()->StrideY(), 1125 frame2.GetYPlane(), frame2.GetYPitch(),
1163 frame2.video_frame_buffer()->DataY(),
1164 frame2.video_frame_buffer()->StrideY(),
1165 kWidth, kHeight, 32)); 1126 kWidth, kHeight, 32));
1166 EXPECT_TRUE(IsEqual(frame1, frame2, 128)); 1127 EXPECT_TRUE(IsEqual(frame1, frame2, 128));
1167 } 1128 }
1168 1129
1169 // Test constructing an image from an I420 MJPG buffer. 1130 // Test constructing an image from an I420 MJPG buffer.
1170 void ValidateFrame(const char* name, 1131 void ValidateFrame(const char* name,
1171 uint32_t fourcc, 1132 uint32_t fourcc,
1172 int data_adjust, 1133 int data_adjust,
1173 int size_adjust, 1134 int size_adjust,
1174 bool expected_result) { 1135 bool expected_result) {
(...skipping 161 matching lines...)
1336 } 1297 }
1337 1298
1338 // Test creating a copy and check that it just increments the refcount. 1299 // Test creating a copy and check that it just increments the refcount.
1339 void ConstructCopyIsRef() { 1300 void ConstructCopyIsRef() {
1340 T frame1, frame2; 1301 T frame1, frame2;
1341 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1302 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1342 for (int i = 0; i < repeat_; ++i) { 1303 for (int i = 0; i < repeat_; ++i) {
1343 EXPECT_TRUE(frame2.Init(frame1)); 1304 EXPECT_TRUE(frame2.Init(frame1));
1344 } 1305 }
1345 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); 1306 EXPECT_TRUE(IsEqual(frame1, frame2, 0));
1346 EXPECT_EQ(frame1.video_frame_buffer(), frame2.video_frame_buffer()); 1307 EXPECT_EQ(frame1.GetYPlane(), frame2.GetYPlane());
1308 EXPECT_EQ(frame1.GetUPlane(), frame2.GetUPlane());
1309 EXPECT_EQ(frame1.GetVPlane(), frame2.GetVPlane());
1347 } 1310 }
1348 1311
1349 // Test creating an empty image and initing it to black. 1312 // Test creating an empty image and initing it to black.
1350 void ConstructBlack() { 1313 void ConstructBlack() {
1351 T frame; 1314 T frame;
1352 for (int i = 0; i < repeat_; ++i) { 1315 for (int i = 0; i < repeat_; ++i) {
1353 EXPECT_TRUE(frame.InitToBlack(kWidth, kHeight, 0)); 1316 EXPECT_TRUE(frame.InitToBlack(kWidth, kHeight, 0));
1354 } 1317 }
1355 EXPECT_TRUE(IsSize(frame, kWidth, kHeight)); 1318 EXPECT_TRUE(IsSize(frame, kWidth, kHeight));
1356 EXPECT_TRUE(IsBlack(frame)); 1319 EXPECT_TRUE(IsBlack(frame));
(...skipping 92 matching lines...)
1449 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1412 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1450 1413
1451 for (int i = 0; i < repeat_to; ++i) { 1414 for (int i = 0; i < repeat_to; ++i) {
1452 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(fourcc, 1415 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(fourcc,
1453 out, 1416 out,
1454 out_size, stride)); 1417 out_size, stride));
1455 } 1418 }
1456 EXPECT_TRUE(frame2.InitToBlack(kWidth, kHeight, 0)); 1419 EXPECT_TRUE(frame2.InitToBlack(kWidth, kHeight, 0));
1457 for (int i = 0; i < repeat_from; ++i) { 1420 for (int i = 0; i < repeat_from; ++i) {
1458 EXPECT_EQ(0, RGBToI420(out, stride, 1421 EXPECT_EQ(0, RGBToI420(out, stride,
1459 frame2.video_frame_buffer()->MutableDataY(), 1422 frame2.GetYPlane(), frame2.GetYPitch(),
1460 frame2.video_frame_buffer()->StrideY(), 1423 frame2.GetUPlane(), frame2.GetUPitch(),
1461 frame2.video_frame_buffer()->MutableDataU(), 1424 frame2.GetVPlane(), frame2.GetVPitch(),
1462 frame2.video_frame_buffer()->StrideU(),
1463 frame2.video_frame_buffer()->MutableDataV(),
1464 frame2.video_frame_buffer()->StrideV(),
1465 kWidth, kHeight)); 1425 kWidth, kHeight));
1466 } 1426 }
1467 if (rowpad) { 1427 if (rowpad) {
1468 EXPECT_EQ(0, outtop[kWidth * bpp]); // Ensure stride skipped end of row. 1428 EXPECT_EQ(0, outtop[kWidth * bpp]); // Ensure stride skipped end of row.
1469 EXPECT_NE(0, outtop[astride]); // Ensure pixel at start of 2nd row. 1429 EXPECT_NE(0, outtop[astride]); // Ensure pixel at start of 2nd row.
1470 } else { 1430 } else {
1471 EXPECT_NE(0, outtop[kWidth * bpp]); // Expect something to be here. 1431 EXPECT_NE(0, outtop[kWidth * bpp]); // Expect something to be here.
1472 } 1432 }
1473 EXPECT_EQ(0, outtop[out_size]); // Ensure no overrun. 1433 EXPECT_EQ(0, outtop[out_size]); // Ensure no overrun.
1474 EXPECT_TRUE(IsEqual(frame1, frame2, error)); 1434 EXPECT_TRUE(IsEqual(frame1, frame2, error));
(...skipping 282 matching lines...)
1757 // Test converting from I420 to I422. 1717 // Test converting from I420 to I422.
1758 void ConvertToI422Buffer() { 1718 void ConvertToI422Buffer() {
1759 T frame1, frame2; 1719 T frame1, frame2;
1760 size_t out_size = kWidth * kHeight * 2; 1720 size_t out_size = kWidth * kHeight * 2;
1761 std::unique_ptr<uint8_t[]> buf(new uint8_t[out_size + kAlignment]); 1721 std::unique_ptr<uint8_t[]> buf(new uint8_t[out_size + kAlignment]);
1762 uint8_t* y = ALIGNP(buf.get(), kAlignment); 1722 uint8_t* y = ALIGNP(buf.get(), kAlignment);
1763 uint8_t* u = y + kWidth * kHeight; 1723 uint8_t* u = y + kWidth * kHeight;
1764 uint8_t* v = u + (kWidth / 2) * kHeight; 1724 uint8_t* v = u + (kWidth / 2) * kHeight;
1765 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1725 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1766 for (int i = 0; i < repeat_; ++i) { 1726 for (int i = 0; i < repeat_; ++i) {
1767 EXPECT_EQ(0, libyuv::I420ToI422(frame1.video_frame_buffer()->DataY(), 1727 EXPECT_EQ(0, libyuv::I420ToI422(frame1.GetYPlane(), frame1.GetYPitch(),
1768 frame1.video_frame_buffer()->StrideY(), 1728 frame1.GetUPlane(), frame1.GetUPitch(),
1769 frame1.video_frame_buffer()->DataU(), 1729 frame1.GetVPlane(), frame1.GetVPitch(),
1770 frame1.video_frame_buffer()->StrideU(),
1771 frame1.video_frame_buffer()->DataV(),
1772 frame1.video_frame_buffer()->StrideV(),
1773 y, kWidth, 1730 y, kWidth,
1774 u, kWidth / 2, 1731 u, kWidth / 2,
1775 v, kWidth / 2, 1732 v, kWidth / 2,
1776 kWidth, kHeight)); 1733 kWidth, kHeight));
1777 } 1734 }
1778 EXPECT_TRUE(frame2.Init(cricket::FOURCC_I422, kWidth, kHeight, kWidth, 1735 EXPECT_TRUE(frame2.Init(cricket::FOURCC_I422, kWidth, kHeight, kWidth,
1779 kHeight, y, out_size, 1, 1, 0, 1736 kHeight, y, out_size, 1, 1, 0,
1780 webrtc::kVideoRotation_0)); 1737 webrtc::kVideoRotation_0));
1781 EXPECT_TRUE(IsEqual(frame1, frame2, 1)); 1738 EXPECT_TRUE(IsEqual(frame1, frame2, 1));
1782 } 1739 }
1783 1740
1784 /////////////////// 1741 ///////////////////
1785 // General tests // 1742 // General tests //
1786 /////////////////// 1743 ///////////////////
1787 1744
1788 void Copy() { 1745 void Copy() {
1789 std::unique_ptr<T> source(new T); 1746 std::unique_ptr<T> source(new T);
1790 std::unique_ptr<cricket::VideoFrame> target; 1747 std::unique_ptr<cricket::VideoFrame> target;
1791 ASSERT_TRUE(LoadFrameNoRepeat(source.get())); 1748 ASSERT_TRUE(LoadFrameNoRepeat(source.get()));
1792 target.reset(source->Copy()); 1749 target.reset(source->Copy());
1793 EXPECT_TRUE(IsEqual(*source, *target, 0)); 1750 EXPECT_TRUE(IsEqual(*source, *target, 0));
1794 source.reset(); 1751 source.reset();
1795 ASSERT_TRUE(target->video_frame_buffer() != NULL); 1752 EXPECT_TRUE(target->GetYPlane() != NULL);
1796 EXPECT_TRUE(target->video_frame_buffer()->DataY() != NULL);
1797 } 1753 }
1798 1754
1799 void CopyIsRef() { 1755 void CopyIsRef() {
1800 std::unique_ptr<T> source(new T); 1756 std::unique_ptr<T> source(new T);
1801 std::unique_ptr<const cricket::VideoFrame> target; 1757 std::unique_ptr<const cricket::VideoFrame> target;
1802 ASSERT_TRUE(LoadFrameNoRepeat(source.get())); 1758 ASSERT_TRUE(LoadFrameNoRepeat(source.get()));
1803 target.reset(source->Copy()); 1759 target.reset(source->Copy());
1804 EXPECT_TRUE(IsEqual(*source, *target, 0)); 1760 EXPECT_TRUE(IsEqual(*source, *target, 0));
1805 const T* const_source = source.get(); 1761 const T* const_source = source.get();
1806 EXPECT_EQ(const_source->video_frame_buffer(), target->video_frame_buffer()); 1762 EXPECT_EQ(const_source->GetYPlane(), target->GetYPlane());
1763 EXPECT_EQ(const_source->GetUPlane(), target->GetUPlane());
1764 EXPECT_EQ(const_source->GetVPlane(), target->GetVPlane());
1807 } 1765 }
1808 1766
1809 void StretchToFrame() { 1767 void StretchToFrame() {
1810 // Create the source frame as a black frame. 1768 // Create the source frame as a black frame.
1811 T source; 1769 T source;
1812 EXPECT_TRUE(source.InitToBlack(kWidth * 2, kHeight * 2, 0)); 1770 EXPECT_TRUE(source.InitToBlack(kWidth * 2, kHeight * 2, 0));
1813 EXPECT_TRUE(IsSize(source, kWidth * 2, kHeight * 2)); 1771 EXPECT_TRUE(IsSize(source, kWidth * 2, kHeight * 2));
1814 1772
1815 // Create the target frame by loading from a file. 1773 // Create the target frame by loading from a file.
1816 T target1; 1774 T target1;
1817 ASSERT_TRUE(LoadFrameNoRepeat(&target1)); 1775 ASSERT_TRUE(LoadFrameNoRepeat(&target1));
1818 EXPECT_FALSE(IsBlack(target1)); 1776 EXPECT_FALSE(IsBlack(target1));
1819 1777
1820 // Stretch and check if the stretched target is black. 1778 // Stretch and check if the stretched target is black.
1821 source.StretchToFrame(&target1, true, false); 1779 source.StretchToFrame(&target1, true, false);
1822 EXPECT_TRUE(IsBlack(target1)); 1780 EXPECT_TRUE(IsBlack(target1));
1823 1781
1824 // Crop and stretch and check if the stretched target is black. 1782 // Crop and stretch and check if the stretched target is black.
1825 T target2; 1783 T target2;
1826 ASSERT_TRUE(LoadFrameNoRepeat(&target2)); 1784 ASSERT_TRUE(LoadFrameNoRepeat(&target2));
1827 source.StretchToFrame(&target2, true, true); 1785 source.StretchToFrame(&target2, true, true);
1828 EXPECT_TRUE(IsBlack(target2)); 1786 EXPECT_TRUE(IsBlack(target2));
1829 EXPECT_EQ(source.GetTimeStamp(), target2.GetTimeStamp()); 1787 EXPECT_EQ(source.GetTimeStamp(), target2.GetTimeStamp());
1830 } 1788 }
1831 1789
1832 int repeat_; 1790 int repeat_;
1833 }; 1791 };
1834 1792
1835 #endif // WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_ 1793 #endif // WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_
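
Note for readers: the body of IsPlaneEqual is collapsed in this view ("skipping 20 matching lines" above), but the comparison tests depend on it. Below is a hypothetical sketch of what a pitch-aware plane comparison of this shape typically does, offered as an assumption for illustration and not as the file's actual implementation:

    static bool IsPlaneEqual(const std::string& name,
                             const uint8_t* plane1, uint32_t pitch1,
                             const uint8_t* plane2, uint32_t pitch2,
                             uint32_t width, uint32_t height, int max_error) {
      // Walk both planes row by row, honoring each plane's own pitch, and fail
      // as soon as any pixel differs by more than max_error.
      for (uint32_t y = 0; y < height; ++y) {
        for (uint32_t x = 0; x < width; ++x) {
          int diff = static_cast<int>(plane1[pitch1 * y + x]) -
                     static_cast<int>(plane2[pitch2 * y + x]);
          if (diff < -max_error || diff > max_error) {
            return false;  // The real helper presumably also logs 'name' and the failing position.
          }
        }
      }
      return true;
    }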