Chromium Code Reviews

Side by Side Diff: webrtc/media/base/videoframe_unittest.h

Issue 1923903002: Reland of Delete cricket::VideoFrame methods GetYPlane and GetYPitch. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 4 years, 7 months ago
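
This patch set relands the removal of the per-plane accessors on cricket::VideoFrame (GetYPlane/GetUPlane/GetVPlane and GetYPitch/GetUPitch/GetVPitch); callers now go through the frame's video_frame_buffer(). A minimal sketch of the migration pattern applied throughout the test header below (illustration only, not part of the patch; CopyLumaRow is a hypothetical helper):

    #include <cstdint>
    #include <cstring>

    #include "webrtc/media/base/videoframe.h"

    // Copies one row of luma out of a frame, using the buffer-based accessors
    // that replace the deleted GetYPlane()/GetYPitch().
    void CopyLumaRow(const cricket::VideoFrame& frame, int row, uint8_t* dst,
                     size_t width) {
      // Before this CL: frame.GetYPlane() + row * frame.GetYPitch()
      const uint8_t* src = frame.video_frame_buffer()->DataY() +
                           row * frame.video_frame_buffer()->StrideY();
      memcpy(dst, src, width);
      // Writable access goes through MutableDataY()/MutableDataU()/MutableDataV(),
      // as the test code below does when it fills planes directly.
    }
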
OLD | NEW (each line shows the base revision on the left and the patched revision on the right)
1 /* 1 /*
2 * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 249 matching lines...)
260 uint32_t height, 260 uint32_t height,
261 T* frame) { 261 T* frame) {
262 int y1_pos, y2_pos, u_pos, v_pos; 262 int y1_pos, y2_pos, u_pos, v_pos;
263 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) { 263 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) {
264 return false; 264 return false;
265 } 265 }
266 266
267 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); 267 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer());
268 int awidth = (width + 1) & ~1; 268 int awidth = (width + 1) & ~1;
269 frame->InitToBlack(width, height, 0); 269 frame->InitToBlack(width, height, 0);
270 int stride_y = frame->GetYPitch(); 270 int stride_y = frame->video_frame_buffer()->StrideY();
271 int stride_u = frame->GetUPitch(); 271 int stride_u = frame->video_frame_buffer()->StrideU();
272 int stride_v = frame->GetVPitch(); 272 int stride_v = frame->video_frame_buffer()->StrideV();
273 uint8_t* plane_y = frame->video_frame_buffer()->MutableDataY();
274 uint8_t* plane_u = frame->video_frame_buffer()->MutableDataU();
275 uint8_t* plane_v = frame->video_frame_buffer()->MutableDataV();
273 for (uint32_t y = 0; y < height; ++y) { 276 for (uint32_t y = 0; y < height; ++y) {
274 for (uint32_t x = 0; x < width; x += 2) { 277 for (uint32_t x = 0; x < width; x += 2) {
275 const uint8_t* quad1 = start + (y * awidth + x) * 2; 278 const uint8_t* quad1 = start + (y * awidth + x) * 2;
276 frame->GetYPlane()[stride_y * y + x] = quad1[y1_pos]; 279 plane_y[stride_y * y + x] = quad1[y1_pos];
277 if ((x + 1) < width) { 280 if ((x + 1) < width) {
278 frame->GetYPlane()[stride_y * y + x + 1] = quad1[y2_pos]; 281 plane_y[stride_y * y + x + 1] = quad1[y2_pos];
279 } 282 }
280 if ((y & 1) == 0) { 283 if ((y & 1) == 0) {
281 const uint8_t* quad2 = quad1 + awidth * 2; 284 const uint8_t* quad2 = quad1 + awidth * 2;
282 if ((y + 1) >= height) { 285 if ((y + 1) >= height) {
283 quad2 = quad1; 286 quad2 = quad1;
284 } 287 }
285 frame->GetUPlane()[stride_u * (y / 2) + x / 2] = 288 plane_u[stride_u * (y / 2) + x / 2] =
286 (quad1[u_pos] + quad2[u_pos] + 1) / 2; 289 (quad1[u_pos] + quad2[u_pos] + 1) / 2;
287 frame->GetVPlane()[stride_v * (y / 2) + x / 2] = 290 plane_v[stride_v * (y / 2) + x / 2] =
288 (quad1[v_pos] + quad2[v_pos] + 1) / 2; 291 (quad1[v_pos] + quad2[v_pos] + 1) / 2;
289 } 292 }
290 } 293 }
291 } 294 }
292 return true; 295 return true;
293 } 296 }
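
ConvertYuv422 above unpacks a packed 4:2:2 buffer into the frame's I420 planes; GetYuv422Packing (defined in the collapsed region above) supplies the byte offsets of the two luma samples and the shared U/V samples within each 4-byte group. For reference, a hedged sketch of such a packing table for the two most common FOURCCs (standard layouts, not the collapsed WebRTC code, which may handle more formats; GetYuv422PackingSketch is a hypothetical name):

    #include <cstdint>

    #include "webrtc/media/base/videocommon.h"  // cricket FOURCC constants (path assumed)

    // Byte offsets inside one 4-byte macropixel of a packed 4:2:2 buffer.
    bool GetYuv422PackingSketch(uint32_t fourcc,
                                int* y1_pos, int* y2_pos, int* u_pos, int* v_pos) {
      if (fourcc == cricket::FOURCC_YUY2) {         // Memory order: Y0 U0 Y1 V0
        *y1_pos = 0; *u_pos = 1; *y2_pos = 2; *v_pos = 3;
      } else if (fourcc == cricket::FOURCC_UYVY) {  // Memory order: U0 Y0 V0 Y1
        *u_pos = 0; *y1_pos = 1; *v_pos = 2; *y2_pos = 3;
      } else {
        return false;
      }
      return true;
    }
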
294 297
295 // Convert RGB to 420. 298 // Convert RGB to 420.
296 // A negative height inverts the image. 299 // A negative height inverts the image.
297 bool ConvertRgb(const rtc::MemoryStream* ms, 300 bool ConvertRgb(const rtc::MemoryStream* ms,
298 uint32_t fourcc, 301 uint32_t fourcc,
299 int32_t width, 302 int32_t width,
300 int32_t height, 303 int32_t height,
301 T* frame) { 304 T* frame) {
302 int r_pos, g_pos, b_pos, bytes; 305 int r_pos, g_pos, b_pos, bytes;
303 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) { 306 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) {
304 return false; 307 return false;
305 } 308 }
306 int pitch = width * bytes; 309 int pitch = width * bytes;
307 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); 310 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer());
308 if (height < 0) { 311 if (height < 0) {
309 height = -height; 312 height = -height;
310 start = start + pitch * (height - 1); 313 start = start + pitch * (height - 1);
311 pitch = -pitch; 314 pitch = -pitch;
312 } 315 }
313 frame->InitToBlack(width, height, 0); 316 frame->InitToBlack(width, height, 0);
314 int stride_y = frame->GetYPitch(); 317 int stride_y = frame->video_frame_buffer()->StrideY();
315 int stride_u = frame->GetUPitch(); 318 int stride_u = frame->video_frame_buffer()->StrideU();
316 int stride_v = frame->GetVPitch(); 319 int stride_v = frame->video_frame_buffer()->StrideV();
320 uint8_t* plane_y = frame->video_frame_buffer()->MutableDataY();
321 uint8_t* plane_u = frame->video_frame_buffer()->MutableDataU();
322 uint8_t* plane_v = frame->video_frame_buffer()->MutableDataV();
317 for (int32_t y = 0; y < height; y += 2) { 323 for (int32_t y = 0; y < height; y += 2) {
318 for (int32_t x = 0; x < width; x += 2) { 324 for (int32_t x = 0; x < width; x += 2) {
319 const uint8_t* rgb[4]; 325 const uint8_t* rgb[4];
320 uint8_t yuv[4][3]; 326 uint8_t yuv[4][3];
321 rgb[0] = start + y * pitch + x * bytes; 327 rgb[0] = start + y * pitch + x * bytes;
322 rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0); 328 rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0);
323 rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0); 329 rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0);
324 rgb[3] = rgb[2] + ((x + 1) < width ? bytes : 0); 330 rgb[3] = rgb[2] + ((x + 1) < width ? bytes : 0);
325 for (size_t i = 0; i < 4; ++i) { 331 for (size_t i = 0; i < 4; ++i) {
326 ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos], 332 ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos],
327 &yuv[i][0], &yuv[i][1], &yuv[i][2]); 333 &yuv[i][0], &yuv[i][1], &yuv[i][2]);
328 } 334 }
329 frame->GetYPlane()[stride_y * y + x] = yuv[0][0]; 335 plane_y[stride_y * y + x] = yuv[0][0];
330 if ((x + 1) < width) { 336 if ((x + 1) < width) {
331 frame->GetYPlane()[stride_y * y + x + 1] = yuv[1][0]; 337 plane_y[stride_y * y + x + 1] = yuv[1][0];
332 } 338 }
333 if ((y + 1) < height) { 339 if ((y + 1) < height) {
334 frame->GetYPlane()[stride_y * (y + 1) + x] = yuv[2][0]; 340 plane_y[stride_y * (y + 1) + x] = yuv[2][0];
335 if ((x + 1) < width) { 341 if ((x + 1) < width) {
336 frame->GetYPlane()[stride_y * (y + 1) + x + 1] = yuv[3][0]; 342 plane_y[stride_y * (y + 1) + x + 1] = yuv[3][0];
337 } 343 }
338 } 344 }
339 frame->GetUPlane()[stride_u * (y / 2) + x / 2] = 345 plane_u[stride_u * (y / 2) + x / 2] =
340 (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4; 346 (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4;
341 frame->GetVPlane()[stride_v * (y / 2) + x / 2] = 347 plane_v[stride_v * (y / 2) + x / 2] =
342 (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4; 348 (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4;
343 } 349 }
344 } 350 }
345 return true; 351 return true;
346 } 352 }
347 353
348 // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia. 354 // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia.
349 void ConvertRgbPixel(uint8_t r, 355 void ConvertRgbPixel(uint8_t r,
350 uint8_t g, 356 uint8_t g,
351 uint8_t b, 357 uint8_t b,
(...skipping 36 matching lines...)
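
The body of ConvertRgbPixel is collapsed in this diff. For reference only, a hedged sketch of the usual NTSC/BT.601 studio-range approximation the comment refers to (the collapsed WebRTC code may differ in coefficients and rounding; ConvertRgbPixelSketch is a hypothetical name):

    #include <cstdint>

    // Full-range 8-bit RGB -> limited-range YUV (Y in [16,235], U/V in [16,240]).
    // The +0.5 terms round to the nearest integer; results stay in range for
    // 8-bit inputs, so no clamping is needed.
    void ConvertRgbPixelSketch(uint8_t r, uint8_t g, uint8_t b,
                               uint8_t* y, uint8_t* u, uint8_t* v) {
      *y = static_cast<uint8_t>( 0.257 * r + 0.504 * g + 0.098 * b + 16.5);
      *u = static_cast<uint8_t>(-0.148 * r - 0.291 * g + 0.439 * b + 128.5);
      *v = static_cast<uint8_t>( 0.439 * r - 0.368 * g - 0.071 * b + 128.5);
    }
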
388 } else if (fourcc == cricket::FOURCC_ARGB) { 394 } else if (fourcc == cricket::FOURCC_ARGB) {
389 *r_pos = 2; *g_pos = 1; *b_pos = 0; *bytes = 4; // BGRA in memory. 395 *r_pos = 2; *g_pos = 1; *b_pos = 0; *bytes = 4; // BGRA in memory.
390 } else { 396 } else {
391 return false; 397 return false;
392 } 398 }
393 return true; 399 return true;
394 } 400 }
395 401
396 // Comparison functions for testing. 402 // Comparison functions for testing.
397 static bool IsNull(const cricket::VideoFrame& frame) { 403 static bool IsNull(const cricket::VideoFrame& frame) {
398 return !frame.GetYPlane(); 404 return !frame.video_frame_buffer();
399 } 405 }
400 406
401 static bool IsSize(const cricket::VideoFrame& frame, 407 static bool IsSize(const cricket::VideoFrame& frame,
402 int width, 408 int width,
403 int height) { 409 int height) {
404 return !IsNull(frame) && frame.GetYPitch() >= width && 410 return !IsNull(frame) && frame.video_frame_buffer()->StrideY() >= width &&
405 frame.GetUPitch() >= width / 2 && 411 frame.video_frame_buffer()->StrideU() >= width / 2 &&
406 frame.GetVPitch() >= width / 2 && 412 frame.video_frame_buffer()->StrideV() >= width / 2 &&
407 frame.width() == width && frame.height() == height; 413 frame.width() == width && frame.height() == height;
408 } 414 }
409 415
410 static bool IsPlaneEqual(const std::string& name, 416 static bool IsPlaneEqual(const std::string& name,
411 const uint8_t* plane1, 417 const uint8_t* plane1,
412 uint32_t pitch1, 418 uint32_t pitch1,
413 const uint8_t* plane2, 419 const uint8_t* plane2,
414 uint32_t pitch2, 420 uint32_t pitch2,
415 uint32_t width, 421 uint32_t width,
416 uint32_t height, 422 uint32_t height,
(...skipping 20 matching lines...)
437 int width, 443 int width,
438 int height, 444 int height,
439 int64_t time_stamp, 445 int64_t time_stamp,
440 const uint8_t* y, 446 const uint8_t* y,
441 uint32_t ypitch, 447 uint32_t ypitch,
442 const uint8_t* u, 448 const uint8_t* u,
443 uint32_t upitch, 449 uint32_t upitch,
444 const uint8_t* v, 450 const uint8_t* v,
445 uint32_t vpitch, 451 uint32_t vpitch,
446 int max_error) { 452 int max_error) {
447 return IsSize(frame, width, height) && 453 return IsSize(frame, width, height) && frame.GetTimeStamp() == time_stamp &&
448 frame.GetTimeStamp() == time_stamp && 454 IsPlaneEqual("y", frame.video_frame_buffer()->DataY(),
449 IsPlaneEqual("y", frame.GetYPlane(), frame.GetYPitch(), y, ypitch, 455 frame.video_frame_buffer()->StrideY(), y, ypitch,
450 static_cast<uint32_t>(width), 456 static_cast<uint32_t>(width),
451 static_cast<uint32_t>(height), max_error) && 457 static_cast<uint32_t>(height), max_error) &&
452 IsPlaneEqual("u", frame.GetUPlane(), frame.GetUPitch(), u, upitch, 458 IsPlaneEqual("u", frame.video_frame_buffer()->DataU(),
459 frame.video_frame_buffer()->StrideU(), u, upitch,
453 static_cast<uint32_t>((width + 1) / 2), 460 static_cast<uint32_t>((width + 1) / 2),
454 static_cast<uint32_t>((height + 1) / 2), max_error) && 461 static_cast<uint32_t>((height + 1) / 2), max_error) &&
455 IsPlaneEqual("v", frame.GetVPlane(), frame.GetVPitch(), v, vpitch, 462 IsPlaneEqual("v", frame.video_frame_buffer()->DataV(),
463 frame.video_frame_buffer()->StrideV(), v, vpitch,
456 static_cast<uint32_t>((width + 1) / 2), 464 static_cast<uint32_t>((width + 1) / 2),
457 static_cast<uint32_t>((height + 1) / 2), max_error); 465 static_cast<uint32_t>((height + 1) / 2), max_error);
458 } 466 }
459 467
460 static bool IsEqual(const cricket::VideoFrame& frame1, 468 static bool IsEqual(const cricket::VideoFrame& frame1,
461 const cricket::VideoFrame& frame2, 469 const cricket::VideoFrame& frame2,
462 int max_error) { 470 int max_error) {
463 return IsEqual(frame1, 471 return IsEqual(frame1,
464 frame2.width(), frame2.height(), 472 frame2.width(), frame2.height(),
465 frame2.GetTimeStamp(), 473 frame2.GetTimeStamp(),
466 frame2.GetYPlane(), frame2.GetYPitch(), 474 frame2.video_frame_buffer()->DataY(),
467 frame2.GetUPlane(), frame2.GetUPitch(), 475 frame2.video_frame_buffer()->StrideY(),
468 frame2.GetVPlane(), frame2.GetVPitch(), 476 frame2.video_frame_buffer()->DataU(),
477 frame2.video_frame_buffer()->StrideU(),
478 frame2.video_frame_buffer()->DataV(),
479 frame2.video_frame_buffer()->StrideV(),
469 max_error); 480 max_error);
470 } 481 }
471 482
472 static bool IsEqualWithCrop(const cricket::VideoFrame& frame1, 483 static bool IsEqualWithCrop(const cricket::VideoFrame& frame1,
473 const cricket::VideoFrame& frame2, 484 const cricket::VideoFrame& frame2,
474 int hcrop, int vcrop, int max_error) { 485 int hcrop, int vcrop, int max_error) {
475 return frame1.width() <= frame2.width() && 486 return frame1.width() <= frame2.width() &&
476 frame1.height() <= frame2.height() && 487 frame1.height() <= frame2.height() &&
477 IsEqual(frame1, 488 IsEqual(frame1,
478 frame2.width() - hcrop * 2, 489 frame2.width() - hcrop * 2,
479 frame2.height() - vcrop * 2, 490 frame2.height() - vcrop * 2,
480 frame2.GetTimeStamp(), 491 frame2.GetTimeStamp(),
481 frame2.GetYPlane() + vcrop * frame2.GetYPitch() 492 frame2.video_frame_buffer()->DataY()
493 + vcrop * frame2.video_frame_buffer()->StrideY()
482 + hcrop, 494 + hcrop,
483 frame2.GetYPitch(), 495 frame2.video_frame_buffer()->StrideY(),
484 frame2.GetUPlane() + vcrop * frame2.GetUPitch() / 2 496 frame2.video_frame_buffer()->DataU()
497 + vcrop * frame2.video_frame_buffer()->StrideU() / 2
485 + hcrop / 2, 498 + hcrop / 2,
486 frame2.GetUPitch(), 499 frame2.video_frame_buffer()->StrideU(),
487 frame2.GetVPlane() + vcrop * frame2.GetVPitch() / 2 500 frame2.video_frame_buffer()->DataV()
501 + vcrop * frame2.video_frame_buffer()->StrideV() / 2
488 + hcrop / 2, 502 + hcrop / 2,
489 frame2.GetVPitch(), 503 frame2.video_frame_buffer()->StrideV(),
490 max_error); 504 max_error);
491 } 505 }
492 506
493 static bool IsBlack(const cricket::VideoFrame& frame) { 507 static bool IsBlack(const cricket::VideoFrame& frame) {
494 return !IsNull(frame) && 508 return !IsNull(frame) &&
495 *frame.GetYPlane() == 16 && 509 *frame.video_frame_buffer()->DataY() == 16 &&
496 *frame.GetUPlane() == 128 && 510 *frame.video_frame_buffer()->DataU() == 128 &&
497 *frame.GetVPlane() == 128; 511 *frame.video_frame_buffer()->DataV() == 128;
498 } 512 }
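
IsBlack above relies on the limited-range YUV convention: luma 16 is black and chroma 128 is the neutral (no-color) value, so a black I420 frame has its Y plane filled with 16 and its U/V planes filled with 128. A minimal sketch of filling an already-allocated I420 buffer to black through the buffer interface used in this revision (illustration only; the real InitToBlack also allocates the buffer and sets frame metadata, and the webrtc::VideoFrameBuffer header include is omitted here):

    #include <cstring>

    // Fill an existing writable I420 buffer with black pixels (Y=16, U=V=128).
    void FillBlack(webrtc::VideoFrameBuffer* buffer) {
      int chroma_height = (buffer->height() + 1) / 2;
      memset(buffer->MutableDataY(), 16, buffer->StrideY() * buffer->height());
      memset(buffer->MutableDataU(), 128, buffer->StrideU() * chroma_height);
      memset(buffer->MutableDataV(), 128, buffer->StrideV() * chroma_height);
    }
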
499 513
500 //////////////////////// 514 ////////////////////////
501 // Construction tests // 515 // Construction tests //
502 //////////////////////// 516 ////////////////////////
503 517
504 // Test constructing an image from a I420 buffer. 518 // Test constructing an image from a I420 buffer.
505 void ConstructI420() { 519 void ConstructI420() {
506 T frame; 520 T frame;
507 EXPECT_TRUE(IsNull(frame)); 521 EXPECT_TRUE(IsNull(frame));
(...skipping 26 matching lines...)
534 548
535 // Test constructing an image from a I422 buffer. 549 // Test constructing an image from a I422 buffer.
536 void ConstructI422() { 550 void ConstructI422() {
537 T frame1, frame2; 551 T frame1, frame2;
538 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 552 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
539 size_t buf_size = kWidth * kHeight * 2; 553 size_t buf_size = kWidth * kHeight * 2;
540 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]); 554 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]);
541 uint8_t* y = ALIGNP(buf.get(), kAlignment); 555 uint8_t* y = ALIGNP(buf.get(), kAlignment);
542 uint8_t* u = y + kWidth * kHeight; 556 uint8_t* u = y + kWidth * kHeight;
543 uint8_t* v = u + (kWidth / 2) * kHeight; 557 uint8_t* v = u + (kWidth / 2) * kHeight;
544 EXPECT_EQ(0, libyuv::I420ToI422(frame1.GetYPlane(), frame1.GetYPitch(), 558 EXPECT_EQ(0, libyuv::I420ToI422(frame1.video_frame_buffer()->DataY(),
545 frame1.GetUPlane(), frame1.GetUPitch(), 559 frame1.video_frame_buffer()->StrideY(),
546 frame1.GetVPlane(), frame1.GetVPitch(), 560 frame1.video_frame_buffer()->DataU(),
561 frame1.video_frame_buffer()->StrideU(),
562 frame1.video_frame_buffer()->DataV(),
563 frame1.video_frame_buffer()->StrideV(),
547 y, kWidth, 564 y, kWidth,
548 u, kWidth / 2, 565 u, kWidth / 2,
549 v, kWidth / 2, 566 v, kWidth / 2,
550 kWidth, kHeight)); 567 kWidth, kHeight));
551 EXPECT_TRUE(LoadFrame(y, buf_size, cricket::FOURCC_I422, 568 EXPECT_TRUE(LoadFrame(y, buf_size, cricket::FOURCC_I422,
552 kWidth, kHeight, &frame2)); 569 kWidth, kHeight, &frame2));
553 EXPECT_TRUE(IsEqual(frame1, frame2, 1)); 570 EXPECT_TRUE(IsEqual(frame1, frame2, 1));
554 } 571 }
555 572
556 // Test constructing an image from a YUY2 buffer. 573 // Test constructing an image from a YUY2 buffer.
557 void ConstructYuy2() { 574 void ConstructYuy2() {
558 T frame1, frame2; 575 T frame1, frame2;
559 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 576 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
560 size_t buf_size = kWidth * kHeight * 2; 577 size_t buf_size = kWidth * kHeight * 2;
561 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]); 578 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]);
562 uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment); 579 uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment);
563 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.GetYPlane(), frame1.GetYPitch(), 580 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.video_frame_buffer()->DataY(),
564 frame1.GetUPlane(), frame1.GetUPitch(), 581 frame1.video_frame_buffer()->StrideY(),
565 frame1.GetVPlane(), frame1.GetVPitch(), 582 frame1.video_frame_buffer()->DataU(),
583 frame1.video_frame_buffer()->StrideU(),
584 frame1.video_frame_buffer()->DataV(),
585 frame1.video_frame_buffer()->StrideV(),
566 yuy2, kWidth * 2, 586 yuy2, kWidth * 2,
567 kWidth, kHeight)); 587 kWidth, kHeight));
568 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, 588 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2,
569 kWidth, kHeight, &frame2)); 589 kWidth, kHeight, &frame2));
570 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); 590 EXPECT_TRUE(IsEqual(frame1, frame2, 0));
571 } 591 }
572 592
573 // Test constructing an image from a YUY2 buffer with buffer unaligned. 593 // Test constructing an image from a YUY2 buffer with buffer unaligned.
574 void ConstructYuy2Unaligned() { 594 void ConstructYuy2Unaligned() {
575 T frame1, frame2; 595 T frame1, frame2;
576 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 596 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
577 size_t buf_size = kWidth * kHeight * 2; 597 size_t buf_size = kWidth * kHeight * 2;
578 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment + 1]); 598 std::unique_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment + 1]);
579 uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment) + 1; 599 uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment) + 1;
580 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.GetYPlane(), frame1.GetYPitch(), 600 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.video_frame_buffer()->DataY(),
581 frame1.GetUPlane(), frame1.GetUPitch(), 601 frame1.video_frame_buffer()->StrideY(),
582 frame1.GetVPlane(), frame1.GetVPitch(), 602 frame1.video_frame_buffer()->DataU(),
603 frame1.video_frame_buffer()->StrideU(),
604 frame1.video_frame_buffer()->DataV(),
605 frame1.video_frame_buffer()->StrideV(),
583 yuy2, kWidth * 2, 606 yuy2, kWidth * 2,
584 kWidth, kHeight)); 607 kWidth, kHeight));
585 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, 608 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2,
586 kWidth, kHeight, &frame2)); 609 kWidth, kHeight, &frame2));
587 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); 610 EXPECT_TRUE(IsEqual(frame1, frame2, 0));
588 } 611 }
589 612
590 // Test constructing an image from a wide YUY2 buffer. 613 // Test constructing an image from a wide YUY2 buffer.
591 // Normal is 1280x720. Wide is 12800x72 614 // Normal is 1280x720. Wide is 12800x72
592 void ConstructYuy2Wide() { 615 void ConstructYuy2Wide() {
(...skipping 192 matching lines...)
785 ASSERT_TRUE(ms.get() != NULL); \ 808 ASSERT_TRUE(ms.get() != NULL); \
786 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, \ 809 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, \
787 -kHeight, kWidth, kHeight, \ 810 -kHeight, kWidth, kHeight, \
788 webrtc::kVideoRotation_180, &frame1)); \ 811 webrtc::kVideoRotation_180, &frame1)); \
789 size_t data_size; \ 812 size_t data_size; \
790 bool ret = ms->GetSize(&data_size); \ 813 bool ret = ms->GetSize(&data_size); \
791 EXPECT_TRUE(ret); \ 814 EXPECT_TRUE(ret); \
792 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ 815 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \
793 kHeight, \ 816 kHeight, \
794 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ 817 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \
795 data_size, 0, webrtc::kVideoRotation_0)); \ 818 data_size, 0, webrtc::kVideoRotation_0)); \
796 int width_rotate = frame1.width(); \ 819 int width_rotate = frame1.width(); \
797 int height_rotate = frame1.height(); \ 820 int height_rotate = frame1.height(); \
798 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \ 821 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \
799 libyuv::I420Mirror( \ 822 libyuv::I420Mirror(frame2.video_frame_buffer()->DataY(), \
800 frame2.GetYPlane(), frame2.GetYPitch(), frame2.GetUPlane(), \ 823 frame2.video_frame_buffer()->StrideY(), \
801 frame2.GetUPitch(), frame2.GetVPlane(), frame2.GetVPitch(), \ 824 frame2.video_frame_buffer()->DataU(), \
802 frame3.GetYPlane(), frame3.GetYPitch(), frame3.GetUPlane(), \ 825 frame2.video_frame_buffer()->StrideU(), \
803 frame3.GetUPitch(), frame3.GetVPlane(), frame3.GetVPitch(), kWidth, \ 826 frame2.video_frame_buffer()->DataV(), \
804 kHeight); \ 827 frame2.video_frame_buffer()->StrideV(), \
828 frame3.video_frame_buffer()->MutableDataY(), \
829 frame3.video_frame_buffer()->StrideY(), \
830 frame3.video_frame_buffer()->MutableDataU(), \
831 frame3.video_frame_buffer()->StrideU(), \
832 frame3.video_frame_buffer()->MutableDataV(), \
833 frame3.video_frame_buffer()->StrideV(), \
834 kWidth, kHeight); \
805 EXPECT_TRUE(IsEqual(frame1, frame3, 0)); \ 835 EXPECT_TRUE(IsEqual(frame1, frame3, 0)); \
806 } 836 }
807 837
808 TEST_MIRROR(I420, 420) 838 TEST_MIRROR(I420, 420)
809 839
810 // Macro to help test different rotations 840 // Macro to help test different rotations
811 #define TEST_ROTATE(FOURCC, BPP, ROTATE) \ 841 #define TEST_ROTATE(FOURCC, BPP, ROTATE) \
812 void Construct##FOURCC##Rotate##ROTATE() { \ 842 void Construct##FOURCC##Rotate##ROTATE() { \
813 T frame1, frame2, frame3; \ 843 T frame1, frame2, frame3; \
814 std::unique_ptr<rtc::MemoryStream> ms( \ 844 std::unique_ptr<rtc::MemoryStream> ms( \
815 CreateYuvSample(kWidth, kHeight, BPP)); \ 845 CreateYuvSample(kWidth, kHeight, BPP)); \
816 ASSERT_TRUE(ms.get() != NULL); \ 846 ASSERT_TRUE(ms.get() != NULL); \
817 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, kHeight, \ 847 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, kHeight, \
818 kWidth, kHeight, webrtc::kVideoRotation_##ROTATE, \ 848 kWidth, kHeight, webrtc::kVideoRotation_##ROTATE, \
819 &frame1)); \ 849 &frame1)); \
820 size_t data_size; \ 850 size_t data_size; \
821 bool ret = ms->GetSize(&data_size); \ 851 bool ret = ms->GetSize(&data_size); \
822 EXPECT_TRUE(ret); \ 852 EXPECT_TRUE(ret); \
823 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ 853 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \
824 kHeight, \ 854 kHeight, \
825 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ 855 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \
826 data_size, 0, webrtc::kVideoRotation_0)); \ 856 data_size, 0, webrtc::kVideoRotation_0)); \
827 int width_rotate = frame1.width(); \ 857 int width_rotate = frame1.width(); \
828 int height_rotate = frame1.height(); \ 858 int height_rotate = frame1.height(); \
829 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \ 859 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \
830 libyuv::I420Rotate( \ 860 libyuv::I420Rotate(frame2.video_frame_buffer()->DataY(), \
831 frame2.GetYPlane(), frame2.GetYPitch(), frame2.GetUPlane(), \ 861 frame2.video_frame_buffer()->StrideY(), \
832 frame2.GetUPitch(), frame2.GetVPlane(), frame2.GetVPitch(), \ 862 frame2.video_frame_buffer()->DataU(), \
833 frame3.GetYPlane(), frame3.GetYPitch(), frame3.GetUPlane(), \ 863 frame2.video_frame_buffer()->StrideU(), \
834 frame3.GetUPitch(), frame3.GetVPlane(), frame3.GetVPitch(), kWidth, \ 864 frame2.video_frame_buffer()->DataV(), \
835 kHeight, libyuv::kRotate##ROTATE); \ 865 frame2.video_frame_buffer()->StrideV(), \
866 frame3.video_frame_buffer()->MutableDataY(), \
867 frame3.video_frame_buffer()->StrideY(), \
868 frame3.video_frame_buffer()->MutableDataU(), \
869 frame3.video_frame_buffer()->StrideU(), \
870 frame3.video_frame_buffer()->MutableDataV(), \
871 frame3.video_frame_buffer()->StrideV(), \
872 kWidth, kHeight, libyuv::kRotate##ROTATE); \
836 EXPECT_TRUE(IsEqual(frame1, frame3, 0)); \ 873 EXPECT_TRUE(IsEqual(frame1, frame3, 0)); \
837 } 874 }
838 875
839 // Test constructing an image with rotation. 876 // Test constructing an image with rotation.
840 TEST_ROTATE(I420, 12, 0) 877 TEST_ROTATE(I420, 12, 0)
841 TEST_ROTATE(I420, 12, 90) 878 TEST_ROTATE(I420, 12, 90)
842 TEST_ROTATE(I420, 12, 180) 879 TEST_ROTATE(I420, 12, 180)
843 TEST_ROTATE(I420, 12, 270) 880 TEST_ROTATE(I420, 12, 270)
844 TEST_ROTATE(YV12, 12, 0) 881 TEST_ROTATE(YV12, 12, 0)
845 TEST_ROTATE(YV12, 12, 90) 882 TEST_ROTATE(YV12, 12, 90)
(...skipping 99 matching lines...)
945 T frame; 982 T frame;
946 uint8_t pixels5x5[5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2]; 983 uint8_t pixels5x5[5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2];
947 memset(pixels5x5, 1, 5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2); 984 memset(pixels5x5, 1, 5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2);
948 for (int i = 0; i < repeat_; ++i) { 985 for (int i = 0; i < repeat_; ++i) {
949 EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 5, 5, 5, 5, pixels5x5, 986 EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 5, 5, 5, 5, pixels5x5,
950 sizeof(pixels5x5), 0, 987 sizeof(pixels5x5), 0,
951 webrtc::kVideoRotation_0)); 988 webrtc::kVideoRotation_0));
952 } 989 }
953 EXPECT_EQ(5, frame.width()); 990 EXPECT_EQ(5, frame.width());
954 EXPECT_EQ(5, frame.height()); 991 EXPECT_EQ(5, frame.height());
955 EXPECT_EQ(5, frame.GetYPitch()); 992 EXPECT_EQ(5, frame.video_frame_buffer()->StrideY());
956 EXPECT_EQ(3, frame.GetUPitch()); 993 EXPECT_EQ(3, frame.video_frame_buffer()->StrideU());
957 EXPECT_EQ(3, frame.GetVPitch()); 994 EXPECT_EQ(3, frame.video_frame_buffer()->StrideV());
958 } 995 }
959 996
960 // Test 1 pixel edge case image ARGB buffer. 997 // Test 1 pixel edge case image ARGB buffer.
961 void ConstructARGB1Pixel() { 998 void ConstructARGB1Pixel() {
962 T frame; 999 T frame;
963 uint8_t pixel[4] = {64, 128, 192, 255}; 1000 uint8_t pixel[4] = {64, 128, 192, 255};
964 for (int i = 0; i < repeat_; ++i) { 1001 for (int i = 0; i < repeat_; ++i) {
965 EXPECT_TRUE(frame.Init(cricket::FOURCC_ARGB, 1, 1, 1, 1, pixel, 1002 EXPECT_TRUE(frame.Init(cricket::FOURCC_ARGB, 1, 1, 1, 1, pixel,
966 sizeof(pixel), 0, 1003 sizeof(pixel), 0,
967 webrtc::kVideoRotation_0)); 1004 webrtc::kVideoRotation_0));
(...skipping 146 matching lines...)
1114 EXPECT_TRUE(IsEqual(frame1, frame2, 32)); 1151 EXPECT_TRUE(IsEqual(frame1, frame2, 32));
1115 } 1152 }
1116 1153
1117 // Test constructing an image from an I400 MJPG buffer. 1154 // Test constructing an image from an I400 MJPG buffer.
1118 // TODO(fbarchard): Stronger compare on chroma. Compare against a grey image. 1155 // TODO(fbarchard): Stronger compare on chroma. Compare against a grey image.
1119 void ConstructMjpgI400() { 1156 void ConstructMjpgI400() {
1120 T frame1, frame2; 1157 T frame1, frame2;
1121 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1158 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1122 ASSERT_TRUE(LoadFrame(kJpeg400Filename, 1159 ASSERT_TRUE(LoadFrame(kJpeg400Filename,
1123 cricket::FOURCC_MJPG, kWidth, kHeight, &frame2)); 1160 cricket::FOURCC_MJPG, kWidth, kHeight, &frame2));
1124 EXPECT_TRUE(IsPlaneEqual("y", frame1.GetYPlane(), frame1.GetYPitch(), 1161 EXPECT_TRUE(IsPlaneEqual("y", frame1.video_frame_buffer()->DataY(),
1125 frame2.GetYPlane(), frame2.GetYPitch(), 1162 frame1.video_frame_buffer()->StrideY(),
1163 frame2.video_frame_buffer()->DataY(),
1164 frame2.video_frame_buffer()->StrideY(),
1126 kWidth, kHeight, 32)); 1165 kWidth, kHeight, 32));
1127 EXPECT_TRUE(IsEqual(frame1, frame2, 128)); 1166 EXPECT_TRUE(IsEqual(frame1, frame2, 128));
1128 } 1167 }
1129 1168
1130 // Test constructing an image from an I420 MJPG buffer. 1169 // Test constructing an image from an I420 MJPG buffer.
1131 void ValidateFrame(const char* name, 1170 void ValidateFrame(const char* name,
1132 uint32_t fourcc, 1171 uint32_t fourcc,
1133 int data_adjust, 1172 int data_adjust,
1134 int size_adjust, 1173 int size_adjust,
1135 bool expected_result) { 1174 bool expected_result) {
(...skipping 161 matching lines...)
1297 } 1336 }
1298 1337
1299 // Test creating a copy and check that it just increments the refcount. 1338 // Test creating a copy and check that it just increments the refcount.
1300 void ConstructCopyIsRef() { 1339 void ConstructCopyIsRef() {
1301 T frame1, frame2; 1340 T frame1, frame2;
1302 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1341 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1303 for (int i = 0; i < repeat_; ++i) { 1342 for (int i = 0; i < repeat_; ++i) {
1304 EXPECT_TRUE(frame2.Init(frame1)); 1343 EXPECT_TRUE(frame2.Init(frame1));
1305 } 1344 }
1306 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); 1345 EXPECT_TRUE(IsEqual(frame1, frame2, 0));
1307 EXPECT_EQ(frame1.GetYPlane(), frame2.GetYPlane()); 1346 EXPECT_EQ(frame1.video_frame_buffer(), frame2.video_frame_buffer());
1308 EXPECT_EQ(frame1.GetUPlane(), frame2.GetUPlane());
1309 EXPECT_EQ(frame1.GetVPlane(), frame2.GetVPlane());
1310 } 1347 }
1311 1348
1312 // Test creating an empty image and initing it to black. 1349 // Test creating an empty image and initing it to black.
1313 void ConstructBlack() { 1350 void ConstructBlack() {
1314 T frame; 1351 T frame;
1315 for (int i = 0; i < repeat_; ++i) { 1352 for (int i = 0; i < repeat_; ++i) {
1316 EXPECT_TRUE(frame.InitToBlack(kWidth, kHeight, 0)); 1353 EXPECT_TRUE(frame.InitToBlack(kWidth, kHeight, 0));
1317 } 1354 }
1318 EXPECT_TRUE(IsSize(frame, kWidth, kHeight)); 1355 EXPECT_TRUE(IsSize(frame, kWidth, kHeight));
1319 EXPECT_TRUE(IsBlack(frame)); 1356 EXPECT_TRUE(IsBlack(frame));
(...skipping 92 matching lines...)
1412 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1449 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1413 1450
1414 for (int i = 0; i < repeat_to; ++i) { 1451 for (int i = 0; i < repeat_to; ++i) {
1415 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(fourcc, 1452 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(fourcc,
1416 out, 1453 out,
1417 out_size, stride)); 1454 out_size, stride));
1418 } 1455 }
1419 EXPECT_TRUE(frame2.InitToBlack(kWidth, kHeight, 0)); 1456 EXPECT_TRUE(frame2.InitToBlack(kWidth, kHeight, 0));
1420 for (int i = 0; i < repeat_from; ++i) { 1457 for (int i = 0; i < repeat_from; ++i) {
1421 EXPECT_EQ(0, RGBToI420(out, stride, 1458 EXPECT_EQ(0, RGBToI420(out, stride,
1422 frame2.GetYPlane(), frame2.GetYPitch(), 1459 frame2.video_frame_buffer()->MutableDataY(),
1423 frame2.GetUPlane(), frame2.GetUPitch(), 1460 frame2.video_frame_buffer()->StrideY(),
1424 frame2.GetVPlane(), frame2.GetVPitch(), 1461 frame2.video_frame_buffer()->MutableDataU(),
1462 frame2.video_frame_buffer()->StrideU(),
1463 frame2.video_frame_buffer()->MutableDataV(),
1464 frame2.video_frame_buffer()->StrideV(),
1425 kWidth, kHeight)); 1465 kWidth, kHeight));
1426 } 1466 }
1427 if (rowpad) { 1467 if (rowpad) {
1428 EXPECT_EQ(0, outtop[kWidth * bpp]); // Ensure stride skipped end of row. 1468 EXPECT_EQ(0, outtop[kWidth * bpp]); // Ensure stride skipped end of row.
1429 EXPECT_NE(0, outtop[astride]); // Ensure pixel at start of 2nd row. 1469 EXPECT_NE(0, outtop[astride]); // Ensure pixel at start of 2nd row.
1430 } else { 1470 } else {
1431 EXPECT_NE(0, outtop[kWidth * bpp]); // Expect something to be here. 1471 EXPECT_NE(0, outtop[kWidth * bpp]); // Expect something to be here.
1432 } 1472 }
1433 EXPECT_EQ(0, outtop[out_size]); // Ensure no overrun. 1473 EXPECT_EQ(0, outtop[out_size]); // Ensure no overrun.
1434 EXPECT_TRUE(IsEqual(frame1, frame2, error)); 1474 EXPECT_TRUE(IsEqual(frame1, frame2, error));
(...skipping 282 matching lines...)
1717 // Test converting from I420 to I422. 1757 // Test converting from I420 to I422.
1718 void ConvertToI422Buffer() { 1758 void ConvertToI422Buffer() {
1719 T frame1, frame2; 1759 T frame1, frame2;
1720 size_t out_size = kWidth * kHeight * 2; 1760 size_t out_size = kWidth * kHeight * 2;
1721 std::unique_ptr<uint8_t[]> buf(new uint8_t[out_size + kAlignment]); 1761 std::unique_ptr<uint8_t[]> buf(new uint8_t[out_size + kAlignment]);
1722 uint8_t* y = ALIGNP(buf.get(), kAlignment); 1762 uint8_t* y = ALIGNP(buf.get(), kAlignment);
1723 uint8_t* u = y + kWidth * kHeight; 1763 uint8_t* u = y + kWidth * kHeight;
1724 uint8_t* v = u + (kWidth / 2) * kHeight; 1764 uint8_t* v = u + (kWidth / 2) * kHeight;
1725 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1765 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1726 for (int i = 0; i < repeat_; ++i) { 1766 for (int i = 0; i < repeat_; ++i) {
1727 EXPECT_EQ(0, libyuv::I420ToI422(frame1.GetYPlane(), frame1.GetYPitch(), 1767 EXPECT_EQ(0, libyuv::I420ToI422(frame1.video_frame_buffer()->DataY(),
1728 frame1.GetUPlane(), frame1.GetUPitch(), 1768 frame1.video_frame_buffer()->StrideY(),
1729 frame1.GetVPlane(), frame1.GetVPitch(), 1769 frame1.video_frame_buffer()->DataU(),
1770 frame1.video_frame_buffer()->StrideU(),
1771 frame1.video_frame_buffer()->DataV(),
1772 frame1.video_frame_buffer()->StrideV(),
1730 y, kWidth, 1773 y, kWidth,
1731 u, kWidth / 2, 1774 u, kWidth / 2,
1732 v, kWidth / 2, 1775 v, kWidth / 2,
1733 kWidth, kHeight)); 1776 kWidth, kHeight));
1734 } 1777 }
1735 EXPECT_TRUE(frame2.Init(cricket::FOURCC_I422, kWidth, kHeight, kWidth, 1778 EXPECT_TRUE(frame2.Init(cricket::FOURCC_I422, kWidth, kHeight, kWidth,
1736 kHeight, y, out_size, 1, 1, 0, 1779 kHeight, y, out_size, 1, 1, 0,
1737 webrtc::kVideoRotation_0)); 1780 webrtc::kVideoRotation_0));
1738 EXPECT_TRUE(IsEqual(frame1, frame2, 1)); 1781 EXPECT_TRUE(IsEqual(frame1, frame2, 1));
1739 } 1782 }
1740 1783
1741 /////////////////// 1784 ///////////////////
1742 // General tests // 1785 // General tests //
1743 /////////////////// 1786 ///////////////////
1744 1787
1745 void Copy() { 1788 void Copy() {
1746 std::unique_ptr<T> source(new T); 1789 std::unique_ptr<T> source(new T);
1747 std::unique_ptr<cricket::VideoFrame> target; 1790 std::unique_ptr<cricket::VideoFrame> target;
1748 ASSERT_TRUE(LoadFrameNoRepeat(source.get())); 1791 ASSERT_TRUE(LoadFrameNoRepeat(source.get()));
1749 target.reset(source->Copy()); 1792 target.reset(source->Copy());
1750 EXPECT_TRUE(IsEqual(*source, *target, 0)); 1793 EXPECT_TRUE(IsEqual(*source, *target, 0));
1751 source.reset(); 1794 source.reset();
1752 EXPECT_TRUE(target->GetYPlane() != NULL); 1795 ASSERT_TRUE(target->video_frame_buffer() != NULL);
1796 EXPECT_TRUE(target->video_frame_buffer()->DataY() != NULL);
1753 } 1797 }
1754 1798
1755 void CopyIsRef() { 1799 void CopyIsRef() {
1756 std::unique_ptr<T> source(new T); 1800 std::unique_ptr<T> source(new T);
1757 std::unique_ptr<const cricket::VideoFrame> target; 1801 std::unique_ptr<const cricket::VideoFrame> target;
1758 ASSERT_TRUE(LoadFrameNoRepeat(source.get())); 1802 ASSERT_TRUE(LoadFrameNoRepeat(source.get()));
1759 target.reset(source->Copy()); 1803 target.reset(source->Copy());
1760 EXPECT_TRUE(IsEqual(*source, *target, 0)); 1804 EXPECT_TRUE(IsEqual(*source, *target, 0));
1761 const T* const_source = source.get(); 1805 const T* const_source = source.get();
1762 EXPECT_EQ(const_source->GetYPlane(), target->GetYPlane()); 1806 EXPECT_EQ(const_source->video_frame_buffer(), target->video_frame_buffer());
1763 EXPECT_EQ(const_source->GetUPlane(), target->GetUPlane());
1764 EXPECT_EQ(const_source->GetVPlane(), target->GetVPlane());
1765 } 1807 }
1766 1808
1767 void StretchToFrame() { 1809 void StretchToFrame() {
1768 // Create the source frame as a black frame. 1810 // Create the source frame as a black frame.
1769 T source; 1811 T source;
1770 EXPECT_TRUE(source.InitToBlack(kWidth * 2, kHeight * 2, 0)); 1812 EXPECT_TRUE(source.InitToBlack(kWidth * 2, kHeight * 2, 0));
1771 EXPECT_TRUE(IsSize(source, kWidth * 2, kHeight * 2)); 1813 EXPECT_TRUE(IsSize(source, kWidth * 2, kHeight * 2));
1772 1814
1773 // Create the target frame by loading from a file. 1815 // Create the target frame by loading from a file.
1774 T target1; 1816 T target1;
1775 ASSERT_TRUE(LoadFrameNoRepeat(&target1)); 1817 ASSERT_TRUE(LoadFrameNoRepeat(&target1));
1776 EXPECT_FALSE(IsBlack(target1)); 1818 EXPECT_FALSE(IsBlack(target1));
1777 1819
1778 // Stretch and check if the stretched target is black. 1820 // Stretch and check if the stretched target is black.
1779 source.StretchToFrame(&target1, true, false); 1821 source.StretchToFrame(&target1, true, false);
1780 EXPECT_TRUE(IsBlack(target1)); 1822 EXPECT_TRUE(IsBlack(target1));
1781 1823
1782 // Crop and stretch and check if the stretched target is black. 1824 // Crop and stretch and check if the stretched target is black.
1783 T target2; 1825 T target2;
1784 ASSERT_TRUE(LoadFrameNoRepeat(&target2)); 1826 ASSERT_TRUE(LoadFrameNoRepeat(&target2));
1785 source.StretchToFrame(&target2, true, true); 1827 source.StretchToFrame(&target2, true, true);
1786 EXPECT_TRUE(IsBlack(target2)); 1828 EXPECT_TRUE(IsBlack(target2));
1787 EXPECT_EQ(source.GetTimeStamp(), target2.GetTimeStamp()); 1829 EXPECT_EQ(source.GetTimeStamp(), target2.GetTimeStamp());
1788 } 1830 }
1789 1831
1790 int repeat_; 1832 int repeat_;
1791 }; 1833 };
1792 1834
1793 #endif // WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_ 1835 #endif // WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_