OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 236 matching lines...)
247 rgb[g_pos] = (x % 63 + y % 63) + 96; | 247 rgb[g_pos] = (x % 63 + y % 63) + 96; |
248 rgb[b_pos] = ((y / 63) & 1) ? 224 : 32; | 248 rgb[b_pos] = ((y / 63) & 1) ? 224 : 32; |
249 ms->Write(rgb, bytes, NULL, NULL); | 249 ms->Write(rgb, bytes, NULL, NULL); |
250 } | 250 } |
251 } | 251 } |
252 return ms.release(); | 252 return ms.release(); |
253 } | 253 } |
254 | 254 |
255 // Simple conversion routines to verify the optimized VideoFrame routines. | 255 // Simple conversion routines to verify the optimized VideoFrame routines. |
256 // Converts from the specified colorspace to I420. | 256 // Converts from the specified colorspace to I420. |
257 bool ConvertYuv422(const rtc::MemoryStream* ms, | 257 std::unique_ptr<T> ConvertYuv422(const rtc::MemoryStream* ms, |
258 uint32_t fourcc, | 258 uint32_t fourcc, |
259 uint32_t width, | 259 uint32_t width, |
260 uint32_t height, | 260 uint32_t height) { |
261 T* frame) { | |
262 int y1_pos, y2_pos, u_pos, v_pos; | 261 int y1_pos, y2_pos, u_pos, v_pos; |
263 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) { | 262 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) { |
264 return false; | 263 return nullptr; |
265 } | 264 } |
266 | 265 |
| 266 rtc::scoped_refptr<webrtc::I420Buffer> buffer( |
| 267 new rtc::RefCountedObject<webrtc::I420Buffer>(width, height)); |
| 268 |
| 269 buffer->SetToBlack(); |
| 270 |
267 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); | 271 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); |
268 int awidth = (width + 1) & ~1; | 272 int awidth = (width + 1) & ~1; |
269 frame->InitToBlack(width, height, 0); | 273 int stride_y = buffer->StrideY(); |
270 int stride_y = frame->video_frame_buffer()->StrideY(); | 274 int stride_u = buffer->StrideU(); |
271 int stride_u = frame->video_frame_buffer()->StrideU(); | 275 int stride_v = buffer->StrideV(); |
272 int stride_v = frame->video_frame_buffer()->StrideV(); | 276 uint8_t* plane_y = buffer->MutableDataY(); |
273 uint8_t* plane_y = frame->video_frame_buffer()->MutableDataY(); | 277 uint8_t* plane_u = buffer->MutableDataU(); |
274 uint8_t* plane_u = frame->video_frame_buffer()->MutableDataU(); | 278 uint8_t* plane_v = buffer->MutableDataV(); |
275 uint8_t* plane_v = frame->video_frame_buffer()->MutableDataV(); | |
276 for (uint32_t y = 0; y < height; ++y) { | 279 for (uint32_t y = 0; y < height; ++y) { |
277 for (uint32_t x = 0; x < width; x += 2) { | 280 for (uint32_t x = 0; x < width; x += 2) { |
278 const uint8_t* quad1 = start + (y * awidth + x) * 2; | 281 const uint8_t* quad1 = start + (y * awidth + x) * 2; |
279 plane_y[stride_y * y + x] = quad1[y1_pos]; | 282 plane_y[stride_y * y + x] = quad1[y1_pos]; |
280 if ((x + 1) < width) { | 283 if ((x + 1) < width) { |
281 plane_y[stride_y * y + x + 1] = quad1[y2_pos]; | 284 plane_y[stride_y * y + x + 1] = quad1[y2_pos]; |
282 } | 285 } |
283 if ((y & 1) == 0) { | 286 if ((y & 1) == 0) { |
284 const uint8_t* quad2 = quad1 + awidth * 2; | 287 const uint8_t* quad2 = quad1 + awidth * 2; |
285 if ((y + 1) >= height) { | 288 if ((y + 1) >= height) { |
286 quad2 = quad1; | 289 quad2 = quad1; |
287 } | 290 } |
288 plane_u[stride_u * (y / 2) + x / 2] = | 291 plane_u[stride_u * (y / 2) + x / 2] = |
289 (quad1[u_pos] + quad2[u_pos] + 1) / 2; | 292 (quad1[u_pos] + quad2[u_pos] + 1) / 2; |
290 plane_v[stride_v * (y / 2) + x / 2] = | 293 plane_v[stride_v * (y / 2) + x / 2] = |
291 (quad1[v_pos] + quad2[v_pos] + 1) / 2; | 294 (quad1[v_pos] + quad2[v_pos] + 1) / 2; |
292 } | 295 } |
293 } | 296 } |
294 } | 297 } |
295 return true; | 298 return std::unique_ptr<T>(new T(buffer, 0, webrtc::kVideoRotation_0)); |
296 } | 299 } |
297 | 300 |
298 // Convert RGB to 420. | 301 // Convert RGB to 420. |
299 // A negative height inverts the image. | 302 // A negative height inverts the image. |
300 bool ConvertRgb(const rtc::MemoryStream* ms, | 303 std::unique_ptr<T> ConvertRgb(const rtc::MemoryStream* ms, |
301 uint32_t fourcc, | 304 uint32_t fourcc, |
302 int32_t width, | 305 int32_t width, |
303 int32_t height, | 306 int32_t height) { |
304 T* frame) { | |
305 int r_pos, g_pos, b_pos, bytes; | 307 int r_pos, g_pos, b_pos, bytes; |
306 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) { | 308 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) { |
307 return false; | 309 return nullptr; |
308 } | 310 } |
309 int pitch = width * bytes; | 311 int pitch = width * bytes; |
310 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); | 312 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); |
311 if (height < 0) { | 313 if (height < 0) { |
312 height = -height; | 314 height = -height; |
313 start = start + pitch * (height - 1); | 315 start = start + pitch * (height - 1); |
314 pitch = -pitch; | 316 pitch = -pitch; |
315 } | 317 } |
316 frame->InitToBlack(width, height, 0); | 318 rtc::scoped_refptr<webrtc::I420Buffer> buffer( |
317 int stride_y = frame->video_frame_buffer()->StrideY(); | 319 new rtc::RefCountedObject<webrtc::I420Buffer>(width, height)); |
318 int stride_u = frame->video_frame_buffer()->StrideU(); | 320 |
319 int stride_v = frame->video_frame_buffer()->StrideV(); | 321 buffer->SetToBlack(); |
320 uint8_t* plane_y = frame->video_frame_buffer()->MutableDataY(); | 322 |
321 uint8_t* plane_u = frame->video_frame_buffer()->MutableDataU(); | 323 int stride_y = buffer->StrideY(); |
322 uint8_t* plane_v = frame->video_frame_buffer()->MutableDataV(); | 324 int stride_u = buffer->StrideU(); |
| 325 int stride_v = buffer->StrideV(); |
| 326 uint8_t* plane_y = buffer->MutableDataY(); |
| 327 uint8_t* plane_u = buffer->MutableDataU(); |
| 328 uint8_t* plane_v = buffer->MutableDataV(); |
323 for (int32_t y = 0; y < height; y += 2) { | 329 for (int32_t y = 0; y < height; y += 2) { |
324 for (int32_t x = 0; x < width; x += 2) { | 330 for (int32_t x = 0; x < width; x += 2) { |
325 const uint8_t* rgb[4]; | 331 const uint8_t* rgb[4]; |
326 uint8_t yuv[4][3]; | 332 uint8_t yuv[4][3]; |
327 rgb[0] = start + y * pitch + x * bytes; | 333 rgb[0] = start + y * pitch + x * bytes; |
328 rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0); | 334 rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0); |
329 rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0); | 335 rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0); |
330 rgb[3] = rgb[2] + ((x + 1) < width ? bytes : 0); | 336 rgb[3] = rgb[2] + ((x + 1) < width ? bytes : 0); |
331 for (size_t i = 0; i < 4; ++i) { | 337 for (size_t i = 0; i < 4; ++i) { |
332 ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos], | 338 ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos], |
333 &yuv[i][0], &yuv[i][1], &yuv[i][2]); | 339 &yuv[i][0], &yuv[i][1], &yuv[i][2]); |
334 } | 340 } |
335 plane_y[stride_y * y + x] = yuv[0][0]; | 341 plane_y[stride_y * y + x] = yuv[0][0]; |
336 if ((x + 1) < width) { | 342 if ((x + 1) < width) { |
337 plane_y[stride_y * y + x + 1] = yuv[1][0]; | 343 plane_y[stride_y * y + x + 1] = yuv[1][0]; |
338 } | 344 } |
339 if ((y + 1) < height) { | 345 if ((y + 1) < height) { |
340 plane_y[stride_y * (y + 1) + x] = yuv[2][0]; | 346 plane_y[stride_y * (y + 1) + x] = yuv[2][0]; |
341 if ((x + 1) < width) { | 347 if ((x + 1) < width) { |
342 plane_y[stride_y * (y + 1) + x + 1] = yuv[3][0]; | 348 plane_y[stride_y * (y + 1) + x + 1] = yuv[3][0]; |
343 } | 349 } |
344 } | 350 } |
345 plane_u[stride_u * (y / 2) + x / 2] = | 351 plane_u[stride_u * (y / 2) + x / 2] = |
346 (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4; | 352 (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4; |
347 plane_v[stride_v * (y / 2) + x / 2] = | 353 plane_v[stride_v * (y / 2) + x / 2] = |
348 (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4; | 354 (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4; |
349 } | 355 } |
350 } | 356 } |
351 return true; | 357 return std::unique_ptr<T>(new T(buffer, 0, webrtc::kVideoRotation_0)); |
352 } | 358 } |
353 | 359 |
354 // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia. | 360 // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia. |
355 void ConvertRgbPixel(uint8_t r, | 361 void ConvertRgbPixel(uint8_t r, |
356 uint8_t g, | 362 uint8_t g, |
357 uint8_t b, | 363 uint8_t b, |
358 uint8_t* y, | 364 uint8_t* y, |
359 uint8_t* u, | 365 uint8_t* u, |
360 uint8_t* v) { | 366 uint8_t* v) { |
361 *y = static_cast<int>(.257 * r + .504 * g + .098 * b) + 16; | 367 *y = static_cast<int>(.257 * r + .504 * g + .098 * b) + 16; |
(...skipping 137 matching lines...)
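For reference, a minimal self-contained sketch of the NTSC-style pixel conversion that ConvertRgbPixel implements. Only the luma row appears in the hunk above; the chroma rows fall in the elided region, so the U and V coefficients below are the standard BT.601 studio-swing values and are an assumption, as are the helper name and the clamping lambda.

    // Hypothetical standalone version of the slow reference conversion.
    #include <algorithm>
    #include <cstdint>

    static void RgbToYuvPixelSketch(uint8_t r, uint8_t g, uint8_t b,
                                    uint8_t* y, uint8_t* u, uint8_t* v) {
      auto clamp = [](int x) {
        return static_cast<uint8_t>(std::min(255, std::max(0, x)));
      };
      // The luma row matches the code above; the chroma coefficients are assumed.
      *y = clamp(static_cast<int>(.257 * r + .504 * g + .098 * b) + 16);
      *u = clamp(static_cast<int>(-.148 * r - .291 * g + .439 * b) + 128);
      *v = clamp(static_cast<int>(.439 * r - .368 * g - .071 * b) + 128);
    }
    // Example: pure white (255, 255, 255) maps to roughly (Y=235, U=128, V=128).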
499 frame2.video_frame_buffer()->StrideU(), | 505 frame2.video_frame_buffer()->StrideU(), |
500 frame2.video_frame_buffer()->DataV() | 506 frame2.video_frame_buffer()->DataV() |
501 + vcrop * frame2.video_frame_buffer()->StrideV() / 2 | 507 + vcrop * frame2.video_frame_buffer()->StrideV() / 2 |
502 + hcrop / 2, | 508 + hcrop / 2, |
503 frame2.video_frame_buffer()->StrideV(), | 509 frame2.video_frame_buffer()->StrideV(), |
504 max_error); | 510 max_error); |
505 } | 511 } |
506 | 512 |
507 static bool IsBlack(const cricket::VideoFrame& frame) { | 513 static bool IsBlack(const cricket::VideoFrame& frame) { |
508 return !IsNull(frame) && | 514 return !IsNull(frame) && |
509 *frame.video_frame_buffer()->DataY() == 16 && | 515 *frame.video_frame_buffer()->DataY() <= 16 && |
510 *frame.video_frame_buffer()->DataU() == 128 && | 516 *frame.video_frame_buffer()->DataU() == 128 && |
511 *frame.video_frame_buffer()->DataV() == 128; | 517 *frame.video_frame_buffer()->DataV() == 128; |
512 } | 518 } |
513 | 519 |
514 //////////////////////// | 520 //////////////////////// |
515 // Construction tests // | 521 // Construction tests // |
516 //////////////////////// | 522 //////////////////////// |
517 | 523 |
518 // Test constructing an image from an I420 buffer. | 524 // Test constructing an image from an I420 buffer. |
519 void ConstructI420() { | 525 void ConstructI420() { |
(...skipping 86 matching lines...)
606 yuy2, kWidth * 2, | 612 yuy2, kWidth * 2, |
607 kWidth, kHeight)); | 613 kWidth, kHeight)); |
608 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, | 614 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, |
609 kWidth, kHeight, &frame2)); | 615 kWidth, kHeight, &frame2)); |
610 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); | 616 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); |
611 } | 617 } |
612 | 618 |
613 // Test constructing an image from a wide YUY2 buffer. | 619 // Test constructing an image from a wide YUY2 buffer. |
614 // Normal is 1280x720. Wide is 12800x72 | 620 // Normal is 1280x720. Wide is 12800x72 |
615 void ConstructYuy2Wide() { | 621 void ConstructYuy2Wide() { |
616 T frame1, frame2; | |
617 std::unique_ptr<rtc::MemoryStream> ms( | 622 std::unique_ptr<rtc::MemoryStream> ms( |
618 CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth * 10, kHeight / 10)); | 623 CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth * 10, kHeight / 10)); |
619 ASSERT_TRUE(ms.get() != NULL); | 624 ASSERT_TRUE(ms.get() != NULL); |
620 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, | 625 std::unique_ptr<T> frame1 = ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, |
621 kWidth * 10, kHeight / 10, | 626 kWidth * 10, kHeight / 10); |
622 &frame1)); | 627 ASSERT_TRUE(frame1); |
| 628 T frame2; |
623 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, | 629 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, |
624 kWidth * 10, kHeight / 10, &frame2)); | 630 kWidth * 10, kHeight / 10, &frame2)); |
625 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); | 631 EXPECT_TRUE(IsEqual(*frame1, frame2, 0)); |
626 } | 632 } |
627 | 633 |
628 // Test constructing an image from a UYVY buffer. | 634 // Test constructing an image from a UYVY buffer. |
629 void ConstructUyvy() { | 635 void ConstructUyvy() { |
630 T frame1, frame2; | |
631 std::unique_ptr<rtc::MemoryStream> ms( | 636 std::unique_ptr<rtc::MemoryStream> ms( |
632 CreateYuv422Sample(cricket::FOURCC_UYVY, kWidth, kHeight)); | 637 CreateYuv422Sample(cricket::FOURCC_UYVY, kWidth, kHeight)); |
633 ASSERT_TRUE(ms.get() != NULL); | 638 ASSERT_TRUE(ms.get() != NULL); |
634 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_UYVY, kWidth, kHeight, | 639 std::unique_ptr<T> frame1 = ConvertYuv422(ms.get(), cricket::FOURCC_UYVY, |
635 &frame1)); | 640 kWidth, kHeight); |
| 641 T frame2; |
636 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_UYVY, | 642 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_UYVY, |
637 kWidth, kHeight, &frame2)); | 643 kWidth, kHeight, &frame2)); |
638 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); | 644 EXPECT_TRUE(IsEqual(*frame1, frame2, 0)); |
639 } | 645 } |
640 | 646 |
641 // Test constructing an image from a random buffer. | 647 // Test constructing an image from a random buffer. |
642 // We are merely verifying that the code succeeds and is free of crashes. | 648 // We are merely verifying that the code succeeds and is free of crashes. |
643 void ConstructM420() { | 649 void ConstructM420() { |
644 T frame; | 650 T frame; |
645 std::unique_ptr<rtc::MemoryStream> ms( | 651 std::unique_ptr<rtc::MemoryStream> ms( |
646 CreateYuvSample(kWidth, kHeight, 12)); | 652 CreateYuvSample(kWidth, kHeight, 12)); |
647 ASSERT_TRUE(ms.get() != NULL); | 653 ASSERT_TRUE(ms.get() != NULL); |
648 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_M420, | 654 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_M420, |
(...skipping 14 matching lines...)
663 std::unique_ptr<rtc::MemoryStream> ms( | 669 std::unique_ptr<rtc::MemoryStream> ms( |
664 CreateYuvSample(kWidth, kHeight, 12)); | 670 CreateYuvSample(kWidth, kHeight, 12)); |
665 ASSERT_TRUE(ms.get() != NULL); | 671 ASSERT_TRUE(ms.get() != NULL); |
666 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_NV12, | 672 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_NV12, |
667 kWidth, kHeight, &frame)); | 673 kWidth, kHeight, &frame)); |
668 } | 674 } |
669 | 675 |
670 // Test constructing an image from an ABGR buffer | 676 // Test constructing an image from an ABGR buffer |
671 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. | 677 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. |
672 void ConstructABGR() { | 678 void ConstructABGR() { |
673 T frame1, frame2; | |
674 std::unique_ptr<rtc::MemoryStream> ms( | 679 std::unique_ptr<rtc::MemoryStream> ms( |
675 CreateRgbSample(cricket::FOURCC_ABGR, kWidth, kHeight)); | 680 CreateRgbSample(cricket::FOURCC_ABGR, kWidth, kHeight)); |
676 ASSERT_TRUE(ms.get() != NULL); | 681 ASSERT_TRUE(ms.get() != NULL); |
677 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ABGR, kWidth, kHeight, | 682 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ABGR, |
678 &frame1)); | 683 kWidth, kHeight); |
| 684 ASSERT_TRUE(frame1); |
| 685 T frame2; |
679 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ABGR, | 686 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ABGR, |
680 kWidth, kHeight, &frame2)); | 687 kWidth, kHeight, &frame2)); |
681 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); | 688 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); |
682 } | 689 } |
683 | 690 |
684 // Test constructing an image from an ARGB buffer | 691 // Test constructing an image from an ARGB buffer |
685 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. | 692 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. |
686 void ConstructARGB() { | 693 void ConstructARGB() { |
687 T frame1, frame2; | |
688 std::unique_ptr<rtc::MemoryStream> ms( | 694 std::unique_ptr<rtc::MemoryStream> ms( |
689 CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); | 695 CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); |
690 ASSERT_TRUE(ms.get() != NULL); | 696 ASSERT_TRUE(ms.get() != NULL); |
691 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ARGB, kWidth, kHeight, | 697 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, |
692 &frame1)); | 698 kWidth, kHeight); |
| 699 ASSERT_TRUE(frame1); |
| 700 T frame2; |
693 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, | 701 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, |
694 kWidth, kHeight, &frame2)); | 702 kWidth, kHeight, &frame2)); |
695 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); | 703 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); |
696 } | 704 } |
697 | 705 |
698 // Test constructing an image from a wide ARGB buffer | 706 // Test constructing an image from a wide ARGB buffer |
699 // Normal is 1280x720. Wide is 12800x72 | 707 // Normal is 1280x720. Wide is 12800x72 |
700 void ConstructARGBWide() { | 708 void ConstructARGBWide() { |
701 T frame1, frame2; | |
702 std::unique_ptr<rtc::MemoryStream> ms( | 709 std::unique_ptr<rtc::MemoryStream> ms( |
703 CreateRgbSample(cricket::FOURCC_ARGB, kWidth * 10, kHeight / 10)); | 710 CreateRgbSample(cricket::FOURCC_ARGB, kWidth * 10, kHeight / 10)); |
704 ASSERT_TRUE(ms.get() != NULL); | 711 ASSERT_TRUE(ms.get() != NULL); |
705 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ARGB, | 712 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, |
706 kWidth * 10, kHeight / 10, &frame1)); | 713 kWidth * 10, kHeight / 10); |
| 714 ASSERT_TRUE(frame1); |
| 715 T frame2; |
707 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, | 716 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, |
708 kWidth * 10, kHeight / 10, &frame2)); | 717 kWidth * 10, kHeight / 10, &frame2)); |
709 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); | 718 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); |
710 } | 719 } |
711 | 720 |
712 // Test constructing an image from a BGRA buffer. | 721 // Test constructing an image from a BGRA buffer. |
713 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. | 722 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. |
714 void ConstructBGRA() { | 723 void ConstructBGRA() { |
715 T frame1, frame2; | |
716 std::unique_ptr<rtc::MemoryStream> ms( | 724 std::unique_ptr<rtc::MemoryStream> ms( |
717 CreateRgbSample(cricket::FOURCC_BGRA, kWidth, kHeight)); | 725 CreateRgbSample(cricket::FOURCC_BGRA, kWidth, kHeight)); |
718 ASSERT_TRUE(ms.get() != NULL); | 726 ASSERT_TRUE(ms.get() != NULL); |
719 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_BGRA, kWidth, kHeight, | 727 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_BGRA, |
720 &frame1)); | 728 kWidth, kHeight); |
| 729 ASSERT_TRUE(frame1); |
| 730 T frame2; |
721 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_BGRA, | 731 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_BGRA, |
722 kWidth, kHeight, &frame2)); | 732 kWidth, kHeight, &frame2)); |
723 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); | 733 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); |
724 } | 734 } |
725 | 735 |
726 // Test constructing an image from a 24BG buffer. | 736 // Test constructing an image from a 24BG buffer. |
727 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. | 737 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. |
728 void Construct24BG() { | 738 void Construct24BG() { |
729 T frame1, frame2; | |
730 std::unique_ptr<rtc::MemoryStream> ms( | 739 std::unique_ptr<rtc::MemoryStream> ms( |
731 CreateRgbSample(cricket::FOURCC_24BG, kWidth, kHeight)); | 740 CreateRgbSample(cricket::FOURCC_24BG, kWidth, kHeight)); |
732 ASSERT_TRUE(ms.get() != NULL); | 741 ASSERT_TRUE(ms.get() != NULL); |
733 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_24BG, kWidth, kHeight, | 742 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_24BG, |
734 &frame1)); | 743 kWidth, kHeight); |
| 744 ASSERT_TRUE(frame1); |
| 745 T frame2; |
735 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_24BG, | 746 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_24BG, |
736 kWidth, kHeight, &frame2)); | 747 kWidth, kHeight, &frame2)); |
737 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); | 748 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); |
738 } | 749 } |
739 | 750 |
740 // Test constructing an image from a raw RGB buffer. | 751 // Test constructing an image from a raw RGB buffer. |
741 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. | 752 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. |
742 void ConstructRaw() { | 753 void ConstructRaw() { |
743 T frame1, frame2; | |
744 std::unique_ptr<rtc::MemoryStream> ms( | 754 std::unique_ptr<rtc::MemoryStream> ms( |
745 CreateRgbSample(cricket::FOURCC_RAW, kWidth, kHeight)); | 755 CreateRgbSample(cricket::FOURCC_RAW, kWidth, kHeight)); |
746 ASSERT_TRUE(ms.get() != NULL); | 756 ASSERT_TRUE(ms.get() != NULL); |
747 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_RAW, kWidth, kHeight, | 757 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_RAW, |
748 &frame1)); | 758 kWidth, kHeight); |
| 759 ASSERT_TRUE(frame1); |
| 760 T frame2; |
749 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_RAW, | 761 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_RAW, |
750 kWidth, kHeight, &frame2)); | 762 kWidth, kHeight, &frame2)); |
751 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); | 763 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); |
752 } | 764 } |
753 | 765 |
754 // Test constructing an image from a RGB565 buffer | 766 // Test constructing an image from a RGB565 buffer |
755 void ConstructRGB565() { | 767 void ConstructRGB565() { |
756 T frame1, frame2; | 768 T frame1, frame2; |
757 size_t out_size = kWidth * kHeight * 2; | 769 size_t out_size = kWidth * kHeight * 2; |
758 std::unique_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]); | 770 std::unique_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]); |
759 uint8_t* out = ALIGNP(outbuf.get(), kAlignment); | 771 uint8_t* out = ALIGNP(outbuf.get(), kAlignment); |
760 T frame; | 772 T frame; |
761 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); | 773 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); |
(...skipping 49 matching lines...)
811 webrtc::kVideoRotation_180, &frame1)); \ | 823 webrtc::kVideoRotation_180, &frame1)); \ |
812 size_t data_size; \ | 824 size_t data_size; \ |
813 bool ret = ms->GetSize(&data_size); \ | 825 bool ret = ms->GetSize(&data_size); \ |
814 EXPECT_TRUE(ret); \ | 826 EXPECT_TRUE(ret); \ |
815 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ | 827 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ |
816 kHeight, \ | 828 kHeight, \ |
817 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ | 829 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ |
818 data_size, 0, webrtc::kVideoRotation_0)); \ | 830 data_size, 0, webrtc::kVideoRotation_0)); \ |
819 int width_rotate = frame1.width(); \ | 831 int width_rotate = frame1.width(); \ |
820 int height_rotate = frame1.height(); \ | 832 int height_rotate = frame1.height(); \ |
821 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \ | 833 frame3.InitToEmptyBuffer(width_rotate, height_rotate, 0); \ |
822 libyuv::I420Mirror(frame2.video_frame_buffer()->DataY(), \ | 834 libyuv::I420Mirror(frame2.video_frame_buffer()->DataY(), \ |
823 frame2.video_frame_buffer()->StrideY(), \ | 835 frame2.video_frame_buffer()->StrideY(), \ |
824 frame2.video_frame_buffer()->DataU(), \ | 836 frame2.video_frame_buffer()->DataU(), \ |
825 frame2.video_frame_buffer()->StrideU(), \ | 837 frame2.video_frame_buffer()->StrideU(), \ |
826 frame2.video_frame_buffer()->DataV(), \ | 838 frame2.video_frame_buffer()->DataV(), \ |
827 frame2.video_frame_buffer()->StrideV(), \ | 839 frame2.video_frame_buffer()->StrideV(), \ |
828 frame3.video_frame_buffer()->MutableDataY(), \ | 840 frame3.video_frame_buffer()->MutableDataY(), \ |
829 frame3.video_frame_buffer()->StrideY(), \ | 841 frame3.video_frame_buffer()->StrideY(), \ |
830 frame3.video_frame_buffer()->MutableDataU(), \ | 842 frame3.video_frame_buffer()->MutableDataU(), \ |
831 frame3.video_frame_buffer()->StrideU(), \ | 843 frame3.video_frame_buffer()->StrideU(), \ |
(...skipping 17 matching lines...)
849 &frame1)); \ | 861 &frame1)); \ |
850 size_t data_size; \ | 862 size_t data_size; \ |
851 bool ret = ms->GetSize(&data_size); \ | 863 bool ret = ms->GetSize(&data_size); \ |
852 EXPECT_TRUE(ret); \ | 864 EXPECT_TRUE(ret); \ |
853 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ | 865 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ |
854 kHeight, \ | 866 kHeight, \ |
855 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ | 867 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ |
856 data_size, 0, webrtc::kVideoRotation_0)); \ | 868 data_size, 0, webrtc::kVideoRotation_0)); \ |
857 int width_rotate = frame1.width(); \ | 869 int width_rotate = frame1.width(); \ |
858 int height_rotate = frame1.height(); \ | 870 int height_rotate = frame1.height(); \ |
859 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \ | 871 frame3.InitToEmptyBuffer(width_rotate, height_rotate, 0); \ |
860 libyuv::I420Rotate(frame2.video_frame_buffer()->DataY(), \ | 872 libyuv::I420Rotate(frame2.video_frame_buffer()->DataY(), \ |
861 frame2.video_frame_buffer()->StrideY(), \ | 873 frame2.video_frame_buffer()->StrideY(), \ |
862 frame2.video_frame_buffer()->DataU(), \ | 874 frame2.video_frame_buffer()->DataU(), \ |
863 frame2.video_frame_buffer()->StrideU(), \ | 875 frame2.video_frame_buffer()->StrideU(), \ |
864 frame2.video_frame_buffer()->DataV(), \ | 876 frame2.video_frame_buffer()->DataV(), \ |
865 frame2.video_frame_buffer()->StrideV(), \ | 877 frame2.video_frame_buffer()->StrideV(), \ |
866 frame3.video_frame_buffer()->MutableDataY(), \ | 878 frame3.video_frame_buffer()->MutableDataY(), \ |
867 frame3.video_frame_buffer()->StrideY(), \ | 879 frame3.video_frame_buffer()->StrideY(), \ |
868 frame3.video_frame_buffer()->MutableDataU(), \ | 880 frame3.video_frame_buffer()->MutableDataU(), \ |
869 frame3.video_frame_buffer()->StrideU(), \ | 881 frame3.video_frame_buffer()->StrideU(), \ |
(...skipping 203 matching lines...)
1073 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, | 1085 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, |
1074 &frame1)); | 1086 &frame1)); |
1075 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, | 1087 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, |
1076 kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, | 1088 kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, |
1077 &frame2)); | 1089 &frame2)); |
1078 EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, kWidth / 8, 0, 0)); | 1090 EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, kWidth / 8, 0, 0)); |
1079 } | 1091 } |
1080 | 1092 |
1081 // Test constructing an image from an ARGB buffer with horizontal cropping. | 1093 // Test constructing an image from an ARGB buffer with horizontal cropping. |
1082 void ConstructARGBCropHorizontal() { | 1094 void ConstructARGBCropHorizontal() { |
1083 T frame1, frame2; | |
1084 std::unique_ptr<rtc::MemoryStream> ms( | 1095 std::unique_ptr<rtc::MemoryStream> ms( |
1085 CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); | 1096 CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); |
1086 ASSERT_TRUE(ms.get() != NULL); | 1097 ASSERT_TRUE(ms.get() != NULL); |
1087 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ARGB, kWidth, kHeight, | 1098 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, |
1088 &frame1)); | 1099 kWidth, kHeight); |
| 1100 ASSERT_TRUE(frame1); |
| 1101 T frame2; |
1089 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, kWidth, kHeight, | 1102 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, kWidth, kHeight, |
1090 kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, | 1103 kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, |
1091 &frame2)); | 1104 &frame2)); |
1092 EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, kWidth / 8, 0, 2)); | 1105 EXPECT_TRUE(IsEqualWithCrop(frame2, *frame1, kWidth / 8, 0, 2)); |
1093 } | 1106 } |
1094 | 1107 |
1095 // Test constructing an image from an I420 buffer, cropping top and bottom. | 1108 // Test constructing an image from an I420 buffer, cropping top and bottom. |
1096 void ConstructI420CropVertical() { | 1109 void ConstructI420CropVertical() { |
1097 T frame1, frame2; | 1110 T frame1, frame2; |
1098 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); | 1111 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); |
1099 ASSERT_TRUE(LoadFrame(kImageFilename, cricket::FOURCC_I420, kWidth, kHeight, | 1112 ASSERT_TRUE(LoadFrame(kImageFilename, cricket::FOURCC_I420, kWidth, kHeight, |
1100 kWidth, kHeight * 3 / 4, webrtc::kVideoRotation_0, | 1113 kWidth, kHeight * 3 / 4, webrtc::kVideoRotation_0, |
1101 &frame2)); | 1114 &frame2)); |
1102 EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, 0, kHeight / 8, 0)); | 1115 EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, 0, kHeight / 8, 0)); |
(...skipping 236 matching lines...)
1339 void ConstructCopyIsRef() { | 1352 void ConstructCopyIsRef() { |
1340 T frame1, frame2; | 1353 T frame1, frame2; |
1341 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); | 1354 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); |
1342 for (int i = 0; i < repeat_; ++i) { | 1355 for (int i = 0; i < repeat_; ++i) { |
1343 EXPECT_TRUE(frame2.Init(frame1)); | 1356 EXPECT_TRUE(frame2.Init(frame1)); |
1344 } | 1357 } |
1345 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); | 1358 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); |
1346 EXPECT_EQ(frame1.video_frame_buffer(), frame2.video_frame_buffer()); | 1359 EXPECT_EQ(frame1.video_frame_buffer(), frame2.video_frame_buffer()); |
1347 } | 1360 } |
1348 | 1361 |
1349 // Test creating an empty image and initing it to black. | |
1350 void ConstructBlack() { | |
1351 T frame; | |
1352 for (int i = 0; i < repeat_; ++i) { | |
1353 EXPECT_TRUE(frame.InitToBlack(kWidth, kHeight, 0)); | |
1354 } | |
1355 EXPECT_TRUE(IsSize(frame, kWidth, kHeight)); | |
1356 EXPECT_TRUE(IsBlack(frame)); | |
1357 } | |
1358 | |
1359 // Test constructing an image from a YUY2 buffer with a range of sizes. | 1362 // Test constructing an image from a YUY2 buffer with a range of sizes. |
1360 // Only tests that conversion does not crash or corrupt heap. | 1363 // Only tests that conversion does not crash or corrupt heap. |
1361 void ConstructYuy2AllSizes() { | 1364 void ConstructYuy2AllSizes() { |
1362 T frame1, frame2; | 1365 T frame1, frame2; |
1363 for (int height = kMinHeightAll; height <= kMaxHeightAll; ++height) { | 1366 for (int height = kMinHeightAll; height <= kMaxHeightAll; ++height) { |
1364 for (int width = kMinWidthAll; width <= kMaxWidthAll; ++width) { | 1367 for (int width = kMinWidthAll; width <= kMaxWidthAll; ++width) { |
1365 std::unique_ptr<rtc::MemoryStream> ms( | 1368 std::unique_ptr<rtc::MemoryStream> ms( |
1366 CreateYuv422Sample(cricket::FOURCC_YUY2, width, height)); | 1369 CreateYuv422Sample(cricket::FOURCC_YUY2, width, height)); |
1367 ASSERT_TRUE(ms.get() != NULL); | 1370 ASSERT_TRUE(ms.get() != NULL); |
1368 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, width, height, | 1371 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, width, height, |
1369 &frame1)); | 1372 &frame1)); |
1370 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, | 1373 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, |
1371 width, height, &frame2)); | 1374 width, height, &frame2)); |
1372 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); | 1375 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); |
1373 } | 1376 } |
1374 } | 1377 } |
1375 } | 1378 } |
1376 | 1379 |
1377 // Test constructing an image from an ARGB buffer with a range of sizes. | 1380 // Test constructing an image from an ARGB buffer with a range of sizes. |
1378 // Only tests that conversion does not crash or corrupt heap. | 1381 // Only tests that conversion does not crash or corrupt heap. |
1379 void ConstructARGBAllSizes() { | 1382 void ConstructARGBAllSizes() { |
1380 T frame1, frame2; | |
1381 for (int height = kMinHeightAll; height <= kMaxHeightAll; ++height) { | 1383 for (int height = kMinHeightAll; height <= kMaxHeightAll; ++height) { |
1382 for (int width = kMinWidthAll; width <= kMaxWidthAll; ++width) { | 1384 for (int width = kMinWidthAll; width <= kMaxWidthAll; ++width) { |
1383 std::unique_ptr<rtc::MemoryStream> ms( | 1385 std::unique_ptr<rtc::MemoryStream> ms( |
1384 CreateRgbSample(cricket::FOURCC_ARGB, width, height)); | 1386 CreateRgbSample(cricket::FOURCC_ARGB, width, height)); |
1385 ASSERT_TRUE(ms.get() != NULL); | 1387 ASSERT_TRUE(ms.get() != NULL); |
1386 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ARGB, width, height, | 1388 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, |
1387 &frame1)); | 1389 width, height); |
| 1390 ASSERT_TRUE(frame1); |
| 1391 T frame2; |
1388 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, | 1392 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, |
1389 width, height, &frame2)); | 1393 width, height, &frame2)); |
1390 EXPECT_TRUE(IsEqual(frame1, frame2, 64)); | 1394 EXPECT_TRUE(IsEqual(*frame1, frame2, 64)); |
1391 } | 1395 } |
1392 } | 1396 } |
1393 // Test a practical window size for screencasting usecase. | 1397 // Test a practical window size for screencasting usecase. |
1394 const int kOddWidth = 1228; | 1398 const int kOddWidth = 1228; |
1395 const int kOddHeight = 260; | 1399 const int kOddHeight = 260; |
1396 for (int j = 0; j < 2; ++j) { | 1400 for (int j = 0; j < 2; ++j) { |
1397 for (int i = 0; i < 2; ++i) { | 1401 for (int i = 0; i < 2; ++i) { |
1398 std::unique_ptr<rtc::MemoryStream> ms( | 1402 std::unique_ptr<rtc::MemoryStream> ms( |
1399 CreateRgbSample(cricket::FOURCC_ARGB, kOddWidth + i, kOddHeight + j)); | 1403 CreateRgbSample(cricket::FOURCC_ARGB, kOddWidth + i, kOddHeight + j)); |
1400 ASSERT_TRUE(ms.get() != NULL); | 1404 ASSERT_TRUE(ms.get() != NULL); |
1401 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ARGB, | 1405 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, |
1402 kOddWidth + i, kOddHeight + j, | 1406 kOddWidth + i, kOddHeight + j); |
1403 &frame1)); | 1407 ASSERT_TRUE(frame1); |
| 1408 T frame2; |
1404 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, | 1409 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, |
1405 kOddWidth + i, kOddHeight + j, &frame2)); | 1410 kOddWidth + i, kOddHeight + j, &frame2)); |
1406 EXPECT_TRUE(IsEqual(frame1, frame2, 64)); | 1411 EXPECT_TRUE(IsEqual(*frame1, frame2, 64)); |
1407 } | 1412 } |
1408 } | 1413 } |
1409 } | 1414 } |
1410 | 1415 |
1411 ////////////////////// | 1416 ////////////////////// |
1412 // Conversion tests // | 1417 // Conversion tests // |
1413 ////////////////////// | 1418 ////////////////////// |
1414 | 1419 |
1415 enum ToFrom { TO, FROM }; | 1420 enum ToFrom { TO, FROM }; |
1416 | 1421 |
(...skipping 29 matching lines...)
1446 out += (kHeight - 1) * stride; // Point to last row. | 1451 out += (kHeight - 1) * stride; // Point to last row. |
1447 stride = -stride; | 1452 stride = -stride; |
1448 } | 1453 } |
1449 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); | 1454 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); |
1450 | 1455 |
1451 for (int i = 0; i < repeat_to; ++i) { | 1456 for (int i = 0; i < repeat_to; ++i) { |
1452 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(fourcc, | 1457 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(fourcc, |
1453 out, | 1458 out, |
1454 out_size, stride)); | 1459 out_size, stride)); |
1455 } | 1460 } |
1456 EXPECT_TRUE(frame2.InitToBlack(kWidth, kHeight, 0)); | 1461 frame2.InitToEmptyBuffer(kWidth, kHeight, 0); |
1457 for (int i = 0; i < repeat_from; ++i) { | 1462 for (int i = 0; i < repeat_from; ++i) { |
1458 EXPECT_EQ(0, RGBToI420(out, stride, | 1463 EXPECT_EQ(0, RGBToI420(out, stride, |
1459 frame2.video_frame_buffer()->MutableDataY(), | 1464 frame2.video_frame_buffer()->MutableDataY(), |
1460 frame2.video_frame_buffer()->StrideY(), | 1465 frame2.video_frame_buffer()->StrideY(), |
1461 frame2.video_frame_buffer()->MutableDataU(), | 1466 frame2.video_frame_buffer()->MutableDataU(), |
1462 frame2.video_frame_buffer()->StrideU(), | 1467 frame2.video_frame_buffer()->StrideU(), |
1463 frame2.video_frame_buffer()->MutableDataV(), | 1468 frame2.video_frame_buffer()->MutableDataV(), |
1464 frame2.video_frame_buffer()->StrideV(), | 1469 frame2.video_frame_buffer()->StrideV(), |
1465 kWidth, kHeight)); | 1470 kWidth, kHeight)); |
1466 } | 1471 } |
(...skipping 334 matching lines...)
1801 std::unique_ptr<const cricket::VideoFrame> target; | 1806 std::unique_ptr<const cricket::VideoFrame> target; |
1802 ASSERT_TRUE(LoadFrameNoRepeat(source.get())); | 1807 ASSERT_TRUE(LoadFrameNoRepeat(source.get())); |
1803 target.reset(source->Copy()); | 1808 target.reset(source->Copy()); |
1804 EXPECT_TRUE(IsEqual(*source, *target, 0)); | 1809 EXPECT_TRUE(IsEqual(*source, *target, 0)); |
1805 const T* const_source = source.get(); | 1810 const T* const_source = source.get(); |
1806 EXPECT_EQ(const_source->video_frame_buffer(), target->video_frame_buffer()); | 1811 EXPECT_EQ(const_source->video_frame_buffer(), target->video_frame_buffer()); |
1807 } | 1812 } |
1808 | 1813 |
1809 void StretchToFrame() { | 1814 void StretchToFrame() { |
1810 // Create the source frame as a black frame. | 1815 // Create the source frame as a black frame. |
1811 T source; | 1816 rtc::scoped_refptr<webrtc::I420Buffer> buffer( |
1812 EXPECT_TRUE(source.InitToBlack(kWidth * 2, kHeight * 2, 0)); | 1817 new rtc::RefCountedObject<webrtc::I420Buffer>(kWidth * 2, kHeight * 2)); |
| 1818 |
| 1819 buffer->SetToBlack(); |
| 1820 T source(buffer, 0, webrtc::kVideoRotation_0); |
| 1821 |
1813 EXPECT_TRUE(IsSize(source, kWidth * 2, kHeight * 2)); | 1822 EXPECT_TRUE(IsSize(source, kWidth * 2, kHeight * 2)); |
1814 | 1823 |
1815 // Create the target frame by loading from a file. | 1824 // Create the target frame by loading from a file. |
1816 T target1; | 1825 T target1; |
1817 ASSERT_TRUE(LoadFrameNoRepeat(&target1)); | 1826 ASSERT_TRUE(LoadFrameNoRepeat(&target1)); |
1818 EXPECT_FALSE(IsBlack(target1)); | 1827 EXPECT_FALSE(IsBlack(target1)); |
1819 | 1828 |
1820 // Stretch and check if the stretched target is black. | 1829 // Stretch and check if the stretched target is black. |
1821 source.StretchToFrame(&target1, true, false); | 1830 source.StretchToFrame(&target1, true, false); |
1822 EXPECT_TRUE(IsBlack(target1)); | 1831 EXPECT_TRUE(IsBlack(target1)); |
1823 | 1832 |
1824 // Crop and stretch and check if the stretched target is black. | 1833 // Crop and stretch and check if the stretched target is black. |
1825 T target2; | 1834 T target2; |
1826 ASSERT_TRUE(LoadFrameNoRepeat(&target2)); | 1835 ASSERT_TRUE(LoadFrameNoRepeat(&target2)); |
1827 source.StretchToFrame(&target2, true, true); | 1836 source.StretchToFrame(&target2, true, true); |
1828 EXPECT_TRUE(IsBlack(target2)); | 1837 EXPECT_TRUE(IsBlack(target2)); |
1829 EXPECT_EQ(source.GetTimeStamp(), target2.GetTimeStamp()); | 1838 EXPECT_EQ(source.GetTimeStamp(), target2.GetTimeStamp()); |
1830 } | 1839 } |
1831 | 1840 |
1832 int repeat_; | 1841 int repeat_; |
1833 }; | 1842 }; |
1834 | 1843 |
1835 #endif // WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_ | 1844 #endif // WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_ |
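As a closing illustration, not part of the patch itself: a minimal sketch of the buffer-first construction pattern the hunks above migrate to, in place of T::InitToBlack(width, height, 0). The type and function names are taken from the new code in this diff; the wrapper function ConstructBlackSketch is hypothetical and only shows the shape of the API, roughly how the removed ConstructBlack test could be expressed.

    // Illustrative only, assuming T is the frame type this fixture is
    // templated on (as in the hunks above).
    void ConstructBlackSketch() {
      rtc::scoped_refptr<webrtc::I420Buffer> buffer(
          new rtc::RefCountedObject<webrtc::I420Buffer>(kWidth, kHeight));
      buffer->SetToBlack();
      T black(buffer, 0, webrtc::kVideoRotation_0);
      EXPECT_TRUE(IsSize(black, kWidth, kHeight));
      EXPECT_TRUE(IsBlack(black));  // IsBlack() accepts luma <= 16 after this change.
    }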