OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2004 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 236 matching lines...)
247 rgb[g_pos] = (x % 63 + y % 63) + 96; | 247 rgb[g_pos] = (x % 63 + y % 63) + 96; |
248 rgb[b_pos] = ((y / 63) & 1) ? 224 : 32; | 248 rgb[b_pos] = ((y / 63) & 1) ? 224 : 32; |
249 ms->Write(rgb, bytes, NULL, NULL); | 249 ms->Write(rgb, bytes, NULL, NULL); |
250 } | 250 } |
251 } | 251 } |
252 return ms.release(); | 252 return ms.release(); |
253 } | 253 } |
254 | 254 |
255 // Simple conversion routines to verify the optimized VideoFrame routines. | 255 // Simple conversion routines to verify the optimized VideoFrame routines. |
256 // Converts from the specified colorspace to I420. | 256 // Converts from the specified colorspace to I420. |
257 std::unique_ptr<T> ConvertYuv422(const rtc::MemoryStream* ms, | 257 bool ConvertYuv422(const rtc::MemoryStream* ms, |
258 uint32_t fourcc, | 258 uint32_t fourcc, |
259 uint32_t width, | 259 uint32_t width, |
260 uint32_t height) { | 260 uint32_t height, |
| 261 T* frame) { |
261 int y1_pos, y2_pos, u_pos, v_pos; | 262 int y1_pos, y2_pos, u_pos, v_pos; |
262 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) { | 263 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) { |
263 return nullptr; | 264 return false; |
264 } | 265 } |
265 | 266 |
266 rtc::scoped_refptr<webrtc::I420Buffer> buffer( | |
267 new rtc::RefCountedObject<webrtc::I420Buffer>(width, height)); | |
268 | |
269 buffer->SetToBlack(); | |
270 | |
271 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); | 267 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); |
272 int awidth = (width + 1) & ~1; | 268 int awidth = (width + 1) & ~1; |
273 int stride_y = buffer->StrideY(); | 269 frame->InitToBlack(width, height, 0); |
274 int stride_u = buffer->StrideU(); | 270 int stride_y = frame->video_frame_buffer()->StrideY(); |
275 int stride_v = buffer->StrideV(); | 271 int stride_u = frame->video_frame_buffer()->StrideU(); |
276 uint8_t* plane_y = buffer->MutableDataY(); | 272 int stride_v = frame->video_frame_buffer()->StrideV(); |
277 uint8_t* plane_u = buffer->MutableDataU(); | 273 uint8_t* plane_y = frame->video_frame_buffer()->MutableDataY(); |
278 uint8_t* plane_v = buffer->MutableDataV(); | 274 uint8_t* plane_u = frame->video_frame_buffer()->MutableDataU(); |
| 275 uint8_t* plane_v = frame->video_frame_buffer()->MutableDataV(); |
279 for (uint32_t y = 0; y < height; ++y) { | 276 for (uint32_t y = 0; y < height; ++y) { |
280 for (uint32_t x = 0; x < width; x += 2) { | 277 for (uint32_t x = 0; x < width; x += 2) { |
281 const uint8_t* quad1 = start + (y * awidth + x) * 2; | 278 const uint8_t* quad1 = start + (y * awidth + x) * 2; |
282 plane_y[stride_y * y + x] = quad1[y1_pos]; | 279 plane_y[stride_y * y + x] = quad1[y1_pos]; |
283 if ((x + 1) < width) { | 280 if ((x + 1) < width) { |
284 plane_y[stride_y * y + x + 1] = quad1[y2_pos]; | 281 plane_y[stride_y * y + x + 1] = quad1[y2_pos]; |
285 } | 282 } |
286 if ((y & 1) == 0) { | 283 if ((y & 1) == 0) { |
287 const uint8_t* quad2 = quad1 + awidth * 2; | 284 const uint8_t* quad2 = quad1 + awidth * 2; |
288 if ((y + 1) >= height) { | 285 if ((y + 1) >= height) { |
289 quad2 = quad1; | 286 quad2 = quad1; |
290 } | 287 } |
291 plane_u[stride_u * (y / 2) + x / 2] = | 288 plane_u[stride_u * (y / 2) + x / 2] = |
292 (quad1[u_pos] + quad2[u_pos] + 1) / 2; | 289 (quad1[u_pos] + quad2[u_pos] + 1) / 2; |
293 plane_v[stride_v * (y / 2) + x / 2] = | 290 plane_v[stride_v * (y / 2) + x / 2] = |
294 (quad1[v_pos] + quad2[v_pos] + 1) / 2; | 291 (quad1[v_pos] + quad2[v_pos] + 1) / 2; |
295 } | 292 } |
296 } | 293 } |
297 } | 294 } |
298 return std::unique_ptr<T>(new T(buffer, 0, webrtc::kVideoRotation_0)); | 295 return true; |
299 } | 296 } |
300 | 297 |
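[Reviewer note, not part of the file] Every hunk in this CL repeats the same calling-convention swap in the test helpers: the old code (left column) builds an I420Buffer, wraps it in a frame and returns std::unique_ptr<T>, with null signalling failure; the new code (right column) takes a caller-owned T*, initializes it to black and returns bool. A minimal sketch of how each side is invoked, assuming the surrounding test-fixture context (T, kWidth, kHeight and the helpers above); all names come from this diff:

    std::unique_ptr<rtc::MemoryStream> ms(
        CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth, kHeight));
    ASSERT_TRUE(ms.get() != NULL);

    // Old convention (left column): a null return signals failure.
    std::unique_ptr<T> converted =
        ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight);
    ASSERT_TRUE(converted);

    // New convention (right column): the frame is an out-parameter that is
    // initialized to black and then filled; a false return signals failure.
    T frame;
    EXPECT_TRUE(
        ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, &frame));
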
301 // Convert RGB to 420. | 298 // Convert RGB to 420. |
302 // A negative height inverts the image. | 299 // A negative height inverts the image. |
303 std::unique_ptr<T> ConvertRgb(const rtc::MemoryStream* ms, | 300 bool ConvertRgb(const rtc::MemoryStream* ms, |
304 uint32_t fourcc, | 301 uint32_t fourcc, |
305 int32_t width, | 302 int32_t width, |
306 int32_t height) { | 303 int32_t height, |
| 304 T* frame) { |
307 int r_pos, g_pos, b_pos, bytes; | 305 int r_pos, g_pos, b_pos, bytes; |
308 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) { | 306 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) { |
309 return nullptr; | 307 return false; |
310 } | 308 } |
311 int pitch = width * bytes; | 309 int pitch = width * bytes; |
312 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); | 310 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer()); |
313 if (height < 0) { | 311 if (height < 0) { |
314 height = -height; | 312 height = -height; |
315 start = start + pitch * (height - 1); | 313 start = start + pitch * (height - 1); |
316 pitch = -pitch; | 314 pitch = -pitch; |
317 } | 315 } |
318 rtc::scoped_refptr<webrtc::I420Buffer> buffer( | 316 frame->InitToBlack(width, height, 0); |
319 new rtc::RefCountedObject<webrtc::I420Buffer>(width, height)); | 317 int stride_y = frame->video_frame_buffer()->StrideY(); |
320 | 318 int stride_u = frame->video_frame_buffer()->StrideU(); |
321 buffer->SetToBlack(); | 319 int stride_v = frame->video_frame_buffer()->StrideV(); |
322 | 320 uint8_t* plane_y = frame->video_frame_buffer()->MutableDataY(); |
323 int stride_y = buffer->StrideY(); | 321 uint8_t* plane_u = frame->video_frame_buffer()->MutableDataU(); |
324 int stride_u = buffer->StrideU(); | 322 uint8_t* plane_v = frame->video_frame_buffer()->MutableDataV(); |
325 int stride_v = buffer->StrideV(); | |
326 uint8_t* plane_y = buffer->MutableDataY(); | |
327 uint8_t* plane_u = buffer->MutableDataU(); | |
328 uint8_t* plane_v = buffer->MutableDataV(); | |
329 for (int32_t y = 0; y < height; y += 2) { | 323 for (int32_t y = 0; y < height; y += 2) { |
330 for (int32_t x = 0; x < width; x += 2) { | 324 for (int32_t x = 0; x < width; x += 2) { |
331 const uint8_t* rgb[4]; | 325 const uint8_t* rgb[4]; |
332 uint8_t yuv[4][3]; | 326 uint8_t yuv[4][3]; |
333 rgb[0] = start + y * pitch + x * bytes; | 327 rgb[0] = start + y * pitch + x * bytes; |
334 rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0); | 328 rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0); |
335 rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0); | 329 rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0); |
336 rgb[3] = rgb[2] + ((x + 1) < width ? bytes : 0); | 330 rgb[3] = rgb[2] + ((x + 1) < width ? bytes : 0); |
337 for (size_t i = 0; i < 4; ++i) { | 331 for (size_t i = 0; i < 4; ++i) { |
338 ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos], | 332 ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos], |
339 &yuv[i][0], &yuv[i][1], &yuv[i][2]); | 333 &yuv[i][0], &yuv[i][1], &yuv[i][2]); |
340 } | 334 } |
341 plane_y[stride_y * y + x] = yuv[0][0]; | 335 plane_y[stride_y * y + x] = yuv[0][0]; |
342 if ((x + 1) < width) { | 336 if ((x + 1) < width) { |
343 plane_y[stride_y * y + x + 1] = yuv[1][0]; | 337 plane_y[stride_y * y + x + 1] = yuv[1][0]; |
344 } | 338 } |
345 if ((y + 1) < height) { | 339 if ((y + 1) < height) { |
346 plane_y[stride_y * (y + 1) + x] = yuv[2][0]; | 340 plane_y[stride_y * (y + 1) + x] = yuv[2][0]; |
347 if ((x + 1) < width) { | 341 if ((x + 1) < width) { |
348 plane_y[stride_y * (y + 1) + x + 1] = yuv[3][0]; | 342 plane_y[stride_y * (y + 1) + x + 1] = yuv[3][0]; |
349 } | 343 } |
350 } | 344 } |
351 plane_u[stride_u * (y / 2) + x / 2] = | 345 plane_u[stride_u * (y / 2) + x / 2] = |
352 (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4; | 346 (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4; |
353 plane_v[stride_v * (y / 2) + x / 2] = | 347 plane_v[stride_v * (y / 2) + x / 2] = |
354 (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4; | 348 (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4; |
355 } | 349 } |
356 } | 350 } |
357 return std::unique_ptr<T>(new T(buffer, 0, webrtc::kVideoRotation_0)); | 351 return true; |
358 } | 352 } |
359 | 353 |
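[Reviewer note] The (a + b + 1) / 2 and (a + b + c + d + 2) / 4 expressions in the two helpers above are ordinary round-to-nearest integer averaging of the chroma samples; a quick compile-time check (illustration only):

    static_assert((100 + 103 + 1) / 2 == 102, "2-tap average: 101.5 rounds up");
    static_assert((100 + 101 + 102 + 103 + 2) / 4 == 102,
                  "4-tap average: 101.5 rounds up");
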
360 // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia. | 354 // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia. |
361 void ConvertRgbPixel(uint8_t r, | 355 void ConvertRgbPixel(uint8_t r, |
362 uint8_t g, | 356 uint8_t g, |
363 uint8_t b, | 357 uint8_t b, |
364 uint8_t* y, | 358 uint8_t* y, |
365 uint8_t* u, | 359 uint8_t* u, |
366 uint8_t* v) { | 360 uint8_t* v) { |
367 *y = static_cast<int>(.257 * r + .504 * g + .098 * b) + 16; | 361 *y = static_cast<int>(.257 * r + .504 * g + .098 * b) + 16; |
(...skipping 137 matching lines...)
505 frame2.video_frame_buffer()->StrideU(), | 499 frame2.video_frame_buffer()->StrideU(), |
506 frame2.video_frame_buffer()->DataV() | 500 frame2.video_frame_buffer()->DataV() |
507 + vcrop * frame2.video_frame_buffer()->StrideV() / 2 | 501 + vcrop * frame2.video_frame_buffer()->StrideV() / 2 |
508 + hcrop / 2, | 502 + hcrop / 2, |
509 frame2.video_frame_buffer()->StrideV(), | 503 frame2.video_frame_buffer()->StrideV(), |
510 max_error); | 504 max_error); |
511 } | 505 } |
512 | 506 |
513 static bool IsBlack(const cricket::VideoFrame& frame) { | 507 static bool IsBlack(const cricket::VideoFrame& frame) { |
514 return !IsNull(frame) && | 508 return !IsNull(frame) && |
515 *frame.video_frame_buffer()->DataY() <= 16 && | 509 *frame.video_frame_buffer()->DataY() == 16 && |
516 *frame.video_frame_buffer()->DataU() == 128 && | 510 *frame.video_frame_buffer()->DataU() == 128 && |
517 *frame.video_frame_buffer()->DataV() == 128; | 511 *frame.video_frame_buffer()->DataV() == 128; |
518 } | 512 } |
519 | 513 |
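[Reviewer note] On the IsBlack() change above (Y now compared with == 16 rather than <= 16): with the limited-range luma formula from ConvertRgbPixel() earlier in this file, a black RGB pixel maps to exactly Y = 16, and for black every RGB-weighted chroma term is zero, leaving only the 128 offset, so U = V = 128. A tiny standalone check of that arithmetic (illustration only; the U/V formula lines themselves sit outside the hunks shown here):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint8_t r = 0, g = 0, b = 0;  // black in RGB
      // Same limited-range luma expression as ConvertRgbPixel().
      const uint8_t y = static_cast<int>(.257 * r + .504 * g + .098 * b) + 16;
      const uint8_t u = 128;  // chroma offset only; all RGB terms are zero
      const uint8_t v = 128;
      assert(y == 16 && u == 128 && v == 128);  // the values IsBlack() expects
      return 0;
    }
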
520 //////////////////////// | 514 //////////////////////// |
521 // Construction tests // | 515 // Construction tests // |
522 //////////////////////// | 516 //////////////////////// |
523 | 517 |
524 // Test constructing an image from a I420 buffer. | 518 // Test constructing an image from a I420 buffer. |
525 void ConstructI420() { | 519 void ConstructI420() { |
(...skipping 86 matching lines...)
612 yuy2, kWidth * 2, | 606 yuy2, kWidth * 2, |
613 kWidth, kHeight)); | 607 kWidth, kHeight)); |
614 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, | 608 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, |
615 kWidth, kHeight, &frame2)); | 609 kWidth, kHeight, &frame2)); |
616 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); | 610 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); |
617 } | 611 } |
618 | 612 |
619 // Test constructing an image from a wide YUY2 buffer. | 613 // Test constructing an image from a wide YUY2 buffer. |
620 // Normal is 1280x720. Wide is 12800x72 | 614 // Normal is 1280x720. Wide is 12800x72 |
621 void ConstructYuy2Wide() { | 615 void ConstructYuy2Wide() { |
| 616 T frame1, frame2; |
622 std::unique_ptr<rtc::MemoryStream> ms( | 617 std::unique_ptr<rtc::MemoryStream> ms( |
623 CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth * 10, kHeight / 10)); | 618 CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth * 10, kHeight / 10)); |
624 ASSERT_TRUE(ms.get() != NULL); | 619 ASSERT_TRUE(ms.get() != NULL); |
625 std::unique_ptr<T> frame1 = ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, | 620 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, |
626 kWidth * 10, kHeight / 10); | 621 kWidth * 10, kHeight / 10, |
627 ASSERT_TRUE(frame1); | 622 &frame1)); |
628 T frame2; | |
629 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, | 623 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, |
630 kWidth * 10, kHeight / 10, &frame2)); | 624 kWidth * 10, kHeight / 10, &frame2)); |
631 EXPECT_TRUE(IsEqual(*frame1, frame2, 0)); | 625 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); |
632 } | 626 } |
633 | 627 |
634 // Test constructing an image from a UYVY buffer. | 628 // Test constructing an image from a UYVY buffer. |
635 void ConstructUyvy() { | 629 void ConstructUyvy() { |
| 630 T frame1, frame2; |
636 std::unique_ptr<rtc::MemoryStream> ms( | 631 std::unique_ptr<rtc::MemoryStream> ms( |
637 CreateYuv422Sample(cricket::FOURCC_UYVY, kWidth, kHeight)); | 632 CreateYuv422Sample(cricket::FOURCC_UYVY, kWidth, kHeight)); |
638 ASSERT_TRUE(ms.get() != NULL); | 633 ASSERT_TRUE(ms.get() != NULL); |
639 std::unique_ptr<T> frame1 = ConvertYuv422(ms.get(), cricket::FOURCC_UYVY, | 634 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_UYVY, kWidth, kHeight, |
640 kWidth, kHeight); | 635 &frame1)); |
641 T frame2; | |
642 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_UYVY, | 636 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_UYVY, |
643 kWidth, kHeight, &frame2)); | 637 kWidth, kHeight, &frame2)); |
644 EXPECT_TRUE(IsEqual(*frame1, frame2, 0)); | 638 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); |
645 } | 639 } |
646 | 640 |
647 // Test constructing an image from a random buffer. | 641 // Test constructing an image from a random buffer. |
648 // We are merely verifying that the code succeeds and is free of crashes. | 642 // We are merely verifying that the code succeeds and is free of crashes. |
649 void ConstructM420() { | 643 void ConstructM420() { |
650 T frame; | 644 T frame; |
651 std::unique_ptr<rtc::MemoryStream> ms( | 645 std::unique_ptr<rtc::MemoryStream> ms( |
652 CreateYuvSample(kWidth, kHeight, 12)); | 646 CreateYuvSample(kWidth, kHeight, 12)); |
653 ASSERT_TRUE(ms.get() != NULL); | 647 ASSERT_TRUE(ms.get() != NULL); |
654 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_M420, | 648 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_M420, |
(...skipping 14 matching lines...)
669 std::unique_ptr<rtc::MemoryStream> ms( | 663 std::unique_ptr<rtc::MemoryStream> ms( |
670 CreateYuvSample(kWidth, kHeight, 12)); | 664 CreateYuvSample(kWidth, kHeight, 12)); |
671 ASSERT_TRUE(ms.get() != NULL); | 665 ASSERT_TRUE(ms.get() != NULL); |
672 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_NV12, | 666 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_NV12, |
673 kWidth, kHeight, &frame)); | 667 kWidth, kHeight, &frame)); |
674 } | 668 } |
675 | 669 |
676 // Test constructing an image from a ABGR buffer | 670 // Test constructing an image from a ABGR buffer |
677 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. | 671 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. |
678 void ConstructABGR() { | 672 void ConstructABGR() { |
| 673 T frame1, frame2; |
679 std::unique_ptr<rtc::MemoryStream> ms( | 674 std::unique_ptr<rtc::MemoryStream> ms( |
680 CreateRgbSample(cricket::FOURCC_ABGR, kWidth, kHeight)); | 675 CreateRgbSample(cricket::FOURCC_ABGR, kWidth, kHeight)); |
681 ASSERT_TRUE(ms.get() != NULL); | 676 ASSERT_TRUE(ms.get() != NULL); |
682 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ABGR, | 677 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ABGR, kWidth, kHeight, |
683 kWidth, kHeight); | 678 &frame1)); |
684 ASSERT_TRUE(frame1); | |
685 T frame2; | |
686 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ABGR, | 679 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ABGR, |
687 kWidth, kHeight, &frame2)); | 680 kWidth, kHeight, &frame2)); |
688 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); | 681 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); |
689 } | 682 } |
690 | 683 |
691 // Test constructing an image from a ARGB buffer | 684 // Test constructing an image from a ARGB buffer |
692 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. | 685 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. |
693 void ConstructARGB() { | 686 void ConstructARGB() { |
| 687 T frame1, frame2; |
694 std::unique_ptr<rtc::MemoryStream> ms( | 688 std::unique_ptr<rtc::MemoryStream> ms( |
695 CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); | 689 CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); |
696 ASSERT_TRUE(ms.get() != NULL); | 690 ASSERT_TRUE(ms.get() != NULL); |
697 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, | 691 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ARGB, kWidth, kHeight, |
698 kWidth, kHeight); | 692 &frame1)); |
699 ASSERT_TRUE(frame1); | |
700 T frame2; | |
701 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, | 693 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, |
702 kWidth, kHeight, &frame2)); | 694 kWidth, kHeight, &frame2)); |
703 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); | 695 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); |
704 } | 696 } |
705 | 697 |
706 // Test constructing an image from a wide ARGB buffer | 698 // Test constructing an image from a wide ARGB buffer |
707 // Normal is 1280x720. Wide is 12800x72 | 699 // Normal is 1280x720. Wide is 12800x72 |
708 void ConstructARGBWide() { | 700 void ConstructARGBWide() { |
| 701 T frame1, frame2; |
709 std::unique_ptr<rtc::MemoryStream> ms( | 702 std::unique_ptr<rtc::MemoryStream> ms( |
710 CreateRgbSample(cricket::FOURCC_ARGB, kWidth * 10, kHeight / 10)); | 703 CreateRgbSample(cricket::FOURCC_ARGB, kWidth * 10, kHeight / 10)); |
711 ASSERT_TRUE(ms.get() != NULL); | 704 ASSERT_TRUE(ms.get() != NULL); |
712 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, | 705 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ARGB, |
713 kWidth * 10, kHeight / 10); | 706 kWidth * 10, kHeight / 10, &frame1)); |
714 ASSERT_TRUE(frame1); | |
715 T frame2; | |
716 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, | 707 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, |
717 kWidth * 10, kHeight / 10, &frame2)); | 708 kWidth * 10, kHeight / 10, &frame2)); |
718 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); | 709 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); |
719 } | 710 } |
720 | 711 |
721 // Test constructing an image from an BGRA buffer. | 712 // Test constructing an image from an BGRA buffer. |
722 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. | 713 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. |
723 void ConstructBGRA() { | 714 void ConstructBGRA() { |
| 715 T frame1, frame2; |
724 std::unique_ptr<rtc::MemoryStream> ms( | 716 std::unique_ptr<rtc::MemoryStream> ms( |
725 CreateRgbSample(cricket::FOURCC_BGRA, kWidth, kHeight)); | 717 CreateRgbSample(cricket::FOURCC_BGRA, kWidth, kHeight)); |
726 ASSERT_TRUE(ms.get() != NULL); | 718 ASSERT_TRUE(ms.get() != NULL); |
727 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_BGRA, | 719 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_BGRA, kWidth, kHeight, |
728 kWidth, kHeight); | 720 &frame1)); |
729 ASSERT_TRUE(frame1); | |
730 T frame2; | |
731 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_BGRA, | 721 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_BGRA, |
732 kWidth, kHeight, &frame2)); | 722 kWidth, kHeight, &frame2)); |
733 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); | 723 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); |
734 } | 724 } |
735 | 725 |
736 // Test constructing an image from a 24BG buffer. | 726 // Test constructing an image from a 24BG buffer. |
737 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. | 727 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. |
738 void Construct24BG() { | 728 void Construct24BG() { |
| 729 T frame1, frame2; |
739 std::unique_ptr<rtc::MemoryStream> ms( | 730 std::unique_ptr<rtc::MemoryStream> ms( |
740 CreateRgbSample(cricket::FOURCC_24BG, kWidth, kHeight)); | 731 CreateRgbSample(cricket::FOURCC_24BG, kWidth, kHeight)); |
741 ASSERT_TRUE(ms.get() != NULL); | 732 ASSERT_TRUE(ms.get() != NULL); |
742 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_24BG, | 733 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_24BG, kWidth, kHeight, |
743 kWidth, kHeight); | 734 &frame1)); |
744 ASSERT_TRUE(frame1); | |
745 T frame2; | |
746 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_24BG, | 735 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_24BG, |
747 kWidth, kHeight, &frame2)); | 736 kWidth, kHeight, &frame2)); |
748 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); | 737 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); |
749 } | 738 } |
750 | 739 |
751 // Test constructing an image from a raw RGB buffer. | 740 // Test constructing an image from a raw RGB buffer. |
752 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. | 741 // Due to rounding, some pixels may differ slightly from the VideoFrame impl. |
753 void ConstructRaw() { | 742 void ConstructRaw() { |
| 743 T frame1, frame2; |
754 std::unique_ptr<rtc::MemoryStream> ms( | 744 std::unique_ptr<rtc::MemoryStream> ms( |
755 CreateRgbSample(cricket::FOURCC_RAW, kWidth, kHeight)); | 745 CreateRgbSample(cricket::FOURCC_RAW, kWidth, kHeight)); |
756 ASSERT_TRUE(ms.get() != NULL); | 746 ASSERT_TRUE(ms.get() != NULL); |
757 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_RAW, | 747 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_RAW, kWidth, kHeight, |
758 kWidth, kHeight); | 748 &frame1)); |
759 ASSERT_TRUE(frame1); | |
760 T frame2; | |
761 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_RAW, | 749 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_RAW, |
762 kWidth, kHeight, &frame2)); | 750 kWidth, kHeight, &frame2)); |
763 EXPECT_TRUE(IsEqual(*frame1, frame2, 2)); | 751 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); |
764 } | 752 } |
765 | 753 |
766 // Test constructing an image from a RGB565 buffer | 754 // Test constructing an image from a RGB565 buffer |
767 void ConstructRGB565() { | 755 void ConstructRGB565() { |
768 T frame1, frame2; | 756 T frame1, frame2; |
769 size_t out_size = kWidth * kHeight * 2; | 757 size_t out_size = kWidth * kHeight * 2; |
770 std::unique_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]); | 758 std::unique_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]); |
771 uint8_t* out = ALIGNP(outbuf.get(), kAlignment); | 759 uint8_t* out = ALIGNP(outbuf.get(), kAlignment); |
772 T frame; | 760 T frame; |
773 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); | 761 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); |
(...skipping 49 matching lines...)
823 webrtc::kVideoRotation_180, &frame1)); \ | 811 webrtc::kVideoRotation_180, &frame1)); \ |
824 size_t data_size; \ | 812 size_t data_size; \ |
825 bool ret = ms->GetSize(&data_size); \ | 813 bool ret = ms->GetSize(&data_size); \ |
826 EXPECT_TRUE(ret); \ | 814 EXPECT_TRUE(ret); \ |
827 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ | 815 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ |
828 kHeight, \ | 816 kHeight, \ |
829 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ | 817 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ |
830 data_size, 0, webrtc::kVideoRotation_0)); \ | 818 data_size, 0, webrtc::kVideoRotation_0)); \ |
831 int width_rotate = frame1.width(); \ | 819 int width_rotate = frame1.width(); \ |
832 int height_rotate = frame1.height(); \ | 820 int height_rotate = frame1.height(); \ |
833 frame3.InitToEmptyBuffer(width_rotate, height_rotate, 0); \ | 821 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \ |
834 libyuv::I420Mirror(frame2.video_frame_buffer()->DataY(), \ | 822 libyuv::I420Mirror(frame2.video_frame_buffer()->DataY(), \ |
835 frame2.video_frame_buffer()->StrideY(), \ | 823 frame2.video_frame_buffer()->StrideY(), \ |
836 frame2.video_frame_buffer()->DataU(), \ | 824 frame2.video_frame_buffer()->DataU(), \ |
837 frame2.video_frame_buffer()->StrideU(), \ | 825 frame2.video_frame_buffer()->StrideU(), \ |
838 frame2.video_frame_buffer()->DataV(), \ | 826 frame2.video_frame_buffer()->DataV(), \ |
839 frame2.video_frame_buffer()->StrideV(), \ | 827 frame2.video_frame_buffer()->StrideV(), \ |
840 frame3.video_frame_buffer()->MutableDataY(), \ | 828 frame3.video_frame_buffer()->MutableDataY(), \ |
841 frame3.video_frame_buffer()->StrideY(), \ | 829 frame3.video_frame_buffer()->StrideY(), \ |
842 frame3.video_frame_buffer()->MutableDataU(), \ | 830 frame3.video_frame_buffer()->MutableDataU(), \ |
843 frame3.video_frame_buffer()->StrideU(), \ | 831 frame3.video_frame_buffer()->StrideU(), \ |
(...skipping 17 matching lines...)
861 &frame1)); \ | 849 &frame1)); \ |
862 size_t data_size; \ | 850 size_t data_size; \ |
863 bool ret = ms->GetSize(&data_size); \ | 851 bool ret = ms->GetSize(&data_size); \ |
864 EXPECT_TRUE(ret); \ | 852 EXPECT_TRUE(ret); \ |
865 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ | 853 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ |
866 kHeight, \ | 854 kHeight, \ |
867 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ | 855 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \ |
868 data_size, 0, webrtc::kVideoRotation_0)); \ | 856 data_size, 0, webrtc::kVideoRotation_0)); \ |
869 int width_rotate = frame1.width(); \ | 857 int width_rotate = frame1.width(); \ |
870 int height_rotate = frame1.height(); \ | 858 int height_rotate = frame1.height(); \ |
871 frame3.InitToEmptyBuffer(width_rotate, height_rotate, 0); \ | 859 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 0)); \ |
872 libyuv::I420Rotate(frame2.video_frame_buffer()->DataY(), \ | 860 libyuv::I420Rotate(frame2.video_frame_buffer()->DataY(), \ |
873 frame2.video_frame_buffer()->StrideY(), \ | 861 frame2.video_frame_buffer()->StrideY(), \ |
874 frame2.video_frame_buffer()->DataU(), \ | 862 frame2.video_frame_buffer()->DataU(), \ |
875 frame2.video_frame_buffer()->StrideU(), \ | 863 frame2.video_frame_buffer()->StrideU(), \ |
876 frame2.video_frame_buffer()->DataV(), \ | 864 frame2.video_frame_buffer()->DataV(), \ |
877 frame2.video_frame_buffer()->StrideV(), \ | 865 frame2.video_frame_buffer()->StrideV(), \ |
878 frame3.video_frame_buffer()->MutableDataY(), \ | 866 frame3.video_frame_buffer()->MutableDataY(), \ |
879 frame3.video_frame_buffer()->StrideY(), \ | 867 frame3.video_frame_buffer()->StrideY(), \ |
880 frame3.video_frame_buffer()->MutableDataU(), \ | 868 frame3.video_frame_buffer()->MutableDataU(), \ |
881 frame3.video_frame_buffer()->StrideU(), \ | 869 frame3.video_frame_buffer()->StrideU(), \ |
(...skipping 203 matching lines...)
1085 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, | 1073 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, |
1086 &frame1)); | 1074 &frame1)); |
1087 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, | 1075 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, |
1088 kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, | 1076 kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, |
1089 &frame2)); | 1077 &frame2)); |
1090 EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, kWidth / 8, 0, 0)); | 1078 EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, kWidth / 8, 0, 0)); |
1091 } | 1079 } |
1092 | 1080 |
1093 // Test constructing an image from an ARGB buffer with horizontal cropping. | 1081 // Test constructing an image from an ARGB buffer with horizontal cropping. |
1094 void ConstructARGBCropHorizontal() { | 1082 void ConstructARGBCropHorizontal() { |
| 1083 T frame1, frame2; |
1095 std::unique_ptr<rtc::MemoryStream> ms( | 1084 std::unique_ptr<rtc::MemoryStream> ms( |
1096 CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); | 1085 CreateRgbSample(cricket::FOURCC_ARGB, kWidth, kHeight)); |
1097 ASSERT_TRUE(ms.get() != NULL); | 1086 ASSERT_TRUE(ms.get() != NULL); |
1098 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, | 1087 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ARGB, kWidth, kHeight, |
1099 kWidth, kHeight); | 1088 &frame1)); |
1100 ASSERT_TRUE(frame1); | |
1101 T frame2; | |
1102 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, kWidth, kHeight, | 1089 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, kWidth, kHeight, |
1103 kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, | 1090 kWidth * 3 / 4, kHeight, webrtc::kVideoRotation_0, |
1104 &frame2)); | 1091 &frame2)); |
1105 EXPECT_TRUE(IsEqualWithCrop(frame2, *frame1, kWidth / 8, 0, 2)); | 1092 EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, kWidth / 8, 0, 2)); |
1106 } | 1093 } |
1107 | 1094 |
1108 // Test constructing an image from an I420 buffer, cropping top and bottom. | 1095 // Test constructing an image from an I420 buffer, cropping top and bottom. |
1109 void ConstructI420CropVertical() { | 1096 void ConstructI420CropVertical() { |
1110 T frame1, frame2; | 1097 T frame1, frame2; |
1111 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); | 1098 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); |
1112 ASSERT_TRUE(LoadFrame(kImageFilename, cricket::FOURCC_I420, kWidth, kHeight, | 1099 ASSERT_TRUE(LoadFrame(kImageFilename, cricket::FOURCC_I420, kWidth, kHeight, |
1113 kWidth, kHeight * 3 / 4, webrtc::kVideoRotation_0, | 1100 kWidth, kHeight * 3 / 4, webrtc::kVideoRotation_0, |
1114 &frame2)); | 1101 &frame2)); |
1115 EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, 0, kHeight / 8, 0)); | 1102 EXPECT_TRUE(IsEqualWithCrop(frame2, frame1, 0, kHeight / 8, 0)); |
(...skipping 236 matching lines...)
1352 void ConstructCopyIsRef() { | 1339 void ConstructCopyIsRef() { |
1353 T frame1, frame2; | 1340 T frame1, frame2; |
1354 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); | 1341 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); |
1355 for (int i = 0; i < repeat_; ++i) { | 1342 for (int i = 0; i < repeat_; ++i) { |
1356 EXPECT_TRUE(frame2.Init(frame1)); | 1343 EXPECT_TRUE(frame2.Init(frame1)); |
1357 } | 1344 } |
1358 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); | 1345 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); |
1359 EXPECT_EQ(frame1.video_frame_buffer(), frame2.video_frame_buffer()); | 1346 EXPECT_EQ(frame1.video_frame_buffer(), frame2.video_frame_buffer()); |
1360 } | 1347 } |
1361 | 1348 |
| 1349 // Test creating an empty image and initing it to black. |
| 1350 void ConstructBlack() { |
| 1351 T frame; |
| 1352 for (int i = 0; i < repeat_; ++i) { |
| 1353 EXPECT_TRUE(frame.InitToBlack(kWidth, kHeight, 0)); |
| 1354 } |
| 1355 EXPECT_TRUE(IsSize(frame, kWidth, kHeight)); |
| 1356 EXPECT_TRUE(IsBlack(frame)); |
| 1357 } |
| 1358 |
1362 // Test constructing an image from a YUY2 buffer with a range of sizes. | 1359 // Test constructing an image from a YUY2 buffer with a range of sizes. |
1363 // Only tests that conversion does not crash or corrupt heap. | 1360 // Only tests that conversion does not crash or corrupt heap. |
1364 void ConstructYuy2AllSizes() { | 1361 void ConstructYuy2AllSizes() { |
1365 T frame1, frame2; | 1362 T frame1, frame2; |
1366 for (int height = kMinHeightAll; height <= kMaxHeightAll; ++height) { | 1363 for (int height = kMinHeightAll; height <= kMaxHeightAll; ++height) { |
1367 for (int width = kMinWidthAll; width <= kMaxWidthAll; ++width) { | 1364 for (int width = kMinWidthAll; width <= kMaxWidthAll; ++width) { |
1368 std::unique_ptr<rtc::MemoryStream> ms( | 1365 std::unique_ptr<rtc::MemoryStream> ms( |
1369 CreateYuv422Sample(cricket::FOURCC_YUY2, width, height)); | 1366 CreateYuv422Sample(cricket::FOURCC_YUY2, width, height)); |
1370 ASSERT_TRUE(ms.get() != NULL); | 1367 ASSERT_TRUE(ms.get() != NULL); |
1371 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, width, height, | 1368 EXPECT_TRUE(ConvertYuv422(ms.get(), cricket::FOURCC_YUY2, width, height, |
1372 &frame1)); | 1369 &frame1)); |
1373 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, | 1370 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, |
1374 width, height, &frame2)); | 1371 width, height, &frame2)); |
1375 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); | 1372 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); |
1376 } | 1373 } |
1377 } | 1374 } |
1378 } | 1375 } |
1379 | 1376 |
1380 // Test constructing an image from a ARGB buffer with a range of sizes. | 1377 // Test constructing an image from a ARGB buffer with a range of sizes. |
1381 // Only tests that conversion does not crash or corrupt heap. | 1378 // Only tests that conversion does not crash or corrupt heap. |
1382 void ConstructARGBAllSizes() { | 1379 void ConstructARGBAllSizes() { |
| 1380 T frame1, frame2; |
1383 for (int height = kMinHeightAll; height <= kMaxHeightAll; ++height) { | 1381 for (int height = kMinHeightAll; height <= kMaxHeightAll; ++height) { |
1384 for (int width = kMinWidthAll; width <= kMaxWidthAll; ++width) { | 1382 for (int width = kMinWidthAll; width <= kMaxWidthAll; ++width) { |
1385 std::unique_ptr<rtc::MemoryStream> ms( | 1383 std::unique_ptr<rtc::MemoryStream> ms( |
1386 CreateRgbSample(cricket::FOURCC_ARGB, width, height)); | 1384 CreateRgbSample(cricket::FOURCC_ARGB, width, height)); |
1387 ASSERT_TRUE(ms.get() != NULL); | 1385 ASSERT_TRUE(ms.get() != NULL); |
1388 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, | 1386 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ARGB, width, height, |
1389 width, height); | 1387 &frame1)); |
1390 ASSERT_TRUE(frame1); | |
1391 T frame2; | |
1392 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, | 1388 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, |
1393 width, height, &frame2)); | 1389 width, height, &frame2)); |
1394 EXPECT_TRUE(IsEqual(*frame1, frame2, 64)); | 1390 EXPECT_TRUE(IsEqual(frame1, frame2, 64)); |
1395 } | 1391 } |
1396 } | 1392 } |
1397 // Test a practical window size for screencasting usecase. | 1393 // Test a practical window size for screencasting usecase. |
1398 const int kOddWidth = 1228; | 1394 const int kOddWidth = 1228; |
1399 const int kOddHeight = 260; | 1395 const int kOddHeight = 260; |
1400 for (int j = 0; j < 2; ++j) { | 1396 for (int j = 0; j < 2; ++j) { |
1401 for (int i = 0; i < 2; ++i) { | 1397 for (int i = 0; i < 2; ++i) { |
1402 std::unique_ptr<rtc::MemoryStream> ms( | 1398 std::unique_ptr<rtc::MemoryStream> ms( |
1403 CreateRgbSample(cricket::FOURCC_ARGB, kOddWidth + i, kOddHeight + j)); | 1399 CreateRgbSample(cricket::FOURCC_ARGB, kOddWidth + i, kOddHeight + j)); |
1404 ASSERT_TRUE(ms.get() != NULL); | 1400 ASSERT_TRUE(ms.get() != NULL); |
1405 std::unique_ptr<T> frame1 = ConvertRgb(ms.get(), cricket::FOURCC_ARGB, | 1401 EXPECT_TRUE(ConvertRgb(ms.get(), cricket::FOURCC_ARGB, |
1406 kOddWidth + i, kOddHeight + j); | 1402 kOddWidth + i, kOddHeight + j, |
1407 ASSERT_TRUE(frame1); | 1403 &frame1)); |
1408 T frame2; | |
1409 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, | 1404 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_ARGB, |
1410 kOddWidth + i, kOddHeight + j, &frame2)); | 1405 kOddWidth + i, kOddHeight + j, &frame2)); |
1411 EXPECT_TRUE(IsEqual(*frame1, frame2, 64)); | 1406 EXPECT_TRUE(IsEqual(frame1, frame2, 64)); |
1412 } | 1407 } |
1413 } | 1408 } |
1414 } | 1409 } |
1415 | 1410 |
1416 ////////////////////// | 1411 ////////////////////// |
1417 // Conversion tests // | 1412 // Conversion tests // |
1418 ////////////////////// | 1413 ////////////////////// |
1419 | 1414 |
1420 enum ToFrom { TO, FROM }; | 1415 enum ToFrom { TO, FROM }; |
1421 | 1416 |
(...skipping 29 matching lines...)
1451 out += (kHeight - 1) * stride; // Point to last row. | 1446 out += (kHeight - 1) * stride; // Point to last row. |
1452 stride = -stride; | 1447 stride = -stride; |
1453 } | 1448 } |
1454 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); | 1449 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); |
1455 | 1450 |
1456 for (int i = 0; i < repeat_to; ++i) { | 1451 for (int i = 0; i < repeat_to; ++i) { |
1457 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(fourcc, | 1452 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(fourcc, |
1458 out, | 1453 out, |
1459 out_size, stride)); | 1454 out_size, stride)); |
1460 } | 1455 } |
1461 frame2.InitToEmptyBuffer(kWidth, kHeight, 0); | 1456 EXPECT_TRUE(frame2.InitToBlack(kWidth, kHeight, 0)); |
1462 for (int i = 0; i < repeat_from; ++i) { | 1457 for (int i = 0; i < repeat_from; ++i) { |
1463 EXPECT_EQ(0, RGBToI420(out, stride, | 1458 EXPECT_EQ(0, RGBToI420(out, stride, |
1464 frame2.video_frame_buffer()->MutableDataY(), | 1459 frame2.video_frame_buffer()->MutableDataY(), |
1465 frame2.video_frame_buffer()->StrideY(), | 1460 frame2.video_frame_buffer()->StrideY(), |
1466 frame2.video_frame_buffer()->MutableDataU(), | 1461 frame2.video_frame_buffer()->MutableDataU(), |
1467 frame2.video_frame_buffer()->StrideU(), | 1462 frame2.video_frame_buffer()->StrideU(), |
1468 frame2.video_frame_buffer()->MutableDataV(), | 1463 frame2.video_frame_buffer()->MutableDataV(), |
1469 frame2.video_frame_buffer()->StrideV(), | 1464 frame2.video_frame_buffer()->StrideV(), |
1470 kWidth, kHeight)); | 1465 kWidth, kHeight)); |
1471 } | 1466 } |
(...skipping 334 matching lines...)
1806 std::unique_ptr<const cricket::VideoFrame> target; | 1801 std::unique_ptr<const cricket::VideoFrame> target; |
1807 ASSERT_TRUE(LoadFrameNoRepeat(source.get())); | 1802 ASSERT_TRUE(LoadFrameNoRepeat(source.get())); |
1808 target.reset(source->Copy()); | 1803 target.reset(source->Copy()); |
1809 EXPECT_TRUE(IsEqual(*source, *target, 0)); | 1804 EXPECT_TRUE(IsEqual(*source, *target, 0)); |
1810 const T* const_source = source.get(); | 1805 const T* const_source = source.get(); |
1811 EXPECT_EQ(const_source->video_frame_buffer(), target->video_frame_buffer()); | 1806 EXPECT_EQ(const_source->video_frame_buffer(), target->video_frame_buffer()); |
1812 } | 1807 } |
1813 | 1808 |
1814 void StretchToFrame() { | 1809 void StretchToFrame() { |
1815 // Create the source frame as a black frame. | 1810 // Create the source frame as a black frame. |
1816 rtc::scoped_refptr<webrtc::I420Buffer> buffer( | 1811 T source; |
1817 new rtc::RefCountedObject<webrtc::I420Buffer>(kWidth * 2, kHeight * 2)); | 1812 EXPECT_TRUE(source.InitToBlack(kWidth * 2, kHeight * 2, 0)); |
1818 | |
1819 buffer->SetToBlack(); | |
1820 T source(buffer, 0, webrtc::kVideoRotation_0); | |
1821 | |
1822 EXPECT_TRUE(IsSize(source, kWidth * 2, kHeight * 2)); | 1813 EXPECT_TRUE(IsSize(source, kWidth * 2, kHeight * 2)); |
1823 | 1814 |
1824 // Create the target frame by loading from a file. | 1815 // Create the target frame by loading from a file. |
1825 T target1; | 1816 T target1; |
1826 ASSERT_TRUE(LoadFrameNoRepeat(&target1)); | 1817 ASSERT_TRUE(LoadFrameNoRepeat(&target1)); |
1827 EXPECT_FALSE(IsBlack(target1)); | 1818 EXPECT_FALSE(IsBlack(target1)); |
1828 | 1819 |
1829 // Stretch and check if the stretched target is black. | 1820 // Stretch and check if the stretched target is black. |
1830 source.StretchToFrame(&target1, true, false); | 1821 source.StretchToFrame(&target1, true, false); |
1831 EXPECT_TRUE(IsBlack(target1)); | 1822 EXPECT_TRUE(IsBlack(target1)); |
1832 | 1823 |
1833 // Crop and stretch and check if the stretched target is black. | 1824 // Crop and stretch and check if the stretched target is black. |
1834 T target2; | 1825 T target2; |
1835 ASSERT_TRUE(LoadFrameNoRepeat(&target2)); | 1826 ASSERT_TRUE(LoadFrameNoRepeat(&target2)); |
1836 source.StretchToFrame(&target2, true, true); | 1827 source.StretchToFrame(&target2, true, true); |
1837 EXPECT_TRUE(IsBlack(target2)); | 1828 EXPECT_TRUE(IsBlack(target2)); |
1838 EXPECT_EQ(source.GetTimeStamp(), target2.GetTimeStamp()); | 1829 EXPECT_EQ(source.GetTimeStamp(), target2.GetTimeStamp()); |
1839 } | 1830 } |
1840 | 1831 |
1841 int repeat_; | 1832 int repeat_; |
1842 }; | 1833 }; |
1843 | 1834 |
1844 #endif // WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_ | 1835 #endif // WEBRTC_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_ |