Chromium Code Reviews

Side by Side Diff: talk/media/base/videoframe_unittest.h

Issue 1362503003: Use suffixed {uint,int}{8,16,32,64}_t types. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: google::int32 Created 5 years, 2 months ago
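
This patch set mechanically renames the unittest header's Google-style integer typedefs to the standard fixed-width types from <stdint.h> (uint8 -> uint8_t, int32 -> int32_t, int64 -> int64_t, and so on), reformatting the touched declarations as it goes. A minimal sketch of the rename; the "old" spellings are assumed from the pre-CL WebRTC/Chromium typedef convention rather than quoted from basictypes.h:

  // Illustrative only; not part of the patch. The old spellings are assumed
  // to come from WebRTC's legacy basictypes.h typedefs.
  #include <stdint.h>

  void TypeRenameExample() {
    // Old:  uint8* sample;  uint32 format;  int32 width;  int64 time_stamp;
    uint8_t* sample = nullptr;  // Raw frame bytes.
    uint32_t format = 0;        // FOURCC value, e.g. cricket::FOURCC_I420.
    int32_t width = 0;          // Frame width in pixels.
    int64_t time_stamp = 0;     // Capture timestamp.
    (void)sample; (void)format; (void)width; (void)time_stamp;
  }
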
1 /* 1 /*
2 * libjingle 2 * libjingle
3 * Copyright 2004 Google Inc. 3 * Copyright 2004 Google Inc.
4 * 4 *
5 * Redistribution and use in source and binary forms, with or without 5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met: 6 * modification, are permitted provided that the following conditions are met:
7 * 7 *
8 * 1. Redistributions of source code must retain the above copyright notice, 8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer. 9 * this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright notice, 10 * 2. Redistributions in binary form must reproduce the above copyright notice,
(...skipping 64 matching lines...)
75 // Load a video frame from disk. 75 // Load a video frame from disk.
76 bool LoadFrameNoRepeat(T* frame) { 76 bool LoadFrameNoRepeat(T* frame) {
77 int save_repeat = repeat_; // This LoadFrame disables repeat. 77 int save_repeat = repeat_; // This LoadFrame disables repeat.
78 repeat_ = 1; 78 repeat_ = 1;
79 bool success = LoadFrame(kImageFilename, cricket::FOURCC_I420, 79 bool success = LoadFrame(kImageFilename, cricket::FOURCC_I420,
80 kWidth, kHeight, frame); 80 kWidth, kHeight, frame);
81 repeat_ = save_repeat; 81 repeat_ = save_repeat;
82 return success; 82 return success;
83 } 83 }
84 84
85 bool LoadFrame(const std::string& filename, uint32 format, 85 bool LoadFrame(const std::string& filename,
86 int32 width, int32 height, T* frame) { 86 uint32_t format,
87 int32_t width,
88 int32_t height,
89 T* frame) {
87 return LoadFrame(filename, format, width, height, width, abs(height), 90 return LoadFrame(filename, format, width, height, width, abs(height),
88 webrtc::kVideoRotation_0, frame); 91 webrtc::kVideoRotation_0, frame);
89 } 92 }
90 bool LoadFrame(const std::string& filename, 93 bool LoadFrame(const std::string& filename,
91 uint32 format, 94 uint32_t format,
92 int32 width, 95 int32_t width,
93 int32 height, 96 int32_t height,
94 int dw, 97 int dw,
95 int dh, 98 int dh,
96 webrtc::VideoRotation rotation, 99 webrtc::VideoRotation rotation,
97 T* frame) { 100 T* frame) {
98 rtc::scoped_ptr<rtc::MemoryStream> ms(LoadSample(filename)); 101 rtc::scoped_ptr<rtc::MemoryStream> ms(LoadSample(filename));
99 return LoadFrame(ms.get(), format, width, height, dw, dh, rotation, frame); 102 return LoadFrame(ms.get(), format, width, height, dw, dh, rotation, frame);
100 } 103 }
101 // Load a video frame from a memory stream. 104 // Load a video frame from a memory stream.
102 bool LoadFrame(rtc::MemoryStream* ms, uint32 format, 105 bool LoadFrame(rtc::MemoryStream* ms,
103 int32 width, int32 height, T* frame) { 106 uint32_t format,
107 int32_t width,
108 int32_t height,
109 T* frame) {
104 return LoadFrame(ms, format, width, height, width, abs(height), 110 return LoadFrame(ms, format, width, height, width, abs(height),
105 webrtc::kVideoRotation_0, frame); 111 webrtc::kVideoRotation_0, frame);
106 } 112 }
107 bool LoadFrame(rtc::MemoryStream* ms, 113 bool LoadFrame(rtc::MemoryStream* ms,
108 uint32 format, 114 uint32_t format,
109 int32 width, 115 int32_t width,
110 int32 height, 116 int32_t height,
111 int dw, 117 int dw,
112 int dh, 118 int dh,
113 webrtc::VideoRotation rotation, 119 webrtc::VideoRotation rotation,
114 T* frame) { 120 T* frame) {
115 if (!ms) { 121 if (!ms) {
116 return false; 122 return false;
117 } 123 }
118 size_t data_size; 124 size_t data_size;
119 bool ret = ms->GetSize(&data_size); 125 bool ret = ms->GetSize(&data_size);
120 EXPECT_TRUE(ret); 126 EXPECT_TRUE(ret);
121 if (ret) { 127 if (ret) {
122 ret = LoadFrame(reinterpret_cast<uint8*>(ms->GetBuffer()), data_size, 128 ret = LoadFrame(reinterpret_cast<uint8_t*>(ms->GetBuffer()), data_size,
123 format, width, height, dw, dh, rotation, frame); 129 format, width, height, dw, dh, rotation, frame);
124 } 130 }
125 return ret; 131 return ret;
126 } 132 }
127 // Load a frame from a raw buffer. 133 // Load a frame from a raw buffer.
128 bool LoadFrame(uint8* sample, size_t sample_size, uint32 format, 134 bool LoadFrame(uint8_t* sample,
129 int32 width, int32 height, T* frame) { 135 size_t sample_size,
136 uint32_t format,
137 int32_t width,
138 int32_t height,
139 T* frame) {
130 return LoadFrame(sample, sample_size, format, width, height, width, 140 return LoadFrame(sample, sample_size, format, width, height, width,
131 abs(height), webrtc::kVideoRotation_0, frame); 141 abs(height), webrtc::kVideoRotation_0, frame);
132 } 142 }
133 bool LoadFrame(uint8* sample, 143 bool LoadFrame(uint8_t* sample,
134 size_t sample_size, 144 size_t sample_size,
135 uint32 format, 145 uint32_t format,
136 int32 width, 146 int32_t width,
137 int32 height, 147 int32_t height,
138 int dw, 148 int dw,
139 int dh, 149 int dh,
140 webrtc::VideoRotation rotation, 150 webrtc::VideoRotation rotation,
141 T* frame) { 151 T* frame) {
142 bool ret = false; 152 bool ret = false;
143 for (int i = 0; i < repeat_; ++i) { 153 for (int i = 0; i < repeat_; ++i) {
144 ret = frame->Init(format, width, height, dw, dh, 154 ret = frame->Init(format, width, height, dw, dh,
145 sample, sample_size, 1, 1, 0, 0, rotation); 155 sample, sample_size, 1, 1, 0, 0, rotation);
146 } 156 }
147 return ret; 157 return ret;
(...skipping 23 matching lines...)
171 } 181 }
172 182
173 // Write an I420 frame out to disk. 183 // Write an I420 frame out to disk.
174 bool DumpFrame(const std::string& prefix, 184 bool DumpFrame(const std::string& prefix,
175 const cricket::VideoFrame& frame) { 185 const cricket::VideoFrame& frame) {
176 char filename[256]; 186 char filename[256];
177 rtc::sprintfn(filename, sizeof(filename), "%s.%dx%d_P420.yuv", 187 rtc::sprintfn(filename, sizeof(filename), "%s.%dx%d_P420.yuv",
178 prefix.c_str(), frame.GetWidth(), frame.GetHeight()); 188 prefix.c_str(), frame.GetWidth(), frame.GetHeight());
179 size_t out_size = cricket::VideoFrame::SizeOf(frame.GetWidth(), 189 size_t out_size = cricket::VideoFrame::SizeOf(frame.GetWidth(),
180 frame.GetHeight()); 190 frame.GetHeight());
181 rtc::scoped_ptr<uint8[]> out(new uint8[out_size]); 191 rtc::scoped_ptr<uint8_t[]> out(new uint8_t[out_size]);
182 frame.CopyToBuffer(out.get(), out_size); 192 frame.CopyToBuffer(out.get(), out_size);
183 return DumpSample(filename, out.get(), out_size); 193 return DumpSample(filename, out.get(), out_size);
184 } 194 }
185 195
186 bool DumpSample(const std::string& filename, const void* buffer, int size) { 196 bool DumpSample(const std::string& filename, const void* buffer, int size) {
187 rtc::Pathname path(filename); 197 rtc::Pathname path(filename);
188 rtc::scoped_ptr<rtc::FileStream> fs( 198 rtc::scoped_ptr<rtc::FileStream> fs(
189 rtc::Filesystem::OpenFile(path, "wb")); 199 rtc::Filesystem::OpenFile(path, "wb"));
190 if (!fs.get()) { 200 if (!fs.get()) {
191 return false; 201 return false;
192 } 202 }
193 203
194 return (fs->Write(buffer, size, NULL, NULL) == rtc::SR_SUCCESS); 204 return (fs->Write(buffer, size, NULL, NULL) == rtc::SR_SUCCESS);
195 } 205 }
196 206
197 // Create a test image in the desired color space. 207 // Create a test image in the desired color space.
198 // The image is a checkerboard pattern with 63x63 squares, which allows 208 // The image is a checkerboard pattern with 63x63 squares, which allows
199 // I420 chroma artifacts to easily be seen on the square boundaries. 209 // I420 chroma artifacts to easily be seen on the square boundaries.
200 // The pattern is { { green, orange }, { blue, purple } } 210 // The pattern is { { green, orange }, { blue, purple } }
201 // There is also a gradient within each square to ensure that the luma 211 // There is also a gradient within each square to ensure that the luma
202 // values are handled properly. 212 // values are handled properly.
203 rtc::MemoryStream* CreateYuv422Sample(uint32 fourcc, 213 rtc::MemoryStream* CreateYuv422Sample(uint32_t fourcc,
204 uint32 width, uint32 height) { 214 uint32_t width,
215 uint32_t height) {
205 int y1_pos, y2_pos, u_pos, v_pos; 216 int y1_pos, y2_pos, u_pos, v_pos;
206 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) { 217 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) {
207 return NULL; 218 return NULL;
208 } 219 }
209 220
210 rtc::scoped_ptr<rtc::MemoryStream> ms( 221 rtc::scoped_ptr<rtc::MemoryStream> ms(
211 new rtc::MemoryStream); 222 new rtc::MemoryStream);
212 int awidth = (width + 1) & ~1; 223 int awidth = (width + 1) & ~1;
213 int size = awidth * 2 * height; 224 int size = awidth * 2 * height;
214 if (!ms->ReserveSize(size)) { 225 if (!ms->ReserveSize(size)) {
215 return NULL; 226 return NULL;
216 } 227 }
217 for (uint32 y = 0; y < height; ++y) { 228 for (uint32_t y = 0; y < height; ++y) {
218 for (int x = 0; x < awidth; x += 2) { 229 for (int x = 0; x < awidth; x += 2) {
219 uint8 quad[4]; 230 uint8_t quad[4];
220 quad[y1_pos] = (x % 63 + y % 63) + 64; 231 quad[y1_pos] = (x % 63 + y % 63) + 64;
221 quad[y2_pos] = ((x + 1) % 63 + y % 63) + 64; 232 quad[y2_pos] = ((x + 1) % 63 + y % 63) + 64;
222 quad[u_pos] = ((x / 63) & 1) ? 192 : 64; 233 quad[u_pos] = ((x / 63) & 1) ? 192 : 64;
223 quad[v_pos] = ((y / 63) & 1) ? 192 : 64; 234 quad[v_pos] = ((y / 63) & 1) ? 192 : 64;
224 ms->Write(quad, sizeof(quad), NULL, NULL); 235 ms->Write(quad, sizeof(quad), NULL, NULL);
225 } 236 }
226 } 237 }
227 return ms.release(); 238 return ms.release();
228 } 239 }
229 240
230 // Create a test image for YUV 420 formats with 12 bits per pixel. 241 // Create a test image for YUV 420 formats with 12 bits per pixel.
231 rtc::MemoryStream* CreateYuvSample(uint32 width, uint32 height, 242 rtc::MemoryStream* CreateYuvSample(uint32_t width,
232 uint32 bpp) { 243 uint32_t height,
244 uint32_t bpp) {
233 rtc::scoped_ptr<rtc::MemoryStream> ms( 245 rtc::scoped_ptr<rtc::MemoryStream> ms(
234 new rtc::MemoryStream); 246 new rtc::MemoryStream);
235 if (!ms->ReserveSize(width * height * bpp / 8)) { 247 if (!ms->ReserveSize(width * height * bpp / 8)) {
236 return NULL; 248 return NULL;
237 } 249 }
238 250
239 for (uint32 i = 0; i < width * height * bpp / 8; ++i) { 251 for (uint32_t i = 0; i < width * height * bpp / 8; ++i) {
240 char value = ((i / 63) & 1) ? 192 : 64; 252 char value = ((i / 63) & 1) ? 192 : 64;
241 ms->Write(&value, sizeof(value), NULL, NULL); 253 ms->Write(&value, sizeof(value), NULL, NULL);
242 } 254 }
243 return ms.release(); 255 return ms.release();
244 } 256 }
245 257
246 rtc::MemoryStream* CreateRgbSample(uint32 fourcc, 258 rtc::MemoryStream* CreateRgbSample(uint32_t fourcc,
247 uint32 width, uint32 height) { 259 uint32_t width,
260 uint32_t height) {
248 int r_pos, g_pos, b_pos, bytes; 261 int r_pos, g_pos, b_pos, bytes;
249 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) { 262 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) {
250 return NULL; 263 return NULL;
251 } 264 }
252 265
253 rtc::scoped_ptr<rtc::MemoryStream> ms( 266 rtc::scoped_ptr<rtc::MemoryStream> ms(
254 new rtc::MemoryStream); 267 new rtc::MemoryStream);
255 if (!ms->ReserveSize(width * height * bytes)) { 268 if (!ms->ReserveSize(width * height * bytes)) {
256 return NULL; 269 return NULL;
257 } 270 }
258 271
259 for (uint32 y = 0; y < height; ++y) { 272 for (uint32_t y = 0; y < height; ++y) {
260 for (uint32 x = 0; x < width; ++x) { 273 for (uint32_t x = 0; x < width; ++x) {
261 uint8 rgb[4] = { 255, 255, 255, 255 }; 274 uint8_t rgb[4] = {255, 255, 255, 255};
262 rgb[r_pos] = ((x / 63) & 1) ? 224 : 32; 275 rgb[r_pos] = ((x / 63) & 1) ? 224 : 32;
263 rgb[g_pos] = (x % 63 + y % 63) + 96; 276 rgb[g_pos] = (x % 63 + y % 63) + 96;
264 rgb[b_pos] = ((y / 63) & 1) ? 224 : 32; 277 rgb[b_pos] = ((y / 63) & 1) ? 224 : 32;
265 ms->Write(rgb, bytes, NULL, NULL); 278 ms->Write(rgb, bytes, NULL, NULL);
266 } 279 }
267 } 280 }
268 return ms.release(); 281 return ms.release();
269 } 282 }
270 283
271 // Simple conversion routines to verify the optimized VideoFrame routines. 284 // Simple conversion routines to verify the optimized VideoFrame routines.
272 // Converts from the specified colorspace to I420. 285 // Converts from the specified colorspace to I420.
273 bool ConvertYuv422(const rtc::MemoryStream* ms, 286 bool ConvertYuv422(const rtc::MemoryStream* ms,
274 uint32 fourcc, uint32 width, uint32 height, 287 uint32_t fourcc,
288 uint32_t width,
289 uint32_t height,
275 T* frame) { 290 T* frame) {
276 int y1_pos, y2_pos, u_pos, v_pos; 291 int y1_pos, y2_pos, u_pos, v_pos;
277 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) { 292 if (!GetYuv422Packing(fourcc, &y1_pos, &y2_pos, &u_pos, &v_pos)) {
278 return false; 293 return false;
279 } 294 }
280 295
281 const uint8* start = reinterpret_cast<const uint8*>(ms->GetBuffer()); 296 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer());
282 int awidth = (width + 1) & ~1; 297 int awidth = (width + 1) & ~1;
283 frame->InitToBlack(width, height, 1, 1, 0, 0); 298 frame->InitToBlack(width, height, 1, 1, 0, 0);
284 int stride_y = frame->GetYPitch(); 299 int stride_y = frame->GetYPitch();
285 int stride_u = frame->GetUPitch(); 300 int stride_u = frame->GetUPitch();
286 int stride_v = frame->GetVPitch(); 301 int stride_v = frame->GetVPitch();
287 for (uint32 y = 0; y < height; ++y) { 302 for (uint32_t y = 0; y < height; ++y) {
288 for (uint32 x = 0; x < width; x += 2) { 303 for (uint32_t x = 0; x < width; x += 2) {
289 const uint8* quad1 = start + (y * awidth + x) * 2; 304 const uint8_t* quad1 = start + (y * awidth + x) * 2;
290 frame->GetYPlane()[stride_y * y + x] = quad1[y1_pos]; 305 frame->GetYPlane()[stride_y * y + x] = quad1[y1_pos];
291 if ((x + 1) < width) { 306 if ((x + 1) < width) {
292 frame->GetYPlane()[stride_y * y + x + 1] = quad1[y2_pos]; 307 frame->GetYPlane()[stride_y * y + x + 1] = quad1[y2_pos];
293 } 308 }
294 if ((y & 1) == 0) { 309 if ((y & 1) == 0) {
295 const uint8* quad2 = quad1 + awidth * 2; 310 const uint8_t* quad2 = quad1 + awidth * 2;
296 if ((y + 1) >= height) { 311 if ((y + 1) >= height) {
297 quad2 = quad1; 312 quad2 = quad1;
298 } 313 }
299 frame->GetUPlane()[stride_u * (y / 2) + x / 2] = 314 frame->GetUPlane()[stride_u * (y / 2) + x / 2] =
300 (quad1[u_pos] + quad2[u_pos] + 1) / 2; 315 (quad1[u_pos] + quad2[u_pos] + 1) / 2;
301 frame->GetVPlane()[stride_v * (y / 2) + x / 2] = 316 frame->GetVPlane()[stride_v * (y / 2) + x / 2] =
302 (quad1[v_pos] + quad2[v_pos] + 1) / 2; 317 (quad1[v_pos] + quad2[v_pos] + 1) / 2;
303 } 318 }
304 } 319 }
305 } 320 }
306 return true; 321 return true;
307 } 322 }
308 323
309 // Convert RGB to 420. 324 // Convert RGB to 420.
310 // A negative height inverts the image. 325 // A negative height inverts the image.
311 bool ConvertRgb(const rtc::MemoryStream* ms, 326 bool ConvertRgb(const rtc::MemoryStream* ms,
312 uint32 fourcc, int32 width, int32 height, 327 uint32_t fourcc,
328 int32_t width,
329 int32_t height,
313 T* frame) { 330 T* frame) {
314 int r_pos, g_pos, b_pos, bytes; 331 int r_pos, g_pos, b_pos, bytes;
315 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) { 332 if (!GetRgbPacking(fourcc, &r_pos, &g_pos, &b_pos, &bytes)) {
316 return false; 333 return false;
317 } 334 }
318 int pitch = width * bytes; 335 int pitch = width * bytes;
319 const uint8* start = reinterpret_cast<const uint8*>(ms->GetBuffer()); 336 const uint8_t* start = reinterpret_cast<const uint8_t*>(ms->GetBuffer());
320 if (height < 0) { 337 if (height < 0) {
321 height = -height; 338 height = -height;
322 start = start + pitch * (height - 1); 339 start = start + pitch * (height - 1);
323 pitch = -pitch; 340 pitch = -pitch;
324 } 341 }
325 frame->InitToBlack(width, height, 1, 1, 0, 0); 342 frame->InitToBlack(width, height, 1, 1, 0, 0);
326 int stride_y = frame->GetYPitch(); 343 int stride_y = frame->GetYPitch();
327 int stride_u = frame->GetUPitch(); 344 int stride_u = frame->GetUPitch();
328 int stride_v = frame->GetVPitch(); 345 int stride_v = frame->GetVPitch();
329 for (int32 y = 0; y < height; y += 2) { 346 for (int32_t y = 0; y < height; y += 2) {
330 for (int32 x = 0; x < width; x += 2) { 347 for (int32_t x = 0; x < width; x += 2) {
331 const uint8* rgb[4]; 348 const uint8_t* rgb[4];
332 uint8 yuv[4][3]; 349 uint8_t yuv[4][3];
333 rgb[0] = start + y * pitch + x * bytes; 350 rgb[0] = start + y * pitch + x * bytes;
334 rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0); 351 rgb[1] = rgb[0] + ((x + 1) < width ? bytes : 0);
335 rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0); 352 rgb[2] = rgb[0] + ((y + 1) < height ? pitch : 0);
336 rgb[3] = rgb[2] + ((x + 1) < width ? bytes : 0); 353 rgb[3] = rgb[2] + ((x + 1) < width ? bytes : 0);
337 for (size_t i = 0; i < 4; ++i) { 354 for (size_t i = 0; i < 4; ++i) {
338 ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos], 355 ConvertRgbPixel(rgb[i][r_pos], rgb[i][g_pos], rgb[i][b_pos],
339 &yuv[i][0], &yuv[i][1], &yuv[i][2]); 356 &yuv[i][0], &yuv[i][1], &yuv[i][2]);
340 } 357 }
341 frame->GetYPlane()[stride_y * y + x] = yuv[0][0]; 358 frame->GetYPlane()[stride_y * y + x] = yuv[0][0];
342 if ((x + 1) < width) { 359 if ((x + 1) < width) {
343 frame->GetYPlane()[stride_y * y + x + 1] = yuv[1][0]; 360 frame->GetYPlane()[stride_y * y + x + 1] = yuv[1][0];
344 } 361 }
345 if ((y + 1) < height) { 362 if ((y + 1) < height) {
346 frame->GetYPlane()[stride_y * (y + 1) + x] = yuv[2][0]; 363 frame->GetYPlane()[stride_y * (y + 1) + x] = yuv[2][0];
347 if ((x + 1) < width) { 364 if ((x + 1) < width) {
348 frame->GetYPlane()[stride_y * (y + 1) + x + 1] = yuv[3][0]; 365 frame->GetYPlane()[stride_y * (y + 1) + x + 1] = yuv[3][0];
349 } 366 }
350 } 367 }
351 frame->GetUPlane()[stride_u * (y / 2) + x / 2] = 368 frame->GetUPlane()[stride_u * (y / 2) + x / 2] =
352 (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4; 369 (yuv[0][1] + yuv[1][1] + yuv[2][1] + yuv[3][1] + 2) / 4;
353 frame->GetVPlane()[stride_v * (y / 2) + x / 2] = 370 frame->GetVPlane()[stride_v * (y / 2) + x / 2] =
354 (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4; 371 (yuv[0][2] + yuv[1][2] + yuv[2][2] + yuv[3][2] + 2) / 4;
355 } 372 }
356 } 373 }
357 return true; 374 return true;
358 } 375 }
359 376
360 // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia. 377 // Simple and slow RGB->YUV conversion. From NTSC standard, c/o Wikipedia.
361 void ConvertRgbPixel(uint8 r, uint8 g, uint8 b, 378 void ConvertRgbPixel(uint8_t r,
362 uint8* y, uint8* u, uint8* v) { 379 uint8_t g,
380 uint8_t b,
381 uint8_t* y,
382 uint8_t* u,
383 uint8_t* v) {
363 *y = static_cast<int>(.257 * r + .504 * g + .098 * b) + 16; 384 *y = static_cast<int>(.257 * r + .504 * g + .098 * b) + 16;
364 *u = static_cast<int>(-.148 * r - .291 * g + .439 * b) + 128; 385 *u = static_cast<int>(-.148 * r - .291 * g + .439 * b) + 128;
365 *v = static_cast<int>(.439 * r - .368 * g - .071 * b) + 128; 386 *v = static_cast<int>(.439 * r - .368 * g - .071 * b) + 128;
366 } 387 }
367 388
368 bool GetYuv422Packing(uint32 fourcc, 389 bool GetYuv422Packing(uint32_t fourcc,
369 int* y1_pos, int* y2_pos, int* u_pos, int* v_pos) { 390 int* y1_pos,
391 int* y2_pos,
392 int* u_pos,
393 int* v_pos) {
370 if (fourcc == cricket::FOURCC_YUY2) { 394 if (fourcc == cricket::FOURCC_YUY2) {
371 *y1_pos = 0; *u_pos = 1; *y2_pos = 2; *v_pos = 3; 395 *y1_pos = 0; *u_pos = 1; *y2_pos = 2; *v_pos = 3;
372 } else if (fourcc == cricket::FOURCC_UYVY) { 396 } else if (fourcc == cricket::FOURCC_UYVY) {
373 *u_pos = 0; *y1_pos = 1; *v_pos = 2; *y2_pos = 3; 397 *u_pos = 0; *y1_pos = 1; *v_pos = 2; *y2_pos = 3;
374 } else { 398 } else {
375 return false; 399 return false;
376 } 400 }
377 return true; 401 return true;
378 } 402 }
379 403
380 bool GetRgbPacking(uint32 fourcc, 404 bool GetRgbPacking(uint32_t fourcc,
381 int* r_pos, int* g_pos, int* b_pos, int* bytes) { 405 int* r_pos,
406 int* g_pos,
407 int* b_pos,
408 int* bytes) {
382 if (fourcc == cricket::FOURCC_RAW) { 409 if (fourcc == cricket::FOURCC_RAW) {
383 *r_pos = 0; *g_pos = 1; *b_pos = 2; *bytes = 3; // RGB in memory. 410 *r_pos = 0; *g_pos = 1; *b_pos = 2; *bytes = 3; // RGB in memory.
384 } else if (fourcc == cricket::FOURCC_24BG) { 411 } else if (fourcc == cricket::FOURCC_24BG) {
385 *r_pos = 2; *g_pos = 1; *b_pos = 0; *bytes = 3; // BGR in memory. 412 *r_pos = 2; *g_pos = 1; *b_pos = 0; *bytes = 3; // BGR in memory.
386 } else if (fourcc == cricket::FOURCC_ABGR) { 413 } else if (fourcc == cricket::FOURCC_ABGR) {
387 *r_pos = 0; *g_pos = 1; *b_pos = 2; *bytes = 4; // RGBA in memory. 414 *r_pos = 0; *g_pos = 1; *b_pos = 2; *bytes = 4; // RGBA in memory.
388 } else if (fourcc == cricket::FOURCC_BGRA) { 415 } else if (fourcc == cricket::FOURCC_BGRA) {
389 *r_pos = 1; *g_pos = 2; *b_pos = 3; *bytes = 4; // ARGB in memory. 416 *r_pos = 1; *g_pos = 2; *b_pos = 3; *bytes = 4; // ARGB in memory.
390 } else if (fourcc == cricket::FOURCC_ARGB) { 417 } else if (fourcc == cricket::FOURCC_ARGB) {
391 *r_pos = 2; *g_pos = 1; *b_pos = 0; *bytes = 4; // BGRA in memory. 418 *r_pos = 2; *g_pos = 1; *b_pos = 0; *bytes = 4; // BGRA in memory.
392 } else { 419 } else {
393 return false; 420 return false;
394 } 421 }
395 return true; 422 return true;
396 } 423 }
397 424
398 // Comparison functions for testing. 425 // Comparison functions for testing.
399 static bool IsNull(const cricket::VideoFrame& frame) { 426 static bool IsNull(const cricket::VideoFrame& frame) {
400 return !frame.GetYPlane(); 427 return !frame.GetYPlane();
401 } 428 }
402 429
403 static bool IsSize(const cricket::VideoFrame& frame, 430 static bool IsSize(const cricket::VideoFrame& frame,
404 uint32 width, uint32 height) { 431 uint32_t width,
405 return !IsNull(frame) && 432 uint32_t height) {
406 frame.GetYPitch() >= static_cast<int32>(width) && 433 return !IsNull(frame) && frame.GetYPitch() >= static_cast<int32_t>(width) &&
407 frame.GetUPitch() >= static_cast<int32>(width) / 2 && 434 frame.GetUPitch() >= static_cast<int32_t>(width) / 2 &&
408 frame.GetVPitch() >= static_cast<int32>(width) / 2 && 435 frame.GetVPitch() >= static_cast<int32_t>(width) / 2 &&
409 frame.GetWidth() == width && frame.GetHeight() == height; 436 frame.GetWidth() == width && frame.GetHeight() == height;
410 } 437 }
411 438
412 static bool IsPlaneEqual(const std::string& name, 439 static bool IsPlaneEqual(const std::string& name,
413 const uint8* plane1, uint32 pitch1, 440 const uint8_t* plane1,
414 const uint8* plane2, uint32 pitch2, 441 uint32_t pitch1,
415 uint32 width, uint32 height, 442 const uint8_t* plane2,
443 uint32_t pitch2,
444 uint32_t width,
445 uint32_t height,
416 int max_error) { 446 int max_error) {
417 const uint8* r1 = plane1; 447 const uint8_t* r1 = plane1;
418 const uint8* r2 = plane2; 448 const uint8_t* r2 = plane2;
419 for (uint32 y = 0; y < height; ++y) { 449 for (uint32_t y = 0; y < height; ++y) {
420 for (uint32 x = 0; x < width; ++x) { 450 for (uint32_t x = 0; x < width; ++x) {
421 if (abs(static_cast<int>(r1[x] - r2[x])) > max_error) { 451 if (abs(static_cast<int>(r1[x] - r2[x])) > max_error) {
422 LOG(LS_INFO) << "IsPlaneEqual(" << name << "): pixel[" 452 LOG(LS_INFO) << "IsPlaneEqual(" << name << "): pixel["
423 << x << "," << y << "] differs: " 453 << x << "," << y << "] differs: "
424 << static_cast<int>(r1[x]) << " vs " 454 << static_cast<int>(r1[x]) << " vs "
425 << static_cast<int>(r2[x]); 455 << static_cast<int>(r2[x]);
426 return false; 456 return false;
427 } 457 }
428 } 458 }
429 r1 += pitch1; 459 r1 += pitch1;
430 r2 += pitch2; 460 r2 += pitch2;
431 } 461 }
432 return true; 462 return true;
433 } 463 }
434 464
435 static bool IsEqual(const cricket::VideoFrame& frame, 465 static bool IsEqual(const cricket::VideoFrame& frame,
436 size_t width, size_t height, 466 size_t width,
437 size_t pixel_width, size_t pixel_height, 467 size_t height,
438 int64 elapsed_time, int64 time_stamp, 468 size_t pixel_width,
439 const uint8* y, uint32 ypitch, 469 size_t pixel_height,
440 const uint8* u, uint32 upitch, 470 int64_t elapsed_time,
441 const uint8* v, uint32 vpitch, 471 int64_t time_stamp,
472 const uint8_t* y,
473 uint32_t ypitch,
474 const uint8_t* u,
475 uint32_t upitch,
476 const uint8_t* v,
477 uint32_t vpitch,
442 int max_error) { 478 int max_error) {
443 return IsSize(frame, 479 return IsSize(frame, static_cast<uint32_t>(width),
444 static_cast<uint32>(width), 480 static_cast<uint32_t>(height)) &&
445 static_cast<uint32>(height)) && 481 frame.GetPixelWidth() == pixel_width &&
446 frame.GetPixelWidth() == pixel_width && 482 frame.GetPixelHeight() == pixel_height &&
447 frame.GetPixelHeight() == pixel_height && 483 frame.GetElapsedTime() == elapsed_time &&
448 frame.GetElapsedTime() == elapsed_time && 484 frame.GetTimeStamp() == time_stamp &&
449 frame.GetTimeStamp() == time_stamp && 485 IsPlaneEqual("y", frame.GetYPlane(), frame.GetYPitch(), y, ypitch,
450 IsPlaneEqual("y", frame.GetYPlane(), frame.GetYPitch(), y, ypitch, 486 static_cast<uint32_t>(width),
451 static_cast<uint32>(width), 487 static_cast<uint32_t>(height), max_error) &&
452 static_cast<uint32>(height), max_error) && 488 IsPlaneEqual("u", frame.GetUPlane(), frame.GetUPitch(), u, upitch,
453 IsPlaneEqual("u", frame.GetUPlane(), frame.GetUPitch(), u, upitch, 489 static_cast<uint32_t>((width + 1) / 2),
454 static_cast<uint32>((width + 1) / 2), 490 static_cast<uint32_t>((height + 1) / 2), max_error) &&
455 static_cast<uint32>((height + 1) / 2), max_error) && 491 IsPlaneEqual("v", frame.GetVPlane(), frame.GetVPitch(), v, vpitch,
456 IsPlaneEqual("v", frame.GetVPlane(), frame.GetVPitch(), v, vpitch, 492 static_cast<uint32_t>((width + 1) / 2),
457 static_cast<uint32>((width + 1) / 2), 493 static_cast<uint32_t>((height + 1) / 2), max_error);
458 static_cast<uint32>((height + 1) / 2), max_error);
459 } 494 }
460 495
461 static bool IsEqual(const cricket::VideoFrame& frame1, 496 static bool IsEqual(const cricket::VideoFrame& frame1,
462 const cricket::VideoFrame& frame2, 497 const cricket::VideoFrame& frame2,
463 int max_error) { 498 int max_error) {
464 return IsEqual(frame1, 499 return IsEqual(frame1,
465 frame2.GetWidth(), frame2.GetHeight(), 500 frame2.GetWidth(), frame2.GetHeight(),
466 frame2.GetPixelWidth(), frame2.GetPixelHeight(), 501 frame2.GetPixelWidth(), frame2.GetPixelHeight(),
467 frame2.GetElapsedTime(), frame2.GetTimeStamp(), 502 frame2.GetElapsedTime(), frame2.GetTimeStamp(),
468 frame2.GetYPlane(), frame2.GetYPitch(), 503 frame2.GetYPlane(), frame2.GetYPitch(),
(...skipping 37 matching lines...)
506 541
507 // Test constructing an image from a I420 buffer. 542 // Test constructing an image from a I420 buffer.
508 void ConstructI420() { 543 void ConstructI420() {
509 T frame; 544 T frame;
510 EXPECT_TRUE(IsNull(frame)); 545 EXPECT_TRUE(IsNull(frame));
511 rtc::scoped_ptr<rtc::MemoryStream> ms( 546 rtc::scoped_ptr<rtc::MemoryStream> ms(
512 CreateYuvSample(kWidth, kHeight, 12)); 547 CreateYuvSample(kWidth, kHeight, 12));
513 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420, 548 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420,
514 kWidth, kHeight, &frame)); 549 kWidth, kHeight, &frame));
515 550
516 const uint8* y = reinterpret_cast<uint8*>(ms.get()->GetBuffer()); 551 const uint8_t* y = reinterpret_cast<uint8_t*>(ms.get()->GetBuffer());
517 const uint8* u = y + kWidth * kHeight; 552 const uint8_t* u = y + kWidth * kHeight;
518 const uint8* v = u + kWidth * kHeight / 4; 553 const uint8_t* v = u + kWidth * kHeight / 4;
519 EXPECT_TRUE(IsEqual(frame, kWidth, kHeight, 1, 1, 0, 0, 554 EXPECT_TRUE(IsEqual(frame, kWidth, kHeight, 1, 1, 0, 0,
520 y, kWidth, u, kWidth / 2, v, kWidth / 2, 0)); 555 y, kWidth, u, kWidth / 2, v, kWidth / 2, 0));
521 } 556 }
522 557
523 // Test constructing an image from a YV12 buffer. 558 // Test constructing an image from a YV12 buffer.
524 void ConstructYV12() { 559 void ConstructYV12() {
525 T frame; 560 T frame;
526 rtc::scoped_ptr<rtc::MemoryStream> ms( 561 rtc::scoped_ptr<rtc::MemoryStream> ms(
527 CreateYuvSample(kWidth, kHeight, 12)); 562 CreateYuvSample(kWidth, kHeight, 12));
528 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YV12, 563 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YV12,
529 kWidth, kHeight, &frame)); 564 kWidth, kHeight, &frame));
530 565
531 const uint8* y = reinterpret_cast<uint8*>(ms.get()->GetBuffer()); 566 const uint8_t* y = reinterpret_cast<uint8_t*>(ms.get()->GetBuffer());
532 const uint8* v = y + kWidth * kHeight; 567 const uint8_t* v = y + kWidth * kHeight;
533 const uint8* u = v + kWidth * kHeight / 4; 568 const uint8_t* u = v + kWidth * kHeight / 4;
534 EXPECT_TRUE(IsEqual(frame, kWidth, kHeight, 1, 1, 0, 0, 569 EXPECT_TRUE(IsEqual(frame, kWidth, kHeight, 1, 1, 0, 0,
535 y, kWidth, u, kWidth / 2, v, kWidth / 2, 0)); 570 y, kWidth, u, kWidth / 2, v, kWidth / 2, 0));
536 } 571 }
537 572
538 // Test constructing an image from a I422 buffer. 573 // Test constructing an image from a I422 buffer.
539 void ConstructI422() { 574 void ConstructI422() {
540 T frame1, frame2; 575 T frame1, frame2;
541 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 576 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
542 size_t buf_size = kWidth * kHeight * 2; 577 size_t buf_size = kWidth * kHeight * 2;
543 rtc::scoped_ptr<uint8[]> buf(new uint8[buf_size + kAlignment]); 578 rtc::scoped_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]);
544 uint8* y = ALIGNP(buf.get(), kAlignment); 579 uint8_t* y = ALIGNP(buf.get(), kAlignment);
545 uint8* u = y + kWidth * kHeight; 580 uint8_t* u = y + kWidth * kHeight;
546 uint8* v = u + (kWidth / 2) * kHeight; 581 uint8_t* v = u + (kWidth / 2) * kHeight;
547 EXPECT_EQ(0, libyuv::I420ToI422(frame1.GetYPlane(), frame1.GetYPitch(), 582 EXPECT_EQ(0, libyuv::I420ToI422(frame1.GetYPlane(), frame1.GetYPitch(),
548 frame1.GetUPlane(), frame1.GetUPitch(), 583 frame1.GetUPlane(), frame1.GetUPitch(),
549 frame1.GetVPlane(), frame1.GetVPitch(), 584 frame1.GetVPlane(), frame1.GetVPitch(),
550 y, kWidth, 585 y, kWidth,
551 u, kWidth / 2, 586 u, kWidth / 2,
552 v, kWidth / 2, 587 v, kWidth / 2,
553 kWidth, kHeight)); 588 kWidth, kHeight));
554 EXPECT_TRUE(LoadFrame(y, buf_size, cricket::FOURCC_I422, 589 EXPECT_TRUE(LoadFrame(y, buf_size, cricket::FOURCC_I422,
555 kWidth, kHeight, &frame2)); 590 kWidth, kHeight, &frame2));
556 EXPECT_TRUE(IsEqual(frame1, frame2, 1)); 591 EXPECT_TRUE(IsEqual(frame1, frame2, 1));
557 } 592 }
558 593
559 // Test constructing an image from a YUY2 buffer. 594 // Test constructing an image from a YUY2 buffer.
560 void ConstructYuy2() { 595 void ConstructYuy2() {
561 T frame1, frame2; 596 T frame1, frame2;
562 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 597 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
563 size_t buf_size = kWidth * kHeight * 2; 598 size_t buf_size = kWidth * kHeight * 2;
564 rtc::scoped_ptr<uint8[]> buf(new uint8[buf_size + kAlignment]); 599 rtc::scoped_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment]);
565 uint8* yuy2 = ALIGNP(buf.get(), kAlignment); 600 uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment);
566 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.GetYPlane(), frame1.GetYPitch(), 601 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.GetYPlane(), frame1.GetYPitch(),
567 frame1.GetUPlane(), frame1.GetUPitch(), 602 frame1.GetUPlane(), frame1.GetUPitch(),
568 frame1.GetVPlane(), frame1.GetVPitch(), 603 frame1.GetVPlane(), frame1.GetVPitch(),
569 yuy2, kWidth * 2, 604 yuy2, kWidth * 2,
570 kWidth, kHeight)); 605 kWidth, kHeight));
571 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, 606 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2,
572 kWidth, kHeight, &frame2)); 607 kWidth, kHeight, &frame2));
573 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); 608 EXPECT_TRUE(IsEqual(frame1, frame2, 0));
574 } 609 }
575 610
576 // Test constructing an image from a YUY2 buffer with buffer unaligned. 611 // Test constructing an image from a YUY2 buffer with buffer unaligned.
577 void ConstructYuy2Unaligned() { 612 void ConstructYuy2Unaligned() {
578 T frame1, frame2; 613 T frame1, frame2;
579 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 614 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
580 size_t buf_size = kWidth * kHeight * 2; 615 size_t buf_size = kWidth * kHeight * 2;
581 rtc::scoped_ptr<uint8[]> buf(new uint8[buf_size + kAlignment + 1]); 616 rtc::scoped_ptr<uint8_t[]> buf(new uint8_t[buf_size + kAlignment + 1]);
582 uint8* yuy2 = ALIGNP(buf.get(), kAlignment) + 1; 617 uint8_t* yuy2 = ALIGNP(buf.get(), kAlignment) + 1;
583 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.GetYPlane(), frame1.GetYPitch(), 618 EXPECT_EQ(0, libyuv::I420ToYUY2(frame1.GetYPlane(), frame1.GetYPitch(),
584 frame1.GetUPlane(), frame1.GetUPitch(), 619 frame1.GetUPlane(), frame1.GetUPitch(),
585 frame1.GetVPlane(), frame1.GetVPitch(), 620 frame1.GetVPlane(), frame1.GetVPitch(),
586 yuy2, kWidth * 2, 621 yuy2, kWidth * 2,
587 kWidth, kHeight)); 622 kWidth, kHeight));
588 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2, 623 EXPECT_TRUE(LoadFrame(yuy2, buf_size, cricket::FOURCC_YUY2,
589 kWidth, kHeight, &frame2)); 624 kWidth, kHeight, &frame2));
590 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); 625 EXPECT_TRUE(IsEqual(frame1, frame2, 0));
591 } 626 }
592 627
(...skipping 135 matching lines...)
728 &frame1)); 763 &frame1));
729 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_RAW, 764 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_RAW,
730 kWidth, kHeight, &frame2)); 765 kWidth, kHeight, &frame2));
731 EXPECT_TRUE(IsEqual(frame1, frame2, 2)); 766 EXPECT_TRUE(IsEqual(frame1, frame2, 2));
732 } 767 }
733 768
734 // Test constructing an image from a RGB565 buffer 769 // Test constructing an image from a RGB565 buffer
735 void ConstructRGB565() { 770 void ConstructRGB565() {
736 T frame1, frame2; 771 T frame1, frame2;
737 size_t out_size = kWidth * kHeight * 2; 772 size_t out_size = kWidth * kHeight * 2;
738 rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]); 773 rtc::scoped_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]);
739 uint8* out = ALIGNP(outbuf.get(), kAlignment); 774 uint8_t* out = ALIGNP(outbuf.get(), kAlignment);
740 T frame; 775 T frame;
741 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 776 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
742 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_RGBP, 777 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_RGBP,
743 out, 778 out,
744 out_size, kWidth * 2)); 779 out_size, kWidth * 2));
745 EXPECT_TRUE(LoadFrame(out, out_size, cricket::FOURCC_RGBP, 780 EXPECT_TRUE(LoadFrame(out, out_size, cricket::FOURCC_RGBP,
746 kWidth, kHeight, &frame2)); 781 kWidth, kHeight, &frame2));
747 EXPECT_TRUE(IsEqual(frame1, frame2, 20)); 782 EXPECT_TRUE(IsEqual(frame1, frame2, 20));
748 } 783 }
749 784
750 // Test constructing an image from a ARGB1555 buffer 785 // Test constructing an image from a ARGB1555 buffer
751 void ConstructARGB1555() { 786 void ConstructARGB1555() {
752 T frame1, frame2; 787 T frame1, frame2;
753 size_t out_size = kWidth * kHeight * 2; 788 size_t out_size = kWidth * kHeight * 2;
754 rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]); 789 rtc::scoped_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]);
755 uint8* out = ALIGNP(outbuf.get(), kAlignment); 790 uint8_t* out = ALIGNP(outbuf.get(), kAlignment);
756 T frame; 791 T frame;
757 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 792 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
758 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_RGBO, 793 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_RGBO,
759 out, 794 out,
760 out_size, kWidth * 2)); 795 out_size, kWidth * 2));
761 EXPECT_TRUE(LoadFrame(out, out_size, cricket::FOURCC_RGBO, 796 EXPECT_TRUE(LoadFrame(out, out_size, cricket::FOURCC_RGBO,
762 kWidth, kHeight, &frame2)); 797 kWidth, kHeight, &frame2));
763 EXPECT_TRUE(IsEqual(frame1, frame2, 20)); 798 EXPECT_TRUE(IsEqual(frame1, frame2, 20));
764 } 799 }
765 800
766 // Test constructing an image from a ARGB4444 buffer 801 // Test constructing an image from a ARGB4444 buffer
767 void ConstructARGB4444() { 802 void ConstructARGB4444() {
768 T frame1, frame2; 803 T frame1, frame2;
769 size_t out_size = kWidth * kHeight * 2; 804 size_t out_size = kWidth * kHeight * 2;
770 rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]); 805 rtc::scoped_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]);
771 uint8* out = ALIGNP(outbuf.get(), kAlignment); 806 uint8_t* out = ALIGNP(outbuf.get(), kAlignment);
772 T frame; 807 T frame;
773 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 808 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
774 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_R444, 809 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(cricket::FOURCC_R444,
775 out, 810 out,
776 out_size, kWidth * 2)); 811 out_size, kWidth * 2));
777 EXPECT_TRUE(LoadFrame(out, out_size, cricket::FOURCC_R444, 812 EXPECT_TRUE(LoadFrame(out, out_size, cricket::FOURCC_R444,
778 kWidth, kHeight, &frame2)); 813 kWidth, kHeight, &frame2));
779 EXPECT_TRUE(IsEqual(frame1, frame2, 20)); 814 EXPECT_TRUE(IsEqual(frame1, frame2, 20));
780 } 815 }
781 816
782 // Macro to help test different rotations 817 // Macro to help test different rotations
783 #define TEST_MIRROR(FOURCC, BPP) \ 818 #define TEST_MIRROR(FOURCC, BPP) \
784 void Construct##FOURCC##Mirror() { \ 819 void Construct##FOURCC##Mirror() { \
785 T frame1, frame2, frame3; \ 820 T frame1, frame2, frame3; \
786 rtc::scoped_ptr<rtc::MemoryStream> ms( \ 821 rtc::scoped_ptr<rtc::MemoryStream> ms( \
787 CreateYuvSample(kWidth, kHeight, BPP)); \ 822 CreateYuvSample(kWidth, kHeight, BPP)); \
788 ASSERT_TRUE(ms.get() != NULL); \ 823 ASSERT_TRUE(ms.get() != NULL); \
789 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, \ 824 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, \
790 -kHeight, kWidth, kHeight, \ 825 -kHeight, kWidth, kHeight, \
791 webrtc::kVideoRotation_180, &frame1)); \ 826 webrtc::kVideoRotation_180, &frame1)); \
792 size_t data_size; \ 827 size_t data_size; \
793 bool ret = ms->GetSize(&data_size); \ 828 bool ret = ms->GetSize(&data_size); \
794 EXPECT_TRUE(ret); \ 829 EXPECT_TRUE(ret); \
795 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ 830 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \
796 kHeight, \ 831 kHeight, \
797 reinterpret_cast<uint8*>(ms->GetBuffer()), \ 832 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \
798 data_size, 1, 1, 0, 0, webrtc::kVideoRotation_0)); \ 833 data_size, 1, 1, 0, 0, webrtc::kVideoRotation_0)); \
799 int width_rotate = static_cast<int>(frame1.GetWidth()); \ 834 int width_rotate = static_cast<int>(frame1.GetWidth()); \
800 int height_rotate = static_cast<int>(frame1.GetHeight()); \ 835 int height_rotate = static_cast<int>(frame1.GetHeight()); \
801 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 1, 1, 0, 0)); \ 836 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 1, 1, 0, 0)); \
802 libyuv::I420Mirror( \ 837 libyuv::I420Mirror( \
803 frame2.GetYPlane(), frame2.GetYPitch(), frame2.GetUPlane(), \ 838 frame2.GetYPlane(), frame2.GetYPitch(), frame2.GetUPlane(), \
804 frame2.GetUPitch(), frame2.GetVPlane(), frame2.GetVPitch(), \ 839 frame2.GetUPitch(), frame2.GetVPlane(), frame2.GetVPitch(), \
805 frame3.GetYPlane(), frame3.GetYPitch(), frame3.GetUPlane(), \ 840 frame3.GetYPlane(), frame3.GetYPitch(), frame3.GetUPlane(), \
806 frame3.GetUPitch(), frame3.GetVPlane(), frame3.GetVPitch(), kWidth, \ 841 frame3.GetUPitch(), frame3.GetVPlane(), frame3.GetVPitch(), kWidth, \
807 kHeight); \ 842 kHeight); \
(...skipping 10 matching lines...)
818 CreateYuvSample(kWidth, kHeight, BPP)); \ 853 CreateYuvSample(kWidth, kHeight, BPP)); \
819 ASSERT_TRUE(ms.get() != NULL); \ 854 ASSERT_TRUE(ms.get() != NULL); \
820 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, kHeight, \ 855 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_##FOURCC, kWidth, kHeight, \
821 kWidth, kHeight, webrtc::kVideoRotation_##ROTATE, \ 856 kWidth, kHeight, webrtc::kVideoRotation_##ROTATE, \
822 &frame1)); \ 857 &frame1)); \
823 size_t data_size; \ 858 size_t data_size; \
824 bool ret = ms->GetSize(&data_size); \ 859 bool ret = ms->GetSize(&data_size); \
825 EXPECT_TRUE(ret); \ 860 EXPECT_TRUE(ret); \
826 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \ 861 EXPECT_TRUE(frame2.Init(cricket::FOURCC_##FOURCC, kWidth, kHeight, kWidth, \
827 kHeight, \ 862 kHeight, \
828 reinterpret_cast<uint8*>(ms->GetBuffer()), \ 863 reinterpret_cast<uint8_t*>(ms->GetBuffer()), \
829 data_size, 1, 1, 0, 0, webrtc::kVideoRotation_0)); \ 864 data_size, 1, 1, 0, 0, webrtc::kVideoRotation_0)); \
830 int width_rotate = static_cast<int>(frame1.GetWidth()); \ 865 int width_rotate = static_cast<int>(frame1.GetWidth()); \
831 int height_rotate = static_cast<int>(frame1.GetHeight()); \ 866 int height_rotate = static_cast<int>(frame1.GetHeight()); \
832 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 1, 1, 0, 0)); \ 867 EXPECT_TRUE(frame3.InitToBlack(width_rotate, height_rotate, 1, 1, 0, 0)); \
833 libyuv::I420Rotate( \ 868 libyuv::I420Rotate( \
834 frame2.GetYPlane(), frame2.GetYPitch(), frame2.GetUPlane(), \ 869 frame2.GetYPlane(), frame2.GetYPitch(), frame2.GetUPlane(), \
835 frame2.GetUPitch(), frame2.GetVPlane(), frame2.GetVPitch(), \ 870 frame2.GetUPitch(), frame2.GetVPlane(), frame2.GetVPitch(), \
836 frame3.GetYPlane(), frame3.GetYPitch(), frame3.GetUPlane(), \ 871 frame3.GetYPlane(), frame3.GetYPitch(), frame3.GetUPlane(), \
837 frame3.GetUPitch(), frame3.GetVPlane(), frame3.GetVPitch(), kWidth, \ 872 frame3.GetUPitch(), frame3.GetVPlane(), frame3.GetVPitch(), kWidth, \
838 kHeight, libyuv::kRotate##ROTATE); \ 873 kHeight, libyuv::kRotate##ROTATE); \
(...skipping 86 matching lines...)
925 CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth, kHeight)); 960 CreateYuv422Sample(cricket::FOURCC_YUY2, kWidth, kHeight));
926 ASSERT_TRUE(ms.get() != NULL); 961 ASSERT_TRUE(ms.get() != NULL);
927 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight, 962 EXPECT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_YUY2, kWidth, kHeight,
928 kWidth, kHeight, webrtc::kVideoRotation_270, 963 kWidth, kHeight, webrtc::kVideoRotation_270,
929 &frame2)); 964 &frame2));
930 } 965 }
931 966
932 // Test 1 pixel edge case image I420 buffer. 967 // Test 1 pixel edge case image I420 buffer.
933 void ConstructI4201Pixel() { 968 void ConstructI4201Pixel() {
934 T frame; 969 T frame;
935 uint8 pixel[3] = { 1, 2, 3 }; 970 uint8_t pixel[3] = {1, 2, 3};
936 for (int i = 0; i < repeat_; ++i) { 971 for (int i = 0; i < repeat_; ++i) {
937 EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 1, 1, 1, 1, pixel, 972 EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 1, 1, 1, 1, pixel,
938 sizeof(pixel), 1, 1, 0, 0, 973 sizeof(pixel), 1, 1, 0, 0,
939 webrtc::kVideoRotation_0)); 974 webrtc::kVideoRotation_0));
940 } 975 }
941 const uint8* y = pixel; 976 const uint8_t* y = pixel;
942 const uint8* u = y + 1; 977 const uint8_t* u = y + 1;
943 const uint8* v = u + 1; 978 const uint8_t* v = u + 1;
944 EXPECT_TRUE(IsEqual(frame, 1, 1, 1, 1, 0, 0, 979 EXPECT_TRUE(IsEqual(frame, 1, 1, 1, 1, 0, 0,
945 y, 1, u, 1, v, 1, 0)); 980 y, 1, u, 1, v, 1, 0));
946 } 981 }
947 982
948 // Test 5 pixel edge case image. 983 // Test 5 pixel edge case image.
949 void ConstructI4205Pixel() { 984 void ConstructI4205Pixel() {
950 T frame; 985 T frame;
951 uint8 pixels5x5[5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2]; 986 uint8_t pixels5x5[5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2];
952 memset(pixels5x5, 1, 5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2); 987 memset(pixels5x5, 1, 5 * 5 + ((5 + 1) / 2 * (5 + 1) / 2) * 2);
953 for (int i = 0; i < repeat_; ++i) { 988 for (int i = 0; i < repeat_; ++i) {
954 EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 5, 5, 5, 5, pixels5x5, 989 EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 5, 5, 5, 5, pixels5x5,
955 sizeof(pixels5x5), 1, 1, 0, 0, 990 sizeof(pixels5x5), 1, 1, 0, 0,
956 webrtc::kVideoRotation_0)); 991 webrtc::kVideoRotation_0));
957 } 992 }
958 EXPECT_EQ(5u, frame.GetWidth()); 993 EXPECT_EQ(5u, frame.GetWidth());
959 EXPECT_EQ(5u, frame.GetHeight()); 994 EXPECT_EQ(5u, frame.GetHeight());
960 EXPECT_EQ(5, frame.GetYPitch()); 995 EXPECT_EQ(5, frame.GetYPitch());
961 EXPECT_EQ(3, frame.GetUPitch()); 996 EXPECT_EQ(3, frame.GetUPitch());
962 EXPECT_EQ(3, frame.GetVPitch()); 997 EXPECT_EQ(3, frame.GetVPitch());
963 } 998 }
964 999
965 // Test 1 pixel edge case image ARGB buffer. 1000 // Test 1 pixel edge case image ARGB buffer.
966 void ConstructARGB1Pixel() { 1001 void ConstructARGB1Pixel() {
967 T frame; 1002 T frame;
968 uint8 pixel[4] = { 64, 128, 192, 255 }; 1003 uint8_t pixel[4] = {64, 128, 192, 255};
969 for (int i = 0; i < repeat_; ++i) { 1004 for (int i = 0; i < repeat_; ++i) {
970 EXPECT_TRUE(frame.Init(cricket::FOURCC_ARGB, 1, 1, 1, 1, pixel, 1005 EXPECT_TRUE(frame.Init(cricket::FOURCC_ARGB, 1, 1, 1, 1, pixel,
971 sizeof(pixel), 1, 1, 0, 0, 1006 sizeof(pixel), 1, 1, 0, 0,
972 webrtc::kVideoRotation_0)); 1007 webrtc::kVideoRotation_0));
973 } 1008 }
974 // Convert back to ARGB. 1009 // Convert back to ARGB.
975 size_t out_size = 4; 1010 size_t out_size = 4;
976 rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]); 1011 rtc::scoped_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]);
977 uint8* out = ALIGNP(outbuf.get(), kAlignment); 1012 uint8_t* out = ALIGNP(outbuf.get(), kAlignment);
978 1013
979 EXPECT_EQ(out_size, frame.ConvertToRgbBuffer(cricket::FOURCC_ARGB, 1014 EXPECT_EQ(out_size, frame.ConvertToRgbBuffer(cricket::FOURCC_ARGB,
980 out, 1015 out,
981 out_size, // buffer size 1016 out_size, // buffer size
982 out_size)); // stride 1017 out_size)); // stride
983 #ifdef USE_LMI_CONVERT 1018 #ifdef USE_LMI_CONVERT
984 // TODO(fbarchard): Expected to fail, but not crash. 1019 // TODO(fbarchard): Expected to fail, but not crash.
985 EXPECT_FALSE(IsPlaneEqual("argb", pixel, 4, out, 4, 3, 1, 2)); 1020 EXPECT_FALSE(IsPlaneEqual("argb", pixel, 4, out, 4, 3, 1, 2));
986 #else 1021 #else
987 // TODO(fbarchard): Check for overwrite. 1022 // TODO(fbarchard): Check for overwrite.
988 EXPECT_TRUE(IsPlaneEqual("argb", pixel, 4, out, 4, 3, 1, 2)); 1023 EXPECT_TRUE(IsPlaneEqual("argb", pixel, 4, out, 4, 3, 1, 2));
989 #endif 1024 #endif
990 } 1025 }
991 1026
992 // Test Black, White and Grey pixels. 1027 // Test Black, White and Grey pixels.
993 void ConstructARGBBlackWhitePixel() { 1028 void ConstructARGBBlackWhitePixel() {
994 T frame; 1029 T frame;
995 uint8 pixel[10 * 4] = { 0, 0, 0, 255, // Black. 1030 uint8_t pixel[10 * 4] = {
996 0, 0, 0, 255, 1031 0, 0, 0, 255, // Black.
997 64, 64, 64, 255, // Dark Grey. 1032 0, 0, 0, 255, 64, 64, 64, 255, // Dark Grey.
998 64, 64, 64, 255, 1033 64, 64, 64, 255, 128, 128, 128, 255, // Grey.
999 128, 128, 128, 255, // Grey. 1034 128, 128, 128, 255, 196, 196, 196, 255, // Light Grey.
1000 128, 128, 128, 255, 1035 196, 196, 196, 255, 255, 255, 255, 255, // White.
1001 196, 196, 196, 255, // Light Grey. 1036 255, 255, 255, 255};
1002 196, 196, 196, 255,
1003 255, 255, 255, 255, // White.
1004 255, 255, 255, 255 };
1005 1037
1006 for (int i = 0; i < repeat_; ++i) { 1038 for (int i = 0; i < repeat_; ++i) {
1007 EXPECT_TRUE(frame.Init(cricket::FOURCC_ARGB, 10, 1, 10, 1, pixel, 1039 EXPECT_TRUE(frame.Init(cricket::FOURCC_ARGB, 10, 1, 10, 1, pixel,
1008 sizeof(pixel), 1, 1, 0, 0, 1040 sizeof(pixel), 1, 1, 0, 0,
1009 webrtc::kVideoRotation_0)); 1041 webrtc::kVideoRotation_0));
1010 } 1042 }
1011 // Convert back to ARGB 1043 // Convert back to ARGB
1012 size_t out_size = 10 * 4; 1044 size_t out_size = 10 * 4;
1013 rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment]); 1045 rtc::scoped_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment]);
1014 uint8* out = ALIGNP(outbuf.get(), kAlignment); 1046 uint8_t* out = ALIGNP(outbuf.get(), kAlignment);
1015 1047
1016 EXPECT_EQ(out_size, frame.ConvertToRgbBuffer(cricket::FOURCC_ARGB, 1048 EXPECT_EQ(out_size, frame.ConvertToRgbBuffer(cricket::FOURCC_ARGB,
1017 out, 1049 out,
1018 out_size, // buffer size. 1050 out_size, // buffer size.
1019 out_size)); // stride. 1051 out_size)); // stride.
1020 EXPECT_TRUE(IsPlaneEqual("argb", pixel, out_size, 1052 EXPECT_TRUE(IsPlaneEqual("argb", pixel, out_size,
1021 out, out_size, 1053 out, out_size,
1022 out_size, 1, 2)); 1054 out_size, 1, 2));
1023 } 1055 }
1024 1056
(...skipping 101 matching lines...)
1126 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1158 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1127 ASSERT_TRUE(LoadFrame(kJpeg400Filename, 1159 ASSERT_TRUE(LoadFrame(kJpeg400Filename,
1128 cricket::FOURCC_MJPG, kWidth, kHeight, &frame2)); 1160 cricket::FOURCC_MJPG, kWidth, kHeight, &frame2));
1129 EXPECT_TRUE(IsPlaneEqual("y", frame1.GetYPlane(), frame1.GetYPitch(), 1161 EXPECT_TRUE(IsPlaneEqual("y", frame1.GetYPlane(), frame1.GetYPitch(),
1130 frame2.GetYPlane(), frame2.GetYPitch(), 1162 frame2.GetYPlane(), frame2.GetYPitch(),
1131 kWidth, kHeight, 32)); 1163 kWidth, kHeight, 32));
1132 EXPECT_TRUE(IsEqual(frame1, frame2, 128)); 1164 EXPECT_TRUE(IsEqual(frame1, frame2, 128));
1133 } 1165 }
1134 1166
 1135 // Helper for validating a frame buffer of the given fourcc format. 1167 // Helper for validating a frame buffer of the given fourcc format.
1136 void ValidateFrame(const char* name, uint32 fourcc, int data_adjust, 1168 void ValidateFrame(const char* name,
1137 int size_adjust, bool expected_result) { 1169 uint32_t fourcc,
1170 int data_adjust,
1171 int size_adjust,
1172 bool expected_result) {
1138 T frame; 1173 T frame;
1139 rtc::scoped_ptr<rtc::MemoryStream> ms(LoadSample(name)); 1174 rtc::scoped_ptr<rtc::MemoryStream> ms(LoadSample(name));
1140 ASSERT_TRUE(ms.get() != NULL); 1175 ASSERT_TRUE(ms.get() != NULL);
1141 const uint8* sample = reinterpret_cast<const uint8*>(ms.get()->GetBuffer()); 1176 const uint8_t* sample =
1177 reinterpret_cast<const uint8_t*>(ms.get()->GetBuffer());
1142 size_t sample_size; 1178 size_t sample_size;
1143 ms->GetSize(&sample_size); 1179 ms->GetSize(&sample_size);
1144 // Optional adjust size to test invalid size. 1180 // Optional adjust size to test invalid size.
1145 size_t data_size = sample_size + data_adjust; 1181 size_t data_size = sample_size + data_adjust;
1146 1182
1147 // Allocate a buffer with end page aligned. 1183 // Allocate a buffer with end page aligned.
1148 const int kPadToHeapSized = 16 * 1024 * 1024; 1184 const int kPadToHeapSized = 16 * 1024 * 1024;
1149 rtc::scoped_ptr<uint8[]> page_buffer( 1185 rtc::scoped_ptr<uint8_t[]> page_buffer(
1150 new uint8[((data_size + kPadToHeapSized + 4095) & ~4095)]); 1186 new uint8_t[((data_size + kPadToHeapSized + 4095) & ~4095)]);
1151 uint8* data_ptr = page_buffer.get(); 1187 uint8_t* data_ptr = page_buffer.get();
1152 if (!data_ptr) { 1188 if (!data_ptr) {
1153 LOG(LS_WARNING) << "Failed to allocate memory for ValidateFrame test."; 1189 LOG(LS_WARNING) << "Failed to allocate memory for ValidateFrame test.";
1154 EXPECT_FALSE(expected_result); // NULL is okay if failure was expected. 1190 EXPECT_FALSE(expected_result); // NULL is okay if failure was expected.
1155 return; 1191 return;
1156 } 1192 }
1157 data_ptr += kPadToHeapSized + (-(static_cast<int>(data_size)) & 4095); 1193 data_ptr += kPadToHeapSized + (-(static_cast<int>(data_size)) & 4095);
1158 memcpy(data_ptr, sample, std::min(data_size, sample_size)); 1194 memcpy(data_ptr, sample, std::min(data_size, sample_size));
1159 for (int i = 0; i < repeat_; ++i) { 1195 for (int i = 0; i < repeat_; ++i) {
1160 EXPECT_EQ(expected_result, frame.Validate(fourcc, kWidth, kHeight, 1196 EXPECT_EQ(expected_result, frame.Validate(fourcc, kWidth, kHeight,
1161 data_ptr, 1197 data_ptr,
(...skipping 216 matching lines...)
1378 rtc::scoped_ptr<rtc::MemoryStream> ms( 1414 rtc::scoped_ptr<rtc::MemoryStream> ms(
1379 LoadSample(kImageFilename)); 1415 LoadSample(kImageFilename));
1380 ASSERT_TRUE(ms.get() != NULL); 1416 ASSERT_TRUE(ms.get() != NULL);
1381 size_t data_size; 1417 size_t data_size;
1382 ms->GetSize(&data_size); 1418 ms->GetSize(&data_size);
1383 EXPECT_TRUE(frame1.InitToBlack(kWidth, kHeight, 1, 1, 0, 0)); 1419 EXPECT_TRUE(frame1.InitToBlack(kWidth, kHeight, 1, 1, 0, 0));
1384 EXPECT_TRUE(frame2.InitToBlack(kWidth, kHeight, 1, 1, 0, 0)); 1420 EXPECT_TRUE(frame2.InitToBlack(kWidth, kHeight, 1, 1, 0, 0));
1385 EXPECT_TRUE(IsBlack(frame1)); 1421 EXPECT_TRUE(IsBlack(frame1));
1386 EXPECT_TRUE(IsEqual(frame1, frame2, 0)); 1422 EXPECT_TRUE(IsEqual(frame1, frame2, 0));
1387 EXPECT_TRUE(frame1.Reset(cricket::FOURCC_I420, kWidth, kHeight, kWidth, 1423 EXPECT_TRUE(frame1.Reset(cricket::FOURCC_I420, kWidth, kHeight, kWidth,
1388 kHeight, reinterpret_cast<uint8*>(ms->GetBuffer()), 1424 kHeight,
1389 data_size, 1, 1, 0, 0, rotation, 1425 reinterpret_cast<uint8_t*>(ms->GetBuffer()),
1390 apply_rotation)); 1426 data_size, 1, 1, 0, 0, rotation, apply_rotation));
1391 if (apply_rotation) 1427 if (apply_rotation)
1392 EXPECT_EQ(webrtc::kVideoRotation_0, frame1.GetVideoRotation()); 1428 EXPECT_EQ(webrtc::kVideoRotation_0, frame1.GetVideoRotation());
1393 else 1429 else
1394 EXPECT_EQ(rotation, frame1.GetVideoRotation()); 1430 EXPECT_EQ(rotation, frame1.GetVideoRotation());
1395 1431
 1396 // Swap width and height if the frame is rotated 90 or 270 degrees. 1432 // Swap width and height if the frame is rotated 90 or 270 degrees.
1397 if (apply_rotation && (rotation == webrtc::kVideoRotation_90 1433 if (apply_rotation && (rotation == webrtc::kVideoRotation_90
1398 || rotation == webrtc::kVideoRotation_270)) { 1434 || rotation == webrtc::kVideoRotation_270)) {
1399 EXPECT_TRUE(kHeight == frame1.GetWidth()); 1435 EXPECT_TRUE(kHeight == frame1.GetWidth());
1400 EXPECT_TRUE(kWidth == frame1.GetHeight()); 1436 EXPECT_TRUE(kWidth == frame1.GetHeight());
(...skipping 13 matching lines...)
1414 Reset(webrtc::kVideoRotation_90, false); 1450 Reset(webrtc::kVideoRotation_90, false);
1415 } 1451 }
1416 1452
1417 ////////////////////// 1453 //////////////////////
1418 // Conversion tests // 1454 // Conversion tests //
1419 ////////////////////// 1455 //////////////////////
1420 1456
1421 enum ToFrom { TO, FROM }; 1457 enum ToFrom { TO, FROM };
1422 1458
1423 // Helper function for test converting from I420 to packed formats. 1459 // Helper function for test converting from I420 to packed formats.
1424 inline void ConvertToBuffer(int bpp, int rowpad, bool invert, ToFrom to_from, 1460 inline void ConvertToBuffer(int bpp,
1425 int error, uint32 fourcc, 1461 int rowpad,
1426 int (*RGBToI420)(const uint8* src_frame, int src_stride_frame, 1462 bool invert,
1427 uint8* dst_y, int dst_stride_y, 1463 ToFrom to_from,
1428 uint8* dst_u, int dst_stride_u, 1464 int error,
1429 uint8* dst_v, int dst_stride_v, 1465 uint32_t fourcc,
1430 int width, int height)) { 1466 int (*RGBToI420)(const uint8_t* src_frame,
1467 int src_stride_frame,
1468 uint8_t* dst_y,
1469 int dst_stride_y,
1470 uint8_t* dst_u,
1471 int dst_stride_u,
1472 uint8_t* dst_v,
1473 int dst_stride_v,
1474 int width,
1475 int height)) {
1431 T frame1, frame2; 1476 T frame1, frame2;
1432 int repeat_to = (to_from == TO) ? repeat_ : 1; 1477 int repeat_to = (to_from == TO) ? repeat_ : 1;
1433 int repeat_from = (to_from == FROM) ? repeat_ : 1; 1478 int repeat_from = (to_from == FROM) ? repeat_ : 1;
1434 1479
1435 int astride = kWidth * bpp + rowpad; 1480 int astride = kWidth * bpp + rowpad;
1436 size_t out_size = astride * kHeight; 1481 size_t out_size = astride * kHeight;
1437 rtc::scoped_ptr<uint8[]> outbuf(new uint8[out_size + kAlignment + 1]); 1482 rtc::scoped_ptr<uint8_t[]> outbuf(new uint8_t[out_size + kAlignment + 1]);
1438 memset(outbuf.get(), 0, out_size + kAlignment + 1); 1483 memset(outbuf.get(), 0, out_size + kAlignment + 1);
1439 uint8* outtop = ALIGNP(outbuf.get(), kAlignment); 1484 uint8_t* outtop = ALIGNP(outbuf.get(), kAlignment);
1440 uint8* out = outtop; 1485 uint8_t* out = outtop;
1441 int stride = astride; 1486 int stride = astride;
1442 if (invert) { 1487 if (invert) {
1443 out += (kHeight - 1) * stride; // Point to last row. 1488 out += (kHeight - 1) * stride; // Point to last row.
1444 stride = -stride; 1489 stride = -stride;
1445 } 1490 }
1446 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1491 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1447 1492
1448 for (int i = 0; i < repeat_to; ++i) { 1493 for (int i = 0; i < repeat_to; ++i) {
1449 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(fourcc, 1494 EXPECT_EQ(out_size, frame1.ConvertToRgbBuffer(fourcc,
1450 out, 1495 out,
(...skipping 294 matching lines...)
1745 } 1790 }
1746 void ConvertFromUYVYBufferInverted() { 1791 void ConvertFromUYVYBufferInverted() {
1747 ConvertToBuffer(2, 0, true, FROM, kError, 1792 ConvertToBuffer(2, 0, true, FROM, kError,
1748 cricket::FOURCC_UYVY, libyuv::UYVYToI420); 1793 cricket::FOURCC_UYVY, libyuv::UYVYToI420);
1749 } 1794 }
1750 1795
1751 // Test converting from I420 to I422. 1796 // Test converting from I420 to I422.
1752 void ConvertToI422Buffer() { 1797 void ConvertToI422Buffer() {
1753 T frame1, frame2; 1798 T frame1, frame2;
1754 size_t out_size = kWidth * kHeight * 2; 1799 size_t out_size = kWidth * kHeight * 2;
1755 rtc::scoped_ptr<uint8[]> buf(new uint8[out_size + kAlignment]); 1800 rtc::scoped_ptr<uint8_t[]> buf(new uint8_t[out_size + kAlignment]);
1756 uint8* y = ALIGNP(buf.get(), kAlignment); 1801 uint8_t* y = ALIGNP(buf.get(), kAlignment);
1757 uint8* u = y + kWidth * kHeight; 1802 uint8_t* u = y + kWidth * kHeight;
1758 uint8* v = u + (kWidth / 2) * kHeight; 1803 uint8_t* v = u + (kWidth / 2) * kHeight;
1759 ASSERT_TRUE(LoadFrameNoRepeat(&frame1)); 1804 ASSERT_TRUE(LoadFrameNoRepeat(&frame1));
1760 for (int i = 0; i < repeat_; ++i) { 1805 for (int i = 0; i < repeat_; ++i) {
1761 EXPECT_EQ(0, libyuv::I420ToI422(frame1.GetYPlane(), frame1.GetYPitch(), 1806 EXPECT_EQ(0, libyuv::I420ToI422(frame1.GetYPlane(), frame1.GetYPitch(),
1762 frame1.GetUPlane(), frame1.GetUPitch(), 1807 frame1.GetUPlane(), frame1.GetUPitch(),
1763 frame1.GetVPlane(), frame1.GetVPitch(), 1808 frame1.GetVPlane(), frame1.GetVPitch(),
1764 y, kWidth, 1809 y, kWidth,
1765 u, kWidth / 2, 1810 u, kWidth / 2,
1766 v, kWidth / 2, 1811 v, kWidth / 2,
1767 kWidth, kHeight)); 1812 kWidth, kHeight));
1768 } 1813 }
(...skipping 42 matching lines...)
1811 } 1856 }
1812 1857
1813 void CopyToBuffer() { 1858 void CopyToBuffer() {
1814 T frame; 1859 T frame;
1815 rtc::scoped_ptr<rtc::MemoryStream> ms( 1860 rtc::scoped_ptr<rtc::MemoryStream> ms(
1816 LoadSample(kImageFilename)); 1861 LoadSample(kImageFilename));
1817 ASSERT_TRUE(ms.get() != NULL); 1862 ASSERT_TRUE(ms.get() != NULL);
1818 ASSERT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420, kWidth, kHeight, 1863 ASSERT_TRUE(LoadFrame(ms.get(), cricket::FOURCC_I420, kWidth, kHeight,
1819 &frame)); 1864 &frame));
1820 size_t out_size = kWidth * kHeight * 3 / 2; 1865 size_t out_size = kWidth * kHeight * 3 / 2;
1821 rtc::scoped_ptr<uint8[]> out(new uint8[out_size]); 1866 rtc::scoped_ptr<uint8_t[]> out(new uint8_t[out_size]);
1822 for (int i = 0; i < repeat_; ++i) { 1867 for (int i = 0; i < repeat_; ++i) {
1823 EXPECT_EQ(out_size, frame.CopyToBuffer(out.get(), out_size)); 1868 EXPECT_EQ(out_size, frame.CopyToBuffer(out.get(), out_size));
1824 } 1869 }
1825 EXPECT_EQ(0, memcmp(out.get(), ms->GetBuffer(), out_size)); 1870 EXPECT_EQ(0, memcmp(out.get(), ms->GetBuffer(), out_size));
1826 } 1871 }
1827 1872
1828 void CopyToFrame() { 1873 void CopyToFrame() {
1829 T source; 1874 T source;
1830 rtc::scoped_ptr<rtc::MemoryStream> ms( 1875 rtc::scoped_ptr<rtc::MemoryStream> ms(
1831 LoadSample(kImageFilename)); 1876 LoadSample(kImageFilename));
(...skipping 27 matching lines...)
1859 ms2.SetPosition(0u); // Useful when repeat_ > 1. 1904 ms2.SetPosition(0u); // Useful when repeat_ > 1.
1860 int error; 1905 int error;
1861 EXPECT_EQ(rtc::SR_SUCCESS, frame.Write(&ms2, &error)); 1906 EXPECT_EQ(rtc::SR_SUCCESS, frame.Write(&ms2, &error));
1862 } 1907 }
1863 size_t out_size = cricket::VideoFrame::SizeOf(kWidth, kHeight); 1908 size_t out_size = cricket::VideoFrame::SizeOf(kWidth, kHeight);
1864 EXPECT_EQ(0, memcmp(ms2.GetBuffer(), ms->GetBuffer(), out_size)); 1909 EXPECT_EQ(0, memcmp(ms2.GetBuffer(), ms->GetBuffer(), out_size));
1865 } 1910 }
1866 1911
1867 void CopyToBuffer1Pixel() { 1912 void CopyToBuffer1Pixel() {
1868 size_t out_size = 3; 1913 size_t out_size = 3;
1869 rtc::scoped_ptr<uint8[]> out(new uint8[out_size + 1]); 1914 rtc::scoped_ptr<uint8_t[]> out(new uint8_t[out_size + 1]);
1870 memset(out.get(), 0xfb, out_size + 1); // Fill buffer 1915 memset(out.get(), 0xfb, out_size + 1); // Fill buffer
1871 uint8 pixel[3] = { 1, 2, 3 }; 1916 uint8_t pixel[3] = {1, 2, 3};
1872 T frame; 1917 T frame;
1873 EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 1, 1, 1, 1, pixel, 1918 EXPECT_TRUE(frame.Init(cricket::FOURCC_I420, 1, 1, 1, 1, pixel,
1874 sizeof(pixel), 1, 1, 0, 0, 1919 sizeof(pixel), 1, 1, 0, 0,
1875 webrtc::kVideoRotation_0)); 1920 webrtc::kVideoRotation_0));
1876 for (int i = 0; i < repeat_; ++i) { 1921 for (int i = 0; i < repeat_; ++i) {
1877 EXPECT_EQ(out_size, frame.CopyToBuffer(out.get(), out_size)); 1922 EXPECT_EQ(out_size, frame.CopyToBuffer(out.get(), out_size));
1878 } 1923 }
1879 EXPECT_EQ(1, out.get()[0]); // Check Y. Should be 1. 1924 EXPECT_EQ(1, out.get()[0]); // Check Y. Should be 1.
1880 EXPECT_EQ(2, out.get()[1]); // Check U. Should be 2. 1925 EXPECT_EQ(2, out.get()[1]); // Check U. Should be 2.
1881 EXPECT_EQ(3, out.get()[2]); // Check V. Should be 3. 1926 EXPECT_EQ(3, out.get()[2]); // Check V. Should be 3.
(...skipping 21 matching lines...)
1903 source.StretchToFrame(&target2, true, true); 1948 source.StretchToFrame(&target2, true, true);
1904 EXPECT_TRUE(IsBlack(target2)); 1949 EXPECT_TRUE(IsBlack(target2));
1905 EXPECT_EQ(source.GetElapsedTime(), target2.GetElapsedTime()); 1950 EXPECT_EQ(source.GetElapsedTime(), target2.GetElapsedTime());
1906 EXPECT_EQ(source.GetTimeStamp(), target2.GetTimeStamp()); 1951 EXPECT_EQ(source.GetTimeStamp(), target2.GetTimeStamp());
1907 } 1952 }
1908 1953
1909 int repeat_; 1954 int repeat_;
1910 }; 1955 };
1911 1956
1912 #endif // TALK_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_ 1957 #endif // TALK_MEDIA_BASE_VIDEOFRAME_UNITTEST_H_
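
As a quick sanity check on the ConvertRgbPixel helper in the diff above (whose arithmetic is untouched by this CL apart from the uint8 -> uint8_t rename), the NTSC coefficients send pure white to (Y, U, V) = (235, 128, 128) and pure black to (16, 128, 128). A standalone sketch that duplicates the helper's math outside the test fixture, for illustration only:

  #include <stdint.h>
  #include <cassert>

  // Copy of the conversion arithmetic from the test helper above.
  static void ConvertRgbPixel(uint8_t r, uint8_t g, uint8_t b,
                              uint8_t* y, uint8_t* u, uint8_t* v) {
    *y = static_cast<int>(.257 * r + .504 * g + .098 * b) + 16;
    *u = static_cast<int>(-.148 * r - .291 * g + .439 * b) + 128;
    *v = static_cast<int>(.439 * r - .368 * g - .071 * b) + 128;
  }

  int main() {
    uint8_t y, u, v;
    ConvertRgbPixel(255, 255, 255, &y, &u, &v);  // White.
    assert(y == 235 && u == 128 && v == 128);
    ConvertRgbPixel(0, 0, 0, &y, &u, &v);        // Black.
    assert(y == 16 && u == 128 && v == 128);
    return 0;
  }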