| OLD | NEW |
| 1 /* | 1 /* |
| 2 * libjingle | 2 * libjingle |
| 3 * Copyright 2010 Google Inc. | 3 * Copyright 2010 Google Inc. |
| 4 * | 4 * |
| 5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
| 6 * modification, are permitted provided that the following conditions are met: | 6 * modification, are permitted provided that the following conditions are met: |
| 7 * | 7 * |
| 8 * 1. Redistributions of source code must retain the above copyright notice, | 8 * 1. Redistributions of source code must retain the above copyright notice, |
| 9 * this list of conditions and the following disclaimer. | 9 * this list of conditions and the following disclaimer. |
| 10 * 2. Redistributions in binary form must reproduce the above copyright notice, | 10 * 2. Redistributions in binary form must reproduce the above copyright notice, |
| (...skipping 40 matching lines...) |
| 51 // user. When all frontends do this, consider removing the black frame business. | 51 // user. When all frontends do this, consider removing the black frame business. |
| 52 const int kNumBlackFramesOnMute = 30; | 52 const int kNumBlackFramesOnMute = 30; |
| 53 | 53 |
| 54 // MessageHandler constants. | 54 // MessageHandler constants. |
| 55 enum { | 55 enum { |
| 56 MSG_DO_PAUSE = 0, | 56 MSG_DO_PAUSE = 0, |
| 57 MSG_DO_UNPAUSE, | 57 MSG_DO_UNPAUSE, |
| 58 MSG_STATE_CHANGE | 58 MSG_STATE_CHANGE |
| 59 }; | 59 }; |
| 60 | 60 |
| 61 static const int64 kMaxDistance = ~(static_cast<int64>(1) << 63); | 61 static const int64_t kMaxDistance = ~(static_cast<int64_t>(1) << 63); |
| 62 #ifdef LINUX | 62 #ifdef LINUX |
| 63 static const int kYU12Penalty = 16; // Needs to be higher than MJPG index. | 63 static const int kYU12Penalty = 16; // Needs to be higher than MJPG index. |
| 64 #endif | 64 #endif |
| 65 static const int kDefaultScreencastFps = 5; | 65 static const int kDefaultScreencastFps = 5; |
| 66 typedef rtc::TypedMessageData<CaptureState> StateChangeParams; | 66 typedef rtc::TypedMessageData<CaptureState> StateChangeParams; |
| 67 | 67 |
| 68 // Limit stats data collections to ~20 seconds of 30fps data before dropping | 68 // Limit stats data collections to ~20 seconds of 30fps data before dropping |
| 69 // old data in case stats aren't reset for long periods of time. | 69 // old data in case stats aren't reset for long periods of time. |
| 70 static const size_t kMaxAccumulatorSize = 600; | 70 static const size_t kMaxAccumulatorSize = 600; |
| 71 | 71 |
| 72 } // namespace | 72 } // namespace |
| 73 | 73 |
| 74 ///////////////////////////////////////////////////////////////////// | 74 ///////////////////////////////////////////////////////////////////// |
| 75 // Implementation of struct CapturedFrame | 75 // Implementation of struct CapturedFrame |
| 76 ///////////////////////////////////////////////////////////////////// | 76 ///////////////////////////////////////////////////////////////////// |
| 77 CapturedFrame::CapturedFrame() | 77 CapturedFrame::CapturedFrame() |
| 78 : width(0), | 78 : width(0), |
| 79 height(0), | 79 height(0), |
| 80 fourcc(0), | 80 fourcc(0), |
| 81 pixel_width(0), | 81 pixel_width(0), |
| 82 pixel_height(0), | 82 pixel_height(0), |
| 83 time_stamp(0), | 83 time_stamp(0), |
| 84 data_size(0), | 84 data_size(0), |
| 85 rotation(0), | 85 rotation(0), |
| 86 data(NULL) {} | 86 data(NULL) {} |
| 87 | 87 |
| 88 // TODO(fbarchard): Remove this function once lmimediaengine stops using it. | 88 // TODO(fbarchard): Remove this function once lmimediaengine stops using it. |
| 89 bool CapturedFrame::GetDataSize(uint32* size) const { | 89 bool CapturedFrame::GetDataSize(uint32_t* size) const { |
| 90 if (!size || data_size == CapturedFrame::kUnknownDataSize) { | 90 if (!size || data_size == CapturedFrame::kUnknownDataSize) { |
| 91 return false; | 91 return false; |
| 92 } | 92 } |
| 93 *size = data_size; | 93 *size = data_size; |
| 94 return true; | 94 return true; |
| 95 } | 95 } |
| 96 | 96 |
| 97 webrtc::VideoRotation CapturedFrame::GetRotation() const { | 97 webrtc::VideoRotation CapturedFrame::GetRotation() const { |
| 98 ASSERT(rotation == 0 || rotation == 90 || rotation == 180 || rotation == 270); | 98 ASSERT(rotation == 0 || rotation == 90 || rotation == 180 || rotation == 270); |
| 99 return static_cast<webrtc::VideoRotation>(rotation); | 99 return static_cast<webrtc::VideoRotation>(rotation); |
| (...skipping 168 matching lines...) |
| 268 bool VideoCapturer::GetBestCaptureFormat(const VideoFormat& format, | 268 bool VideoCapturer::GetBestCaptureFormat(const VideoFormat& format, |
| 269 VideoFormat* best_format) { | 269 VideoFormat* best_format) { |
| 270 // TODO(fbarchard): Directly support max_format. | 270 // TODO(fbarchard): Directly support max_format. |
| 271 UpdateFilteredSupportedFormats(); | 271 UpdateFilteredSupportedFormats(); |
| 272 const std::vector<VideoFormat>* supported_formats = GetSupportedFormats(); | 272 const std::vector<VideoFormat>* supported_formats = GetSupportedFormats(); |
| 273 | 273 |
| 274 if (supported_formats->empty()) { | 274 if (supported_formats->empty()) { |
| 275 return false; | 275 return false; |
| 276 } | 276 } |
| 277 LOG(LS_INFO) << " Capture Requested " << format.ToString(); | 277 LOG(LS_INFO) << " Capture Requested " << format.ToString(); |
| 278 int64 best_distance = kMaxDistance; | 278 int64_t best_distance = kMaxDistance; |
| 279 std::vector<VideoFormat>::const_iterator best = supported_formats->end(); | 279 std::vector<VideoFormat>::const_iterator best = supported_formats->end(); |
| 280 std::vector<VideoFormat>::const_iterator i; | 280 std::vector<VideoFormat>::const_iterator i; |
| 281 for (i = supported_formats->begin(); i != supported_formats->end(); ++i) { | 281 for (i = supported_formats->begin(); i != supported_formats->end(); ++i) { |
| 282 int64 distance = GetFormatDistance(format, *i); | 282 int64_t distance = GetFormatDistance(format, *i); |
| 283 // TODO(fbarchard): Reduce to LS_VERBOSE if/when camera capture is | 283 // TODO(fbarchard): Reduce to LS_VERBOSE if/when camera capture is |
| 284 // relatively bug free. | 284 // relatively bug free. |
| 285 LOG(LS_INFO) << " Supported " << i->ToString() << " distance " << distance; | 285 LOG(LS_INFO) << " Supported " << i->ToString() << " distance " << distance; |
| 286 if (distance < best_distance) { | 286 if (distance < best_distance) { |
| 287 best_distance = distance; | 287 best_distance = distance; |
| 288 best = i; | 288 best = i; |
| 289 } | 289 } |
| 290 } | 290 } |
| 291 if (supported_formats->end() == best) { | 291 if (supported_formats->end() == best) { |
| 292 LOG(LS_ERROR) << " No acceptable camera format found"; | 292 LOG(LS_ERROR) << " No acceptable camera format found"; |
| (...skipping 61 matching lines...) |
| 354 } else { | 354 } else { |
| 355 --black_frame_count_down_; | 355 --black_frame_count_down_; |
| 356 } | 356 } |
| 357 } | 357 } |
| 358 | 358 |
| 359 if (SignalVideoFrame.is_empty()) { | 359 if (SignalVideoFrame.is_empty()) { |
| 360 return; | 360 return; |
| 361 } | 361 } |
| 362 | 362 |
| 363 // Use a temporary buffer to scale | 363 // Use a temporary buffer to scale |
| 364 rtc::scoped_ptr<uint8[]> scale_buffer; | 364 rtc::scoped_ptr<uint8_t[]> scale_buffer; |
| 365 | 365 |
| 366 if (IsScreencast()) { | 366 if (IsScreencast()) { |
| 367 int scaled_width, scaled_height; | 367 int scaled_width, scaled_height; |
| 368 if (screencast_max_pixels_ > 0) { | 368 if (screencast_max_pixels_ > 0) { |
| 369 ComputeScaleMaxPixels(captured_frame->width, captured_frame->height, | 369 ComputeScaleMaxPixels(captured_frame->width, captured_frame->height, |
| 370 screencast_max_pixels_, &scaled_width, &scaled_height); | 370 screencast_max_pixels_, &scaled_width, &scaled_height); |
| 371 } else { | 371 } else { |
| 372 int desired_screencast_fps = capture_format_.get() ? | 372 int desired_screencast_fps = capture_format_.get() ? |
| 373 VideoFormat::IntervalToFps(capture_format_->interval) : | 373 VideoFormat::IntervalToFps(capture_format_->interval) : |
| 374 kDefaultScreencastFps; | 374 kDefaultScreencastFps; |
| 375 ComputeScale(captured_frame->width, captured_frame->height, | 375 ComputeScale(captured_frame->width, captured_frame->height, |
| 376 desired_screencast_fps, &scaled_width, &scaled_height); | 376 desired_screencast_fps, &scaled_width, &scaled_height); |
| 377 } | 377 } |
| 378 | 378 |
| 379 if (FOURCC_ARGB == captured_frame->fourcc && | 379 if (FOURCC_ARGB == captured_frame->fourcc && |
| 380 (scaled_width != captured_frame->width || | 380 (scaled_width != captured_frame->width || |
| 381 scaled_height != captured_frame->height)) { | 381 scaled_height != captured_frame->height)) { |
| 382 if (scaled_width != scaled_width_ || scaled_height != scaled_height_) { | 382 if (scaled_width != scaled_width_ || scaled_height != scaled_height_) { |
| 383 LOG(LS_INFO) << "Scaling Screencast from " | 383 LOG(LS_INFO) << "Scaling Screencast from " |
| 384 << captured_frame->width << "x" | 384 << captured_frame->width << "x" |
| 385 << captured_frame->height << " to " | 385 << captured_frame->height << " to " |
| 386 << scaled_width << "x" << scaled_height; | 386 << scaled_width << "x" << scaled_height; |
| 387 scaled_width_ = scaled_width; | 387 scaled_width_ = scaled_width; |
| 388 scaled_height_ = scaled_height; | 388 scaled_height_ = scaled_height; |
| 389 } | 389 } |
| 390 CapturedFrame* modified_frame = | 390 CapturedFrame* modified_frame = |
| 391 const_cast<CapturedFrame*>(captured_frame); | 391 const_cast<CapturedFrame*>(captured_frame); |
| 392 const int modified_frame_size = scaled_width * scaled_height * 4; | 392 const int modified_frame_size = scaled_width * scaled_height * 4; |
| 393 scale_buffer.reset(new uint8[modified_frame_size]); | 393 scale_buffer.reset(new uint8_t[modified_frame_size]); |
| 394 // Compute new width such that width * height is less than maximum but | 394 // Compute new width such that width * height is less than maximum but |
| 395 // maintains original captured frame aspect ratio. | 395 // maintains original captured frame aspect ratio. |
| 396 // Round down width to multiple of 4 so odd width won't round up beyond | 396 // Round down width to multiple of 4 so odd width won't round up beyond |
| 397 // maximum, and so chroma channel is even width to simplify spatial | 397 // maximum, and so chroma channel is even width to simplify spatial |
| 398 // resampling. | 398 // resampling. |
| 399 libyuv::ARGBScale(reinterpret_cast<const uint8*>(captured_frame->data), | 399 libyuv::ARGBScale(reinterpret_cast<const uint8_t*>(captured_frame->data), |
| 400 captured_frame->width * 4, captured_frame->width, | 400 captured_frame->width * 4, captured_frame->width, |
| 401 captured_frame->height, | 401 captured_frame->height, scale_buffer.get(), |
| 402 scale_buffer.get(), | |
| 403 scaled_width * 4, scaled_width, scaled_height, | 402 scaled_width * 4, scaled_width, scaled_height, |
| 404 libyuv::kFilterBilinear); | 403 libyuv::kFilterBilinear); |
| 405 modified_frame->width = scaled_width; | 404 modified_frame->width = scaled_width; |
| 406 modified_frame->height = scaled_height; | 405 modified_frame->height = scaled_height; |
| 407 modified_frame->data_size = scaled_width * 4 * scaled_height; | 406 modified_frame->data_size = scaled_width * 4 * scaled_height; |
| 408 modified_frame->data = scale_buffer.get(); | 407 modified_frame->data = scale_buffer.get(); |
| 409 } | 408 } |
| 410 } | 409 } |
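
A rough illustration of the scaling policy the screencast branch above relies on (an assumption inferred from the comments in this block, not the actual ComputeScale()/ComputeScaleMaxPixels() code, which may instead step through fixed scale factors): pick the largest output size whose pixel count stays under a cap, keep the captured aspect ratio, and round the width down to a multiple of 4 so the chroma planes stay even-width.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>

// Hypothetical sketch only; not the library's implementation.
void ScaleToMaxPixelsSketch(int in_w, int in_h, int max_pixels,
                            int* out_w, int* out_h) {
  double scale = 1.0;
  if (in_w * in_h > max_pixels) {
    scale = std::sqrt(static_cast<double>(max_pixels) /
                      (static_cast<double>(in_w) * in_h));
  }
  int w = static_cast<int>(in_w * scale) & ~3;  // round down to multiple of 4
  *out_w = std::max(w, 4);
  // Preserve the source aspect ratio for the height.
  *out_h = static_cast<int>(static_cast<int64_t>(in_h) * (*out_w) / in_w);
}
```
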
| 411 | 410 |
| 412 const int kYuy2Bpp = 2; | 411 const int kYuy2Bpp = 2; |
| 413 const int kArgbBpp = 4; | 412 const int kArgbBpp = 4; |
| 414 // TODO(fbarchard): Make a helper function to adjust pixels to square. | 413 // TODO(fbarchard): Make a helper function to adjust pixels to square. |
| 415 // TODO(fbarchard): Hook up experiment to scaling. | 414 // TODO(fbarchard): Hook up experiment to scaling. |
| 416 // TODO(fbarchard): Avoid scale and convert if muted. | 415 // TODO(fbarchard): Avoid scale and convert if muted. |
| 417 // Temporary buffer is scoped here so it will persist until i420_frame.Init() | 416 // Temporary buffer is scoped here so it will persist until i420_frame.Init() |
| 418 // makes a copy of the frame, converting to I420. | 417 // makes a copy of the frame, converting to I420. |
| 419 rtc::scoped_ptr<uint8[]> temp_buffer; | 418 rtc::scoped_ptr<uint8_t[]> temp_buffer; |
| 420 // YUY2 can be scaled vertically using an ARGB scaler. Aspect ratio is only | 419 // YUY2 can be scaled vertically using an ARGB scaler. Aspect ratio is only |
| 421 // a problem on OSX. OSX always converts webcams to YUY2 or UYVY. | 420 // a problem on OSX. OSX always converts webcams to YUY2 or UYVY. |
| 422 bool can_scale = | 421 bool can_scale = |
| 423 FOURCC_YUY2 == CanonicalFourCC(captured_frame->fourcc) || | 422 FOURCC_YUY2 == CanonicalFourCC(captured_frame->fourcc) || |
| 424 FOURCC_UYVY == CanonicalFourCC(captured_frame->fourcc); | 423 FOURCC_UYVY == CanonicalFourCC(captured_frame->fourcc); |
| 425 | 424 |
| 426 // If pixels are not square, optionally use vertical scaling to make them | 425 // If pixels are not square, optionally use vertical scaling to make them |
| 427 // square. Square pixels simplify the rest of the pipeline, including | 426 // square. Square pixels simplify the rest of the pipeline, including |
| 428 // effects and rendering. | 427 // effects and rendering. |
| 429 if (can_scale && square_pixel_aspect_ratio_ && | 428 if (can_scale && square_pixel_aspect_ratio_ && |
| (...skipping 13 matching lines...) |
| 443 << captured_frame->width << "x" | 442 << captured_frame->width << "x" |
| 444 << captured_frame->height << " to " | 443 << captured_frame->height << " to " |
| 445 << scaled_width << "x" << scaled_height | 444 << scaled_width << "x" << scaled_height |
| 446 << " for PAR " | 445 << " for PAR " |
| 447 << captured_frame->pixel_width << "x" | 446 << captured_frame->pixel_width << "x" |
| 448 << captured_frame->pixel_height; | 447 << captured_frame->pixel_height; |
| 449 scaled_width_ = scaled_width; | 448 scaled_width_ = scaled_width; |
| 450 scaled_height_ = scaled_height; | 449 scaled_height_ = scaled_height; |
| 451 } | 450 } |
| 452 const int modified_frame_size = scaled_width * scaled_height * kYuy2Bpp; | 451 const int modified_frame_size = scaled_width * scaled_height * kYuy2Bpp; |
| 453 uint8* temp_buffer_data; | 452 uint8_t* temp_buffer_data; |
| 454 // Pixels are wide and short; Increasing height. Requires temporary buffer. | 453 // Pixels are wide and short; Increasing height. Requires temporary buffer. |
| 455 if (scaled_height > captured_frame->height) { | 454 if (scaled_height > captured_frame->height) { |
| 456 temp_buffer.reset(new uint8[modified_frame_size]); | 455 temp_buffer.reset(new uint8_t[modified_frame_size]); |
| 457 temp_buffer_data = temp_buffer.get(); | 456 temp_buffer_data = temp_buffer.get(); |
| 458 } else { | 457 } else { |
| 459 // Pixels are narrow and tall; Decreasing height. Scale will be done | 458 // Pixels are narrow and tall; Decreasing height. Scale will be done |
| 460 // in place. | 459 // in place. |
| 461 temp_buffer_data = reinterpret_cast<uint8*>(captured_frame->data); | 460 temp_buffer_data = reinterpret_cast<uint8_t*>(captured_frame->data); |
| 462 } | 461 } |
| 463 | 462 |
| 464 // Use ARGBScaler to vertically scale the YUY2 image, adjusting for 16 bpp. | 463 // Use ARGBScaler to vertically scale the YUY2 image, adjusting for 16 bpp. |
| 465 libyuv::ARGBScale(reinterpret_cast<const uint8*>(captured_frame->data), | 464 libyuv::ARGBScale(reinterpret_cast<const uint8_t*>(captured_frame->data), |
| 466 captured_frame->width * kYuy2Bpp, // Stride for YUY2. | 465 captured_frame->width * kYuy2Bpp, // Stride for YUY2. |
| 467 captured_frame->width * kYuy2Bpp / kArgbBpp, // Width. | 466 captured_frame->width * kYuy2Bpp / kArgbBpp, // Width. |
| 468 abs(captured_frame->height), // Height. | 467 abs(captured_frame->height), // Height. |
| 469 temp_buffer_data, | 468 temp_buffer_data, |
| 470 scaled_width * kYuy2Bpp, // Stride for YUY2. | 469 scaled_width * kYuy2Bpp, // Stride for YUY2. |
| 471 scaled_width * kYuy2Bpp / kArgbBpp, // Width. | 470 scaled_width * kYuy2Bpp / kArgbBpp, // Width. |
| 472 abs(scaled_height), // New height. | 471 abs(scaled_height), // New height. |
| 473 libyuv::kFilterBilinear); | 472 libyuv::kFilterBilinear); |
| 474 modified_frame->width = scaled_width; | 473 modified_frame->width = scaled_width; |
| 475 modified_frame->height = scaled_height; | 474 modified_frame->height = scaled_height; |
| 476 modified_frame->pixel_width = 1; | 475 modified_frame->pixel_width = 1; |
| 477 modified_frame->pixel_height = 1; | 476 modified_frame->pixel_height = 1; |
| 478 modified_frame->data_size = modified_frame_size; | 477 modified_frame->data_size = modified_frame_size; |
| 479 modified_frame->data = temp_buffer_data; | 478 modified_frame->data = temp_buffer_data; |
| 480 } | 479 } |
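
A note on the width arithmetic in the ARGBScale call above: two YUY2 pixels (Y0 U Y1 V) occupy 4 bytes, exactly one ARGB pixel, so passing width * kYuy2Bpp / kArgbBpp as the "width" lets the ARGB scaler move YUY2 rows vertically without disturbing the horizontal byte layout. The tiny helper below is illustrative only, not part of this CL.

```cpp
constexpr int kYuy2Bpp = 2;  // bytes per YUY2 pixel
constexpr int kArgbBpp = 4;  // bytes per ARGB pixel

// "Fake ARGB width" for a YUY2 row: same byte count, half the pixel count.
constexpr int ArgbWidthForYuy2(int yuy2_width_pixels) {
  return yuy2_width_pixels * kYuy2Bpp / kArgbBpp;
}

static_assert(ArgbWidthForYuy2(640) == 320, "640 YUY2 px -> 320 ARGB px");
static_assert(ArgbWidthForYuy2(1280) == 640, "1280 YUY2 px -> 640 ARGB px");
```
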
| 481 | 480 |
| 482 // Size to crop captured frame to. This adjusts the captured frames | 481 // Size to crop captured frame to. This adjusts the captured frames |
| (...skipping 99 matching lines...) |
| 582 } | 581 } |
| 583 } | 582 } |
| 584 | 583 |
| 585 // Get the distance between the supported and desired formats. | 584 // Get the distance between the supported and desired formats. |
| 586 // Prioritization is done according to this algorithm: | 585 // Prioritization is done according to this algorithm: |
| 587 // 1) Width closeness. If not same, we prefer wider. | 586 // 1) Width closeness. If not same, we prefer wider. |
| 588 // 2) Height closeness. If not same, we prefer higher. | 587 // 2) Height closeness. If not same, we prefer higher. |
| 589 // 3) Framerate closeness. If not same, we prefer faster. | 588 // 3) Framerate closeness. If not same, we prefer faster. |
| 590 // 4) Compression. If desired format has a specific fourcc, we need exact match; | 589 // 4) Compression. If desired format has a specific fourcc, we need exact match; |
| 591 // otherwise, we use preference. | 590 // otherwise, we use preference. |
| 592 int64 VideoCapturer::GetFormatDistance(const VideoFormat& desired, | 591 int64_t VideoCapturer::GetFormatDistance(const VideoFormat& desired, |
| 593 const VideoFormat& supported) { | 592 const VideoFormat& supported) { |
| 594 int64 distance = kMaxDistance; | 593 int64_t distance = kMaxDistance; |
| 595 | 594 |
| 596 // Check fourcc. | 595 // Check fourcc. |
| 597 uint32 supported_fourcc = CanonicalFourCC(supported.fourcc); | 596 uint32_t supported_fourcc = CanonicalFourCC(supported.fourcc); |
| 598 int64 delta_fourcc = kMaxDistance; | 597 int64_t delta_fourcc = kMaxDistance; |
| 599 if (FOURCC_ANY == desired.fourcc) { | 598 if (FOURCC_ANY == desired.fourcc) { |
| 600 // Any fourcc is OK for the desired. Use preference to find best fourcc. | 599 // Any fourcc is OK for the desired. Use preference to find best fourcc. |
| 601 std::vector<uint32> preferred_fourccs; | 600 std::vector<uint32_t> preferred_fourccs; |
| 602 if (!GetPreferredFourccs(&preferred_fourccs)) { | 601 if (!GetPreferredFourccs(&preferred_fourccs)) { |
| 603 return distance; | 602 return distance; |
| 604 } | 603 } |
| 605 | 604 |
| 606 for (size_t i = 0; i < preferred_fourccs.size(); ++i) { | 605 for (size_t i = 0; i < preferred_fourccs.size(); ++i) { |
| 607 if (supported_fourcc == CanonicalFourCC(preferred_fourccs[i])) { | 606 if (supported_fourcc == CanonicalFourCC(preferred_fourccs[i])) { |
| 608 delta_fourcc = i; | 607 delta_fourcc = i; |
| 609 #ifdef LINUX | 608 #ifdef LINUX |
| 610 // For HD avoid YU12 which is a software conversion and has 2 bugs | 609 // For HD avoid YU12 which is a software conversion and has 2 bugs |
| 611 // b/7326348 b/6960899. Reenable when fixed. | 610 // b/7326348 b/6960899. Reenable when fixed. |
| (...skipping 10 matching lines...) |
| 622 } | 621 } |
| 623 | 622 |
| 624 if (kMaxDistance == delta_fourcc) { | 623 if (kMaxDistance == delta_fourcc) { |
| 625 // Failed to match fourcc. | 624 // Failed to match fourcc. |
| 626 return distance; | 625 return distance; |
| 627 } | 626 } |
| 628 | 627 |
| 629 // Check resolution and fps. | 628 // Check resolution and fps. |
| 630 int desired_width = desired.width; | 629 int desired_width = desired.width; |
| 631 int desired_height = desired.height; | 630 int desired_height = desired.height; |
| 632 int64 delta_w = supported.width - desired_width; | 631 int64_t delta_w = supported.width - desired_width; |
| 633 float supported_fps = VideoFormat::IntervalToFpsFloat(supported.interval); | 632 float supported_fps = VideoFormat::IntervalToFpsFloat(supported.interval); |
| 634 float delta_fps = | 633 float delta_fps = |
| 635 supported_fps - VideoFormat::IntervalToFpsFloat(desired.interval); | 634 supported_fps - VideoFormat::IntervalToFpsFloat(desired.interval); |
| 636 // Compare the supported height to the height we would like it to be. | 635 // Compare the supported height to the height we would like it to be. |
| 637 int64 aspect_h = | 636 int64_t aspect_h = desired_width |
| 638 desired_width ? supported.width * desired_height / desired_width | 637 ? supported.width * desired_height / desired_width |
| 639 : desired_height; | 638 : desired_height; |
| 640 int64 delta_h = supported.height - aspect_h; | 639 int64_t delta_h = supported.height - aspect_h; |
| 641 | 640 |
| 642 distance = 0; | 641 distance = 0; |
| 643 // Set high penalty if the supported format is lower than the desired format. | 642 // Set high penalty if the supported format is lower than the desired format. |
| 644 // 3x means we would prefer to scale down to 3/4 rather than up to double. | 643 // 3x means we would prefer to scale down to 3/4 rather than up to double. |
| 645 // But we'd prefer up to double rather than down to 1/2. This is conservative, | 644 // But we'd prefer up to double rather than down to 1/2. This is conservative, |
| 646 // strongly avoiding going down in resolution, similar to | 645 // strongly avoiding going down in resolution, similar to |
| 647 // the old method, but not completely ruling it out in extreme situations. | 646 // the old method, but not completely ruling it out in extreme situations. |
| 648 // It also ignores framerate, which is often very low at high resolutions. | 647 // It also ignores framerate, which is often very low at high resolutions. |
| 649 // TODO(fbarchard): Improve logic to use weighted factors. | 648 // TODO(fbarchard): Improve logic to use weighted factors. |
| 650 static const int kDownPenalty = -3; | 649 static const int kDownPenalty = -3; |
| 651 if (delta_w < 0) { | 650 if (delta_w < 0) { |
| 652 delta_w = delta_w * kDownPenalty; | 651 delta_w = delta_w * kDownPenalty; |
| 653 } | 652 } |
| 654 if (delta_h < 0) { | 653 if (delta_h < 0) { |
| 655 delta_h = delta_h * kDownPenalty; | 654 delta_h = delta_h * kDownPenalty; |
| 656 } | 655 } |
| 657 // Require camera fps to be at least 80% of what is requested if resolution | 656 // Require camera fps to be at least 80% of what is requested if resolution |
| 658 // matches. | 657 // matches. |
| 659 // Require camera fps to be at least 96% of what is requested, or higher, | 658 // Require camera fps to be at least 96% of what is requested, or higher, |
| 660 // if resolution differs. 96% allows for slight variations in fps. e.g. 29.97 | 659 // if resolution differs. 96% allows for slight variations in fps. e.g. 29.97 |
| 661 if (delta_fps < 0) { | 660 if (delta_fps < 0) { |
| 662 float min_desirable_fps = delta_w ? | 661 float min_desirable_fps = delta_w ? |
| 663 VideoFormat::IntervalToFpsFloat(desired.interval) * 28.f / 30.f : | 662 VideoFormat::IntervalToFpsFloat(desired.interval) * 28.f / 30.f : |
| 664 VideoFormat::IntervalToFpsFloat(desired.interval) * 23.f / 30.f; | 663 VideoFormat::IntervalToFpsFloat(desired.interval) * 23.f / 30.f; |
| 665 delta_fps = -delta_fps; | 664 delta_fps = -delta_fps; |
| 666 if (supported_fps < min_desirable_fps) { | 665 if (supported_fps < min_desirable_fps) { |
| 667 distance |= static_cast<int64>(1) << 62; | 666 distance |= static_cast<int64_t>(1) << 62; |
| 668 } else { | 667 } else { |
| 669 distance |= static_cast<int64>(1) << 15; | 668 distance |= static_cast<int64_t>(1) << 15; |
| 670 } | 669 } |
| 671 } | 670 } |
| 672 int64 idelta_fps = static_cast<int>(delta_fps); | 671 int64_t idelta_fps = static_cast<int>(delta_fps); |
| 673 | 672 |
| 674 // 12 bits for width and height and 8 bits for fps and fourcc. | 673 // 12 bits for width and height and 8 bits for fps and fourcc. |
| 675 distance |= | 674 distance |= |
| 676 (delta_w << 28) | (delta_h << 16) | (idelta_fps << 8) | delta_fourcc; | 675 (delta_w << 28) | (delta_h << 16) | (idelta_fps << 8) | delta_fourcc; |
| 677 | 676 |
| 678 return distance; | 677 return distance; |
| 679 } | 678 } |
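
To make the ordering concrete, here is a minimal, self-contained sketch of how the packed distance compares candidates. PackDistance and the numbers are hypothetical; they only mirror the shift layout used in GetFormatDistance() (width delta in the highest bits, then height, fps, fourcc), which is what makes an earlier criterion dominate the later ones.

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical helper mirroring the bit packing above.
int64_t PackDistance(int64_t delta_w, int64_t delta_h,
                     int64_t delta_fps, int64_t delta_fourcc) {
  return (delta_w << 28) | (delta_h << 16) | (delta_fps << 8) | delta_fourcc;
}

int main() {
  // A 16-pixel width mismatch outweighs any fps or fourcc difference.
  assert(PackDistance(16, 0, 0, 0) > PackDistance(0, 0, 29, 255));
  // With equal width deltas, the smaller height delta wins.
  assert(PackDistance(16, 8, 0, 0) < PackDistance(16, 9, 0, 0));
  return 0;
}
```
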
| 680 | 679 |
| 681 void VideoCapturer::UpdateFilteredSupportedFormats() { | 680 void VideoCapturer::UpdateFilteredSupportedFormats() { |
| 682 filtered_supported_formats_.clear(); | 681 filtered_supported_formats_.clear(); |
| (...skipping 48 matching lines...) |
| 731 void VideoCapturer::GetVariableSnapshot( | 730 void VideoCapturer::GetVariableSnapshot( |
| 732 const rtc::RollingAccumulator<T>& data, | 731 const rtc::RollingAccumulator<T>& data, |
| 733 VariableInfo<T>* stats) { | 732 VariableInfo<T>* stats) { |
| 734 stats->max_val = data.ComputeMax(); | 733 stats->max_val = data.ComputeMax(); |
| 735 stats->mean = data.ComputeMean(); | 734 stats->mean = data.ComputeMean(); |
| 736 stats->min_val = data.ComputeMin(); | 735 stats->min_val = data.ComputeMin(); |
| 737 stats->variance = data.ComputeVariance(); | 736 stats->variance = data.ComputeVariance(); |
| 738 } | 737 } |
| 739 | 738 |
| 740 } // namespace cricket | 739 } // namespace cricket |