| Index: webrtc/media/base/videocapturer.cc
| diff --git a/webrtc/media/base/videocapturer.cc b/webrtc/media/base/videocapturer.cc
| index 9d297bf9141e51941d606df52a3e9589754545b3..964ba99ab1917131ed716c2cb3e6c7401f177456 100644
| --- a/webrtc/media/base/videocapturer.cc
| +++ b/webrtc/media/base/videocapturer.cc
| @@ -70,7 +70,6 @@ void VideoCapturer::Construct() {
| SignalFrameCaptured.connect(this, &VideoCapturer::OnFrameCaptured);
| scaled_width_ = 0;
| scaled_height_ = 0;
| - enable_video_adapter_ = true;
| // There are lots of video capturers out there that don't call
| // set_frame_factory. We can either go change all of them, or we
| // can set this default.
| @@ -168,6 +167,14 @@ std::string VideoCapturer::ToString(const CapturedFrame* captured_frame) const {
| return ss.str();
| }
|
| +bool VideoCapturer::enable_video_adapter() const {
| + return !IsScreencast() && adapted_source_.enable_video_adapter();
| +}
| +void VideoCapturer::set_enable_video_adapter(bool enable_video_adapter) {
| + adapted_source_.set_enable_video_adapter(
| + enable_video_adapter && !IsScreencast());
| +}
| +
| void VideoCapturer::set_frame_factory(VideoFrameFactory* frame_factory) {
| frame_factory_.reset(frame_factory);
| if (frame_factory) {
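
The new accessors above delegate the adapter toggle to adapted_source_, and both the getter and the setter keep screencasts out of adaptation. The declaration of adapted_source_ is not part of this file's diff; the sketch below shows the interface it would need to expose, inferred only from the calls this diff makes on it. Class name and forward declarations are placeholders for reading the diff, not the actual WebRTC declaration.

#include <cstdint>

// Hypothetical surface of the adapted_source_ member, inferred from this diff.
// The forward declarations mirror the rtc/cricket types already used in this file.
namespace cricket { class VideoFrame; }
namespace rtc {
template <typename VideoFrameT> class VideoSinkInterface;
struct VideoSinkWants;
}

class AdaptedSourceSketch {  // name is a placeholder
 public:
  // Adapter toggle, consulted by VideoCapturer::enable_video_adapter().
  bool enable_video_adapter() const;
  void set_enable_video_adapter(bool enable);

  // Sink management, previously done on broadcaster_ directly.
  void AddOrUpdateSink(rtc::VideoSinkInterface<cricket::VideoFrame>* sink,
                       const rtc::VideoSinkWants& wants);
  void RemoveSink(rtc::VideoSinkInterface<cricket::VideoFrame>* sink);
  rtc::VideoSinkWants wants() const;

  // Adaptation plus timestamp translation, replacing VideoCapturer::AdaptFrame().
  bool AdaptFrame(int width, int height,
                  int64_t camera_time_us, int64_t system_time_us,
                  int* out_width, int* out_height,
                  int* crop_width, int* crop_height,
                  int* crop_x, int* crop_y,
                  int64_t* translated_camera_time_us);

  // Frame delivery to the registered sinks.
  void OnFrame(const cricket::VideoFrame& frame);
};
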
| @@ -189,16 +196,16 @@ bool VideoCapturer::GetInputSize(int* width, int* height) {
| void VideoCapturer::RemoveSink(
| rtc::VideoSinkInterface<cricket::VideoFrame>* sink) {
| RTC_DCHECK(thread_checker_.CalledOnValidThread());
| - broadcaster_.RemoveSink(sink);
| - OnSinkWantsChanged(broadcaster_.wants());
| + adapted_source_.RemoveSink(sink);
| + OnSinkWantsChanged(adapted_source_.wants());
| }
|
| void VideoCapturer::AddOrUpdateSink(
| rtc::VideoSinkInterface<cricket::VideoFrame>* sink,
| const rtc::VideoSinkWants& wants) {
| RTC_DCHECK(thread_checker_.CalledOnValidThread());
| - broadcaster_.AddOrUpdateSink(sink, wants);
| - OnSinkWantsChanged(broadcaster_.wants());
| + adapted_source_.AddOrUpdateSink(sink, wants);
| + OnSinkWantsChanged(adapted_source_.wants());
| }
|
| void VideoCapturer::OnSinkWantsChanged(const rtc::VideoSinkWants& wants) {
| @@ -207,56 +214,6 @@ void VideoCapturer::OnSinkWantsChanged(const rtc::VideoSinkWants& wants) {
| if (frame_factory_) {
| frame_factory_->SetApplyRotation(apply_rotation_);
| }
| -
| - if (video_adapter()) {
| - video_adapter()->OnResolutionRequest(wants.max_pixel_count,
| - wants.max_pixel_count_step_up);
| - }
| -}
| -
| -bool VideoCapturer::AdaptFrame(int width,
| - int height,
| - int64_t camera_time_us,
| - int64_t system_time_us,
| - int* out_width,
| - int* out_height,
| - int* crop_width,
| - int* crop_height,
| - int* crop_x,
| - int* crop_y,
| - int64_t* translated_camera_time_us) {
| - int64_t offset_us =
| - translated_camera_time_us
| - ? timestamp_aligner_.UpdateOffset(camera_time_us, system_time_us)
| - : 0;
| -
| - if (!broadcaster_.frame_wanted()) {
| - return false;
| - }
| -
| - if (enable_video_adapter_ && !IsScreencast()) {
| - if (!video_adapter_.AdaptFrameResolution(
| - width, height, camera_time_us * rtc::kNumNanosecsPerMicrosec,
| - crop_width, crop_height, out_width, out_height)) {
| - // VideoAdapter dropped the frame.
| - return false;
| - }
| - *crop_x = (width - *crop_width) / 2;
| - *crop_y = (height - *crop_height) / 2;
| - } else {
| - *out_width = width;
| - *out_height = height;
| - *crop_width = width;
| - *crop_height = height;
| - *crop_x = 0;
| - *crop_y = 0;
| - }
| -
| - if (translated_camera_time_us) {
| - *translated_camera_time_us = timestamp_aligner_.ClipTimestamp(
| - camera_time_us + offset_us, system_time_us);
| - }
| - return true;
| }
|
| void VideoCapturer::OnFrameCaptured(VideoCapturer*,
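
The removed AdaptFrame() body did three things that now presumably happen inside adapted_source_.AdaptFrame(): drop the frame when no sink wants one, let the VideoAdapter pick an output and crop size, and translate the camera timestamp onto the system clock. The only non-obvious arithmetic is the centering of the crop rectangle; a self-contained worked example (the numbers are illustrative, not taken from a real adapter run):

#include <cstdio>

// Center-crop arithmetic from the removed AdaptFrame(): the adapter returns a
// crop size, and the capturer centers that rectangle in the captured frame.
int main() {
  const int width = 1280, height = 720;           // captured frame (16:9)
  const int crop_width = 960, crop_height = 720;  // adapter's requested crop (4:3)
  const int crop_x = (width - crop_width) / 2;    // (1280 - 960) / 2 = 160
  const int crop_y = (height - crop_height) / 2;  // (720 - 720) / 2 = 0
  std::printf("crop %dx%d at (%d,%d)\n", crop_width, crop_height, crop_x, crop_y);
}

The else branch in the removed code is the pass-through case: when adaptation is disabled or the source is a screencast, the crop equals the full frame and both offsets are zero.
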
| @@ -274,11 +231,12 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
| // test. Probably not worth the effort to fix, instead, try to
| // delete or refactor all code using VideoFrameFactory and
| // SignalCapturedFrame.
| - if (!AdaptFrame(captured_frame->width, captured_frame->height,
| - captured_frame->time_stamp / rtc::kNumNanosecsPerMicrosec,
| - 0,
| - &out_width, &out_height,
| - &crop_width, &crop_height, &crop_x, &crop_y, nullptr)) {
| + if (!adapted_source_.AdaptFrame(
| + captured_frame->width, captured_frame->height,
| + captured_frame->time_stamp / rtc::kNumNanosecsPerMicrosec,
| + 0,
| + &out_width, &out_height,
| + &crop_width, &crop_height, &crop_x, &crop_y, nullptr)) {
| return;
| }
|
| @@ -306,7 +264,7 @@ void VideoCapturer::OnFrameCaptured(VideoCapturer*,
| void VideoCapturer::OnFrame(const VideoFrame& frame,
| int orig_width,
| int orig_height) {
| - broadcaster_.OnFrame(frame);
| + adapted_source_.OnFrame(frame);
| UpdateInputSize(orig_width, orig_height);
| }
|
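
Sink call sites are unaffected by this refactor: a sink still registers through AddOrUpdateSink() with a VideoSinkWants, but the wants are now handed to adapted_source_ rather than being forwarded to the capturer's own video_adapter_ in OnSinkWantsChanged(). A minimal registration sketch, assuming the rtc::Optional-based VideoSinkWants fields visible in the removed code and this checkout's header layout (the include paths marked as assumed are guesses):

#include "webrtc/base/optional.h"                  // path assumed
#include "webrtc/media/base/videocapturer.h"       // path taken from this diff
#include "webrtc/media/base/videosinkinterface.h"  // path assumed

// Toy sink that only counts the frames it is handed.
class CountingSink : public rtc::VideoSinkInterface<cricket::VideoFrame> {
 public:
  void OnFrame(const cricket::VideoFrame& frame) override { ++frames_; }
  int frames() const { return frames_; }

 private:
  int frames_ = 0;
};

// |capturer| is some concrete cricket::VideoCapturer subclass.
void AttachCountingSink(cricket::VideoCapturer* capturer, CountingSink* sink) {
  rtc::VideoSinkWants wants;
  // Ask for at most roughly 360p worth of pixels; the adapter behind
  // adapted_source_ is expected to crop/scale toward this bound.
  wants.max_pixel_count = rtc::Optional<int>(640 * 360);
  capturer->AddOrUpdateSink(sink, wants);
}
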