/*
 *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
| 10 | |
| 11 #include "avfoundationvideocapturer.h" | |
| 12 | |
| 13 #import <AVFoundation/AVFoundation.h> | |
| 14 | |
| 15 #import "RTCAVFoundationVideoCapturerInternal.h" | |
| 16 #import "RTCDispatcher+Private.h" | |
| 17 #import "WebRTC/RTCLogging.h" | |
| 18 | |
| 19 #include "avfoundationformatmapper.h" | |
| 20 | |
| 21 #include "webrtc/api/video/video_rotation.h" | |
| 22 #include "webrtc/base/bind.h" | |
| 23 #include "webrtc/base/checks.h" | |
| 24 #include "webrtc/base/logging.h" | |
| 25 #include "webrtc/base/thread.h" | |
| 26 #include "webrtc/sdk/objc/Framework/Classes/Video/corevideo_frame_buffer.h" | |
| 27 | |
| 28 namespace webrtc { | |
| 29 | |
// Tags for messages posted by the capturer (currently only frame delivery).
// NOTE(review): kMessageTypeFrame is not referenced anywhere in this file's
// visible code — confirm it is still used by cross-thread message handling
// elsewhere before relying on it.
enum AVFoundationVideoCapturerMessageType : uint32_t {
  kMessageTypeFrame,
};
| 33 | |
// Creates the Objective-C capture implementation and advertises, as the
// supported capture formats, the set of formats usable on this device:
// the intersection of the front and back cameras' formats when both exist,
// otherwise whichever single camera's format set is non-empty.
AVFoundationVideoCapturer::AVFoundationVideoCapturer() : _capturer(nil) {
  _capturer =
      [[RTCAVFoundationVideoCapturerInternal alloc] initWithCapturer:this];

  std::set<cricket::VideoFormat> front_formats =
      GetSupportedVideoFormatsForDevice([_capturer frontCaptureDevice]);
  std::set<cricket::VideoFormat> back_formats =
      GetSupportedVideoFormatsForDevice([_capturer backCaptureDevice]);

  std::vector<cricket::VideoFormat> supported_formats;
  if (front_formats.empty() || back_formats.empty()) {
    // Only one camera (at most) reported formats; advertise the non-empty
    // set. When both are empty this yields an empty list, as before.
    const std::set<cricket::VideoFormat>& available =
        back_formats.empty() ? front_formats : back_formats;
    supported_formats.assign(available.begin(), available.end());
  } else {
    std::set_intersection(front_formats.begin(), front_formats.end(),
                          back_formats.begin(), back_formats.end(),
                          std::back_inserter(supported_formats));
  }
  SetSupportedFormats(supported_formats);
}
| 58 | |
AVFoundationVideoCapturer::~AVFoundationVideoCapturer() {
  // Drop the ARC-managed reference so the Objective-C capturer can be
  // deallocated.
  _capturer = nil;
}
| 62 | |
// Configures the capture device/session for |format| and starts capturing.
// Returns CS_STARTING on success (the AVCaptureSession starts
// asynchronously) and CS_FAILED when the capturer is missing, already
// running, or the device rejects the requested format.
cricket::CaptureState AVFoundationVideoCapturer::Start(
    const cricket::VideoFormat& format) {
  if (!_capturer) {
    LOG(LS_ERROR) << "Failed to create AVFoundation capturer.";
    return cricket::CaptureState::CS_FAILED;
  }
  if (_capturer.isRunning) {
    LOG(LS_ERROR) << "The capturer is already running.";
    return cricket::CaptureState::CS_FAILED;
  }

  AVCaptureDevice* device = [_capturer getActiveCaptureDevice];
  AVCaptureSession* session = _capturer.captureSession;

  // Fail before touching any capture state if the device cannot be
  // configured for |format|.
  if (!SetFormatForCaptureDevice(device, session, format)) {
    return cricket::CaptureState::CS_FAILED;
  }

  // Record the negotiated format before starting so frames delivered during
  // startup see a consistent format.
  SetCaptureFormat(&format);
  // This isn't super accurate because it takes a while for the AVCaptureSession
  // to spin up, and this call returns async.
  // TODO(tkchin): make this better.
  [_capturer start];
  SetCaptureState(cricket::CaptureState::CS_RUNNING);

  // NOTE(review): state is set to CS_RUNNING above while CS_STARTING is
  // returned here; this matches the async-start comment above, but confirm
  // callers expect this pairing.
  return cricket::CaptureState::CS_STARTING;
}
| 90 | |
// Stops the capture session and clears the negotiated capture format.
void AVFoundationVideoCapturer::Stop() {
  [_capturer stop];
  SetCaptureFormat(NULL);
}
| 95 | |
// Whether the underlying capturer reports that it is currently running.
bool AVFoundationVideoCapturer::IsRunning() {
  return [_capturer isRunning];
}
| 99 | |
// Returns the capturer's AVCaptureSession.
AVCaptureSession* AVFoundationVideoCapturer::GetCaptureSession() {
  return [_capturer captureSession];
}
| 103 | |
// Forwards to the capturer's canUseBackCamera property.
bool AVFoundationVideoCapturer::CanUseBackCamera() const {
  return [_capturer canUseBackCamera];
}
| 107 | |
// Selects between the front and back capture device.
void AVFoundationVideoCapturer::SetUseBackCamera(bool useBackCamera) {
  [_capturer setUseBackCamera:useBackCamera];
}
| 111 | |
// Whether the back camera is currently selected.
bool AVFoundationVideoCapturer::GetUseBackCamera() const {
  return [_capturer useBackCamera];
}
| 115 | |
// Requests that the video adapter constrain output to at most
// |width| x |height| at |fps|.
void AVFoundationVideoCapturer::AdaptOutputFormat(int width, int height,
                                                  int fps) {
  // Express the request as a cricket::VideoFormat; the fourcc field is
  // unused for an output-format request, hence 0.
  const int64_t frame_interval = cricket::VideoFormat::FpsToInterval(fps);
  video_adapter()->OnOutputFormatRequest(
      cricket::VideoFormat(width, height, frame_interval, 0));
}
| 120 | |
// Receives one captured CMSampleBuffer from the Objective-C capturer and
// forwards it to the base class as a webrtc::VideoFrame. Invalid or
// not-yet-ready buffers are silently dropped, as are frames the video
// adapter decides to discard.
void AVFoundationVideoCapturer::CaptureSampleBuffer(
    CMSampleBufferRef sample_buffer, VideoRotation rotation) {
  // Only process well-formed buffers carrying exactly one sample.
  if (CMSampleBufferGetNumSamples(sample_buffer) != 1 ||
      !CMSampleBufferIsValid(sample_buffer) ||
      !CMSampleBufferDataIsReady(sample_buffer)) {
    return;
  }

  CVImageBufferRef image_buffer = CMSampleBufferGetImageBuffer(sample_buffer);
  if (image_buffer == NULL) {
    return;
  }

  // NOTE(review): CVPixelBufferGetWidth/Height return size_t; the implicit
  // narrowing to int is harmless for camera resolutions but worth confirming.
  int captured_width = CVPixelBufferGetWidth(image_buffer);
  int captured_height = CVPixelBufferGetHeight(image_buffer);

  // Out-parameters filled in by AdaptFrame(): the adapted output size, the
  // crop region within the captured frame, and the camera timestamp
  // translated onto the rtc::TimeMicros() clock.
  int adapted_width;
  int adapted_height;
  int crop_width;
  int crop_height;
  int crop_x;
  int crop_y;
  int64_t translated_camera_time_us;

  // AdaptFrame() returns false when this frame should be dropped (e.g. to
  // honor a lower requested resolution or frame rate).
  if (!AdaptFrame(captured_width, captured_height,
                  rtc::TimeNanos() / rtc::kNumNanosecsPerMicrosec,
                  rtc::TimeMicros(), &adapted_width, &adapted_height,
                  &crop_width, &crop_height, &crop_x, &crop_y,
                  &translated_camera_time_us)) {
    return;
  }

  // Wrap the pixel buffer, together with the adapter's crop/scale decision,
  // in a CoreVideoFrameBuffer.
  rtc::scoped_refptr<VideoFrameBuffer> buffer =
      new rtc::RefCountedObject<CoreVideoFrameBuffer>(
          image_buffer,
          adapted_width, adapted_height,
          crop_width, crop_height,
          crop_x, crop_y);

  // Applying rotation is only supported for legacy reasons and performance is
  // not critical here.
  if (apply_rotation() && rotation != kVideoRotation_0) {
    // Physically rotate the pixels (via I420); the frame is then upright, so
    // swap the reported dimensions for 90/270 and clear the rotation flag.
    buffer = I420Buffer::Rotate(*buffer->NativeToI420Buffer(),
                                rotation);
    if (rotation == kVideoRotation_90 || rotation == kVideoRotation_270) {
      std::swap(captured_width, captured_height);
    }

    rotation = kVideoRotation_0;
  }

  OnFrame(webrtc::VideoFrame(buffer, rotation, translated_camera_time_us),
          captured_width, captured_height);
}
| 175 | |
| 176 } // namespace webrtc | |