/*
 * libjingle
 * Copyright 2015 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright notice,
 *     this list of conditions and the following disclaimer in the documentation
 *     and/or other materials provided with the distribution.
 *  3. The name of the author may not be used to endorse or promote products
 *     derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
| 27 | |
| 28 #include "talk/app/webrtc/objc/avfoundationvideocapturer.h" | |
| 29 | |
| 30 #include "webrtc/base/bind.h" | |
| 31 #include "webrtc/base/checks.h" | |
| 32 #include "webrtc/base/thread.h" | |
| 33 | |
| 34 #import <AVFoundation/AVFoundation.h> | |
| 35 #import <Foundation/Foundation.h> | |
| 36 #import <UIKit/UIKit.h> | |
| 37 | |
| 38 #import "RTCDispatcher+Private.h" | |
| 39 #import "RTCLogging.h" | |
| 40 | |
// TODO(tkchin): support other formats.
static NSString *const kDefaultPreset = AVCaptureSessionPreset640x480;
static cricket::VideoFormat const kDefaultFormat =
    cricket::VideoFormat(640,
                         480,
                         cricket::VideoFormat::FpsToInterval(30),
                         cricket::FOURCC_NV12);
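// Note: cricket::VideoFormat expresses the frame rate as a per-frame interval
// in nanoseconds, so FpsToInterval(30) is 10^9 / 30 ns, i.e. roughly 33.3 ms
// per frame.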

// This class is used to capture frames using AVFoundation APIs on iOS. It is
// meant to be owned by an instance of AVFoundationVideoCapturer. The reason
// for this is that other webrtc objects own cricket::VideoCapturer, which is
// not reference counted. To prevent bad behavior we do not expose this class
// directly.
@interface RTCAVFoundationVideoCapturerInternal : NSObject
    <AVCaptureVideoDataOutputSampleBufferDelegate>

@property(nonatomic, readonly) AVCaptureSession *captureSession;
@property(nonatomic, readonly) BOOL isRunning;
@property(nonatomic, readonly) BOOL canUseBackCamera;
@property(nonatomic, assign) BOOL useBackCamera;  // Defaults to NO.

// We keep a pointer back to AVFoundationVideoCapturer to make callbacks on it
// when we receive frames. This is safe because this object should be owned by
// it.
- (instancetype)initWithCapturer:(webrtc::AVFoundationVideoCapturer *)capturer;

// Starts and stops the capture session asynchronously. We cannot do this
// synchronously without blocking a WebRTC thread.
- (void)start;
- (void)stop;

@end

@implementation RTCAVFoundationVideoCapturerInternal {
  // Keep pointers to inputs for convenience.
  AVCaptureDeviceInput *_frontCameraInput;
  AVCaptureDeviceInput *_backCameraInput;
  AVCaptureVideoDataOutput *_videoDataOutput;
  // The cricket::VideoCapturer that owns this class. Should never be NULL.
  webrtc::AVFoundationVideoCapturer *_capturer;
  BOOL _orientationHasChanged;
}

@synthesize captureSession = _captureSession;
@synthesize isRunning = _isRunning;
@synthesize useBackCamera = _useBackCamera;

// This is called from the thread that creates the video source, which is
// likely the main thread.
- (instancetype)initWithCapturer:(webrtc::AVFoundationVideoCapturer *)capturer {
  RTC_DCHECK(capturer);
  if (self = [super init]) {
    _capturer = capturer;
    // Create the capture session and all relevant inputs and outputs. We need
    // to do this in init because the application may want the capture session
    // before we start the capturer, e.g. for an AVCaptureVideoPreviewLayer.
    // All objects created here are retained until dealloc and never recreated.
    if (![self setupCaptureSession]) {
      return nil;
    }
    NSNotificationCenter *center = [NSNotificationCenter defaultCenter];
    [center addObserver:self
               selector:@selector(deviceOrientationDidChange:)
                   name:UIDeviceOrientationDidChangeNotification
                 object:nil];
    [center addObserverForName:AVCaptureSessionRuntimeErrorNotification
                        object:nil
                         queue:nil
                    usingBlock:^(NSNotification *notification) {
      RTCLogError(@"Capture session error: %@", notification.userInfo);
    }];
  }
  return self;
}

- (void)dealloc {
  RTC_DCHECK(!_isRunning);
  [[NSNotificationCenter defaultCenter] removeObserver:self];
  _capturer = nullptr;
}

- (AVCaptureSession *)captureSession {
  return _captureSession;
}
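
// Usage sketch (illustrative, not part of this class): because the session
// exists from init onwards, an application can attach a preview before
// capture starts. Here |internal| is assumed to be an instance of this class:
//   AVCaptureVideoPreviewLayer *previewLayer =
//       [AVCaptureVideoPreviewLayer layerWithSession:internal.captureSession];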

// Called from any thread (likely main thread).
- (BOOL)canUseBackCamera {
  return _backCameraInput != nil;
}

// Called from any thread (likely main thread).
- (BOOL)useBackCamera {
  @synchronized(self) {
    return _useBackCamera;
  }
}

// Called from any thread (likely main thread).
- (void)setUseBackCamera:(BOOL)useBackCamera {
  if (!self.canUseBackCamera) {
    if (useBackCamera) {
      RTCLogWarning(@"No rear-facing camera exists or it cannot be used; "
                     "not switching.");
    }
    return;
  }
  @synchronized(self) {
    if (_useBackCamera == useBackCamera) {
      return;
    }
    _useBackCamera = useBackCamera;
    [self updateSessionInputForUseBackCamera:useBackCamera];
  }
}
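
// Usage sketch (illustrative): callers would normally switch cameras through
// the owning C++ object rather than through this class. Given an
// AVFoundationVideoCapturer* named |capturer| (hypothetical):
//   if (capturer->CanUseBackCamera()) {
//     capturer->SetUseBackCamera(true);
//   }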

// Called from WebRTC thread.
- (void)start {
  if (_isRunning) {
    return;
  }
  _isRunning = YES;
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
    _orientationHasChanged = NO;
    [self updateOrientation];
    [[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
    AVCaptureSession *captureSession = self.captureSession;
    [captureSession startRunning];
  }];
}

// Called from the same thread as start.
- (void)stop {
  if (!_isRunning) {
    return;
  }
  _isRunning = NO;
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
    [_videoDataOutput setSampleBufferDelegate:nil queue:nullptr];
    [_captureSession stopRunning];
    [[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
  }];
}

#pragma mark AVCaptureVideoDataOutputSampleBufferDelegate

- (void)captureOutput:(AVCaptureOutput *)captureOutput
    didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
           fromConnection:(AVCaptureConnection *)connection {
  NSParameterAssert(captureOutput == _videoDataOutput);
  if (!_isRunning) {
    return;
  }
  _capturer->CaptureSampleBuffer(sampleBuffer);
}

- (void)captureOutput:(AVCaptureOutput *)captureOutput
    didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer
         fromConnection:(AVCaptureConnection *)connection {
  RTCLogError(@"Dropped sample buffer.");
}

#pragma mark - Private

- (BOOL)setupCaptureSession {
  AVCaptureSession *captureSession = [[AVCaptureSession alloc] init];
#if defined(__IPHONE_7_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_7_0
  NSString *version = [[UIDevice currentDevice] systemVersion];
  if ([version integerValue] >= 7) {
    captureSession.usesApplicationAudioSession = NO;
  }
#endif
  if (![captureSession canSetSessionPreset:kDefaultPreset]) {
    RTCLogError(@"Session preset unsupported.");
    return NO;
  }
  captureSession.sessionPreset = kDefaultPreset;

  // Add the output.
  AVCaptureVideoDataOutput *videoDataOutput = [self videoDataOutput];
  if (![captureSession canAddOutput:videoDataOutput]) {
    RTCLogError(@"Video data output unsupported.");
    return NO;
  }
  [captureSession addOutput:videoDataOutput];

  // Get the front and back cameras. If there isn't a front camera, give up.
  AVCaptureDeviceInput *frontCameraInput = [self frontCameraInput];
  AVCaptureDeviceInput *backCameraInput = [self backCameraInput];
  if (!frontCameraInput) {
    RTCLogError(@"No front camera for capture session.");
    return NO;
  }

  // Add the inputs.
  if (![captureSession canAddInput:frontCameraInput] ||
      (backCameraInput && ![captureSession canAddInput:backCameraInput])) {
    RTCLogError(@"Session does not support capture inputs.");
    return NO;
  }
  AVCaptureDeviceInput *input = self.useBackCamera ?
      backCameraInput : frontCameraInput;
  [captureSession addInput:input];
  _captureSession = captureSession;
  return YES;
}

- (AVCaptureVideoDataOutput *)videoDataOutput {
  if (!_videoDataOutput) {
    // Make the capturer output NV12. Ideally we want I420 but that's not
    // currently supported on iPhone / iPad.
    AVCaptureVideoDataOutput *videoDataOutput =
        [[AVCaptureVideoDataOutput alloc] init];
    videoDataOutput.videoSettings = @{
      (NSString *)kCVPixelBufferPixelFormatTypeKey :
        @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
    };
    videoDataOutput.alwaysDiscardsLateVideoFrames = NO;
    dispatch_queue_t queue =
        [RTCDispatcher dispatchQueueForType:RTCDispatcherTypeCaptureSession];
    [videoDataOutput setSampleBufferDelegate:self queue:queue];
    _videoDataOutput = videoDataOutput;
  }
  return _videoDataOutput;
}

- (AVCaptureDevice *)videoCaptureDeviceForPosition:
    (AVCaptureDevicePosition)position {
  for (AVCaptureDevice *captureDevice in
       [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
    if (captureDevice.position == position) {
      return captureDevice;
    }
  }
  return nil;
}

- (AVCaptureDeviceInput *)frontCameraInput {
  if (!_frontCameraInput) {
    AVCaptureDevice *frontCameraDevice =
        [self videoCaptureDeviceForPosition:AVCaptureDevicePositionFront];
    if (!frontCameraDevice) {
      RTCLogWarning(@"Failed to find front capture device.");
      return nil;
    }
    NSError *error = nil;
    AVCaptureDeviceInput *frontCameraInput =
        [AVCaptureDeviceInput deviceInputWithDevice:frontCameraDevice
                                              error:&error];
    if (!frontCameraInput) {
      RTCLogError(@"Failed to create front camera input: %@",
                  error.localizedDescription);
      return nil;
    }
    _frontCameraInput = frontCameraInput;
  }
  return _frontCameraInput;
}

- (AVCaptureDeviceInput *)backCameraInput {
  if (!_backCameraInput) {
    AVCaptureDevice *backCameraDevice =
        [self videoCaptureDeviceForPosition:AVCaptureDevicePositionBack];
    if (!backCameraDevice) {
      RTCLogWarning(@"Failed to find back capture device.");
      return nil;
    }
    NSError *error = nil;
    AVCaptureDeviceInput *backCameraInput =
        [AVCaptureDeviceInput deviceInputWithDevice:backCameraDevice
                                              error:&error];
    if (!backCameraInput) {
      RTCLogError(@"Failed to create back camera input: %@",
                  error.localizedDescription);
      return nil;
    }
    _backCameraInput = backCameraInput;
  }
  return _backCameraInput;
}

- (void)deviceOrientationDidChange:(NSNotification *)notification {
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
    _orientationHasChanged = YES;
    [self updateOrientation];
  }];
}

// Called from capture session queue.
- (void)updateOrientation {
  AVCaptureConnection *connection =
      [_videoDataOutput connectionWithMediaType:AVMediaTypeVideo];
  if (!connection.supportsVideoOrientation) {
    // TODO(tkchin): set rotation bit on frames.
    return;
  }
  AVCaptureVideoOrientation orientation = AVCaptureVideoOrientationPortrait;
  switch ([UIDevice currentDevice].orientation) {
    case UIDeviceOrientationPortrait:
      orientation = AVCaptureVideoOrientationPortrait;
      break;
    case UIDeviceOrientationPortraitUpsideDown:
      orientation = AVCaptureVideoOrientationPortraitUpsideDown;
      break;
    // Note that the landscape cases are intentionally mirrored:
    // UIDeviceOrientationLandscapeLeft and
    // AVCaptureVideoOrientationLandscapeRight both describe the home button
    // on the right, and vice versa.
    case UIDeviceOrientationLandscapeLeft:
      orientation = AVCaptureVideoOrientationLandscapeRight;
      break;
    case UIDeviceOrientationLandscapeRight:
      orientation = AVCaptureVideoOrientationLandscapeLeft;
      break;
    case UIDeviceOrientationFaceUp:
    case UIDeviceOrientationFaceDown:
    case UIDeviceOrientationUnknown:
      if (!_orientationHasChanged) {
        connection.videoOrientation = orientation;
      }
      return;
  }
  connection.videoOrientation = orientation;
}

// Update the current session input to match what's stored in _useBackCamera.
- (void)updateSessionInputForUseBackCamera:(BOOL)useBackCamera {
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
    [_captureSession beginConfiguration];
    AVCaptureDeviceInput *oldInput = _backCameraInput;
    AVCaptureDeviceInput *newInput = _frontCameraInput;
    if (useBackCamera) {
      oldInput = _frontCameraInput;
      newInput = _backCameraInput;
    }
    if (oldInput) {
      // OK to remove this even if it's not attached; removal is a no-op then.
      [_captureSession removeInput:oldInput];
    }
    if (newInput) {
      [_captureSession addInput:newInput];
    }
    [self updateOrientation];
    [_captureSession commitConfiguration];
  }];
}

@end

namespace webrtc {

enum AVFoundationVideoCapturerMessageType : uint32_t {
  kMessageTypeFrame,
};

struct AVFoundationFrame {
  AVFoundationFrame(CVImageBufferRef buffer, int64_t time)
      : image_buffer(buffer), capture_time(time) {}
  CVImageBufferRef image_buffer;
  int64_t capture_time;
};

AVFoundationVideoCapturer::AVFoundationVideoCapturer()
    : _capturer(nil), _startThread(nullptr) {
  // Set our supported formats. This matches kDefaultPreset.
  std::vector<cricket::VideoFormat> supportedFormats;
  supportedFormats.push_back(cricket::VideoFormat(kDefaultFormat));
  SetSupportedFormats(supportedFormats);
  _capturer =
      [[RTCAVFoundationVideoCapturerInternal alloc] initWithCapturer:this];
}

AVFoundationVideoCapturer::~AVFoundationVideoCapturer() {
  _capturer = nil;
}

cricket::CaptureState AVFoundationVideoCapturer::Start(
    const cricket::VideoFormat& format) {
  if (!_capturer) {
    LOG(LS_ERROR) << "Failed to create AVFoundation capturer.";
    return cricket::CaptureState::CS_FAILED;
  }
  if (_capturer.isRunning) {
    LOG(LS_ERROR) << "The capturer is already running.";
    return cricket::CaptureState::CS_FAILED;
  }
  if (format != kDefaultFormat) {
    LOG(LS_ERROR) << "Unsupported format provided.";
    return cricket::CaptureState::CS_FAILED;
  }

  // Keep track of which thread capture started on. This is the thread that
  // frames need to be sent to.
  RTC_DCHECK(!_startThread);
  _startThread = rtc::Thread::Current();

  SetCaptureFormat(&format);
  // This isn't super accurate because it takes a while for the
  // AVCaptureSession to spin up, and this call returns asynchronously.
  // TODO(tkchin): make this better.
  [_capturer start];
  SetCaptureState(cricket::CaptureState::CS_RUNNING);

  return cricket::CaptureState::CS_STARTING;
}
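
// Usage sketch (illustrative, not part of this file): a caller must pass the
// one supported format, which has to compare equal to kDefaultFormat above.
// |capturer| is assumed to be an AVFoundationVideoCapturer*:
//   cricket::VideoFormat format(640, 480,
//                               cricket::VideoFormat::FpsToInterval(30),
//                               cricket::FOURCC_NV12);
//   capturer->Start(format);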

void AVFoundationVideoCapturer::Stop() {
  [_capturer stop];
  SetCaptureFormat(NULL);
  _startThread = nullptr;
}

bool AVFoundationVideoCapturer::IsRunning() {
  return _capturer.isRunning;
}

AVCaptureSession* AVFoundationVideoCapturer::GetCaptureSession() {
  return _capturer.captureSession;
}

bool AVFoundationVideoCapturer::CanUseBackCamera() const {
  return _capturer.canUseBackCamera;
}

void AVFoundationVideoCapturer::SetUseBackCamera(bool useBackCamera) {
  _capturer.useBackCamera = useBackCamera;
}

bool AVFoundationVideoCapturer::GetUseBackCamera() const {
  return _capturer.useBackCamera;
}

void AVFoundationVideoCapturer::CaptureSampleBuffer(
    CMSampleBufferRef sampleBuffer) {
  if (CMSampleBufferGetNumSamples(sampleBuffer) != 1 ||
      !CMSampleBufferIsValid(sampleBuffer) ||
      !CMSampleBufferDataIsReady(sampleBuffer)) {
    return;
  }

  CVImageBufferRef image_buffer = CMSampleBufferGetImageBuffer(sampleBuffer);
  if (image_buffer == NULL) {
    return;
  }

  // Retain the buffer and post it to the webrtc thread. It will be released
  // after it has successfully been signaled.
  CVBufferRetain(image_buffer);
  AVFoundationFrame frame(image_buffer, rtc::TimeNanos());
  _startThread->Post(RTC_FROM_HERE, this, kMessageTypeFrame,
                     new rtc::TypedMessageData<AVFoundationFrame>(frame));
}

void AVFoundationVideoCapturer::OnMessage(rtc::Message *msg) {
  switch (msg->message_id) {
    case kMessageTypeFrame: {
      rtc::TypedMessageData<AVFoundationFrame>* data =
          static_cast<rtc::TypedMessageData<AVFoundationFrame>*>(msg->pdata);
      const AVFoundationFrame& frame = data->data();
      OnFrameMessage(frame.image_buffer, frame.capture_time);
      delete data;
      break;
    }
  }
}

void AVFoundationVideoCapturer::OnFrameMessage(CVImageBufferRef image_buffer,
                                               int64_t capture_time) {
  RTC_DCHECK(_startThread->IsCurrent());

  // The base address must be locked before frame data can be accessed, and
  // unlocked again when we are done with it.
  CVOptionFlags lock_flags = kCVPixelBufferLock_ReadOnly;
  CVReturn ret = CVPixelBufferLockBaseAddress(image_buffer, lock_flags);
  if (ret != kCVReturnSuccess) {
    return;
  }

  static size_t const kYPlaneIndex = 0;
  static size_t const kUVPlaneIndex = 1;
  uint8_t* y_plane_address =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(image_buffer,
                                                               kYPlaneIndex));
  size_t y_plane_height =
      CVPixelBufferGetHeightOfPlane(image_buffer, kYPlaneIndex);
  size_t y_plane_width =
      CVPixelBufferGetWidthOfPlane(image_buffer, kYPlaneIndex);
  size_t y_plane_bytes_per_row =
      CVPixelBufferGetBytesPerRowOfPlane(image_buffer, kYPlaneIndex);
  size_t uv_plane_height =
      CVPixelBufferGetHeightOfPlane(image_buffer, kUVPlaneIndex);
  size_t uv_plane_bytes_per_row =
      CVPixelBufferGetBytesPerRowOfPlane(image_buffer, kUVPlaneIndex);
  size_t frame_size = y_plane_bytes_per_row * y_plane_height +
      uv_plane_bytes_per_row * uv_plane_height;
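  // For the default 640x480 NV12 capture this is nominally
  // 640 * 480 + 640 * 240 = 460800 bytes: the interleaved UV plane is half
  // the height of the Y plane with the same bytes-per-row. Buffers with
  // padded rows come out larger, which the terms above account for.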

  // Sanity check assumption that planar bytes are contiguous.
  uint8_t* uv_plane_address =
      static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(image_buffer,
                                                               kUVPlaneIndex));
  RTC_DCHECK(uv_plane_address ==
             y_plane_address + y_plane_height * y_plane_bytes_per_row);

  // Stuff data into a cricket::CapturedFrame.
  cricket::CapturedFrame frame;
  frame.width = y_plane_width;
  frame.height = y_plane_height;
  frame.pixel_width = 1;
  frame.pixel_height = 1;
  frame.fourcc = static_cast<uint32_t>(cricket::FOURCC_NV12);
  frame.time_stamp = capture_time;
  frame.data = y_plane_address;
  frame.data_size = frame_size;

  // This will call a superclass method that will perform the frame conversion
  // to I420.
  SignalFrameCaptured(this, &frame);

  CVPixelBufferUnlockBaseAddress(image_buffer, lock_flags);
  CVBufferRelease(image_buffer);
}

}  // namespace webrtc