Chromium Code Reviews

Unified Diff: webrtc/sdk/objc/Framework/Classes/RTCAVFoundationVideoCapturerInternal.mm

Issue 2488973002: Split avfoundationcapturer classes in separate files. (Closed)
Patch Set: Format (created 4 years, 1 month ago)
Index: webrtc/sdk/objc/Framework/Classes/RTCAVFoundationVideoCapturerInternal.mm
diff --git a/webrtc/sdk/objc/Framework/Classes/avfoundationvideocapturer.mm b/webrtc/sdk/objc/Framework/Classes/RTCAVFoundationVideoCapturerInternal.mm
similarity index 43%
copy from webrtc/sdk/objc/Framework/Classes/avfoundationvideocapturer.mm
copy to webrtc/sdk/objc/Framework/Classes/RTCAVFoundationVideoCapturerInternal.mm
index 49b56817277fdc86a015fe8cc888220c4657c2c8..2ae88a3e0be0e6000e3987ed7a25ebffe165dc24 100644
--- a/webrtc/sdk/objc/Framework/Classes/avfoundationvideocapturer.mm
+++ b/webrtc/sdk/objc/Framework/Classes/RTCAVFoundationVideoCapturerInternal.mm
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 The WebRTC project authors. All Rights Reserved.
+ * Copyright 2016 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
@@ -8,186 +8,18 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#include "avfoundationvideocapturer.h"
+#import "RTCAVFoundationVideoCapturerInternal.h"
-#import <AVFoundation/AVFoundation.h>
#import <Foundation/Foundation.h>
#if TARGET_OS_IPHONE
#import <UIKit/UIKit.h>
#endif
#import "RTCDispatcher+Private.h"
+#import "RTCAVFoundationFormatMapper.h"
#import "WebRTC/RTCLogging.h"
-#if TARGET_OS_IPHONE
-#import "WebRTC/UIDevice+RTCDevice.h"
-#endif
-
-#include "libyuv/rotate.h"
-
-#include "webrtc/base/bind.h"
-#include "webrtc/base/checks.h"
-#include "webrtc/base/logging.h"
-#include "webrtc/base/thread.h"
-#include "webrtc/common_video/include/corevideo_frame_buffer.h"
-#include "webrtc/common_video/rotation.h"
-
-// TODO(denicija): add support for higher frame rates.
-// See http://crbug/webrtc/6355 for more info.
-static const int kFramesPerSecond = 30;
-
-static inline BOOL IsMediaSubTypeSupported(FourCharCode mediaSubType) {
- return (mediaSubType == kCVPixelFormatType_420YpCbCr8PlanarFullRange ||
- mediaSubType == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange);
-}
-
-static inline BOOL IsFrameRateWithinRange(int fps, AVFrameRateRange *range) {
- return range.minFrameRate <= fps && range.maxFrameRate >= fps;
-}
-
-// Returns a filtered array of device formats based on the predefined
-// constraints our stack imposes.
-static NSArray<AVCaptureDeviceFormat *> *GetEligibleDeviceFormats(
- const AVCaptureDevice *device,
- int supportedFps) {
- NSMutableArray<AVCaptureDeviceFormat *> *eligibleDeviceFormats =
- [NSMutableArray array];
-
- for (AVCaptureDeviceFormat *format in device.formats) {
- // Filter out subTypes that we currently don't support in the stack
- FourCharCode mediaSubType =
- CMFormatDescriptionGetMediaSubType(format.formatDescription);
- if (!IsMediaSubTypeSupported(mediaSubType)) {
- continue;
- }
-
- // Filter out frame rate ranges that we currently don't support in the stack
- for (AVFrameRateRange *frameRateRange in format.videoSupportedFrameRateRanges) {
- if (IsFrameRateWithinRange(supportedFps, frameRateRange)) {
- [eligibleDeviceFormats addObject:format];
- break;
- }
- }
- }
-
- return [eligibleDeviceFormats copy];
-}
-
-// Mapping from cricket::VideoFormat to AVCaptureDeviceFormat.
-static AVCaptureDeviceFormat *GetDeviceFormatForVideoFormat(
- const AVCaptureDevice *device,
- const cricket::VideoFormat &videoFormat) {
- AVCaptureDeviceFormat *desiredDeviceFormat = nil;
- NSArray<AVCaptureDeviceFormat *> *eligibleFormats =
- GetEligibleDeviceFormats(device, videoFormat.framerate());
-
- for (AVCaptureDeviceFormat *deviceFormat in eligibleFormats) {
- CMVideoDimensions dimension =
- CMVideoFormatDescriptionGetDimensions(deviceFormat.formatDescription);
- FourCharCode mediaSubType =
- CMFormatDescriptionGetMediaSubType(deviceFormat.formatDescription);
-
- if (videoFormat.width == dimension.width &&
- videoFormat.height == dimension.height) {
- if (mediaSubType == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
- // This is the preferred format so there is no need to wait for a better option.
- return deviceFormat;
- } else {
- // This is a good candidate, but let's wait for something better.
- desiredDeviceFormat = deviceFormat;
- }
- }
- }
-
- return desiredDeviceFormat;
-}
-// Mapping from AVCaptureDeviceFormat to cricket::VideoFormat for given input
-// device.
-static std::set<cricket::VideoFormat> GetSupportedVideoFormatsForDevice(
- AVCaptureDevice *device) {
- std::set<cricket::VideoFormat> supportedFormats;
-
- NSArray<AVCaptureDeviceFormat *> *eligibleFormats =
- GetEligibleDeviceFormats(device, kFramesPerSecond);
-
- for (AVCaptureDeviceFormat *deviceFormat in eligibleFormats) {
- CMVideoDimensions dimension =
- CMVideoFormatDescriptionGetDimensions(deviceFormat.formatDescription);
- cricket::VideoFormat format = cricket::VideoFormat(
- dimension.width, dimension.height,
- cricket::VideoFormat::FpsToInterval(kFramesPerSecond),
- cricket::FOURCC_NV12);
- supportedFormats.insert(format);
- }
-
- return supportedFormats;
-}
-
-// Sets device format for the provided capture device. Returns YES/NO depending on success.
-// TODO(denicija): When this file is split this static method should be reconsidered.
-// Perhaps adding a category on AVCaptureDevice would be better.
-static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
- AVCaptureSession *session,
- const cricket::VideoFormat &format) {
- AVCaptureDeviceFormat *deviceFormat =
- GetDeviceFormatForVideoFormat(device, format);
- const int fps = cricket::VideoFormat::IntervalToFps(format.interval);
-
- NSError *error = nil;
- BOOL success = YES;
- [session beginConfiguration];
- if ([device lockForConfiguration:&error]) {
- @try {
- device.activeFormat = deviceFormat;
- device.activeVideoMinFrameDuration = CMTimeMake(1, fps);
- } @catch (NSException *exception) {
- RTCLogError(
- @"Failed to set active format!\n User info:%@",
- exception.userInfo);
- success = NO;
- }
-
- [device unlockForConfiguration];
- } else {
- RTCLogError(
- @"Failed to lock device %@. Error: %@",
- device, error.userInfo);
- success = NO;
- }
- [session commitConfiguration];
-
- return success;
-}
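
The TODO above suggests that, once this file is split, this logic could move into a category on AVCaptureDevice. A minimal sketch of what that hypothetical category might look like; the category name, the selector, and the omission of the @try/@catch guard are all assumptions, not part of this CL:

#import <AVFoundation/AVFoundation.h>

@interface AVCaptureDevice (RTCFormatSetting)
// Locks the device, applies the format and frame rate, then unlocks it.
// Returns YES on success.
- (BOOL)rtc_setActiveFormat:(AVCaptureDeviceFormat *)format fps:(int)fps;
@end

@implementation AVCaptureDevice (RTCFormatSetting)
- (BOOL)rtc_setActiveFormat:(AVCaptureDeviceFormat *)format fps:(int)fps {
  NSError *error = nil;
  if (![self lockForConfiguration:&error]) {
    return NO;  // Another client holds the device configuration lock.
  }
  self.activeFormat = format;
  self.activeVideoMinFrameDuration = CMTimeMake(1, fps);
  [self unlockForConfiguration];
  return YES;
}
@end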
-
-// This class is used to capture frames using AVFoundation APIs on iOS. It is
-// meant to be owned by an instance of AVFoundationVideoCapturer. The reason
-// is that other webrtc objects own cricket::VideoCapturer, which is not
-// ref counted. To prevent bad behavior we do not expose this class directly.
-@interface RTCAVFoundationVideoCapturerInternal : NSObject
- <AVCaptureVideoDataOutputSampleBufferDelegate>
-
-@property(nonatomic, readonly) AVCaptureSession *captureSession;
-@property(nonatomic, readonly) dispatch_queue_t frameQueue;
-@property(nonatomic, readonly) BOOL canUseBackCamera;
-@property(nonatomic, assign) BOOL useBackCamera; // Defaults to NO.
-@property(atomic, assign) BOOL isRunning; // Whether the capture session is running.
-@property(atomic, assign) BOOL hasStarted; // Whether we have an unmatched start.
-
-// We keep a pointer back to AVFoundationVideoCapturer to make callbacks on it
-// when we receive frames. This is safe because this object should be owned by
-// it.
-- (instancetype)initWithCapturer:(webrtc::AVFoundationVideoCapturer *)capturer;
-- (AVCaptureDevice *)getActiveCaptureDevice;
-
-- (nullable AVCaptureDevice *)frontCaptureDevice;
-- (nullable AVCaptureDevice *)backCaptureDevice;
-
-// Starts and stops the capture session asynchronously. We cannot do this
-// synchronously without blocking a WebRTC thread.
-- (void)start;
-- (void)stop;
-
-@end
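
The interface comments above describe a deliberate ownership arrangement: the C++ AVFoundationVideoCapturer holds the Objective-C object strongly (via ARC), while the Objective-C object keeps only a raw, non-retaining pointer back to it for frame callbacks. A simplified Objective-C++ sketch of that pattern, with hypothetical names rather than WebRTC's:

// Compile as Objective-C++ (.mm) with ARC.
#import <Foundation/Foundation.h>

class CppCapturer;  // Not ref-counted; owned by other webrtc objects.

@interface CaptureHelper : NSObject
- (instancetype)initWithCapturer:(CppCapturer *)capturer;
@end

@implementation CaptureHelper {
  CppCapturer *_capturer;  // Raw back-pointer; safe only because the
                           // CppCapturer instance owns this object.
}
- (instancetype)initWithCapturer:(CppCapturer *)capturer {
  if (self = [super init]) {
    _capturer = capturer;
  }
  return self;
}
@end

class CppCapturer {
 public:
  CppCapturer() { helper_ = [[CaptureHelper alloc] initWithCapturer:this]; }
  ~CppCapturer() { helper_ = nil; }  // Releases the last strong reference.

 private:
  CaptureHelper *helper_;  // Strong reference under ARC in Objective-C++.
};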
+NS_ASSUME_NONNULL_BEGIN
tkchin_webrtc 2016/11/21 18:51:47 Shouldn't need this in implementation files.
daniela-webrtc 2016/11/22 11:43:44 Done.
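
For context on the reviewer's point: the NS_ASSUME_NONNULL_BEGIN/END pair conventionally lives in headers, where it makes every unannotated Objective-C pointer in the enclosed declarations implicitly _Nonnull, so only the exceptions need marking. A hypothetical header illustrating the convention (the class and selectors are examples, not from this CL):

#import <AVFoundation/AVFoundation.h>

NS_ASSUME_NONNULL_BEGIN

@interface RTCExampleCapturer : NSObject
// Parameter and return types here are implicitly _Nonnull.
- (instancetype)initWithCaptureSession:(AVCaptureSession *)session;
// Only the exception is annotated: a device may lack a front camera.
- (nullable AVCaptureDevice *)frontCaptureDevice;
@end

NS_ASSUME_NONNULL_END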
@implementation RTCAVFoundationVideoCapturerInternal {
// Keep pointers to inputs for convenience.
@@ -272,22 +104,20 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
return self.useBackCamera ? _backCameraInput.device : _frontCameraInput.device;
}
-- (AVCaptureDevice *)frontCaptureDevice {
+- (nullable AVCaptureDevice *)frontCaptureDevice {
return _frontCameraInput.device;
}
-- (AVCaptureDevice *)backCaptureDevice {
+- (nullable AVCaptureDevice *)backCaptureDevice {
return _backCameraInput.device;
}
- (dispatch_queue_t)frameQueue {
if (!_frameQueue) {
_frameQueue =
- dispatch_queue_create("org.webrtc.avfoundationvideocapturer.video",
- DISPATCH_QUEUE_SERIAL);
- dispatch_set_target_queue(
- _frameQueue,
- dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0));
+ dispatch_queue_create("org.webrtc.avfoundationvideocapturer.video", DISPATCH_QUEUE_SERIAL);
+ dispatch_set_target_queue(_frameQueue,
+ dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0));
}
return _frameQueue;
}
@@ -309,7 +139,7 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
if (!self.canUseBackCamera) {
if (useBackCamera) {
RTCLogWarning(@"No rear-facing camera exists or it cannot be used;"
- "not switching.");
+ "not switching.");
}
return;
}
@@ -328,23 +158,24 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
return;
}
self.hasStarted = YES;
- [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
- block:^{
+ [RTCDispatcher
tkchin_webrtc 2016/11/21 18:51:47 restore indentation to previous here and elsewhere
daniela-webrtc 2016/11/22 11:43:44 Is this remark aimed at decreasing the diff size?
tkchin_webrtc 2016/11/22 21:26:07 The previous indentation conforms to indentation u
daniela-webrtc 2016/11/23 15:04:51 I see your point. Yes hitting the 100 char line ha
+ dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
#if TARGET_OS_IPHONE
- // Default to portrait orientation on iPhone. This will be reset in
- // updateOrientation unless orientation is unknown/faceup/facedown.
- _rotation = webrtc::kVideoRotation_90;
+ // Default to portrait orientation on iPhone. This will be reset in
+ // updateOrientation unless orientation is unknown/faceup/facedown.
+ _rotation = webrtc::kVideoRotation_90;
#else
- // No rotation on Mac.
- _rotation = webrtc::kVideoRotation_0;
+ // No rotation on Mac.
+ _rotation = webrtc::kVideoRotation_0;
#endif
- [self updateOrientation];
+ [self updateOrientation];
#if TARGET_OS_IPHONE
- [[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
+ [[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
#endif
- AVCaptureSession *captureSession = self.captureSession;
- [captureSession startRunning];
- }];
+ AVCaptureSession *captureSession = self.captureSession;
+ [captureSession startRunning];
+ }];
}
// Called from same thread as start.
@@ -356,14 +187,15 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
// Due to this async block, it's possible that the ObjC object outlives the
// C++ one. In order to not invoke functions on the C++ object, we set
// hasStarted immediately instead of dispatching it async.
- [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
- block:^{
- [_videoDataOutput setSampleBufferDelegate:nil queue:nullptr];
- [_captureSession stopRunning];
+ [RTCDispatcher
+ dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
+ [_videoDataOutput setSampleBufferDelegate:nil queue:nullptr];
+ [_captureSession stopRunning];
#if TARGET_OS_IPHONE
- [[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
+ [[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
#endif
- }];
+ }];
}
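
The comment above pins down why hasStarted is flipped synchronously: the dispatched block can run after the owning C++ object has been destroyed, so any state the C++ side reads must be updated before dispatching, and the block itself must touch only Objective-C state. A condensed sketch of the pattern, simplified from the code above:

- (void)stop {
  self.hasStarted = NO;  // Set synchronously: the C++ owner may be deleted
                         // before the block below ever runs.
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
                                 // Objective-C state only; never dereference
                                 // the raw C++ back-pointer in here.
                                 [_videoDataOutput setSampleBufferDelegate:nil
                                                                     queue:nullptr];
                                 [_captureSession stopRunning];
                               }];
}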
#pragma mark iOS notifications
@@ -372,8 +204,8 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
- (void)deviceOrientationDidChange:(NSNotification *)notification {
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
- [self updateOrientation];
- }];
+ [self updateOrientation];
+ }];
}
#endif
@@ -399,10 +231,9 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
- (void)handleCaptureSessionInterruption:(NSNotification *)notification {
NSString *reasonString = nil;
-#if defined(__IPHONE_9_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) \
- && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
- NSNumber *reason =
- notification.userInfo[AVCaptureSessionInterruptionReasonKey];
+#if defined(__IPHONE_9_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) && \
+ __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
+ NSNumber *reason = notification.userInfo[AVCaptureSessionInterruptionReasonKey];
if (reason) {
switch (reason.intValue) {
case AVCaptureSessionInterruptionReasonVideoDeviceNotAvailableInBackground:
@@ -430,22 +261,21 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
}
- (void)handleCaptureSessionRuntimeError:(NSNotification *)notification {
- NSError *error =
- [notification.userInfo objectForKey:AVCaptureSessionErrorKey];
+ NSError *error = [notification.userInfo objectForKey:AVCaptureSessionErrorKey];
RTCLogError(@"Capture session runtime error: %@", error);
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
#if TARGET_OS_IPHONE
- if (error.code == AVErrorMediaServicesWereReset) {
- [self handleNonFatalError];
- } else {
- [self handleFatalError];
- }
+ if (error.code == AVErrorMediaServicesWereReset) {
+ [self handleNonFatalError];
+ } else {
+ [self handleFatalError];
+ }
#else
- [self handleFatalError];
+ [self handleFatalError];
#endif
- }];
+ }];
}
- (void)handleCaptureSessionDidStartRunning:(NSNotification *)notification {
@@ -454,10 +284,10 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
self.isRunning = YES;
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
- // If we successfully restarted after an unknown error, allow future
- // retries on fatal errors.
- _hasRetriedOnFatalError = NO;
- }];
+ // If we successfully restarted after an unknown error,
+ // allow future retries on fatal errors.
+ _hasRetriedOnFatalError = NO;
+ }];
}
- (void)handleCaptureSessionDidStopRunning:(NSNotification *)notification {
@@ -466,26 +296,27 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
}
- (void)handleFatalError {
- [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
- block:^{
- if (!_hasRetriedOnFatalError) {
- RTCLogWarning(@"Attempting to recover from fatal capture error.");
- [self handleNonFatalError];
- _hasRetriedOnFatalError = YES;
- } else {
- RTCLogError(@"Previous fatal error recovery failed.");
- }
- }];
+ [RTCDispatcher
+ dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
+ block:^{
+ if (!_hasRetriedOnFatalError) {
+ RTCLogWarning(@"Attempting to recover from fatal capture error.");
+ [self handleNonFatalError];
+ _hasRetriedOnFatalError = YES;
+ } else {
+ RTCLogError(@"Previous fatal error recovery failed.");
+ }
+ }];
}
- (void)handleNonFatalError {
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
- if (self.hasStarted) {
- RTCLog(@"Restarting capture session after error.");
- [self.captureSession startRunning];
- }
- }];
+ if (self.hasStarted) {
+ RTCLog(@"Restarting capture session after error.");
+ [self.captureSession startRunning];
+ }
+ }];
}
#if TARGET_OS_IPHONE
@@ -495,11 +326,11 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
- (void)handleApplicationDidBecomeActive:(NSNotification *)notification {
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
- if (self.hasStarted && !self.captureSession.isRunning) {
- RTCLog(@"Restarting capture session on active.");
- [self.captureSession startRunning];
- }
- }];
+ if (self.hasStarted && !self.captureSession.isRunning) {
+ RTCLog(@"Restarting capture session on active.");
+ [self.captureSession startRunning];
+ }
+ }];
}
#endif // TARGET_OS_IPHONE
@@ -534,8 +365,7 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
RTCLogError(@"Session does not support capture inputs.");
return NO;
}
- AVCaptureDeviceInput *input = self.useBackCamera ?
- backCameraInput : frontCameraInput;
+ AVCaptureDeviceInput *input = self.useBackCamera ? backCameraInput : frontCameraInput;
[captureSession addInput:input];
_captureSession = captureSession;
@@ -546,11 +376,11 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
if (!_videoDataOutput) {
// Make the capturer output NV12. Ideally we want I420 but that's not
// currently supported on iPhone / iPad.
- AVCaptureVideoDataOutput *videoDataOutput =
- [[AVCaptureVideoDataOutput alloc] init];
+ AVCaptureVideoDataOutput *videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
videoDataOutput.videoSettings = @{
- (NSString *)kCVPixelBufferPixelFormatTypeKey :
- @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
+ (NSString *)
+ // TODO(denicija): Remove this color conversion and use the original capture format directly.
+ kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
};
videoDataOutput.alwaysDiscardsLateVideoFrames = NO;
[videoDataOutput setSampleBufferDelegate:self queue:self.frameQueue];
@@ -559,10 +389,8 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
return _videoDataOutput;
}
-- (AVCaptureDevice *)videoCaptureDeviceForPosition:
- (AVCaptureDevicePosition)position {
- for (AVCaptureDevice *captureDevice in
- [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
+- (AVCaptureDevice *)videoCaptureDeviceForPosition:(AVCaptureDevicePosition)position {
+ for (AVCaptureDevice *captureDevice in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
if (captureDevice.position == position) {
return captureDevice;
}
@@ -585,11 +413,9 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
}
NSError *error = nil;
AVCaptureDeviceInput *frontCameraInput =
- [AVCaptureDeviceInput deviceInputWithDevice:frontCameraDevice
- error:&error];
+ [AVCaptureDeviceInput deviceInputWithDevice:frontCameraDevice error:&error];
if (!frontCameraInput) {
- RTCLogError(@"Failed to create front camera input: %@",
- error.localizedDescription);
+ RTCLogError(@"Failed to create front camera input: %@", error.localizedDescription);
return nil;
}
_frontCameraInput = frontCameraInput;
@@ -607,11 +433,9 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
}
NSError *error = nil;
AVCaptureDeviceInput *backCameraInput =
- [AVCaptureDeviceInput deviceInputWithDevice:backCameraDevice
- error:&error];
+ [AVCaptureDeviceInput deviceInputWithDevice:backCameraDevice error:&error];
if (!backCameraInput) {
- RTCLogError(@"Failed to create front camera input: %@",
- error.localizedDescription);
+ RTCLogError(@"Failed to create front camera input: %@", error.localizedDescription);
return nil;
}
_backCameraInput = backCameraInput;
@@ -630,12 +454,12 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
_rotation = webrtc::kVideoRotation_270;
break;
case UIDeviceOrientationLandscapeLeft:
- _rotation = _capturer->GetUseBackCamera() ? webrtc::kVideoRotation_0
- : webrtc::kVideoRotation_180;
+ _rotation =
+ _capturer->GetUseBackCamera() ? webrtc::kVideoRotation_0 : webrtc::kVideoRotation_180;
break;
case UIDeviceOrientationLandscapeRight:
- _rotation = _capturer->GetUseBackCamera() ? webrtc::kVideoRotation_180
- : webrtc::kVideoRotation_0;
+ _rotation =
+ _capturer->GetUseBackCamera() ? webrtc::kVideoRotation_180 : webrtc::kVideoRotation_0;
break;
case UIDeviceOrientationFaceUp:
case UIDeviceOrientationFaceDown:
@@ -650,181 +474,31 @@ static BOOL SetFormatForCaptureDevice(AVCaptureDevice *device,
- (void)updateSessionInputForUseBackCamera:(BOOL)useBackCamera {
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
- [_captureSession beginConfiguration];
- AVCaptureDeviceInput *oldInput = _backCameraInput;
- AVCaptureDeviceInput *newInput = _frontCameraInput;
- if (useBackCamera) {
- oldInput = _frontCameraInput;
- newInput = _backCameraInput;
- }
- if (oldInput) {
- // Ok to remove this even if it's not attached. Will be no-op.
- [_captureSession removeInput:oldInput];
- }
- if (newInput) {
- [_captureSession addInput:newInput];
- }
- [self updateOrientation];
- AVCaptureDevice *newDevice = newInput.device;
- const cricket::VideoFormat *format = _capturer->GetCaptureFormat();
- SetFormatForCaptureDevice(newDevice, _captureSession, *format);
- [_captureSession commitConfiguration];
- }];
+ [_captureSession beginConfiguration];
+ AVCaptureDeviceInput *oldInput = _backCameraInput;
+ AVCaptureDeviceInput *newInput = _frontCameraInput;
+ if (useBackCamera) {
+ oldInput = _frontCameraInput;
+ newInput = _backCameraInput;
+ }
+ if (oldInput) {
+ // Ok to remove this even if it's not attached. Will be no-op.
+ [_captureSession removeInput:oldInput];
+ }
+ if (newInput) {
+ [_captureSession addInput:newInput];
+ }
+ [self updateOrientation];
+ AVCaptureDevice *newDevice = newInput.device;
+ const cricket::VideoFormat *format =
+ _capturer->GetCaptureFormat();
+ [RTCAVFoundationFormatMapper setFormat:*format
+ forCaptureDevice:newDevice
+ captureSession:_captureSession];
+ [_captureSession commitConfiguration];
+ }];
}
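
The camera-switch path now delegates format selection to the RTCAVFoundationFormatMapper class introduced by this CL. The selector is taken from the call above; the rest of this header sketch (return type, parameter passing, include path) is an assumption about what RTCAVFoundationFormatMapper.h exposes:

#import <AVFoundation/AVFoundation.h>
#include "webrtc/media/base/videocommon.h"  // Assumed include for cricket::VideoFormat.

@interface RTCAVFoundationFormatMapper : NSObject
// Applies the given cricket format to the device within the session's
// begin/commitConfiguration pass. Assumed to report success as a BOOL.
+ (BOOL)setFormat:(const cricket::VideoFormat &)format
    forCaptureDevice:(AVCaptureDevice *)device
      captureSession:(AVCaptureSession *)session;
@end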
@end
-namespace webrtc {
-
-enum AVFoundationVideoCapturerMessageType : uint32_t {
- kMessageTypeFrame,
-};
-
-AVFoundationVideoCapturer::AVFoundationVideoCapturer() : _capturer(nil) {
- _capturer =
- [[RTCAVFoundationVideoCapturerInternal alloc] initWithCapturer:this];
-
- std::set<cricket::VideoFormat> front_camera_video_formats =
- GetSupportedVideoFormatsForDevice([_capturer frontCaptureDevice]);
-
- std::set<cricket::VideoFormat> back_camera_video_formats =
- GetSupportedVideoFormatsForDevice([_capturer backCaptureDevice]);
-
- std::vector<cricket::VideoFormat> intersection_video_formats;
- if (back_camera_video_formats.empty()) {
- intersection_video_formats.assign(front_camera_video_formats.begin(),
- front_camera_video_formats.end());
-
- } else if (front_camera_video_formats.empty()) {
- intersection_video_formats.assign(back_camera_video_formats.begin(),
- back_camera_video_formats.end());
- } else {
- std::set_intersection(
- front_camera_video_formats.begin(), front_camera_video_formats.end(),
- back_camera_video_formats.begin(), back_camera_video_formats.end(),
- std::back_inserter(intersection_video_formats));
- }
- SetSupportedFormats(intersection_video_formats);
-}
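
The constructor removed here (the C++ side leaves this file as part of the split) computes the formats common to both cameras with std::set_intersection, which requires sorted input ranges; std::set satisfies that because it iterates in key order. A minimal standalone C++ illustration of the same idiom, using int in place of cricket::VideoFormat:

#include <algorithm>
#include <iterator>
#include <set>
#include <vector>

// std::set iterates in sorted order, which is exactly the precondition
// std::set_intersection places on both of its input ranges.
std::vector<int> Intersect(const std::set<int>& a, const std::set<int>& b) {
  std::vector<int> common;
  std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                        std::back_inserter(common));
  return common;  // Elements present in both sets, still sorted.
}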
-
-AVFoundationVideoCapturer::~AVFoundationVideoCapturer() {
- _capturer = nil;
-}
-
-cricket::CaptureState AVFoundationVideoCapturer::Start(
- const cricket::VideoFormat& format) {
- if (!_capturer) {
- LOG(LS_ERROR) << "Failed to create AVFoundation capturer.";
- return cricket::CaptureState::CS_FAILED;
- }
- if (_capturer.isRunning) {
- LOG(LS_ERROR) << "The capturer is already running.";
- return cricket::CaptureState::CS_FAILED;
- }
-
- AVCaptureDevice* device = [_capturer getActiveCaptureDevice];
- AVCaptureSession* session = _capturer.captureSession;
-
- if (!SetFormatForCaptureDevice(device, session, format)) {
- return cricket::CaptureState::CS_FAILED;
- }
-
- SetCaptureFormat(&format);
- // This isn't super accurate because it takes a while for the AVCaptureSession
- // to spin up, and this call returns async.
- // TODO(tkchin): make this better.
- [_capturer start];
- SetCaptureState(cricket::CaptureState::CS_RUNNING);
-
- return cricket::CaptureState::CS_STARTING;
-}
-
-void AVFoundationVideoCapturer::Stop() {
- [_capturer stop];
- SetCaptureFormat(NULL);
-}
-
-bool AVFoundationVideoCapturer::IsRunning() {
- return _capturer.isRunning;
-}
-
-AVCaptureSession* AVFoundationVideoCapturer::GetCaptureSession() {
- return _capturer.captureSession;
-}
-
-bool AVFoundationVideoCapturer::CanUseBackCamera() const {
- return _capturer.canUseBackCamera;
-}
-
-void AVFoundationVideoCapturer::SetUseBackCamera(bool useBackCamera) {
- _capturer.useBackCamera = useBackCamera;
-}
-
-bool AVFoundationVideoCapturer::GetUseBackCamera() const {
- return _capturer.useBackCamera;
-}
-
-void AVFoundationVideoCapturer::CaptureSampleBuffer(
- CMSampleBufferRef sample_buffer, VideoRotation rotation) {
- if (CMSampleBufferGetNumSamples(sample_buffer) != 1 ||
- !CMSampleBufferIsValid(sample_buffer) ||
- !CMSampleBufferDataIsReady(sample_buffer)) {
- return;
- }
-
- CVImageBufferRef image_buffer = CMSampleBufferGetImageBuffer(sample_buffer);
- if (image_buffer == NULL) {
- return;
- }
-
- const int captured_width = CVPixelBufferGetWidth(image_buffer);
- const int captured_height = CVPixelBufferGetHeight(image_buffer);
-
- int adapted_width;
- int adapted_height;
- int crop_width;
- int crop_height;
- int crop_x;
- int crop_y;
- int64_t translated_camera_time_us;
-
- if (!AdaptFrame(captured_width, captured_height,
- rtc::TimeNanos() / rtc::kNumNanosecsPerMicrosec,
- rtc::TimeMicros(), &adapted_width, &adapted_height,
- &crop_width, &crop_height, &crop_x, &crop_y,
- &translated_camera_time_us)) {
- return;
- }
-
- rtc::scoped_refptr<VideoFrameBuffer> buffer =
- new rtc::RefCountedObject<CoreVideoFrameBuffer>(
- image_buffer,
- adapted_width, adapted_height,
- crop_width, crop_height,
- crop_x, crop_y);
-
- // Applying rotation is only supported for legacy reasons and performance is
- // not critical here.
- if (apply_rotation() && rotation != kVideoRotation_0) {
- buffer = buffer->NativeToI420Buffer();
- rtc::scoped_refptr<I420Buffer> rotated_buffer =
- (rotation == kVideoRotation_180)
- ? I420Buffer::Create(adapted_width, adapted_height)
- : I420Buffer::Create(adapted_height, adapted_width);
- libyuv::I420Rotate(
- buffer->DataY(), buffer->StrideY(),
- buffer->DataU(), buffer->StrideU(),
- buffer->DataV(), buffer->StrideV(),
- rotated_buffer->MutableDataY(), rotated_buffer->StrideY(),
- rotated_buffer->MutableDataU(), rotated_buffer->StrideU(),
- rotated_buffer->MutableDataV(), rotated_buffer->StrideV(),
- buffer->width(), buffer->height(),
- static_cast<libyuv::RotationMode>(rotation));
- buffer = rotated_buffer;
- }
-
- OnFrame(webrtc::VideoFrame(buffer, rotation, translated_camera_time_us),
- captured_width, captured_height);
-}
-
-} // namespace webrtc
+NS_ASSUME_NONNULL_END
