| Index: webrtc/modules/audio_device/ios/objc/RTCAudioSession.mm
|
| diff --git a/webrtc/modules/audio_device/ios/objc/RTCAudioSession.mm b/webrtc/modules/audio_device/ios/objc/RTCAudioSession.mm
|
| index e6b3657a1d6ee68f9550b7de5ba7daeef7488da9..7ef5110f476a9cf950eaa3cda86dcb721e3ae6e1 100644
|
| --- a/webrtc/modules/audio_device/ios/objc/RTCAudioSession.mm
|
| +++ b/webrtc/modules/audio_device/ios/objc/RTCAudioSession.mm
|
| @@ -17,6 +17,7 @@
|
|
|
| #import "WebRTC/RTCLogging.h"
|
| #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession+Private.h"
|
| +#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"
|
|
|
| NSString * const kRTCAudioSessionErrorDomain = @"org.webrtc.RTCAudioSession";
|
| NSInteger const kRTCAudioSessionErrorLockRequired = -1;
|
| @@ -32,12 +33,13 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| volatile int _lockRecursionCount;
|
| volatile int _webRTCSessionCount;
|
| BOOL _isActive;
|
| - BOOL _shouldDelayAudioConfiguration;
|
| + BOOL _useManualAudio;
|
| + BOOL _isAudioEnabled;
|
| + BOOL _canPlayOrRecord;
|
| }
|
|
|
| @synthesize session = _session;
|
| @synthesize delegates = _delegates;
|
| -@synthesize savedConfiguration = _savedConfiguration;
|
|
|
| + (instancetype)sharedInstance {
|
| static dispatch_once_t onceToken;
|
| @@ -81,6 +83,9 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| - (NSString *)description {
|
| NSString *format =
|
| @"RTCAudioSession: {\n"
|
| + " category: %@\n"
|
| + " categoryOptions: %ld\n"
|
| + " mode: %@\n"
|
| " isActive: %d\n"
|
| " sampleRate: %.2f\n"
|
| " IOBufferDuration: %f\n"
|
| @@ -90,6 +95,7 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| " inputLatency: %f\n"
|
| "}";
|
| NSString *description = [NSString stringWithFormat:format,
|
| + self.category, (long)self.categoryOptions, self.mode,
|
| self.isActive, self.sampleRate, self.IOBufferDuration,
|
| self.outputNumberOfChannels, self.inputNumberOfChannels,
|
| self.outputLatency, self.inputLatency];
|
| @@ -112,20 +118,35 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| return _lockRecursionCount > 0;
|
| }
|
|
|
| -- (void)setShouldDelayAudioConfiguration:(BOOL)shouldDelayAudioConfiguration {
|
| +- (void)setUseManualAudio:(BOOL)useManualAudio {
|
| @synchronized(self) {
|
| - // No one should be changing this while an audio device is active.
|
| - RTC_DCHECK(!self.isConfiguredForWebRTC);
|
| - if (_shouldDelayAudioConfiguration == shouldDelayAudioConfiguration) {
|
| + if (_useManualAudio == useManualAudio) {
|
| return;
|
| }
|
| - _shouldDelayAudioConfiguration = shouldDelayAudioConfiguration;
|
| + _useManualAudio = useManualAudio;
|
| }
|
| + [self updateCanPlayOrRecord];
|
| }
|
|
|
| -- (BOOL)shouldDelayAudioConfiguration {
|
| +- (BOOL)useManualAudio {
|
| @synchronized(self) {
|
| - return _shouldDelayAudioConfiguration;
|
| + return _useManualAudio;
|
| + }
|
| +}
|
| +
|
| +- (void)setIsAudioEnabled:(BOOL)isAudioEnabled {
|
| + @synchronized(self) {
|
| + if (_isAudioEnabled == isAudioEnabled) {
|
| + return;
|
| + }
|
| + _isAudioEnabled = isAudioEnabled;
|
| + }
|
| + [self updateCanPlayOrRecord];
|
| +}
|
| +
|
| +- (BOOL)isAudioEnabled {
|
| + @synchronized(self) {
|
| + return _isAudioEnabled;
|
| }
|
| }
|
|
|
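The accessors above replace setShouldDelayAudioConfiguration:/shouldDelayAudioConfiguration: an application opts into manual audio management once via useManualAudio and then toggles isAudioEnabled around its calls; both setters funnel into updateCanPlayOrRecord further down. A minimal application-side sketch, assuming only the properties added in this patch; the helper names and the "audio may flow now" trigger are illustrative, not part of the change:

    #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"

    // Hypothetical app-side helpers; the names are placeholders.
    static void EnableManualAudioManagement(void) {
      RTCAudioSession *audioSession = [RTCAudioSession sharedInstance];
      audioSession.useManualAudio = YES;  // opt in once, e.g. at app startup
      audioSession.isAudioEnabled = NO;   // keep WebRTC audio off until told otherwise
    }

    static void OnAppDecidedAudioMayFlow(void) {
      // Flipping the flag runs updateCanPlayOrRecord and, if the aggregate
      // value changed, notifies delegates via audioSession:didChangeCanPlayOrRecord:.
      [RTCAudioSession sharedInstance].isAudioEnabled = YES;
    }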
| @@ -232,6 +253,10 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| return self.session.sampleRate;
|
| }
|
|
|
| +- (double)preferredSampleRate {
|
| + return self.session.preferredSampleRate;
|
| +}
|
| +
|
| - (NSInteger)inputNumberOfChannels {
|
| return self.session.inputNumberOfChannels;
|
| }
|
| @@ -256,6 +281,10 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| return self.session.IOBufferDuration;
|
| }
|
|
|
| +- (NSTimeInterval)preferredIOBufferDuration {
|
| + return self.session.preferredIOBufferDuration;
|
| +}
|
| +
|
| // TODO(tkchin): Simplify the amount of locking happening here. Likely that we
|
| // can just do atomic increments / decrements.
|
| - (BOOL)setActive:(BOOL)active
|
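The preferredSampleRate and preferredIOBufferDuration getters added in this hunk are plain pass-throughs to AVAudioSession. They are mostly useful for spotting configurations the hardware did not honor; a small sketch using only API that already appears in this file (the helper name is a placeholder):

    #import "WebRTC/RTCLogging.h"
    #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"

    static void LogPreferredVersusActual(void) {
      RTCAudioSession *audioSession = [RTCAudioSession sharedInstance];
      if (audioSession.sampleRate != audioSession.preferredSampleRate) {
        RTCLog(@"Hardware sample rate %.0f Hz differs from preferred %.0f Hz.",
               audioSession.sampleRate, audioSession.preferredSampleRate);
      }
      if (audioSession.IOBufferDuration != audioSession.preferredIOBufferDuration) {
        RTCLog(@"IO buffer duration %.4f s differs from preferred %.4f s.",
               audioSession.IOBufferDuration, audioSession.preferredIOBufferDuration);
      }
    }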
| @@ -497,21 +526,6 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| }
|
| }
|
|
|
| -- (void)setSavedConfiguration:(RTCAudioSessionConfiguration *)configuration {
|
| - @synchronized(self) {
|
| - if (_savedConfiguration == configuration) {
|
| - return;
|
| - }
|
| - _savedConfiguration = configuration;
|
| - }
|
| -}
|
| -
|
| -- (RTCAudioSessionConfiguration *)savedConfiguration {
|
| - @synchronized(self) {
|
| - return _savedConfiguration;
|
| - }
|
| -}
|
| -
|
| // TODO(tkchin): check for duplicates.
|
| - (void)pushDelegate:(id<RTCAudioSessionDelegate>)delegate {
|
| @synchronized(self) {
|
| @@ -547,6 +561,10 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| return _webRTCSessionCount;
|
| }
|
|
|
| +- (BOOL)canPlayOrRecord {
|
| + return !self.useManualAudio || self.isAudioEnabled;
|
| +}
|
| +
|
| - (BOOL)checkLock:(NSError **)outError {
|
| // Check ivar instead of trying to acquire lock so that we won't accidentally
|
| // acquire lock if it hasn't already been called.
|
| @@ -566,79 +584,70 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| if (![self checkLock:outError]) {
|
| return NO;
|
| }
|
| - NSInteger sessionCount = rtc::AtomicOps::Increment(&_webRTCSessionCount);
|
| - if (sessionCount > 1) {
|
| - // Should already be configured.
|
| - RTC_DCHECK(self.isConfiguredForWebRTC);
|
| - return YES;
|
| - }
|
| + rtc::AtomicOps::Increment(&_webRTCSessionCount);
|
| + [self notifyDidStartPlayOrRecord];
|
| + return YES;
|
| +}
|
|
|
| - // Only perform configuration steps once. Application might have already
|
| - // configured the session.
|
| - if (self.isConfiguredForWebRTC) {
|
| - // Nothing more to do, already configured.
|
| - return YES;
|
| +- (BOOL)endWebRTCSession:(NSError **)outError {
|
| + if (outError) {
|
| + *outError = nil;
|
| }
|
| + if (![self checkLock:outError]) {
|
| + return NO;
|
| + }
|
| + rtc::AtomicOps::Decrement(&_webRTCSessionCount);
|
| + [self notifyDidStopPlayOrRecord];
|
| + return YES;
|
| +}
|
|
|
| - // If application has prevented automatic configuration, return here and wait
|
| - // for application to call configureWebRTCSession.
|
| - if (self.shouldDelayAudioConfiguration) {
|
| - [self notifyShouldConfigure];
|
| - return YES;
|
| +- (BOOL)configureWebRTCSession:(NSError **)outError {
|
| + if (outError) {
|
| + *outError = nil;
|
| + }
|
| + if (![self checkLock:outError]) {
|
| + return NO;
|
| }
|
| + RTCLog(@"Configuring audio session for WebRTC.");
|
|
|
| - // Configure audio session.
|
| + // Configure the AVAudioSession and activate it.
|
| + // Provide an error even if there isn't one so we can log it.
|
| NSError *error = nil;
|
| - if (![self configureWebRTCSession:&error]) {
|
| - RTCLogError(@"Error configuring audio session: %@",
|
| + RTCAudioSessionConfiguration *webRTCConfig =
|
| + [RTCAudioSessionConfiguration webRTCConfiguration];
|
| + if (![self setConfiguration:webRTCConfig active:YES error:&error]) {
|
| + RTCLogError(@"Failed to set WebRTC audio configuration: %@",
|
| error.localizedDescription);
|
| + [self unconfigureWebRTCSession:nil];
|
| if (outError) {
|
| *outError = error;
|
| }
|
| return NO;
|
| }
|
|
|
| + // Ensure that the device currently supports audio input.
|
| + // TODO(tkchin): Figure out if this is really necessary.
|
| + if (!self.inputAvailable) {
|
| + RTCLogError(@"No audio input path is available!");
|
| + [self unconfigureWebRTCSession:nil];
|
| + if (outError) {
|
| + *outError = [self configurationErrorWithDescription:@"No input path."];
|
| + }
|
| + return NO;
|
| + }
|
| +
|
| return YES;
|
| }
|
|
|
| -- (BOOL)endWebRTCSession:(NSError **)outError {
|
| +- (BOOL)unconfigureWebRTCSession:(NSError **)outError {
|
| if (outError) {
|
| *outError = nil;
|
| }
|
| if (![self checkLock:outError]) {
|
| return NO;
|
| }
|
| - int sessionCount = rtc::AtomicOps::Decrement(&_webRTCSessionCount);
|
| - RTC_DCHECK_GE(sessionCount, 0);
|
| - if (sessionCount != 0) {
|
| - // Should still be configured.
|
| - RTC_DCHECK(self.isConfiguredForWebRTC);
|
| - return YES;
|
| - }
|
| -
|
| - // Only unconfigure if application has not done it.
|
| - if (!self.isConfiguredForWebRTC) {
|
| - // Nothing more to do, already unconfigured.
|
| - return YES;
|
| - }
|
| -
|
| - // If application has prevented automatic configuration, return here and wait
|
| - // for application to call unconfigureWebRTCSession.
|
| - if (self.shouldDelayAudioConfiguration) {
|
| - [self notifyShouldUnconfigure];
|
| - return YES;
|
| - }
|
| -
|
| - // Unconfigure audio session.
|
| - NSError *error = nil;
|
| - if (![self unconfigureWebRTCSession:&error]) {
|
| - RTCLogError(@"Error unconfiguring audio session: %@",
|
| - error.localizedDescription);
|
| - if (outError) {
|
| - *outError = error;
|
| - }
|
| - return NO;
|
| - }
|
| + RTCLog(@"Unconfiguring audio session for WebRTC.");
|
| + [self setActive:NO error:outError];
|
|
|
| return YES;
|
| }
|
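With this rewrite, beginWebRTCSession:/endWebRTCSession: only maintain the session count and emit the start/stop notifications, while configureWebRTCSession:/unconfigureWebRTCSession: do the actual AVAudioSession work and still require the configuration lock (checkLock:). A hedged sketch of the expected calling pattern, assuming the lockForConfiguration/unlockForConfiguration pair RTCAudioSession already exposes (the wrapper function is a placeholder):

    #import "WebRTC/RTCLogging.h"
    #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"

    static BOOL ConfigureSessionForWebRTC(void) {
      RTCAudioSession *audioSession = [RTCAudioSession sharedInstance];
      [audioSession lockForConfiguration];
      NSError *error = nil;
      // Applies [RTCAudioSessionConfiguration webRTCConfiguration], activates the
      // session, and fails (rolling back via unconfigureWebRTCSession:) if no
      // audio input route is available.
      BOOL success = [audioSession configureWebRTCSession:&error];
      if (!success) {
        RTCLogError(@"Failed to configure session for WebRTC: %@",
                    error.localizedDescription);
      }
      [audioSession unlockForConfiguration];
      return success;
    }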
| @@ -667,6 +676,22 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| }
|
| }
|
|
|
| +- (void)updateCanPlayOrRecord {
|
| + BOOL canPlayOrRecord = NO;
|
| + BOOL shouldNotify = NO;
|
| + @synchronized(self) {
|
| + canPlayOrRecord = !self.useManualAudio || self.isAudioEnabled;
|
| + if (_canPlayOrRecord == canPlayOrRecord) {
|
| + return;
|
| + }
|
| + _canPlayOrRecord = canPlayOrRecord;
|
| + shouldNotify = YES;
|
| + }
|
| + if (shouldNotify) {
|
| + [self notifyDidChangeCanPlayOrRecord:canPlayOrRecord];
|
| + }
|
| +}
|
| +
|
| - (void)notifyDidBeginInterruption {
|
| for (auto delegate : self.delegates) {
|
| SEL sel = @selector(audioSessionDidBeginInterruption:);
|
| @@ -717,38 +742,29 @@ NSInteger const kRTCAudioSessionErrorConfiguration = -2;
|
| }
|
| }
|
|
|
| -- (void)notifyShouldConfigure {
|
| - for (auto delegate : self.delegates) {
|
| - SEL sel = @selector(audioSessionShouldConfigure:);
|
| - if ([delegate respondsToSelector:sel]) {
|
| - [delegate audioSessionShouldConfigure:self];
|
| - }
|
| - }
|
| -}
|
| -
|
| -- (void)notifyShouldUnconfigure {
|
| +- (void)notifyDidChangeCanPlayOrRecord:(BOOL)canPlayOrRecord {
|
| for (auto delegate : self.delegates) {
|
| - SEL sel = @selector(audioSessionShouldUnconfigure:);
|
| + SEL sel = @selector(audioSession:didChangeCanPlayOrRecord:);
|
| if ([delegate respondsToSelector:sel]) {
|
| - [delegate audioSessionShouldUnconfigure:self];
|
| + [delegate audioSession:self didChangeCanPlayOrRecord:canPlayOrRecord];
|
| }
|
| }
|
| }
|
|
|
| -- (void)notifyDidConfigure {
|
| +- (void)notifyDidStartPlayOrRecord {
|
| for (auto delegate : self.delegates) {
|
| - SEL sel = @selector(audioSessionDidConfigure:);
|
| + SEL sel = @selector(audioSessionDidStartPlayOrRecord:);
|
| if ([delegate respondsToSelector:sel]) {
|
| - [delegate audioSessionDidConfigure:self];
|
| + [delegate audioSessionDidStartPlayOrRecord:self];
|
| }
|
| }
|
| }
|
|
|
| -- (void)notifyDidUnconfigure {
|
| +- (void)notifyDidStopPlayOrRecord {
|
| for (auto delegate : self.delegates) {
|
| - SEL sel = @selector(audioSessionDidUnconfigure:);
|
| + SEL sel = @selector(audioSessionDidStopPlayOrRecord:);
|
| if ([delegate respondsToSelector:sel]) {
|
| - [delegate audioSessionDidUnconfigure:self];
|
| + [delegate audioSessionDidStopPlayOrRecord:self];
|
| }
|
| }
|
| }
|
|
|
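The shouldConfigure/shouldUnconfigure and didConfigure/didUnconfigure notifications are gone; delegates now observe the start and stop of play-or-record and changes to canPlayOrRecord. A hedged sketch of an observer written against the renamed callbacks; MyAudioObserver and its logging bodies are illustrative, and registration is assumed to go through the addDelegate:/pushDelegate: methods RTCAudioSession declares:

    #import "WebRTC/RTCLogging.h"
    #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"

    @interface MyAudioObserver : NSObject <RTCAudioSessionDelegate>
    @end

    @implementation MyAudioObserver

    - (void)audioSessionDidStartPlayOrRecord:(RTCAudioSession *)session {
      // Fired from beginWebRTCSession: after the session count is incremented.
      RTCLog(@"WebRTC session started; canPlayOrRecord=%d", session.canPlayOrRecord);
    }

    - (void)audioSessionDidStopPlayOrRecord:(RTCAudioSession *)session {
      // Fired from endWebRTCSession:.
      RTCLog(@"WebRTC session stopped.");
    }

    - (void)audioSession:(RTCAudioSession *)session
        didChangeCanPlayOrRecord:(BOOL)canPlayOrRecord {
      // Fired from updateCanPlayOrRecord when useManualAudio / isAudioEnabled
      // change the aggregate value; start or stop the audio unit here.
      RTCLog(@"canPlayOrRecord changed to %d", canPlayOrRecord);
    }

    @end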