Chromium Code Reviews

| Index: webrtc/modules/audio_device/ios/audio_device_ios.mm |
| diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.mm b/webrtc/modules/audio_device/ios/audio_device_ios.mm |
| index 9db9871c359b60820813b00131712c366c35c818..47c310e70a82392c6ab441fa3a37329ba4b44fe6 100644 |
| --- a/webrtc/modules/audio_device/ios/audio_device_ios.mm |
| +++ b/webrtc/modules/audio_device/ios/audio_device_ios.mm |
| @@ -77,12 +77,14 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
| @autoreleasepool { |
| NSError* error = nil; |
| BOOL success = NO; |
| + |
| // Deactivate the audio session and return if |activate| is false. |
| if (!activate) { |
| success = [session setActive:NO error:&error]; |
| RTC_DCHECK(CheckAndLogError(success, error)); |
| return; |
| } |
| + |
| // Use a category which supports simultaneous recording and playback. |
| // By default, using this category implies that our app’s audio is |
| // nonmixable, hence activating the session will interrupt any other |
| @@ -93,12 +95,14 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
| error:&error]; |
| RTC_DCHECK(CheckAndLogError(success, error)); |
| } |
| + |
| // Specify mode for two-way voice communication (e.g. VoIP). |
| if (session.mode != AVAudioSessionModeVoiceChat) { |
| error = nil; |
| success = [session setMode:AVAudioSessionModeVoiceChat error:&error]; |
| RTC_DCHECK(CheckAndLogError(success, error)); |
| } |
| + |
| // Set the session's sample rate or the hardware sample rate. |
| // It is essential that we use the same sample rate as stream format |
| // to ensure that the I/O unit does not have to do sample rate conversion. |
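As a side note on the sample-rate matching described above, here is a minimal standalone sketch of the request-then-verify pattern this code relies on. The helper name ConfigurePreferredSampleRate and the NSLog-based reporting are illustrative only, not part of this CL:

    #import <AVFoundation/AVFoundation.h>

    // Ask the session for a preferred rate, then read back what was actually
    // granted. The hardware may refuse the request; for example, most BT
    // headsets are capped at 8 kHz or 16 kHz.
    static double ConfigurePreferredSampleRate(double preferred_rate) {
      AVAudioSession* session = [AVAudioSession sharedInstance];
      NSError* error = nil;
      if (![session setPreferredSampleRate:preferred_rate error:&error]) {
        NSLog(@"setPreferredSampleRate failed: %@", error.localizedDescription);
      }
      // session.sampleRate is only authoritative once the session is active.
      return session.sampleRate;
    }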
| @@ -106,6 +110,7 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
| success = |
| [session setPreferredSampleRate:kPreferredSampleRate error:&error]; |
| RTC_DCHECK(CheckAndLogError(success, error)); |
| + |
| // Set the preferred audio I/O buffer duration, in seconds. |
| // TODO(henrika): add more comments here. |
| error = nil; |
| @@ -113,18 +118,18 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
| error:&error]; |
| RTC_DCHECK(CheckAndLogError(success, error)); |
| - // TODO(henrika): add observers here... |
| - |
| // Activate the audio session. Activation can fail if another active audio |
| // session (e.g. phone call) has higher priority than ours. |
| error = nil; |
| success = [session setActive:YES error:&error]; |
| RTC_DCHECK(CheckAndLogError(success, error)); |
| RTC_CHECK(session.isInputAvailable) << "No input path is available!"; |
| + |
| // Ensure that category and mode are actually activated. |
| RTC_DCHECK( |
| [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); |
| RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); |
| + |
| // Try to set the preferred number of hardware audio channels. These calls |
| // must be done after setting the audio session’s category and mode and |
| // activating the session. |
| @@ -404,22 +409,150 @@ void AudioDeviceIOS::UpdateAudioDeviceBuffer() { |
| audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); |
| } |
| +void AudioDeviceIOS::RegisterNotificationObservers() { |
| + LOGI() << "RegisterNotificationObservers"; |
| + // Get the default notification center of the current process. |
| + NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; |
| + |
| + // Add AVAudioSessionInterruptionNotification observer. |
| + // TODO(henrika): improve this section and try to merge actions with actions |
|
tkchin_webrtc
2015/10/16 16:23:32
imo we should make changes to depot_tools to allow
henrika_webrtc
2015/10/20 10:18:16
No action on adding 100 lines support.
Thanks for
|
| + // for the detected route change. |
| + id interruption_observer = [center |
| + addObserverForName:AVAudioSessionInterruptionNotification |
| + object:nil |
| + queue:[NSOperationQueue mainQueue] |
| + usingBlock:^(NSNotification* notification) { |
| + NSNumber* typeNumber = |
| + [notification userInfo][AVAudioSessionInterruptionTypeKey]; |
|
tkchin_webrtc
2015/10/16 16:23:32
style:
notification.userInfo[AVAudioSessionInterruptionTypeKey]
henrika_webrtc
2015/10/20 10:18:16
Done.
|
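For readers skimming the thread: the style tkchin_webrtc is suggesting (and that henrika_webrtc adopted, per "Done.") replaces the nested bracket messages with property dot syntax and dictionary subscripting, roughly:

    NSNumber* typeNumber =
        notification.userInfo[AVAudioSessionInterruptionTypeKey];
    AVAudioSessionInterruptionType type =
        (AVAudioSessionInterruptionType)typeNumber.unsignedIntegerValue;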
| + AVAudioSessionInterruptionType type = |
| + (AVAudioSessionInterruptionType)[typeNumber |
| + unsignedIntegerValue]; |
| + switch (type) { |
| + case AVAudioSessionInterruptionTypeBegan: |
| + // At this point our audio session has been deactivated and |
| + // the audio unit render callbacks no longer occur. |
| + // Nothing to do. |
| + break; |
| + case AVAudioSessionInterruptionTypeEnded: { |
| + NSError* error = nil; |
| + AVAudioSession* session = [AVAudioSession sharedInstance]; |
| + [session setActive:YES error:&error]; |
| + if (error != nil) { |
| + LOG_F(LS_ERROR) << "Failed to activate audio session"; |
| + } |
| + // Post-interruption, the audio unit render callbacks don't |
|
tkchin_webrtc
2015/10/16 16:23:32
Is this still true today?
henrika_webrtc
2015/10/20 10:18:16
See TODO above. I will check and make changes in t
|
| + // automatically continue, so we restart the unit manually |
| + // here. |
| + AudioOutputUnitStop(vpio_unit_); |
| + AudioOutputUnitStart(vpio_unit_); |
| + break; |
| + } |
| + } |
| + }]; |
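On tkchin_webrtc's question above ("Is this still true today?"): one alternative, not used in this patch, is to consult the resume hint that iOS attaches to the end-of-interruption notification and only restart when the system recommends it. A hedged sketch, assuming the same vpio_unit_ member as the surrounding code:

    // AVAudioSessionInterruptionOptionShouldResume is set on the Ended
    // notification when the system considers it appropriate to resume audio.
    AVAudioSessionInterruptionOptions options =
        [notification.userInfo[AVAudioSessionInterruptionOptionKey]
            unsignedIntegerValue];
    if (options & AVAudioSessionInterruptionOptionShouldResume) {
      AudioOutputUnitStop(vpio_unit_);
      AudioOutputUnitStart(vpio_unit_);
    }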
| + |
| + // Add AVAudioSessionRouteChangeNotification observer. |
| + id route_change_observer = [center |
| + addObserverForName:AVAudioSessionRouteChangeNotification |
| + object:nil |
| + queue:[NSOperationQueue mainQueue] |
| + usingBlock:^(NSNotification* notification) { |
| + // Get reason for current route change. |
| + NSUInteger reason_value = [[notification.userInfo |
|
tkchin_webrtc
2015/10/16 16:23:32
style:
NSNumber *reason_number = notification.userInfo[AVAudioSessionRouteChangeReasonKey];
henrika_webrtc
2015/10/20 10:18:16
Done.
|
| + valueForKey:AVAudioSessionRouteChangeReasonKey] |
| + unsignedIntegerValue]; |
| + bool valid_route_change = true; |
| + LOG(LS_INFO) << "Route change:"; |
| + switch (reason_value) { |
| + case AVAudioSessionRouteChangeReasonNewDeviceAvailable: |
| + LOG(LS_INFO) << " NewDeviceAvailable"; |
| + break; |
| + case AVAudioSessionRouteChangeReasonOldDeviceUnavailable: |
| + LOG(LS_INFO) << " OldDeviceUnavailable"; |
| + break; |
| + case AVAudioSessionRouteChangeReasonCategoryChange: |
| + LOG(LS_INFO) << " CategoryChange"; |
| + LOG(LS_INFO) << " New category: " |
| + << ios::GetAudioSessionCategory(); |
| + break; |
| + case AVAudioSessionRouteChangeReasonOverride: |
| + LOG(LS_INFO) << " Override"; |
| + break; |
| + case AVAudioSessionRouteChangeReasonWakeFromSleep: |
| + LOG(LS_INFO) << " WakeFromSleep"; |
| + break; |
| + case AVAudioSessionRouteChangeReasonRouteConfigurationChange: |
| + // Ignore this type of route change since we are focusing |
| + // on detecting headset changes. |
| + LOG(LS_INFO) << " RouteConfigurationChange"; |
| + valid_route_change = false; |
| + break; |
| + default: |
| + LOG(LS_INFO) << " ReasonUnknown"; |
| + } |
| + |
| + if (valid_route_change) { |
| + // Log previous route configuration. |
| + AVAudioSessionRouteDescription* prev_route = [notification |
| + userInfo][AVAudioSessionRouteChangePreviousRouteKey]; |
|
tkchin_webrtc
2015/10/16 16:23:32
ditto .userInfo
henrika_webrtc
2015/10/20 10:18:16
Done.
|
| + LOG(LS_INFO) << "Previous route:"; |
| + LOG(LS_INFO) << ios::StdStringFromNSString( |
| + [NSString stringWithFormat:@"%@", prev_route]); |
| + |
| + // Only restart audio for a valid route change and if the |
| + // session sample rate has changed. |
| + const double session_sample_rate = |
| + [[AVAudioSession sharedInstance] sampleRate]; |
|
tkchin_webrtc
2015/10/16 16:23:32
ditto dot syntax for properties
henrika_webrtc
2015/10/20 10:18:16
Done.
|
| + if (playout_parameters_.sample_rate() != |
| + session_sample_rate) { |
| + if (!RestartAudioUnitWithNewFormat(session_sample_rate)) { |
| + LOG(LS_ERROR) << "Audio restart failed"; |
| + } |
| + } |
| + } |
| + |
| + }]; |
| + |
| + // Increment refcount on observers using ARC bridge. Instance variable is a |
| + // void* instead of an id because header is included in other pure C++ |
| + // files. |
| + audio_interruption_observer_ = (__bridge_retained void*)interruption_observer; |
| + route_change_observer_ = (__bridge_retained void*)route_change_observer; |
| +} |
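The __bridge_retained/__bridge_transfer pairing used here is easy to get wrong, so a self-contained sketch of the ownership round trip may help (the NSObject stands in for either observer ivar):

    // An ARC-managed object we want to stash in a pure C++ member.
    id observer = [[NSObject alloc] init];
    // __bridge_retained: refcount +1, and ARC stops tracking the object, so
    // the raw pointer can safely be stored as a void* in a C++-only header.
    void* handle = (__bridge_retained void*)observer;
    // ... later, e.g. in UnregisterNotificationObservers() ...
    // __bridge_transfer: ARC takes ownership back and releases the object
    // once |recovered| leaves scope. Each retain must be transferred exactly
    // once, or the observer leaks.
    id recovered = (__bridge_transfer id)handle;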
| + |
| +void AudioDeviceIOS::UnregisterNotificationObservers() { |
| + LOGI() << "UnregisterNotificationObservers"; |
| + // Transfer ownership of observer back to ARC, which will deallocate the |
| + // observer once it exits this scope. |
| + NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; |
| + if (audio_interruption_observer_ != nullptr) { |
| + id observer = (__bridge_transfer id)audio_interruption_observer_; |
| + [center removeObserver:observer]; |
| + audio_interruption_observer_ = nullptr; |
| + } |
| + if (route_change_observer_ != nullptr) { |
| + id observer = (__bridge_transfer id)route_change_observer_; |
| + [center removeObserver:observer]; |
| + route_change_observer_ = nullptr; |
| + } |
| +} |
| + |
| void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { |
| LOGI() << "SetupAudioBuffersForActiveAudioSession"; |
| - AVAudioSession* session = [AVAudioSession sharedInstance]; |
| // Verify the current values once the audio session has been activated. |
| + AVAudioSession* session = [AVAudioSession sharedInstance]; |
| LOG(LS_INFO) << " sample rate: " << session.sampleRate; |
| LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration; |
| LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels; |
| LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels; |
| LOG(LS_INFO) << " output latency: " << session.outputLatency; |
| LOG(LS_INFO) << " input latency: " << session.inputLatency; |
| + |
| // Log a warning message for the case when we are unable to set the preferred |
| // hardware sample rate but continue and use the non-ideal sample rate after |
| - // reinitializing the audio parameters. |
| - if (session.sampleRate != playout_parameters_.sample_rate()) { |
| - LOG(LS_WARNING) |
| - << "Failed to enable an audio session with the preferred sample rate!"; |
| + // reinitializing the audio parameters. Most BT headsets only support 8kHz or |
| + // 16kHz. |
| + if (session.sampleRate != kPreferredSampleRate) { |
| + LOG(LS_WARNING) << "Unable to set the preferred sample rate"; |
| } |
| // At this stage, we also know the exact IO buffer duration and can add |
| @@ -532,8 +665,10 @@ bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { |
| application_format.mBytesPerFrame = kBytesPerSample; |
| application_format.mChannelsPerFrame = kPreferredNumberOfChannels; |
| application_format.mBitsPerChannel = 8 * kBytesPerSample; |
| + // Store the new format. |
| + application_format_ = application_format; |
| #if !defined(NDEBUG) |
| - LogABSD(application_format); |
| + LogABSD(application_format_); |
| #endif |
| // Set the application format on the output scope of the input element/bus. |
| @@ -589,12 +724,48 @@ bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { |
| return true; |
| } |
| +bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) { |
| + LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")"; |
| + // Stop the active audio unit. |
| + LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_), |
| + "Failed to stop the the Voice-Processing I/O unit"); |
| + |
| + // The stream format is about to be changed and it requires that we first |
| + // uninitialize it to deallocate its resources. |
| + LOG_AND_RETURN_IF_ERROR( |
| + AudioUnitUninitialize(vpio_unit_), |
| + "Failed to uninitialize the the Voice-Processing I/O unit"); |
| + |
| + // Allocate new buffers given the new stream format. |
| + SetupAudioBuffersForActiveAudioSession(); |
| + |
| + // Update the existing application format using the new sample rate. |
| + application_format_.mSampleRate = playout_parameters_.sample_rate(); |
| + UInt32 size = sizeof(application_format_); |
| + AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
| + kAudioUnitScope_Output, 1, &application_format_, size); |
| + AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
| + kAudioUnitScope_Input, 0, &application_format_, size); |
| + |
| + // Prepare the audio unit to render audio again. |
| + LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), |
| + "Failed to initialize the Voice-Processing I/O unit"); |
| + |
| + // Start rendering audio using the new format. |
| + LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), |
| + "Failed to start the Voice-Processing I/O unit"); |
| + return true; |
| +} |
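One nit a careful reader might notice: the two AudioUnitSetProperty calls in RestartAudioUnitWithNewFormat discard their OSStatus. A stricter variant, sketched here with the same scopes and bus numbers as the patch, would route them through the same error logging:

    UInt32 size = sizeof(application_format_);
    OSStatus result = AudioUnitSetProperty(
        vpio_unit_, kAudioUnitProperty_StreamFormat,
        kAudioUnitScope_Output, 1, &application_format_, size);
    if (result != noErr) {
      LOG_F(LS_ERROR) << "Failed to set output stream format: " << result;
    }
    result = AudioUnitSetProperty(
        vpio_unit_, kAudioUnitProperty_StreamFormat,
        kAudioUnitScope_Input, 0, &application_format_, size);
    if (result != noErr) {
      LOG_F(LS_ERROR) << "Failed to set input stream format: " << result;
    }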
| + |
| bool AudioDeviceIOS::InitPlayOrRecord() { |
| LOGI() << "InitPlayOrRecord"; |
| AVAudioSession* session = [AVAudioSession sharedInstance]; |
| // Activate the audio session and ask for a set of preferred audio parameters. |
| ActivateAudioSession(session, true); |
| + // Start observing audio session interruptions and route changes. |
| + RegisterNotificationObservers(); |
| + |
| // Ensure that we got what we asked for in our active audio session. |
| SetupAudioBuffersForActiveAudioSession(); |
| @@ -602,59 +773,14 @@ bool AudioDeviceIOS::InitPlayOrRecord() { |
| if (!SetupAndInitializeVoiceProcessingAudioUnit()) { |
| return false; |
| } |
| - |
| - // Listen to audio interruptions. |
| - // TODO(henrika): learn this area better. |
| - NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; |
| - id observer = [center |
| - addObserverForName:AVAudioSessionInterruptionNotification |
| - object:nil |
| - queue:[NSOperationQueue mainQueue] |
| - usingBlock:^(NSNotification* notification) { |
| - NSNumber* typeNumber = |
| - [notification userInfo][AVAudioSessionInterruptionTypeKey]; |
| - AVAudioSessionInterruptionType type = |
| - (AVAudioSessionInterruptionType)[typeNumber |
| - unsignedIntegerValue]; |
| - switch (type) { |
| - case AVAudioSessionInterruptionTypeBegan: |
| - // At this point our audio session has been deactivated and |
| - // the audio unit render callbacks no longer occur. |
| - // Nothing to do. |
| - break; |
| - case AVAudioSessionInterruptionTypeEnded: { |
| - NSError* error = nil; |
| - AVAudioSession* session = [AVAudioSession sharedInstance]; |
| - [session setActive:YES error:&error]; |
| - if (error != nil) { |
| - LOG_F(LS_ERROR) << "Failed to active audio session"; |
| - } |
| - // Post interruption the audio unit render callbacks don't |
| - // automatically continue, so we restart the unit manually |
| - // here. |
| - AudioOutputUnitStop(vpio_unit_); |
| - AudioOutputUnitStart(vpio_unit_); |
| - break; |
| - } |
| - } |
| - }]; |
| - // Increment refcount on observer using ARC bridge. Instance variable is a |
| - // void* instead of an id because header is included in other pure C++ |
| - // files. |
| - audio_interruption_observer_ = (__bridge_retained void*)observer; |
| return true; |
| } |
| bool AudioDeviceIOS::ShutdownPlayOrRecord() { |
| LOGI() << "ShutdownPlayOrRecord"; |
| - if (audio_interruption_observer_ != nullptr) { |
| - NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; |
| - // Transfer ownership of observer back to ARC, which will dealloc the |
| - // observer once it exits this scope. |
| - id observer = (__bridge_transfer id)audio_interruption_observer_; |
| - [center removeObserver:observer]; |
| - audio_interruption_observer_ = nullptr; |
| - } |
| + // Remove audio session notification observers. |
| + UnregisterNotificationObservers(); |
| + |
| // Close and delete the voice-processing I/O unit. |
| OSStatus result = -1; |
| if (nullptr != vpio_unit_) { |
| @@ -662,12 +788,17 @@ bool AudioDeviceIOS::ShutdownPlayOrRecord() { |
| if (result != noErr) { |
| LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; |
| } |
| + result = AudioUnitUninitialize(vpio_unit_); |
|
tkchin_webrtc
2015/10/16 16:23:32
oh dear, were we really not doing this before? Thx
henrika_webrtc
2015/10/20 10:18:16
You are welcome ;-)
|
| + if (result != noErr) { |
| + LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; |
| + } |
| result = AudioComponentInstanceDispose(vpio_unit_); |
| if (result != noErr) { |
| LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; |
| } |
| vpio_unit_ = nullptr; |
| } |
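For reference, the teardown order this CL establishes (and that tkchin_webrtc's comment above welcomes) condenses to stop, then uninitialize, then dispose. Error checks are elided in this sketch; the real code logs each OSStatus:

    if (vpio_unit_ != nullptr) {
      AudioOutputUnitStop(vpio_unit_);              // halt render callbacks
      AudioUnitUninitialize(vpio_unit_);            // free internal resources (new in this CL)
      AudioComponentInstanceDispose(vpio_unit_);    // destroy the instance
      vpio_unit_ = nullptr;
    }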
| + |
| // All I/O should be stopped or paused prior to deactivating the audio |
| // session, hence we deactivate as last action. |
| AVAudioSession* session = [AVAudioSession sharedInstance]; |
| @@ -695,7 +826,6 @@ OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable( |
| const AudioTimeStamp* in_time_stamp, |
| UInt32 in_bus_number, |
| UInt32 in_number_frames) { |
| - RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames); |
| OSStatus result = noErr; |
| // Simply return if recording is not enabled. |
| if (!rtc::AtomicOps::AcquireLoad(&recording_)) |