Index: webrtc/modules/audio_device/ios/audio_device_ios.mm |
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.mm b/webrtc/modules/audio_device/ios/audio_device_ios.mm |
index d7668f7894df500d385a50d959499ffa7911c091..d15a6224aa77b4e291562e236ce684755a2c370a 100644 |
--- a/webrtc/modules/audio_device/ios/audio_device_ios.mm |
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.mm |
@@ -25,6 +25,16 @@ |
namespace webrtc { |
+// Protects |g_audio_session_activation_count|. |
+static rtc::GlobalLockPod g_lock; |
+ |
+// Counts number of times setActive:YES has been called on the singleton |
+// AVAudioSession instance. Used to ensure that we don't disable an audio |
+// session when it is still in use by other instances of this object. |
+// Member is static to ensure that the value is counted for all instances |
+// and not per instance. |
+static int g_audio_session_activation_count GUARDED_BY(g_lock) = 0; |
+ |
#define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" |
#define LOG_AND_RETURN_IF_ERROR(error, message) \ |
@@ -80,7 +90,8 @@ using ios::CheckAndLogError; |
// Activates an audio session suitable for full duplex VoIP sessions when |
// |activate| is true. Also sets the preferred sample rate and IO buffer |
// duration. Deactivates an active audio session if |activate| is set to false. |
-static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
+static bool ActivateAudioSession(AVAudioSession* session, bool activate) |
+ EXCLUSIVE_LOCKS_REQUIRED(g_lock) { |
LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; |
@autoreleasepool { |
NSError* error = nil; |
@@ -96,8 +107,7 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
setActive:NO |
withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation |
error:&error]; |
- RTC_DCHECK(CheckAndLogError(success, error)); |
- return; |
+ return CheckAndLogError(success, error); |
} |
  // Go ahead and activate our own audio session since |activate| is true. |
@@ -129,7 +139,6 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
RTC_DCHECK(CheckAndLogError(success, error)); |
// Set the preferred audio I/O buffer duration, in seconds. |
- // TODO(henrika): add more comments here. |
error = nil; |
success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration |
error:&error]; |
@@ -139,13 +148,27 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
// session (e.g. phone call) has higher priority than ours. |
error = nil; |
success = [session setActive:YES error:&error]; |
- RTC_DCHECK(CheckAndLogError(success, error)); |
- RTC_CHECK(session.isInputAvailable) << "No input path is available!"; |
+ if (!CheckAndLogError(success, error)) { |
+ return false; |
+ } |
- // Ensure that category and mode are actually activated. |
- RTC_DCHECK( |
- [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); |
- RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); |
+ // Ensure that the device currently supports audio input. |
+ if (!session.isInputAvailable) { |
+ LOG(LS_ERROR) << "No audio input path is available!"; |
+ return false; |
+ } |
+ |
+ // Ensure that the required category and mode are actually activated. |
+ if (![session.category |
+ isEqualToString:AVAudioSessionCategoryPlayAndRecord]) { |
+ LOG(LS_ERROR) |
+ << "Failed to set category to AVAudioSessionCategoryPlayAndRecord"; |
+ return false; |
+ } |
+ if (![session.mode isEqualToString:AVAudioSessionModeVoiceChat]) { |
+ LOG(LS_ERROR) << "Failed to set mode to AVAudioSessionModeVoiceChat"; |
+ return false; |
+ } |
// Try to set the preferred number of hardware audio channels. These calls |
// must be done after setting the audio session’s category and mode and |
@@ -164,6 +187,7 @@ static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
[session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels |
error:&error]; |
RTC_DCHECK(CheckAndLogError(success, error)); |
+ return true; |
} |
} |
@@ -212,7 +236,7 @@ AudioDeviceIOS::AudioDeviceIOS() |
} |
AudioDeviceIOS::~AudioDeviceIOS() { |
- LOGI() << "~dtor"; |
+ LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); |
RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
Terminate(); |
} |
@@ -254,8 +278,15 @@ int32_t AudioDeviceIOS::Terminate() { |
if (!initialized_) { |
return 0; |
} |
- ShutdownPlayOrRecord(); |
+ StopPlayout(); |
+ StopRecording(); |
initialized_ = false; |
+ { |
+ rtc::GlobalLockScope ls(&g_lock); |
+ if (g_audio_session_activation_count != 0) { |
+ LOG(LS_WARNING) << "Object is destructed with an active audio session"; |
+ } |
+ } |
return 0; |
} |
@@ -267,7 +298,7 @@ int32_t AudioDeviceIOS::InitPlayout() { |
RTC_DCHECK(!playing_); |
if (!rec_is_initialized_) { |
if (!InitPlayOrRecord()) { |
- LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; |
+ LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!"; |
return -1; |
} |
} |
@@ -283,7 +314,7 @@ int32_t AudioDeviceIOS::InitRecording() { |
RTC_DCHECK(!recording_); |
if (!play_is_initialized_) { |
if (!InitPlayOrRecord()) { |
- LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; |
+ LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!"; |
return -1; |
} |
} |
@@ -300,7 +331,8 @@ int32_t AudioDeviceIOS::StartPlayout() { |
if (!recording_) { |
OSStatus result = AudioOutputUnitStart(vpio_unit_); |
if (result != noErr) { |
- LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; |
+ LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartPlayout: " |
+ << result; |
return -1; |
} |
} |
@@ -331,7 +363,8 @@ int32_t AudioDeviceIOS::StartRecording() { |
if (!playing_) { |
OSStatus result = AudioOutputUnitStart(vpio_unit_); |
if (result != noErr) { |
- LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; |
+ LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartRecording: " |
+ << result; |
return -1; |
} |
} |
@@ -427,6 +460,48 @@ void AudioDeviceIOS::UpdateAudioDeviceBuffer() { |
audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); |
} |
+bool AudioDeviceIOS::ActivateAudioSession() { |
pbos-webrtc
2015/11/18 18:51:48
These aren't accessing any members right? This cou
henrika_webrtc
2015/11/18 19:48:21
Missed that. Will fix.
henrika_webrtc
2015/11/19 12:04:19
Done.
|
+ LOGI() << "ActivateAudioSession"; |
+ // An application can create more than one ADM and start audio streaming |
+ // for all of them. It is essential that we only activate the app's audio |
+ // session once (for the first one) and deactivate it once (for the last). |
+ rtc::GlobalLockScope ls(&g_lock); |
+ if (g_audio_session_activation_count == 0) { |
+ // The system provides an audio session object upon launch of an |
+ // application. However, we must initialize the session in order to |
+ // handle interruptions. Implicit initialization occurs when obtaining |
+ // a reference to the AVAudioSession object. |
+ AVAudioSession* session = [AVAudioSession sharedInstance]; |
tkchin_webrtc
2015/11/18 19:25:29
I don't know if there are consequences to managing
henrika_webrtc
2015/11/18 19:48:21
We have done so for a long time now, also done in
|
+ // Try to activate the audio session and ask for a set of preferred audio |
+ // parameters. |
+ if (!webrtc::ActivateAudioSession(session, true)) { |
+ LOG(LS_ERROR) << "Failed to activate the audio session"; |
+ return false; |
+ } |
+ ++g_audio_session_activation_count; |
+ LOG(LS_INFO) << "Our audio session is now activated"; |
+ } |
+ return true; |
+} |
+ |
+bool AudioDeviceIOS::DeactivateAudioSession() { |
+ LOGI() << "DeactivateAudioSession"; |
+ // If more than one object is using the audio session, ensure that only the |
+ // last object deactivates. Apple recommends: "activate your audio session |
+ // only as needed and deactivate it when you are not using audio". |
+ rtc::GlobalLockScope ls(&g_lock); |
+ if (g_audio_session_activation_count == 1) { |
+ AVAudioSession* session = [AVAudioSession sharedInstance]; |
+ if (!webrtc::ActivateAudioSession(session, false)) { |
pbos-webrtc
2015/11/18 18:51:48
You don't need the webrtc:: prefix here or above.
henrika_webrtc
2015/11/18 19:48:21
Acknowledged.
|
+ LOG(LS_ERROR) << "Failed to deactivate the audio session"; |
pbos-webrtc
2015/11/18 18:51:48
+ ", this shouldn't happen, the audio session will
tkchin_webrtc
2015/11/18 19:25:29
It will "leak" in the sense that there is mismatch
henrika_webrtc
2015/11/18 19:48:21
Will make the comment more clear. It is not a real leak.
|
+ return false; |
+ } |
+ --g_audio_session_activation_count; |
+ LOG(LS_INFO) << "Our audio session is now deactivated"; |
+ } |
+ return true; |
+} |
+ |
void AudioDeviceIOS::RegisterNotificationObservers() { |
LOGI() << "RegisterNotificationObservers"; |
// This code block will be called when AVAudioSessionInterruptionNotification |
@@ -639,7 +714,7 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { |
bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { |
LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; |
- RTC_DCHECK(!vpio_unit_); |
+ RTC_DCHECK(!vpio_unit_) << "VoiceProcessingIO audio unit already exists"; |
// Create an audio component description to identify the Voice-Processing |
// I/O audio unit. |
AudioComponentDescription vpio_unit_description; |
@@ -653,29 +728,42 @@ bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { |
AudioComponentFindNext(nullptr, &vpio_unit_description); |
// Create a Voice-Processing IO audio unit. |
- LOG_AND_RETURN_IF_ERROR( |
- AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_), |
- "Failed to create a VoiceProcessingIO audio unit"); |
+ OSStatus result = noErr; |
+ result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_); |
+ if (result != noErr) { |
+ vpio_unit_ = nullptr; |
+ LOG(LS_ERROR) << "AudioComponentInstanceNew failed: " << result; |
+ return false; |
+ } |
// A VP I/O unit's bus 1 connects to input hardware (microphone). Enable |
// input on the input scope of the input element. |
AudioUnitElement input_bus = 1; |
UInt32 enable_input = 1; |
- LOG_AND_RETURN_IF_ERROR( |
- AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, |
- kAudioUnitScope_Input, input_bus, &enable_input, |
- sizeof(enable_input)), |
- "Failed to enable input on input scope of input element"); |
+ result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, |
+ kAudioUnitScope_Input, input_bus, &enable_input, |
+ sizeof(enable_input)); |
+ if (result != noErr) { |
+ DisposeAudioUnit(); |
+ LOG(LS_ERROR) << "Failed to enable input on input scope of input element: " |
+ << result; |
+ return false; |
+ } |
// A VP I/O unit's bus 0 connects to output hardware (speaker). Enable |
// output on the output scope of the output element. |
AudioUnitElement output_bus = 0; |
UInt32 enable_output = 1; |
- LOG_AND_RETURN_IF_ERROR( |
- AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, |
- kAudioUnitScope_Output, output_bus, &enable_output, |
- sizeof(enable_output)), |
- "Failed to enable output on output scope of output element"); |
+ result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, |
+ kAudioUnitScope_Output, output_bus, |
+ &enable_output, sizeof(enable_output)); |
+ if (result != noErr) { |
+ DisposeAudioUnit(); |
+ LOG(LS_ERROR) |
+ << "Failed to enable output on output scope of output element: " |
+ << result; |
+ return false; |
+ } |
// Set the application formats for input and output: |
// - use same format in both directions |
@@ -703,38 +791,55 @@ bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { |
#endif |
// Set the application format on the output scope of the input element/bus. |
- LOG_AND_RETURN_IF_ERROR( |
- AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
- kAudioUnitScope_Output, input_bus, |
- &application_format, size), |
- "Failed to set application format on output scope of input element"); |
+ result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
+ kAudioUnitScope_Output, input_bus, |
+ &application_format, size); |
+ if (result != noErr) { |
+ DisposeAudioUnit(); |
+ LOG(LS_ERROR) |
+ << "Failed to set application format on output scope of input bus: " |
+ << result; |
+ return false; |
+ } |
// Set the application format on the input scope of the output element/bus. |
- LOG_AND_RETURN_IF_ERROR( |
- AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
- kAudioUnitScope_Input, output_bus, |
- &application_format, size), |
- "Failed to set application format on input scope of output element"); |
+ result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
+ kAudioUnitScope_Input, output_bus, |
+ &application_format, size); |
+ if (result != noErr) { |
+ DisposeAudioUnit(); |
+ LOG(LS_ERROR) |
+ << "Failed to set application format on input scope of output bus: " |
+ << result; |
+ return false; |
+ } |
// Specify the callback function that provides audio samples to the audio |
// unit. |
AURenderCallbackStruct render_callback; |
render_callback.inputProc = GetPlayoutData; |
render_callback.inputProcRefCon = this; |
- LOG_AND_RETURN_IF_ERROR( |
- AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_SetRenderCallback, |
- kAudioUnitScope_Input, output_bus, &render_callback, |
- sizeof(render_callback)), |
- "Failed to specify the render callback on the output element"); |
+ result = AudioUnitSetProperty( |
+ vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, |
+ output_bus, &render_callback, sizeof(render_callback)); |
+ if (result != noErr) { |
+ DisposeAudioUnit(); |
+ LOG(LS_ERROR) << "Failed to specify the render callback on the output bus: " |
+ << result; |
+ return false; |
+ } |
// Disable AU buffer allocation for the recorder, we allocate our own. |
// TODO(henrika): not sure that it actually saves resource to make this call. |
UInt32 flag = 0; |
- LOG_AND_RETURN_IF_ERROR( |
- AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, |
- kAudioUnitScope_Output, input_bus, &flag, |
- sizeof(flag)), |
- "Failed to disable buffer allocation on the input element"); |
+ result = AudioUnitSetProperty( |
+ vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, |
+ kAudioUnitScope_Output, input_bus, &flag, sizeof(flag)); |
+ if (result != noErr) { |
+ DisposeAudioUnit(); |
+ LOG(LS_ERROR) << "Failed to disable buffer allocation on the input bus: " |
+ << result; |
+ } |
// Specify the callback to be called by the I/O thread to us when input audio |
// is available. The recorded samples can then be obtained by calling the |
@@ -742,16 +847,28 @@ bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { |
AURenderCallbackStruct input_callback; |
input_callback.inputProc = RecordedDataIsAvailable; |
input_callback.inputProcRefCon = this; |
- LOG_AND_RETURN_IF_ERROR( |
- AudioUnitSetProperty(vpio_unit_, |
- kAudioOutputUnitProperty_SetInputCallback, |
- kAudioUnitScope_Global, input_bus, &input_callback, |
- sizeof(input_callback)), |
- "Failed to specify the input callback on the input element"); |
+ result = AudioUnitSetProperty(vpio_unit_, |
+ kAudioOutputUnitProperty_SetInputCallback, |
+ kAudioUnitScope_Global, input_bus, |
+ &input_callback, sizeof(input_callback)); |
+ if (result != noErr) { |
+ DisposeAudioUnit(); |
+ LOG(LS_ERROR) << "Failed to specify the input callback on the input bus: " |
+ << result; |
+ } |
// Initialize the Voice-Processing I/O unit instance. |
- LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), |
- "Failed to initialize the Voice-Processing I/O unit"); |
+ result = AudioUnitInitialize(vpio_unit_); |
+ if (result != noErr) { |
+ result = AudioUnitUninitialize(vpio_unit_); |
+ if (result != noErr) { |
+ LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; |
+ } |
+ DisposeAudioUnit(); |
+ LOG(LS_ERROR) << "Failed to initialize the Voice-Processing I/O unit: " |
+ << result; |
+ return false; |
+ } |
return true; |
} |
@@ -790,9 +907,10 @@ bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) { |
bool AudioDeviceIOS::InitPlayOrRecord() { |
LOGI() << "InitPlayOrRecord"; |
- AVAudioSession* session = [AVAudioSession sharedInstance]; |
- // Activate the audio session and ask for a set of preferred audio parameters. |
- ActivateAudioSession(session, true); |
+ // Activate our own audio session if not already activated. |
pbos-webrtc
2015/11/18 18:51:48
-our own, it makes it sound like we own it.
henrika_webrtc
2015/11/18 19:48:21
Will fix.
|
+ if (!ActivateAudioSession()) { |
+ return false; |
+ } |
// Start observing audio session interruptions and route changes. |
RegisterNotificationObservers(); |
@@ -801,17 +919,15 @@ bool AudioDeviceIOS::InitPlayOrRecord() { |
SetupAudioBuffersForActiveAudioSession(); |
// Create, setup and initialize a new Voice-Processing I/O unit. |
- if (!SetupAndInitializeVoiceProcessingAudioUnit()) { |
- return false; |
- } |
+ // TODO(henrika): remove CHECK when we are sure that we no longer see |
pbos-webrtc
2015/11/18 18:51:48
Are you sure? Seems like a good CHECK to keep. May
tkchin_webrtc
2015/11/18 19:25:29
Any of the iOS audio APIs can fail. Crashing due t
henrika_webrtc
2015/11/18 19:48:21
Will try to avoid CHECK. I know that we have discu
pbos-webrtc
2015/11/20 12:29:50
Maybe a DCHECK?
henrika_webrtc
2015/11/20 12:53:48
Would like to avoid DCHECK as well since the curre
|
+ // issues with audio unit initialization. |
+ // See https://bugs.chromium.org/p/webrtc/issues/detail?id=5166 for details. |
+ RTC_CHECK(SetupAndInitializeVoiceProcessingAudioUnit()); |
return true; |
} |
-bool AudioDeviceIOS::ShutdownPlayOrRecord() { |
+void AudioDeviceIOS::ShutdownPlayOrRecord() { |
LOGI() << "ShutdownPlayOrRecord"; |
- // Remove audio session notification observers. |
- UnregisterNotificationObservers(); |
- |
// Close and delete the voice-processing I/O unit. |
OSStatus result = -1; |
if (nullptr != vpio_unit_) { |
pbos-webrtc
2015/11/18 18:51:48
Did you wanna abort early if this wasn't the case?
henrika_webrtc
2015/11/18 19:48:21
No, if ActivateAudioSession has failed, we will never get here.
|
@@ -823,18 +939,25 @@ bool AudioDeviceIOS::ShutdownPlayOrRecord() { |
if (result != noErr) { |
LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; |
} |
- result = AudioComponentInstanceDispose(vpio_unit_); |
- if (result != noErr) { |
- LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; |
- } |
- vpio_unit_ = nullptr; |
+ DisposeAudioUnit(); |
} |
+ // Remove audio session notification observers. |
+ UnregisterNotificationObservers(); |
+ |
// All I/O should be stopped or paused prior to deactivating the audio |
// session, hence we deactivate as last action. |
- AVAudioSession* session = [AVAudioSession sharedInstance]; |
- ActivateAudioSession(session, false); |
- return true; |
+ DeactivateAudioSession(); |
pbos-webrtc
2015/11/20 12:29:50
But this can be called even if vpio_unit_ == nullp
henrika_webrtc
2015/11/20 12:53:48
Yep. That is perfectly OK and the preferred way.
|
+} |
+ |
+void AudioDeviceIOS::DisposeAudioUnit() { |
+ if (nullptr == vpio_unit_) |
+ return; |
+ OSStatus result = AudioComponentInstanceDispose(vpio_unit_); |
+ if (result != noErr) { |
+ LOG(LS_ERROR) << "AudioComponentInstanceDispose failed:" << result; |
+ } |
+ vpio_unit_ = nullptr; |
} |
OSStatus AudioDeviceIOS::RecordedDataIsAvailable( |