OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #if !defined(__has_feature) || !__has_feature(objc_arc) | 11 #if !defined(__has_feature) || !__has_feature(objc_arc) |
12 #error "This file requires ARC support." | 12 #error "This file requires ARC support." |
13 #endif | 13 #endif |
14 | 14 |
15 #import <AVFoundation/AVFoundation.h> | 15 #import <AVFoundation/AVFoundation.h> |
16 #import <Foundation/Foundation.h> | 16 #import <Foundation/Foundation.h> |
17 | 17 |
18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" | 18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" |
19 | 19 |
20 #include "webrtc/base/atomicops.h" | 20 #include "webrtc/base/atomicops.h" |
21 #include "webrtc/base/checks.h" | 21 #include "webrtc/base/checks.h" |
22 #include "webrtc/base/logging.h" | 22 #include "webrtc/base/logging.h" |
23 #include "webrtc/modules/audio_device/fine_audio_buffer.h" | 23 #include "webrtc/modules/audio_device/fine_audio_buffer.h" |
24 #include "webrtc/modules/utility/include/helpers_ios.h" | 24 #include "webrtc/modules/utility/include/helpers_ios.h" |
25 | 25 |
26 namespace webrtc { | 26 namespace webrtc { |
27 | 27 |
28 // Initialization of static members. See audio_device_ios.h for details. | |
pbos-webrtc
2015/11/18 12:54:22
Feel free to remove this comment (or put why it's
henrika_webrtc
2015/11/18 16:05:32
Removed.
| |
29 int AudioDeviceIOS::audio_session_activation_count_ = 0; | |
30 rtc::GlobalLockPod AudioDeviceIOS::lock_; | |
31 | |
28 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" | 32 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" |
29 | 33 |
30 #define LOG_AND_RETURN_IF_ERROR(error, message) \ | 34 #define LOG_AND_RETURN_IF_ERROR(error, message) \ |
31 do { \ | 35 do { \ |
32 OSStatus err = error; \ | 36 OSStatus err = error; \ |
33 if (err) { \ | 37 if (err) { \ |
34 LOG(LS_ERROR) << message << ": " << err; \ | 38 LOG(LS_ERROR) << message << ": " << err; \ |
35 return false; \ | 39 return false; \ |
36 } \ | 40 } \ |
37 } while (0) | 41 } while (0) |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
 73 // TODO(henrika): these values are not used in combination with built-in AEC. | 77 // TODO(henrika): these values are not used in combination with built-in AEC. |
74 // Can most likely be removed. | 78 // Can most likely be removed. |
75 const UInt16 kFixedPlayoutDelayEstimate = 30; | 79 const UInt16 kFixedPlayoutDelayEstimate = 30; |
76 const UInt16 kFixedRecordDelayEstimate = 30; | 80 const UInt16 kFixedRecordDelayEstimate = 30; |
77 | 81 |
78 using ios::CheckAndLogError; | 82 using ios::CheckAndLogError; |
79 | 83 |
80 // Activates an audio session suitable for full duplex VoIP sessions when | 84 // Activates an audio session suitable for full duplex VoIP sessions when |
81 // |activate| is true. Also sets the preferred sample rate and IO buffer | 85 // |activate| is true. Also sets the preferred sample rate and IO buffer |
82 // duration. Deactivates an active audio session if |activate| is set to false. | 86 // duration. Deactivates an active audio session if |activate| is set to false. |
83 static void ActivateAudioSession(AVAudioSession* session, bool activate) { | 87 static bool ActivateAudioSession(AVAudioSession* session, bool activate) { |
pbos-webrtc
2015/11/18 12:54:22
EXCLUSIVE_LOCKS_REQUIRED(lock_) (before {)
henrika_webrtc
2015/11/18 16:05:32
Can't do that actually since this is not a member
| |
84 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; | 88 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; |
85 @autoreleasepool { | 89 @autoreleasepool { |
86 NSError* error = nil; | 90 NSError* error = nil; |
87 BOOL success = NO; | 91 BOOL success = NO; |
88 | 92 |
89 if (!activate) { | 93 if (!activate) { |
90 // Deactivate the audio session using an extra option and then return. | 94 // Deactivate the audio session using an extra option and then return. |
91 // AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation is used to | 95 // AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation is used to |
92 // ensure that other audio sessions that were interrupted by our session | 96 // ensure that other audio sessions that were interrupted by our session |
93 // can return to their active state. It is recommended for VoIP apps to | 97 // can return to their active state. It is recommended for VoIP apps to |
94 // use this option. | 98 // use this option. |
95 success = [session | 99 success = [session |
96 setActive:NO | 100 setActive:NO |
97 withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation | 101 withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation |
98 error:&error]; | 102 error:&error]; |
99 RTC_DCHECK(CheckAndLogError(success, error)); | 103 return CheckAndLogError(success, error); |
100 return; | |
101 } | 104 } |
102 | 105 |
 103 // Go ahead and activate our own audio session since |activate| is true. | 106 // Go ahead and activate our own audio session since |activate| is true. |
104 // Use a category which supports simultaneous recording and playback. | 107 // Use a category which supports simultaneous recording and playback. |
105 // By default, using this category implies that our app’s audio is | 108 // By default, using this category implies that our app’s audio is |
106 // nonmixable, hence activating the session will interrupt any other | 109 // nonmixable, hence activating the session will interrupt any other |
107 // audio sessions which are also nonmixable. | 110 // audio sessions which are also nonmixable. |
108 if (session.category != AVAudioSessionCategoryPlayAndRecord) { | 111 if (session.category != AVAudioSessionCategoryPlayAndRecord) { |
109 error = nil; | 112 error = nil; |
110 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord | 113 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord |
(...skipping 11 matching lines...) Expand all Loading... | |
122 | 125 |
123 // Set the session's sample rate or the hardware sample rate. | 126 // Set the session's sample rate or the hardware sample rate. |
124 // It is essential that we use the same sample rate as stream format | 127 // It is essential that we use the same sample rate as stream format |
125 // to ensure that the I/O unit does not have to do sample rate conversion. | 128 // to ensure that the I/O unit does not have to do sample rate conversion. |
126 error = nil; | 129 error = nil; |
127 success = | 130 success = |
128 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; | 131 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; |
129 RTC_DCHECK(CheckAndLogError(success, error)); | 132 RTC_DCHECK(CheckAndLogError(success, error)); |
130 | 133 |
131 // Set the preferred audio I/O buffer duration, in seconds. | 134 // Set the preferred audio I/O buffer duration, in seconds. |
132 // TODO(henrika): add more comments here. | |
133 error = nil; | 135 error = nil; |
134 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration | 136 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration |
135 error:&error]; | 137 error:&error]; |
136 RTC_DCHECK(CheckAndLogError(success, error)); | 138 RTC_DCHECK(CheckAndLogError(success, error)); |
137 | 139 |
138 // Activate the audio session. Activation can fail if another active audio | 140 // Activate the audio session. Activation can fail if another active audio |
139 // session (e.g. phone call) has higher priority than ours. | 141 // session (e.g. phone call) has higher priority than ours. |
140 error = nil; | 142 error = nil; |
141 success = [session setActive:YES error:&error]; | 143 success = [session setActive:YES error:&error]; |
142 RTC_DCHECK(CheckAndLogError(success, error)); | 144 if (!CheckAndLogError(success, error)) { |
143 RTC_CHECK(session.isInputAvailable) << "No input path is available!"; | 145 return false; |
146 } | |
144 | 147 |
145 // Ensure that category and mode are actually activated. | 148 // Ensure that the device currently supports audio input. |
146 RTC_DCHECK( | 149 if (!session.isInputAvailable) { |
147 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); | 150 LOG(LS_ERROR) << "No audio input path is available!"; |
148 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); | 151 return false; |
152 } | |
153 | |
154 // Ensure that the required category and mode are actually activated. | |
155 if (![session.category | |
156 isEqualToString:AVAudioSessionCategoryPlayAndRecord]) { | |
157 LOG(LS_ERROR) | |
158 << "Failed to set category to AVAudioSessionCategoryPlayAndRecord"; | |
159 return false; | |
160 } | |
161 if (![session.mode isEqualToString:AVAudioSessionModeVoiceChat]) { | |
162 LOG(LS_ERROR) << "Failed to set mode to AVAudioSessionModeVoiceChat"; | |
163 return false; | |
164 } | |
149 | 165 |
150 // Try to set the preferred number of hardware audio channels. These calls | 166 // Try to set the preferred number of hardware audio channels. These calls |
151 // must be done after setting the audio session’s category and mode and | 167 // must be done after setting the audio session’s category and mode and |
152 // activating the session. | 168 // activating the session. |
153 // We try to use mono in both directions to save resources and format | 169 // We try to use mono in both directions to save resources and format |
 154 // conversions in the audio unit. Some devices only support stereo; | 170 // conversions in the audio unit. Some devices only support stereo; |
155 // e.g. wired headset on iPhone 6. | 171 // e.g. wired headset on iPhone 6. |
156 // TODO(henrika): add support for stereo if needed. | 172 // TODO(henrika): add support for stereo if needed. |
157 error = nil; | 173 error = nil; |
158 success = | 174 success = |
159 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels | 175 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels |
160 error:&error]; | 176 error:&error]; |
161 RTC_DCHECK(CheckAndLogError(success, error)); | 177 RTC_DCHECK(CheckAndLogError(success, error)); |
162 error = nil; | 178 error = nil; |
163 success = | 179 success = |
164 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels | 180 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels |
165 error:&error]; | 181 error:&error]; |
166 RTC_DCHECK(CheckAndLogError(success, error)); | 182 RTC_DCHECK(CheckAndLogError(success, error)); |
183 return true; | |
167 } | 184 } |
168 } | 185 } |
169 | 186 |
170 #if !defined(NDEBUG) | 187 #if !defined(NDEBUG) |
171 // Helper method for printing out an AudioStreamBasicDescription structure. | 188 // Helper method for printing out an AudioStreamBasicDescription structure. |
172 static void LogABSD(AudioStreamBasicDescription absd) { | 189 static void LogABSD(AudioStreamBasicDescription absd) { |
173 char formatIDString[5]; | 190 char formatIDString[5]; |
174 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); | 191 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); |
175 bcopy(&formatID, formatIDString, 4); | 192 bcopy(&formatID, formatIDString, 4); |
176 formatIDString[4] = '\0'; | 193 formatIDString[4] = '\0'; |
(...skipping 28 matching lines...) Expand all Loading... | |
205 playing_(0), | 222 playing_(0), |
206 initialized_(false), | 223 initialized_(false), |
207 rec_is_initialized_(false), | 224 rec_is_initialized_(false), |
208 play_is_initialized_(false), | 225 play_is_initialized_(false), |
209 audio_interruption_observer_(nullptr), | 226 audio_interruption_observer_(nullptr), |
210 route_change_observer_(nullptr) { | 227 route_change_observer_(nullptr) { |
211 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); | 228 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); |
212 } | 229 } |
213 | 230 |
214 AudioDeviceIOS::~AudioDeviceIOS() { | 231 AudioDeviceIOS::~AudioDeviceIOS() { |
215 LOGI() << "~dtor"; | 232 LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); |
216 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 233 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
217 Terminate(); | 234 Terminate(); |
218 } | 235 } |
219 | 236 |
220 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { | 237 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { |
221 LOGI() << "AttachAudioBuffer"; | 238 LOGI() << "AttachAudioBuffer"; |
222 RTC_DCHECK(audioBuffer); | 239 RTC_DCHECK(audioBuffer); |
223 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 240 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
224 audio_device_buffer_ = audioBuffer; | 241 audio_device_buffer_ = audioBuffer; |
225 } | 242 } |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
260 } | 277 } |
261 | 278 |
262 int32_t AudioDeviceIOS::InitPlayout() { | 279 int32_t AudioDeviceIOS::InitPlayout() { |
263 LOGI() << "InitPlayout"; | 280 LOGI() << "InitPlayout"; |
264 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 281 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
265 RTC_DCHECK(initialized_); | 282 RTC_DCHECK(initialized_); |
266 RTC_DCHECK(!play_is_initialized_); | 283 RTC_DCHECK(!play_is_initialized_); |
267 RTC_DCHECK(!playing_); | 284 RTC_DCHECK(!playing_); |
268 if (!rec_is_initialized_) { | 285 if (!rec_is_initialized_) { |
269 if (!InitPlayOrRecord()) { | 286 if (!InitPlayOrRecord()) { |
270 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; | 287 LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!"; |
271 return -1; | 288 return -1; |
272 } | 289 } |
273 } | 290 } |
274 play_is_initialized_ = true; | 291 play_is_initialized_ = true; |
275 return 0; | 292 return 0; |
276 } | 293 } |
277 | 294 |
278 int32_t AudioDeviceIOS::InitRecording() { | 295 int32_t AudioDeviceIOS::InitRecording() { |
279 LOGI() << "InitRecording"; | 296 LOGI() << "InitRecording"; |
280 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 297 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
281 RTC_DCHECK(initialized_); | 298 RTC_DCHECK(initialized_); |
282 RTC_DCHECK(!rec_is_initialized_); | 299 RTC_DCHECK(!rec_is_initialized_); |
283 RTC_DCHECK(!recording_); | 300 RTC_DCHECK(!recording_); |
284 if (!play_is_initialized_) { | 301 if (!play_is_initialized_) { |
285 if (!InitPlayOrRecord()) { | 302 if (!InitPlayOrRecord()) { |
286 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; | 303 LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!"; |
287 return -1; | 304 return -1; |
288 } | 305 } |
289 } | 306 } |
290 rec_is_initialized_ = true; | 307 rec_is_initialized_ = true; |
291 return 0; | 308 return 0; |
292 } | 309 } |
293 | 310 |
294 int32_t AudioDeviceIOS::StartPlayout() { | 311 int32_t AudioDeviceIOS::StartPlayout() { |
295 LOGI() << "StartPlayout"; | 312 LOGI() << "StartPlayout"; |
296 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 313 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
(...skipping 335 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
632 record_audio_buffer_.reset(new SInt8[data_byte_size]); | 649 record_audio_buffer_.reset(new SInt8[data_byte_size]); |
633 audio_record_buffer_list_.mNumberBuffers = 1; | 650 audio_record_buffer_list_.mNumberBuffers = 1; |
634 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; | 651 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; |
635 audio_buffer->mNumberChannels = record_parameters_.channels(); | 652 audio_buffer->mNumberChannels = record_parameters_.channels(); |
636 audio_buffer->mDataByteSize = data_byte_size; | 653 audio_buffer->mDataByteSize = data_byte_size; |
637 audio_buffer->mData = record_audio_buffer_.get(); | 654 audio_buffer->mData = record_audio_buffer_.get(); |
638 } | 655 } |
639 | 656 |
640 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { | 657 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { |
641 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; | 658 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; |
642 RTC_DCHECK(!vpio_unit_); | 659 RTC_DCHECK(!vpio_unit_) << "VoiceProcessingIO audio unit already exists"; |
643 // Create an audio component description to identify the Voice-Processing | 660 // Create an audio component description to identify the Voice-Processing |
644 // I/O audio unit. | 661 // I/O audio unit. |
645 AudioComponentDescription vpio_unit_description; | 662 AudioComponentDescription vpio_unit_description; |
646 vpio_unit_description.componentType = kAudioUnitType_Output; | 663 vpio_unit_description.componentType = kAudioUnitType_Output; |
647 vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO; | 664 vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO; |
648 vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple; | 665 vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple; |
649 vpio_unit_description.componentFlags = 0; | 666 vpio_unit_description.componentFlags = 0; |
650 vpio_unit_description.componentFlagsMask = 0; | 667 vpio_unit_description.componentFlagsMask = 0; |
651 // Obtain an audio unit instance given the description. | 668 // Obtain an audio unit instance given the description. |
652 AudioComponent found_vpio_unit_ref = | 669 AudioComponent found_vpio_unit_ref = |
653 AudioComponentFindNext(nullptr, &vpio_unit_description); | 670 AudioComponentFindNext(nullptr, &vpio_unit_description); |
654 | 671 |
655 // Create a Voice-Processing IO audio unit. | 672 // Create a Voice-Processing IO audio unit. |
656 LOG_AND_RETURN_IF_ERROR( | 673 OSStatus result = noErr; |
657 AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_), | 674 result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_); |
658 "Failed to create a VoiceProcessingIO audio unit"); | 675 if (result != noErr) { |
676 vpio_unit_ = nullptr; | |
677 LOG(LS_ERROR) << "AudioComponentInstanceNew failed: " << result; | |
678 return false; | |
679 } | |
659 | 680 |
660 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable | 681 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable |
661 // input on the input scope of the input element. | 682 // input on the input scope of the input element. |
662 AudioUnitElement input_bus = 1; | 683 AudioUnitElement input_bus = 1; |
663 UInt32 enable_input = 1; | 684 UInt32 enable_input = 1; |
664 LOG_AND_RETURN_IF_ERROR( | 685 result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, |
665 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, | 686 kAudioUnitScope_Input, input_bus, &enable_input, |
666 kAudioUnitScope_Input, input_bus, &enable_input, | 687 sizeof(enable_input)); |
667 sizeof(enable_input)), | 688 if (result != noErr) { |
668 "Failed to enable input on input scope of input element"); | 689 DisposeAudioUnit(); |
690 LOG(LS_ERROR) << "Failed to enable input on input scope of input element: " | |
691 << result; | |
692 return false; | |
693 } | |
669 | 694 |
670 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable | 695 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable |
671 // output on the output scope of the output element. | 696 // output on the output scope of the output element. |
672 AudioUnitElement output_bus = 0; | 697 AudioUnitElement output_bus = 0; |
673 UInt32 enable_output = 1; | 698 UInt32 enable_output = 1; |
674 LOG_AND_RETURN_IF_ERROR( | 699 result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, |
675 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, | 700 kAudioUnitScope_Output, output_bus, |
676 kAudioUnitScope_Output, output_bus, &enable_output, | 701 &enable_output, sizeof(enable_output)); |
677 sizeof(enable_output)), | 702 if (result != noErr) { |
678 "Failed to enable output on output scope of output element"); | 703 DisposeAudioUnit(); |
704 LOG(LS_ERROR) | |
705 << "Failed to enable output on output scope of output element: " | |
706 << result; | |
707 return false; | |
708 } | |
679 | 709 |
680 // Set the application formats for input and output: | 710 // Set the application formats for input and output: |
681 // - use same format in both directions | 711 // - use same format in both directions |
682 // - avoid resampling in the I/O unit by using the hardware sample rate | 712 // - avoid resampling in the I/O unit by using the hardware sample rate |
683 // - linear PCM => noncompressed audio data format with one frame per packet | 713 // - linear PCM => noncompressed audio data format with one frame per packet |
684 // - no need to specify interleaving since only mono is supported | 714 // - no need to specify interleaving since only mono is supported |
685 AudioStreamBasicDescription application_format = {0}; | 715 AudioStreamBasicDescription application_format = {0}; |
686 UInt32 size = sizeof(application_format); | 716 UInt32 size = sizeof(application_format); |
687 RTC_DCHECK_EQ(playout_parameters_.sample_rate(), | 717 RTC_DCHECK_EQ(playout_parameters_.sample_rate(), |
688 record_parameters_.sample_rate()); | 718 record_parameters_.sample_rate()); |
689 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); | 719 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); |
690 application_format.mSampleRate = playout_parameters_.sample_rate(); | 720 application_format.mSampleRate = playout_parameters_.sample_rate(); |
691 application_format.mFormatID = kAudioFormatLinearPCM; | 721 application_format.mFormatID = kAudioFormatLinearPCM; |
692 application_format.mFormatFlags = | 722 application_format.mFormatFlags = |
693 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; | 723 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; |
694 application_format.mBytesPerPacket = kBytesPerSample; | 724 application_format.mBytesPerPacket = kBytesPerSample; |
695 application_format.mFramesPerPacket = 1; // uncompressed | 725 application_format.mFramesPerPacket = 1; // uncompressed |
696 application_format.mBytesPerFrame = kBytesPerSample; | 726 application_format.mBytesPerFrame = kBytesPerSample; |
697 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; | 727 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; |
698 application_format.mBitsPerChannel = 8 * kBytesPerSample; | 728 application_format.mBitsPerChannel = 8 * kBytesPerSample; |
699 // Store the new format. | 729 // Store the new format. |
700 application_format_ = application_format; | 730 application_format_ = application_format; |
701 #if !defined(NDEBUG) | 731 #if !defined(NDEBUG) |
702 LogABSD(application_format_); | 732 LogABSD(application_format_); |
703 #endif | 733 #endif |
704 | 734 |
705 // Set the application format on the output scope of the input element/bus. | 735 // Set the application format on the output scope of the input element/bus. |
706 LOG_AND_RETURN_IF_ERROR( | 736 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
707 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | 737 kAudioUnitScope_Output, input_bus, |
708 kAudioUnitScope_Output, input_bus, | 738 &application_format, size); |
709 &application_format, size), | 739 if (result != noErr) { |
710 "Failed to set application format on output scope of input element"); | 740 DisposeAudioUnit(); |
741 LOG(LS_ERROR) | |
742 << "Failed to set application format on output scope of input bus: " | |
743 << result; | |
744 return false; | |
745 } | |
711 | 746 |
712 // Set the application format on the input scope of the output element/bus. | 747 // Set the application format on the input scope of the output element/bus. |
713 LOG_AND_RETURN_IF_ERROR( | 748 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
714 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | 749 kAudioUnitScope_Input, output_bus, |
715 kAudioUnitScope_Input, output_bus, | 750 &application_format, size); |
716 &application_format, size), | 751 if (result != noErr) { |
717 "Failed to set application format on input scope of output element"); | 752 DisposeAudioUnit(); |
753 LOG(LS_ERROR) | |
754 << "Failed to set application format on input scope of output bus: " | |
755 << result; | |
756 return false; | |
757 } | |
718 | 758 |
719 // Specify the callback function that provides audio samples to the audio | 759 // Specify the callback function that provides audio samples to the audio |
720 // unit. | 760 // unit. |
721 AURenderCallbackStruct render_callback; | 761 AURenderCallbackStruct render_callback; |
722 render_callback.inputProc = GetPlayoutData; | 762 render_callback.inputProc = GetPlayoutData; |
723 render_callback.inputProcRefCon = this; | 763 render_callback.inputProcRefCon = this; |
724 LOG_AND_RETURN_IF_ERROR( | 764 result = AudioUnitSetProperty( |
725 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_SetRenderCallback, | 765 vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, |
726 kAudioUnitScope_Input, output_bus, &render_callback, | 766 output_bus, &render_callback, sizeof(render_callback)); |
727 sizeof(render_callback)), | 767 if (result != noErr) { |
728 "Failed to specify the render callback on the output element"); | 768 DisposeAudioUnit(); |
769 LOG(LS_ERROR) << "Failed to specify the render callback on the output bus: " | |
770 << result; | |
771 return false; | |
772 } | |
729 | 773 |
730 // Disable AU buffer allocation for the recorder, we allocate our own. | 774 // Disable AU buffer allocation for the recorder, we allocate our own. |
731 // TODO(henrika): not sure that it actually saves resource to make this call. | 775 // TODO(henrika): not sure that it actually saves resource to make this call. |
732 UInt32 flag = 0; | 776 UInt32 flag = 0; |
733 LOG_AND_RETURN_IF_ERROR( | 777 result = AudioUnitSetProperty( |
734 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, | 778 vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, |
735 kAudioUnitScope_Output, input_bus, &flag, | 779 kAudioUnitScope_Output, input_bus, &flag, sizeof(flag)); |
736 sizeof(flag)), | 780 if (result != noErr) { |
737 "Failed to disable buffer allocation on the input element"); | 781 DisposeAudioUnit(); |
782 LOG(LS_ERROR) << "Failed to disable buffer allocation on the input bus: " | |
783 << result; | |
784 } | |
738 | 785 |
739 // Specify the callback to be called by the I/O thread to us when input audio | 786 // Specify the callback to be called by the I/O thread to us when input audio |
740 // is available. The recorded samples can then be obtained by calling the | 787 // is available. The recorded samples can then be obtained by calling the |
741 // AudioUnitRender() method. | 788 // AudioUnitRender() method. |
742 AURenderCallbackStruct input_callback; | 789 AURenderCallbackStruct input_callback; |
743 input_callback.inputProc = RecordedDataIsAvailable; | 790 input_callback.inputProc = RecordedDataIsAvailable; |
744 input_callback.inputProcRefCon = this; | 791 input_callback.inputProcRefCon = this; |
745 LOG_AND_RETURN_IF_ERROR( | 792 result = AudioUnitSetProperty(vpio_unit_, |
746 AudioUnitSetProperty(vpio_unit_, | 793 kAudioOutputUnitProperty_SetInputCallback, |
747 kAudioOutputUnitProperty_SetInputCallback, | 794 kAudioUnitScope_Global, input_bus, |
748 kAudioUnitScope_Global, input_bus, &input_callback, | 795 &input_callback, sizeof(input_callback)); |
749 sizeof(input_callback)), | 796 if (result != noErr) { |
750 "Failed to specify the input callback on the input element"); | 797 DisposeAudioUnit(); |
798 LOG(LS_ERROR) << "Failed to specify the input callback on the input bus: " | |
799 << result; | |
800 } | |
751 | 801 |
752 // Initialize the Voice-Processing I/O unit instance. | 802 // Initialize the Voice-Processing I/O unit instance. |
753 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), | 803 result = AudioUnitInitialize(vpio_unit_); |
754 "Failed to initialize the Voice-Processing I/O unit"); | 804 if (result != noErr) { |
805 DisposeAudioUnit(); | |
806 LOG(LS_ERROR) << "Failed to initialize the Voice-Processing I/O unit: " | |
807 << result; | |
808 return false; | |
809 } | |
755 return true; | 810 return true; |
756 } | 811 } |
757 | 812 |
758 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) { | 813 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) { |
759 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")"; | 814 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")"; |
760 // Stop the active audio unit. | 815 // Stop the active audio unit. |
761 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_), | 816 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_), |
762 "Failed to stop the the Voice-Processing I/O unit"); | 817 "Failed to stop the the Voice-Processing I/O unit"); |
763 | 818 |
764 // The stream format is about to be changed and it requires that we first | 819 // The stream format is about to be changed and it requires that we first |
(...skipping 18 matching lines...) Expand all Loading... | |
783 "Failed to initialize the Voice-Processing I/O unit"); | 838 "Failed to initialize the Voice-Processing I/O unit"); |
784 | 839 |
785 // Start rendering audio using the new format. | 840 // Start rendering audio using the new format. |
786 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), | 841 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), |
787 "Failed to start the Voice-Processing I/O unit"); | 842 "Failed to start the Voice-Processing I/O unit"); |
788 return true; | 843 return true; |
789 } | 844 } |
790 | 845 |
791 bool AudioDeviceIOS::InitPlayOrRecord() { | 846 bool AudioDeviceIOS::InitPlayOrRecord() { |
792 LOGI() << "InitPlayOrRecord"; | 847 LOGI() << "InitPlayOrRecord"; |
793 AVAudioSession* session = [AVAudioSession sharedInstance]; | 848 { |
794 // Activate the audio session and ask for a set of preferred audio parameters. | 849 // An application can create more than one ADM and start audio streaming |
795 ActivateAudioSession(session, true); | 850 // for all of them. It is essential that we only activate the app's audio |
851 // session once (for the first one) and deactivate it once (for the last). | |
852 rtc::GlobalLockScope ls(&lock_); | |
853 if (audio_session_activation_count_ == 0) { | |
854 // The system provides an audio session object upon launch of an | |
855 // application. However, we must initialize the session in order to | |
856 // handle interruptions. Implicit initialization occurs when obtaining | |
857 // a reference to the AVAudioSession object. | |
858 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
859 // Try to activate the audio session and ask for a set of preferred audio | |
860 // parameters. | |
861 if (!ActivateAudioSession(session, true)) { | |
862 return false; | |
pbos-webrtc
2015/11/18 12:54:22
LOG with LS_ERROR here?
henrika_webrtc
2015/11/18 16:05:32
Done.
| |
863 } | |
864 ++audio_session_activation_count_; | |
865 LOG(LS_INFO) << "Our audio session is now activated"; | |
866 } | |
867 } | |
796 | 868 |
797 // Start observing audio session interruptions and route changes. | 869 // Start observing audio session interruptions and route changes. |
798 RegisterNotificationObservers(); | 870 RegisterNotificationObservers(); |
799 | 871 |
800 // Ensure that we got what what we asked for in our active audio session. | 872 // Ensure that we got what what we asked for in our active audio session. |
801 SetupAudioBuffersForActiveAudioSession(); | 873 SetupAudioBuffersForActiveAudioSession(); |
802 | 874 |
803 // Create, setup and initialize a new Voice-Processing I/O unit. | 875 // Create, setup and initialize a new Voice-Processing I/O unit. |
804 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { | 876 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { |
877 LOG(LS_ERROR) << "SetupAndInitializeVoiceProcessingAudioUnit failed"; | |
805 return false; | 878 return false; |
806 } | 879 } |
807 return true; | 880 return true; |
808 } | 881 } |
809 | 882 |
810 bool AudioDeviceIOS::ShutdownPlayOrRecord() { | 883 bool AudioDeviceIOS::ShutdownPlayOrRecord() { |
pbos-webrtc
2015/11/18 12:54:22
Should this one be bool? There's no sensible way t
henrika_webrtc
2015/11/18 16:05:32
No good way to handle false result here. Tried it
| |
811 LOGI() << "ShutdownPlayOrRecord"; | 884 LOGI() << "ShutdownPlayOrRecord"; |
812 // Remove audio session notification observers. | 885 // Remove audio session notification observers. |
813 UnregisterNotificationObservers(); | 886 UnregisterNotificationObservers(); |
814 | 887 |
815 // Close and delete the voice-processing I/O unit. | 888 // Close and delete the voice-processing I/O unit. |
816 OSStatus result = -1; | 889 OSStatus result = -1; |
817 if (nullptr != vpio_unit_) { | 890 if (nullptr != vpio_unit_) { |
818 result = AudioOutputUnitStop(vpio_unit_); | 891 result = AudioOutputUnitStop(vpio_unit_); |
819 if (result != noErr) { | 892 if (result != noErr) { |
820 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; | 893 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; |
821 } | 894 } |
822 result = AudioUnitUninitialize(vpio_unit_); | 895 result = AudioUnitUninitialize(vpio_unit_); |
823 if (result != noErr) { | 896 if (result != noErr) { |
824 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; | 897 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; |
825 } | 898 } |
826 result = AudioComponentInstanceDispose(vpio_unit_); | 899 DisposeAudioUnit(); |
827 if (result != noErr) { | |
828 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; | |
829 } | |
830 vpio_unit_ = nullptr; | |
831 } | 900 } |
832 | 901 |
833 // All I/O should be stopped or paused prior to deactivating the audio | 902 // All I/O should be stopped or paused prior to deactivating the audio |
834 // session, hence we deactivate as last action. | 903 // session, hence we deactivate as last action. However, if more than one |
835 AVAudioSession* session = [AVAudioSession sharedInstance]; | 904 // object is using the audio session, ensure that only the last object |
836 ActivateAudioSession(session, false); | 905 // deactivates. Apple recommends: "activate your audio session only as needed |
906 // and deactivate it when you are not using audio". | |
907 { | |
908 rtc::GlobalLockScope ls(&lock_); | |
909 if (audio_session_activation_count_ == 1) { | |
pbos-webrtc
2015/11/18 12:54:22
pref: --audio_session_activation_count_ == 0 (and
henrika_webrtc
2015/11/18 16:07:33
Solved in a different manner. Hope you are OK with
| |
910 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
911 if (!ActivateAudioSession(session, false)) { | |
pbos-webrtc
2015/11/18 12:54:22
I'm wondering whether this should be a CHECK, or a
henrika_webrtc
2015/11/18 16:05:32
We have strived to avoid crashing the app in these
| |
912 return false; | |
913 } | |
914 --audio_session_activation_count_; | |
915 LOG(LS_INFO) << "Our audio session is now deactivated"; | |
916 } | |
917 } | |
837 return true; | 918 return true; |
838 } | 919 } |
839 | 920 |
921 void AudioDeviceIOS::DisposeAudioUnit() { | |
922 if (nullptr == vpio_unit_) | |
923 return; | |
924 OSStatus result = AudioComponentInstanceDispose(vpio_unit_); | |
925 if (result != noErr) { | |
926 LOG(LS_ERROR) << "AudioComponentInstanceDispose failed:" << result; | |
927 } | |
928 vpio_unit_ = nullptr; | |
929 } | |
930 | |
840 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( | 931 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( |
841 void* in_ref_con, | 932 void* in_ref_con, |
842 AudioUnitRenderActionFlags* io_action_flags, | 933 AudioUnitRenderActionFlags* io_action_flags, |
843 const AudioTimeStamp* in_time_stamp, | 934 const AudioTimeStamp* in_time_stamp, |
844 UInt32 in_bus_number, | 935 UInt32 in_bus_number, |
845 UInt32 in_number_frames, | 936 UInt32 in_number_frames, |
846 AudioBufferList* io_data) { | 937 AudioBufferList* io_data) { |
847 RTC_DCHECK_EQ(1u, in_bus_number); | 938 RTC_DCHECK_EQ(1u, in_bus_number); |
848 RTC_DCHECK( | 939 RTC_DCHECK( |
849 !io_data); // no buffer should be allocated for input at this stage | 940 !io_data); // no buffer should be allocated for input at this stage |
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
926 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches | 1017 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches |
927 // the native I/O audio unit) to a preallocated intermediate buffer and | 1018 // the native I/O audio unit) to a preallocated intermediate buffer and |
928 // copy the result to the audio buffer in the |io_data| destination. | 1019 // copy the result to the audio buffer in the |io_data| destination. |
929 SInt8* source = playout_audio_buffer_.get(); | 1020 SInt8* source = playout_audio_buffer_.get(); |
930 fine_audio_buffer_->GetPlayoutData(source); | 1021 fine_audio_buffer_->GetPlayoutData(source); |
931 memcpy(destination, source, dataSizeInBytes); | 1022 memcpy(destination, source, dataSizeInBytes); |
932 return noErr; | 1023 return noErr; |
933 } | 1024 } |
934 | 1025 |
935 } // namespace webrtc | 1026 } // namespace webrtc |
OLD | NEW |