Chromium Code Reviews
Side by Side Diff: webrtc/modules/audio_device/ios/audio_device_ios.mm

Issue 1435293003: Improved error handling in iOS ADM to avoid race during init (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Fine tuned audio session deactivation scheme Created 5 years, 1 month ago
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #if !defined(__has_feature) || !__has_feature(objc_arc) 11 #if !defined(__has_feature) || !__has_feature(objc_arc)
12 #error "This file requires ARC support." 12 #error "This file requires ARC support."
13 #endif 13 #endif
14 14
15 #import <AVFoundation/AVFoundation.h> 15 #import <AVFoundation/AVFoundation.h>
16 #import <Foundation/Foundation.h> 16 #import <Foundation/Foundation.h>
17 17
18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" 18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h"
19 19
20 #include "webrtc/base/atomicops.h" 20 #include "webrtc/base/atomicops.h"
21 #include "webrtc/base/checks.h" 21 #include "webrtc/base/checks.h"
22 #include "webrtc/base/logging.h" 22 #include "webrtc/base/logging.h"
23 #include "webrtc/modules/audio_device/fine_audio_buffer.h" 23 #include "webrtc/modules/audio_device/fine_audio_buffer.h"
24 #include "webrtc/modules/utility/include/helpers_ios.h" 24 #include "webrtc/modules/utility/include/helpers_ios.h"
25 25
26 namespace webrtc { 26 namespace webrtc {
27 27
28 volatile int AudioDeviceIOS::object_count_ = 0;
29
28 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" 30 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
29 31
30 #define LOG_AND_RETURN_IF_ERROR(error, message) \ 32 #define LOG_AND_RETURN_IF_ERROR(error, message) \
31 do { \ 33 do { \
32 OSStatus err = error; \ 34 OSStatus err = error; \
33 if (err) { \ 35 if (err) { \
34 LOG(LS_ERROR) << message << ": " << err; \ 36 LOG(LS_ERROR) << message << ": " << err; \
35 return false; \ 37 return false; \
36 } \ 38 } \
37 } while (0) 39 } while (0)
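The do { ... } while (0) wrapper makes the macro behave like a single statement: it can sit inside an unbraced if/else and still requires a trailing semicolon. A minimal usage sketch (the StopUnit() helper is hypothetical; the call pattern mirrors how the macro is used later in this file):

  bool StopUnit(AudioUnit unit) {
    // Expands to: evaluate the call once, log and return false on failure.
    LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(unit),
                            "Failed to stop the I/O unit");
    return true;
  }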
(...skipping 35 matching lines...)
73 // TODO(henrika): these values are not used in combination with built-in AEC. 75 // TODO(henrika): these values are not used in combination with built-in AEC.
74 // Can most likely be removed. 76 // Can most likely be removed.
75 const UInt16 kFixedPlayoutDelayEstimate = 30; 77 const UInt16 kFixedPlayoutDelayEstimate = 30;
76 const UInt16 kFixedRecordDelayEstimate = 30; 78 const UInt16 kFixedRecordDelayEstimate = 30;
77 79
78 using ios::CheckAndLogError; 80 using ios::CheckAndLogError;
79 81
80 // Activates an audio session suitable for full duplex VoIP sessions when 82 // Activates an audio session suitable for full duplex VoIP sessions when
81 // |activate| is true. Also sets the preferred sample rate and IO buffer 83 // |activate| is true. Also sets the preferred sample rate and IO buffer
82 // duration. Deactivates an active audio session if |activate| is set to false. 84 // duration. Deactivates an active audio session if |activate| is set to false.
83 static void ActivateAudioSession(AVAudioSession* session, bool activate) { 85 static bool ActivateAudioSession(AVAudioSession* session, bool activate) {
tkchin_webrtc 2015/11/16 21:30:08 Would this be suitable for the new rtc::Optional t
henrika_webrtc 2015/11/17 14:46:11 Not sure if I understand. How would it improve the
84 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; 86 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
85 @autoreleasepool { 87 @autoreleasepool {
86 NSError* error = nil; 88 NSError* error = nil;
87 BOOL success = NO; 89 BOOL success = NO;
88 90
89 // Deactivate the audio session and return if |activate| is false. 91 // Deactivate the audio session and return if |activate| is false.
90 if (!activate) { 92 if (!activate) {
91 success = [session setActive:NO error:&error]; 93 success = [session setActive:NO error:&error];
92 RTC_DCHECK(CheckAndLogError(success, error)); 94 return CheckAndLogError(success, error);
93 return;
94 } 95 }
95 96
96 // Use a category which supports simultaneous recording and playback. 97 // Use a category which supports simultaneous recording and playback.
97 // By default, using this category implies that our app’s audio is 98 // By default, using this category implies that our app’s audio is
98 // nonmixable, hence activating the session will interrupt any other 99 // nonmixable, hence activating the session will interrupt any other
99 // audio sessions which are also nonmixable. 100 // audio sessions which are also nonmixable.
100 if (session.category != AVAudioSessionCategoryPlayAndRecord) { 101 if (session.category != AVAudioSessionCategoryPlayAndRecord) {
101 error = nil; 102 error = nil;
102 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord 103 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
103 withOptions:AVAudioSessionCategoryOptionAllowBluetooth 104 withOptions:AVAudioSessionCategoryOptionAllowBluetooth
(...skipping 10 matching lines...)
114 115
115 // Set the session's sample rate or the hardware sample rate. 116 // Set the session's sample rate or the hardware sample rate.
116 // It is essential that we use the same sample rate as stream format 117 // It is essential that we use the same sample rate as stream format
117 // to ensure that the I/O unit does not have to do sample rate conversion. 118 // to ensure that the I/O unit does not have to do sample rate conversion.
118 error = nil; 119 error = nil;
119 success = 120 success =
120 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; 121 [session setPreferredSampleRate:kPreferredSampleRate error:&error];
121 RTC_DCHECK(CheckAndLogError(success, error)); 122 RTC_DCHECK(CheckAndLogError(success, error));
122 123
123 // Set the preferred audio I/O buffer duration, in seconds. 124 // Set the preferred audio I/O buffer duration, in seconds.
124 // TODO(henrika): add more comments here.
125 error = nil; 125 error = nil;
126 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration 126 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration
127 error:&error]; 127 error:&error];
128 RTC_DCHECK(CheckAndLogError(success, error)); 128 RTC_DCHECK(CheckAndLogError(success, error));
129 129
130 // Activate the audio session. Activation can fail if another active audio 130 // Activate the audio session. Activation can fail if another active audio
131 // session (e.g. phone call) has higher priority than ours. 131 // session (e.g. phone call) has higher priority than ours.
132 error = nil; 132 error = nil;
133 success = [session setActive:YES error:&error]; 133 success = [session setActive:YES error:&error];
134 RTC_DCHECK(CheckAndLogError(success, error)); 134 if (!CheckAndLogError(success, error)) {
135 RTC_CHECK(session.isInputAvailable) << "No input path is available!"; 135 return false;
136 }
136 137
137 // Ensure that category and mode are actually activated. 138 // Ensure that the device currently supports audio input.
138 RTC_DCHECK( 139 if (!session.isInputAvailable) {
139 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); 140 LOG(LS_ERROR) << "No audio input path is available!";
140 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); 141 return false;
142 }
143
144 // Ensure that the required category and mode are actually activated.
145 if (![session.category
146 isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
147 LOG(LS_ERROR)
148 << "Failed to set category to AVAudioSessionCategoryPlayAndRecord";
149 return false;
150 }
151 if (![session.mode isEqualToString:AVAudioSessionModeVoiceChat]) {
152 LOG(LS_ERROR) << "Failed to set mode to AVAudioSessionModeVoiceChat";
153 return false;
154 }
141 155
142 // Try to set the preferred number of hardware audio channels. These calls 156 // Try to set the preferred number of hardware audio channels. These calls
143 // must be done after setting the audio session’s category and mode and 157 // must be done after setting the audio session’s category and mode and
144 // activating the session. 158 // activating the session.
145 // We try to use mono in both directions to save resources and format 159 // We try to use mono in both directions to save resources and format
146 // conversions in the audio unit. Some devices only support stereo; 160 // conversions in the audio unit. Some devices only support stereo;
147 // e.g. wired headset on iPhone 6. 161 // e.g. wired headset on iPhone 6.
148 // TODO(henrika): add support for stereo if needed. 162 // TODO(henrika): add support for stereo if needed.
149 error = nil; 163 error = nil;
150 success = 164 success =
151 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels 165 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels
152 error:&error]; 166 error:&error];
153 RTC_DCHECK(CheckAndLogError(success, error)); 167 RTC_DCHECK(CheckAndLogError(success, error));
154 error = nil; 168 error = nil;
155 success = 169 success =
156 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels 170 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels
157 error:&error]; 171 error:&error];
158 RTC_DCHECK(CheckAndLogError(success, error)); 172 RTC_DCHECK(CheckAndLogError(success, error));
173 return true;
159 } 174 }
160 } 175 }
161 176
162 #if !defined(NDEBUG) 177 #if !defined(NDEBUG)
163 // Helper method for printing out an AudioStreamBasicDescription structure. 178 // Helper method for printing out an AudioStreamBasicDescription structure.
164 static void LogABSD(AudioStreamBasicDescription absd) { 179 static void LogABSD(AudioStreamBasicDescription absd) {
165 char formatIDString[5]; 180 char formatIDString[5];
166 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); 181 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID);
167 bcopy(&formatID, formatIDString, 4); 182 bcopy(&formatID, formatIDString, 4);
168 formatIDString[4] = '\0'; 183 formatIDString[4] = '\0';
(...skipping 24 matching lines...)
193 AudioDeviceIOS::AudioDeviceIOS() 208 AudioDeviceIOS::AudioDeviceIOS()
194 : audio_device_buffer_(nullptr), 209 : audio_device_buffer_(nullptr),
195 vpio_unit_(nullptr), 210 vpio_unit_(nullptr),
196 recording_(0), 211 recording_(0),
197 playing_(0), 212 playing_(0),
198 initialized_(false), 213 initialized_(false),
199 rec_is_initialized_(false), 214 rec_is_initialized_(false),
200 play_is_initialized_(false), 215 play_is_initialized_(false),
201 audio_interruption_observer_(nullptr), 216 audio_interruption_observer_(nullptr),
202 route_change_observer_(nullptr) { 217 route_change_observer_(nullptr) {
203 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); 218 const int count = rtc::AtomicOps::Increment(&object_count_);
tkchin_webrtc 2015/11/16 21:30:08 Is AudioDeviceIOS always created from the same thr
henrika_webrtc 2015/11/17 14:46:11 If I make an AppRTC call, we see: (audio_device_i
tkchin_webrtc 2015/11/17 21:08:46 So this count can be potentially inaccurate then.
henrika_webrtc 2015/11/18 09:26:04 Thanks, I am learning something new every day and
219 LOGI() << "ctor[" << count << "]" << ios::GetCurrentThreadDescription();
204 } 220 }
205 221
206 AudioDeviceIOS::~AudioDeviceIOS() { 222 AudioDeviceIOS::~AudioDeviceIOS() {
207 LOGI() << "~dtor"; 223 const int count = rtc::AtomicOps::AcquireLoad(&object_count_);
224 LOGI() << "~dtor[" << count << "]";
225 rtc::AtomicOps::Decrement(&object_count_);
208 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 226 RTC_DCHECK(thread_checker_.CalledOnValidThread());
209 Terminate(); 227 Terminate();
210 } 228 }
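As the review thread above points out, the logged count can be inaccurate because the destructor reads object_count_ with AcquireLoad and decrements it in a separate step, so two destructors racing may log the same value. A hedged sketch (not the patch's code) of a destructor that instead logs the value returned by the atomic decrement, which stays consistent under concurrent teardown; rtc::AtomicOps::Increment/Decrement return the updated value:

  AudioDeviceIOS::~AudioDeviceIOS() {
    // Decrement returns the new value; add one to log the count that still
    // included this object, mirroring what the constructor logged.
    const int remaining = rtc::AtomicOps::Decrement(&object_count_);
    LOGI() << "~dtor[" << remaining + 1 << "]";
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    Terminate();
  }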
211 229
212 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { 230 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
213 LOGI() << "AttachAudioBuffer"; 231 LOGI() << "AttachAudioBuffer";
214 RTC_DCHECK(audioBuffer); 232 RTC_DCHECK(audioBuffer);
215 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 233 RTC_DCHECK(thread_checker_.CalledOnValidThread());
216 audio_device_buffer_ = audioBuffer; 234 audio_device_buffer_ = audioBuffer;
217 } 235 }
(...skipping 34 matching lines...)
252 } 270 }
253 271
254 int32_t AudioDeviceIOS::InitPlayout() { 272 int32_t AudioDeviceIOS::InitPlayout() {
255 LOGI() << "InitPlayout"; 273 LOGI() << "InitPlayout";
256 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 274 RTC_DCHECK(thread_checker_.CalledOnValidThread());
257 RTC_DCHECK(initialized_); 275 RTC_DCHECK(initialized_);
258 RTC_DCHECK(!play_is_initialized_); 276 RTC_DCHECK(!play_is_initialized_);
259 RTC_DCHECK(!playing_); 277 RTC_DCHECK(!playing_);
260 if (!rec_is_initialized_) { 278 if (!rec_is_initialized_) {
261 if (!InitPlayOrRecord()) { 279 if (!InitPlayOrRecord()) {
262 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; 280 LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!";
263 return -1; 281 return -1;
264 } 282 }
265 } 283 }
266 play_is_initialized_ = true; 284 play_is_initialized_ = true;
267 return 0; 285 return 0;
268 } 286 }
269 287
270 int32_t AudioDeviceIOS::InitRecording() { 288 int32_t AudioDeviceIOS::InitRecording() {
271 LOGI() << "InitRecording"; 289 LOGI() << "InitRecording";
272 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 290 RTC_DCHECK(thread_checker_.CalledOnValidThread());
273 RTC_DCHECK(initialized_); 291 RTC_DCHECK(initialized_);
274 RTC_DCHECK(!rec_is_initialized_); 292 RTC_DCHECK(!rec_is_initialized_);
275 RTC_DCHECK(!recording_); 293 RTC_DCHECK(!recording_);
276 if (!play_is_initialized_) { 294 if (!play_is_initialized_) {
277 if (!InitPlayOrRecord()) { 295 if (!InitPlayOrRecord()) {
278 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; 296 LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!";
279 return -1; 297 return -1;
280 } 298 }
281 } 299 }
282 rec_is_initialized_ = true; 300 rec_is_initialized_ = true;
283 return 0; 301 return 0;
284 } 302 }
285 303
286 int32_t AudioDeviceIOS::StartPlayout() { 304 int32_t AudioDeviceIOS::StartPlayout() {
287 LOGI() << "StartPlayout"; 305 LOGI() << "StartPlayout";
288 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 306 RTC_DCHECK(thread_checker_.CalledOnValidThread());
(...skipping 335 matching lines...)
624 record_audio_buffer_.reset(new SInt8[data_byte_size]); 642 record_audio_buffer_.reset(new SInt8[data_byte_size]);
625 audio_record_buffer_list_.mNumberBuffers = 1; 643 audio_record_buffer_list_.mNumberBuffers = 1;
626 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; 644 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0];
627 audio_buffer->mNumberChannels = record_parameters_.channels(); 645 audio_buffer->mNumberChannels = record_parameters_.channels();
628 audio_buffer->mDataByteSize = data_byte_size; 646 audio_buffer->mDataByteSize = data_byte_size;
629 audio_buffer->mData = record_audio_buffer_.get(); 647 audio_buffer->mData = record_audio_buffer_.get();
630 } 648 }
631 649
632 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { 650 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
633 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; 651 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit";
634 RTC_DCHECK(!vpio_unit_); 652 RTC_DCHECK(!vpio_unit_) << "VoiceProcessingIO audio unit already exists";
635 // Create an audio component description to identify the Voice-Processing 653 // Create an audio component description to identify the Voice-Processing
636 // I/O audio unit. 654 // I/O audio unit.
637 AudioComponentDescription vpio_unit_description; 655 AudioComponentDescription vpio_unit_description;
638 vpio_unit_description.componentType = kAudioUnitType_Output; 656 vpio_unit_description.componentType = kAudioUnitType_Output;
639 vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO; 657 vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
640 vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple; 658 vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple;
641 vpio_unit_description.componentFlags = 0; 659 vpio_unit_description.componentFlags = 0;
642 vpio_unit_description.componentFlagsMask = 0; 660 vpio_unit_description.componentFlagsMask = 0;
643 // Obtain an audio unit instance given the description. 661 // Obtain an audio unit instance given the description.
644 AudioComponent found_vpio_unit_ref = 662 AudioComponent found_vpio_unit_ref =
645 AudioComponentFindNext(nullptr, &vpio_unit_description); 663 AudioComponentFindNext(nullptr, &vpio_unit_description);
646 664
647 // Create a Voice-Processing IO audio unit. 665 // Create a Voice-Processing IO audio unit.
648 LOG_AND_RETURN_IF_ERROR( 666 OSStatus result = noErr;
649 AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_), 667 result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_);
650 "Failed to create a VoiceProcessingIO audio unit"); 668 if (result != noErr) {
669 vpio_unit_ = nullptr;
670 LOG(LS_ERROR) << "AudioComponentInstanceNew failed: " << result;
671 return false;
672 }
651 673
652 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable 674 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable
653 // input on the input scope of the input element. 675 // input on the input scope of the input element.
654 AudioUnitElement input_bus = 1; 676 AudioUnitElement input_bus = 1;
655 UInt32 enable_input = 1; 677 UInt32 enable_input = 1;
656 LOG_AND_RETURN_IF_ERROR( 678 result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
657 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, 679 kAudioUnitScope_Input, input_bus, &enable_input,
658 kAudioUnitScope_Input, input_bus, &enable_input, 680 sizeof(enable_input));
659 sizeof(enable_input)), 681 if (result != noErr) {
660 "Failed to enable input on input scope of input element"); 682 DisposeAudioUnit();
683 LOG(LS_ERROR) << "Failed to enable input on input scope of input element: "
684 << result;
685 return false;
686 }
661 687
662 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable 688 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable
663 // output on the output scope of the output element. 689 // output on the output scope of the output element.
664 AudioUnitElement output_bus = 0; 690 AudioUnitElement output_bus = 0;
665 UInt32 enable_output = 1; 691 UInt32 enable_output = 1;
666 LOG_AND_RETURN_IF_ERROR( 692 result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
667 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, 693 kAudioUnitScope_Output, output_bus,
668 kAudioUnitScope_Output, output_bus, &enable_output, 694 &enable_output, sizeof(enable_output));
669 sizeof(enable_output)), 695 if (result != noErr) {
670 "Failed to enable output on output scope of output element"); 696 DisposeAudioUnit();
697 LOG(LS_ERROR)
698 << "Failed to enable output on output scope of output element: "
699 << result;
700 return false;
701 }
671 702
672 // Set the application formats for input and output: 703 // Set the application formats for input and output:
673 // - use same format in both directions 704 // - use same format in both directions
674 // - avoid resampling in the I/O unit by using the hardware sample rate 705 // - avoid resampling in the I/O unit by using the hardware sample rate
675 // - linear PCM => noncompressed audio data format with one frame per packet 706 // - linear PCM => noncompressed audio data format with one frame per packet
676 // - no need to specify interleaving since only mono is supported 707 // - no need to specify interleaving since only mono is supported
677 AudioStreamBasicDescription application_format = {0}; 708 AudioStreamBasicDescription application_format = {0};
678 UInt32 size = sizeof(application_format); 709 UInt32 size = sizeof(application_format);
679 RTC_DCHECK_EQ(playout_parameters_.sample_rate(), 710 RTC_DCHECK_EQ(playout_parameters_.sample_rate(),
680 record_parameters_.sample_rate()); 711 record_parameters_.sample_rate());
681 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); 712 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels);
682 application_format.mSampleRate = playout_parameters_.sample_rate(); 713 application_format.mSampleRate = playout_parameters_.sample_rate();
683 application_format.mFormatID = kAudioFormatLinearPCM; 714 application_format.mFormatID = kAudioFormatLinearPCM;
684 application_format.mFormatFlags = 715 application_format.mFormatFlags =
685 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; 716 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
686 application_format.mBytesPerPacket = kBytesPerSample; 717 application_format.mBytesPerPacket = kBytesPerSample;
687 application_format.mFramesPerPacket = 1; // uncompressed 718 application_format.mFramesPerPacket = 1; // uncompressed
688 application_format.mBytesPerFrame = kBytesPerSample; 719 application_format.mBytesPerFrame = kBytesPerSample;
689 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; 720 application_format.mChannelsPerFrame = kPreferredNumberOfChannels;
690 application_format.mBitsPerChannel = 8 * kBytesPerSample; 721 application_format.mBitsPerChannel = 8 * kBytesPerSample;
691 // Store the new format. 722 // Store the new format.
692 application_format_ = application_format; 723 application_format_ = application_format;
693 #if !defined(NDEBUG) 724 #if !defined(NDEBUG)
694 LogABSD(application_format_); 725 LogABSD(application_format_);
695 #endif 726 #endif
696 727
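For concreteness, assuming kBytesPerSample is 2 (16-bit samples) and the active session reports a 48 kHz hardware sample rate, the stream description filled in above is equivalent to the following (illustrative values, not logged output):

  application_format.mSampleRate       = 48000.0;  // hardware rate, avoids resampling
  application_format.mFormatID         = kAudioFormatLinearPCM;
  application_format.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger |
                                         kLinearPCMFormatFlagIsPacked;
  application_format.mBytesPerPacket   = 2;  // one mono 16-bit sample per packet
  application_format.mFramesPerPacket  = 1;  // uncompressed
  application_format.mBytesPerFrame    = 2;
  application_format.mChannelsPerFrame = 1;  // mono
  application_format.mBitsPerChannel   = 16;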
697 // Set the application format on the output scope of the input element/bus. 728 // Set the application format on the output scope of the input element/bus.
698 LOG_AND_RETURN_IF_ERROR( 729 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
699 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, 730 kAudioUnitScope_Output, input_bus,
700 kAudioUnitScope_Output, input_bus, 731 &application_format, size);
701 &application_format, size), 732 if (result != noErr) {
702 "Failed to set application format on output scope of input element"); 733 DisposeAudioUnit();
734 LOG(LS_ERROR)
735 << "Failed to set application format on output scope of input bus: "
736 << result;
737 return false;
738 }
703 739
704 // Set the application format on the input scope of the output element/bus. 740 // Set the application format on the input scope of the output element/bus.
705 LOG_AND_RETURN_IF_ERROR( 741 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
706 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, 742 kAudioUnitScope_Input, output_bus,
707 kAudioUnitScope_Input, output_bus, 743 &application_format, size);
708 &application_format, size), 744 if (result != noErr) {
709 "Failed to set application format on input scope of output element"); 745 DisposeAudioUnit();
746 LOG(LS_ERROR)
747 << "Failed to set application format on input scope of output bus: "
748 << result;
749 return false;
750 }
710 751
711 // Specify the callback function that provides audio samples to the audio 752 // Specify the callback function that provides audio samples to the audio
712 // unit. 753 // unit.
713 AURenderCallbackStruct render_callback; 754 AURenderCallbackStruct render_callback;
714 render_callback.inputProc = GetPlayoutData; 755 render_callback.inputProc = GetPlayoutData;
715 render_callback.inputProcRefCon = this; 756 render_callback.inputProcRefCon = this;
716 LOG_AND_RETURN_IF_ERROR( 757 result = AudioUnitSetProperty(
717 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_SetRenderCallback, 758 vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
718 kAudioUnitScope_Input, output_bus, &render_callback, 759 output_bus, &render_callback, sizeof(render_callback));
719 sizeof(render_callback)), 760 if (result != noErr) {
720 "Failed to specify the render callback on the output element"); 761 DisposeAudioUnit();
762 LOG(LS_ERROR) << "Failed to specify the render callback on the output bus: "
763 << result;
764 return false;
765 }
721 766
722 // Disable AU buffer allocation for the recorder, we allocate our own. 767 // Disable AU buffer allocation for the recorder, we allocate our own.
723 // TODO(henrika): not sure that it actually saves resource to make this call. 768 // TODO(henrika): not sure that it actually saves resource to make this call.
724 UInt32 flag = 0; 769 UInt32 flag = 0;
725 LOG_AND_RETURN_IF_ERROR( 770 result = AudioUnitSetProperty(
726 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, 771 vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
727 kAudioUnitScope_Output, input_bus, &flag, 772 kAudioUnitScope_Output, input_bus, &flag, sizeof(flag));
728 sizeof(flag)), 773 if (result != noErr) {
729 "Failed to disable buffer allocation on the input element"); 774 DisposeAudioUnit();
775 LOG(LS_ERROR) << "Failed to disable buffer allocation on the input bus: "
776 << result;
777 }
730 778
731 // Specify the callback to be called by the I/O thread to us when input audio 779 // Specify the callback to be called by the I/O thread to us when input audio
732 // is available. The recorded samples can then be obtained by calling the 780 // is available. The recorded samples can then be obtained by calling the
733 // AudioUnitRender() method. 781 // AudioUnitRender() method.
734 AURenderCallbackStruct input_callback; 782 AURenderCallbackStruct input_callback;
735 input_callback.inputProc = RecordedDataIsAvailable; 783 input_callback.inputProc = RecordedDataIsAvailable;
736 input_callback.inputProcRefCon = this; 784 input_callback.inputProcRefCon = this;
737 LOG_AND_RETURN_IF_ERROR( 785 result = AudioUnitSetProperty(vpio_unit_,
738 AudioUnitSetProperty(vpio_unit_, 786 kAudioOutputUnitProperty_SetInputCallback,
739 kAudioOutputUnitProperty_SetInputCallback, 787 kAudioUnitScope_Global, input_bus,
740 kAudioUnitScope_Global, input_bus, &input_callback, 788 &input_callback, sizeof(input_callback));
741 sizeof(input_callback)), 789 if (result != noErr) {
742 "Failed to specify the input callback on the input element"); 790 DisposeAudioUnit();
791 LOG(LS_ERROR) << "Failed to specify the input callback on the input bus: "
792 << result;
793 }
743 794
744 // Initialize the Voice-Processing I/O unit instance. 795 // Initialize the Voice-Processing I/O unit instance.
745 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), 796 result = AudioUnitInitialize(vpio_unit_);
746 "Failed to initialize the Voice-Processing I/O unit"); 797 if (result != noErr) {
798 DisposeAudioUnit();
799 LOG(LS_ERROR) << "Failed to initialize the Voice-Processing I/O unit: "
800 << result;
801 return false;
802 }
747 return true; 803 return true;
748 } 804 }
749 805
750 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) { 806 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) {
751 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")"; 807 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")";
752 // Stop the active audio unit. 808 // Stop the active audio unit.
753 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_), 809 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
754 "Failed to stop the the Voice-Processing I/O unit"); 810 "Failed to stop the the Voice-Processing I/O unit");
755 811
756 // The stream format is about to be changed and it requires that we first 812 // The stream format is about to be changed and it requires that we first
(...skipping 18 matching lines...)
775 "Failed to initialize the Voice-Processing I/O unit"); 831 "Failed to initialize the Voice-Processing I/O unit");
776 832
777 // Start rendering audio using the new format. 833 // Start rendering audio using the new format.
778 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), 834 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
779 "Failed to start the Voice-Processing I/O unit"); 835 "Failed to start the Voice-Processing I/O unit");
780 return true; 836 return true;
781 } 837 }
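The middle of this function is elided above, but the standard Core Audio sequence for changing the stream format on a live unit is: stop, uninitialize, set the new format on both elements, re-initialize, and start again. A generic sketch of the steps after the stop call (field and bus names follow the patch; this is not the exact elided code):

  application_format_.mSampleRate = sample_rate;
  LOG_AND_RETURN_IF_ERROR(AudioUnitUninitialize(vpio_unit_),
                          "Failed to uninitialize the Voice-Processing I/O unit");
  LOG_AND_RETURN_IF_ERROR(
      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Output, 1 /* input bus */,
                           &application_format_, sizeof(application_format_)),
      "Failed to set the new format on the output scope of the input element");
  LOG_AND_RETURN_IF_ERROR(
      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0 /* output bus */,
                           &application_format_, sizeof(application_format_)),
      "Failed to set the new format on the input scope of the output element");
  LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
                          "Failed to initialize the Voice-Processing I/O unit");
  LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
                          "Failed to start the Voice-Processing I/O unit");
  return true;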
782 838
783 bool AudioDeviceIOS::InitPlayOrRecord() { 839 bool AudioDeviceIOS::InitPlayOrRecord() {
784 LOGI() << "InitPlayOrRecord"; 840 LOGI() << "InitPlayOrRecord";
841 // The system provides an audio session object upon launch of an application.
842 // However, we must initialize the session in order to handle interruptions.
843 // Implicit initialization occurs when obtaining a reference to the
844 // AVAudioSession object.
785 AVAudioSession* session = [AVAudioSession sharedInstance]; 845 AVAudioSession* session = [AVAudioSession sharedInstance];
846
786 // Activate the audio session and ask for a set of preferred audio parameters. 847 // Activate the audio session and ask for a set of preferred audio parameters.
787 ActivateAudioSession(session, true); 848 if (!ActivateAudioSession(session, true)) {
849 return false;
850 }
788 851
789 // Start observing audio session interruptions and route changes. 852 // Start observing audio session interruptions and route changes.
790 RegisterNotificationObservers(); 853 RegisterNotificationObservers();
791 854
792 // Ensure that we got what we asked for in our active audio session. 855 // Ensure that we got what we asked for in our active audio session.
793 SetupAudioBuffersForActiveAudioSession(); 856 SetupAudioBuffersForActiveAudioSession();
794 857
795 // Create, setup and initialize a new Voice-Processing I/O unit. 858 // Create, setup and initialize a new Voice-Processing I/O unit.
796 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { 859 if (!SetupAndInitializeVoiceProcessingAudioUnit()) {
860 LOG(LS_ERROR) << "SetupAndInitializeVoiceProcessingAudioUnit failed";
797 return false; 861 return false;
798 } 862 }
799 return true; 863 return true;
800 } 864 }
801 865
802 bool AudioDeviceIOS::ShutdownPlayOrRecord() { 866 bool AudioDeviceIOS::ShutdownPlayOrRecord() {
803 LOGI() << "ShutdownPlayOrRecord"; 867 LOGI() << "ShutdownPlayOrRecord";
804 // Remove audio session notification observers. 868 // Remove audio session notification observers.
805 UnregisterNotificationObservers(); 869 UnregisterNotificationObservers();
806 870
807 // Close and delete the voice-processing I/O unit. 871 // Close and delete the voice-processing I/O unit.
808 OSStatus result = -1; 872 OSStatus result = -1;
809 if (nullptr != vpio_unit_) { 873 if (nullptr != vpio_unit_) {
810 result = AudioOutputUnitStop(vpio_unit_); 874 result = AudioOutputUnitStop(vpio_unit_);
811 if (result != noErr) { 875 if (result != noErr) {
812 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; 876 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
813 } 877 }
814 result = AudioUnitUninitialize(vpio_unit_); 878 result = AudioUnitUninitialize(vpio_unit_);
815 if (result != noErr) { 879 if (result != noErr) {
816 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; 880 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result;
817 } 881 }
818 result = AudioComponentInstanceDispose(vpio_unit_); 882 DisposeAudioUnit();
819 if (result != noErr) {
820 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
821 }
822 vpio_unit_ = nullptr;
823 } 883 }
824 884
825 // All I/O should be stopped or paused prior to deactivating the audio 885 // All I/O should be stopped or paused prior to deactivating the audio
826 // session, hence we deactivate as last action. 886 // session, hence we deactivate as last action. However, if more than one
827 AVAudioSession* session = [AVAudioSession sharedInstance]; 887 // object is using the audio session, ensure that only the last object
828 ActivateAudioSession(session, false); 888 // deactivates.
889 const int count = rtc::AtomicOps::AcquireLoad(&object_count_);
890 if (count == 1) {
891 AVAudioSession* session = [AVAudioSession sharedInstance];
892 ActivateAudioSession(session, false);
893 }
829 return true; 894 return true;
830 } 895 }
831 896
897 void AudioDeviceIOS::DisposeAudioUnit() {
898 if (nullptr == vpio_unit_)
899 return;
900 OSStatus result = noErr;
901 if ((result = AudioComponentInstanceDispose(vpio_unit_)) != noErr) {
tkchin_webrtc 2015/11/16 21:30:08 Why not just: OSStatus result = udioComponentInsta
henrika_webrtc 2015/11/17 14:46:11 Done.
902 LOG(LS_ERROR) << "AudioComponentInstanceDispose failed:" << result;
903 }
904 vpio_unit_ = nullptr;
905 }
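The reviewer's suggestion in the thread above, which the author marked as done, is simply to assign the result outside of the if condition. A sketch of what the simplified version presumably looks like:

  void AudioDeviceIOS::DisposeAudioUnit() {
    if (nullptr == vpio_unit_)
      return;
    const OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
    if (result != noErr) {
      LOG(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
    }
    vpio_unit_ = nullptr;
  }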
906
832 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( 907 OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
833 void* in_ref_con, 908 void* in_ref_con,
834 AudioUnitRenderActionFlags* io_action_flags, 909 AudioUnitRenderActionFlags* io_action_flags,
835 const AudioTimeStamp* in_time_stamp, 910 const AudioTimeStamp* in_time_stamp,
836 UInt32 in_bus_number, 911 UInt32 in_bus_number,
837 UInt32 in_number_frames, 912 UInt32 in_number_frames,
838 AudioBufferList* io_data) { 913 AudioBufferList* io_data) {
839 RTC_DCHECK_EQ(1u, in_bus_number); 914 RTC_DCHECK_EQ(1u, in_bus_number);
840 RTC_DCHECK( 915 RTC_DCHECK(
841 !io_data); // no buffer should be allocated for input at this stage 916 !io_data); // no buffer should be allocated for input at this stage
(...skipping 76 matching lines...)
918 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches 993 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
919 // the native I/O audio unit) to a preallocated intermediate buffer and 994 // the native I/O audio unit) to a preallocated intermediate buffer and
920 // copy the result to the audio buffer in the |io_data| destination. 995 // copy the result to the audio buffer in the |io_data| destination.
921 SInt8* source = playout_audio_buffer_.get(); 996 SInt8* source = playout_audio_buffer_.get();
922 fine_audio_buffer_->GetPlayoutData(source); 997 fine_audio_buffer_->GetPlayoutData(source);
923 memcpy(destination, source, dataSizeInBytes); 998 memcpy(destination, source, dataSizeInBytes);
924 return noErr; 999 return noErr;
925 } 1000 }
926 1001
927 } // namespace webrtc 1002 } // namespace webrtc