OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #if !defined(__has_feature) || !__has_feature(objc_arc) | 11 #if !defined(__has_feature) || !__has_feature(objc_arc) |
12 #error "This file requires ARC support." | 12 #error "This file requires ARC support." |
13 #endif | 13 #endif |
14 | 14 |
15 #import <AVFoundation/AVFoundation.h> | 15 #import <AVFoundation/AVFoundation.h> |
16 #import <Foundation/Foundation.h> | 16 #import <Foundation/Foundation.h> |
17 | 17 |
18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" | 18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" |
19 | 19 |
20 #include "webrtc/base/atomicops.h" | 20 #include "webrtc/base/atomicops.h" |
21 #include "webrtc/base/checks.h" | 21 #include "webrtc/base/checks.h" |
22 #include "webrtc/base/criticalsection.h" | 22 #include "webrtc/base/criticalsection.h" |
23 #include "webrtc/base/logging.h" | 23 #include "webrtc/base/logging.h" |
24 #include "webrtc/base/thread_annotations.h" | 24 #include "webrtc/base/thread_annotations.h" |
25 #include "webrtc/modules/audio_device/fine_audio_buffer.h" | 25 #include "webrtc/modules/audio_device/fine_audio_buffer.h" |
26 #include "webrtc/modules/utility/include/helpers_ios.h" | 26 #include "webrtc/modules/utility/include/helpers_ios.h" |
27 | 27 |
| 28 #import "webrtc/base/objc/RTCLogging.h" |
28 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h" | 29 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h" |
| 30 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h" |
29 | 31 |
30 namespace webrtc { | 32 namespace webrtc { |
31 | 33 |
32 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" | 34 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" |
33 | 35 |
34 #define LOG_AND_RETURN_IF_ERROR(error, message) \ | 36 #define LOG_AND_RETURN_IF_ERROR(error, message) \ |
35 do { \ | 37 do { \ |
36 OSStatus err = error; \ | 38 OSStatus err = error; \ |
37 if (err) { \ | 39 if (err) { \ |
38 LOG(LS_ERROR) << message << ": " << err; \ | 40 LOG(LS_ERROR) << message << ": " << err; \ |
39 return false; \ | 41 return false; \ |
40 } \ | 42 } \ |
41 } while (0) | 43 } while (0) |
42 | 44 |
43 #define LOG_IF_ERROR(error, message) \ | 45 #define LOG_IF_ERROR(error, message) \ |
44 do { \ | 46 do { \ |
45 OSStatus err = error; \ | 47 OSStatus err = error; \ |
46 if (err) { \ | 48 if (err) { \ |
47 LOG(LS_ERROR) << message << ": " << err; \ | 49 LOG(LS_ERROR) << message << ": " << err; \ |
48 } \ | 50 } \ |
49 } while (0) | 51 } while (0) |
50 | 52 |
51 // Preferred hardware sample rate (unit is in Hertz). The client sample rate | 53 |
52 // will be set to this value as well to avoid resampling in the audio unit's | |
53 // format converter. Note that some devices, e.g. BT headsets, only support | |
54 // 8000Hz as native sample rate. | |
55 const double kHighPerformanceSampleRate = 48000.0; | |
56 // A lower sample rate will be used for devices with only one core | |
57 // (e.g. iPhone 4). The goal is to reduce the CPU load of the application. | |
58 const double kLowComplexitySampleRate = 16000.0; | |
59 // Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms | |
60 // size used by WebRTC. The exact actual size will differ between devices. | |
61 // Example: using 48kHz on iPhone 6 results in a native buffer size of | |
62 // ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will | |
63 // take care of any buffering required to convert between native buffers and | |
64 // buffers used by WebRTC. It is beneficial for the performance if the native | |
65 // size is as close to 10ms as possible since it results in "clean" callback | |
66 // sequence without bursts of callbacks back to back. | |
67 const double kHighPerformanceIOBufferDuration = 0.01; | |
68 // Use a larger buffer size on devices with only one core (e.g. iPhone 4). | |
69 // It will result in a lower CPU consumption at the cost of a larger latency. | |
70 // The size of 60ms is based on instrumentation that shows a significant | |
71 // reduction in CPU load compared with 10ms on low-end devices. | |
72 // TODO(henrika): monitor this size and determine if it should be modified. | |
73 const double kLowComplexityIOBufferDuration = 0.06; | |
74 // Try to use mono to save resources. Also avoids channel format conversion | |
75 // in the I/O audio unit. Initial tests have shown that it is possible to use | |
76 // mono natively for built-in microphones and for BT headsets but not for | |
77 // wired headsets. Wired headsets only support stereo as native channel format | |
78 // but it is a low cost operation to do a format conversion to mono in the | |
79 // audio unit. Hence, we will not hit a RTC_CHECK in | |
80 // VerifyAudioParametersForActiveAudioSession() for a mismatch between the | |
81 // preferred number of channels and the actual number of channels. | |
82 const int kPreferredNumberOfChannels = 1; | |
83 // Number of bytes per audio sample for 16-bit signed integer representation. | 54 // Number of bytes per audio sample for 16-bit signed integer representation. |
84 const UInt32 kBytesPerSample = 2; | 55 const UInt32 kBytesPerSample = 2; |
85 // Hardcoded delay estimates based on real measurements. | 56 // Hardcoded delay estimates based on real measurements. |
86 // TODO(henrika): these values are not used in combination with built-in AEC. | 57 // TODO(henrika): these values are not used in combination with built-in AEC. |
87 // Can most likely be removed. | 58 // Can most likely be removed. |
88 const UInt16 kFixedPlayoutDelayEstimate = 30; | 59 const UInt16 kFixedPlayoutDelayEstimate = 30; |
89 const UInt16 kFixedRecordDelayEstimate = 30; | 60 const UInt16 kFixedRecordDelayEstimate = 30; |
90 // Calls to AudioUnitInitialize() can fail if called back-to-back on different | 61 // Calls to AudioUnitInitialize() can fail if called back-to-back on different |
91 // ADM instances. A fall-back solution is to allow multiple sequential calls | 62 // ADM instances. A fall-back solution is to allow multiple sequential calls |
92 // with as small delay between each. This factor sets the max number of allowed | 63 // with as small delay between each. This factor sets the max number of allowed |
93 // initialization attempts. | 64 // initialization attempts. |
94 const int kMaxNumberOfAudioUnitInitializeAttempts = 5; | 65 const int kMaxNumberOfAudioUnitInitializeAttempts = 5; |
95 | 66 |
96 using ios::CheckAndLogError; | 67 using ios::CheckAndLogError; |
97 | 68 |
98 // Return the preferred sample rate given number of CPU cores. Use highest | |
99 // possible if the CPU has more than one core. | |
100 static double GetPreferredSampleRate() { | |
101 return (ios::GetProcessorCount() > 1) ? kHighPerformanceSampleRate | |
102 : kLowComplexitySampleRate; | |
103 } | |
104 | |
105 // Return the preferred I/O buffer size given number of CPU cores. Use smallest | |
106 // possible if the CPU has more than one core. | |
107 static double GetPreferredIOBufferDuration() { | |
108 return (ios::GetProcessorCount() > 1) ? kHighPerformanceIOBufferDuration | |
109 : kLowComplexityIOBufferDuration; | |
110 } | |
111 | |
112 // Verifies that the current audio session supports input audio and that the | |
113 // required category and mode are enabled. | |
114 static bool VerifyAudioSession(RTCAudioSession* session) { | |
115 LOG(LS_INFO) << "VerifyAudioSession"; | |
116 // Ensure that the device currently supports audio input. | |
117 if (!session.inputAvailable) { | |
118 LOG(LS_ERROR) << "No audio input path is available!"; | |
119 return false; | |
120 } | |
121 | |
122 // Ensure that the required category and mode are actually activated. | |
123 if (![session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) { | |
124 LOG(LS_ERROR) | |
125 << "Failed to set category to AVAudioSessionCategoryPlayAndRecord"; | |
126 return false; | |
127 } | |
128 if (![session.mode isEqualToString:AVAudioSessionModeVoiceChat]) { | |
129 LOG(LS_ERROR) << "Failed to set mode to AVAudioSessionModeVoiceChat"; | |
130 return false; | |
131 } | |
132 return true; | |
133 } | |
134 | |
135 // Activates an audio session suitable for full duplex VoIP sessions when | |
136 // |activate| is true. Also sets the preferred sample rate and IO buffer | |
137 // duration. Deactivates an active audio session if |activate| is set to false. | |
138 static bool ActivateAudioSession(RTCAudioSession* session, bool activate) { | |
139 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; | |
140 | |
141 NSError* error = nil; | |
142 BOOL success = NO; | |
143 | |
144 [session lockForConfiguration]; | |
145 if (!activate) { | |
146 success = [session setActive:NO | |
147 error:&error]; | |
148 [session unlockForConfiguration]; | |
149 return CheckAndLogError(success, error); | |
150 } | |
151 | |
152 // Go ahead and activate our own audio session since |activate| is true. | |
153 // Use a category which supports simultaneous recording and playback. | |
154 // By default, using this category implies that our app’s audio is | |
155 // nonmixable, hence activating the session will interrupt any other | |
156 // audio sessions which are also nonmixable. | |
157 if (session.category != AVAudioSessionCategoryPlayAndRecord) { | |
158 error = nil; | |
159 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord | |
160 withOptions:AVAudioSessionCategoryOptionAllowBluetooth | |
161 error:&error]; | |
162 RTC_DCHECK(CheckAndLogError(success, error)); | |
163 } | |
164 | |
165 // Specify mode for two-way voice communication (e.g. VoIP). | |
166 if (session.mode != AVAudioSessionModeVoiceChat) { | |
167 error = nil; | |
168 success = [session setMode:AVAudioSessionModeVoiceChat error:&error]; | |
169 RTC_DCHECK(CheckAndLogError(success, error)); | |
170 } | |
171 | |
172 // Set the session's sample rate or the hardware sample rate. | |
173 // It is essential that we use the same sample rate as stream format | |
174 // to ensure that the I/O unit does not have to do sample rate conversion. | |
175 error = nil; | |
176 success = | |
177 [session setPreferredSampleRate:GetPreferredSampleRate() error:&error]; | |
178 RTC_DCHECK(CheckAndLogError(success, error)); | |
179 | |
180 // Set the preferred audio I/O buffer duration, in seconds. | |
181 error = nil; | |
182 success = [session setPreferredIOBufferDuration:GetPreferredIOBufferDuration() | |
183 error:&error]; | |
184 RTC_DCHECK(CheckAndLogError(success, error)); | |
185 | |
186 // Activate the audio session. Activation can fail if another active audio | |
187 // session (e.g. phone call) has higher priority than ours. | |
188 error = nil; | |
189 success = [session setActive:YES error:&error]; | |
190 if (!CheckAndLogError(success, error)) { | |
191 [session unlockForConfiguration]; | |
192 return false; | |
193 } | |
194 | |
195 // Ensure that the active audio session has the correct category and mode. | |
196 if (!VerifyAudioSession(session)) { | |
197 LOG(LS_ERROR) << "Failed to verify audio session category and mode"; | |
198 [session unlockForConfiguration]; | |
199 return false; | |
200 } | |
201 | |
202 // Try to set the preferred number of hardware audio channels. These calls | |
203 // must be done after setting the audio session’s category and mode and | |
204 // activating the session. | |
205 // We try to use mono in both directions to save resources and format | |
206 // conversions in the audio unit. Some devices only support stereo; | |
207 // e.g. wired headset on iPhone 6. | |
208 // TODO(henrika): add support for stereo if needed. | |
209 error = nil; | |
210 success = | |
211 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels | |
212 error:&error]; | |
213 RTC_DCHECK(CheckAndLogError(success, error)); | |
214 error = nil; | |
215 success = | |
216 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels | |
217 error:&error]; | |
218 RTC_DCHECK(CheckAndLogError(success, error)); | |
219 [session unlockForConfiguration]; | |
220 return true; | |
221 } | |
222 | |
223 // An application can create more than one ADM and start audio streaming | |
224 // for all of them. It is essential that we only activate the app's audio | |
225 // session once (for the first one) and deactivate it once (for the last). | |
226 static bool ActivateAudioSession() { | |
227 LOGI() << "ActivateAudioSession"; | |
228 RTCAudioSession* session = [RTCAudioSession sharedInstance]; | |
229 return ActivateAudioSession(session, true); | |
230 } | |
231 | |
232 // If more than one object is using the audio session, ensure that only the | |
233 // last object deactivates. Apple recommends: "activate your audio session | |
234 // only as needed and deactivate it when you are not using audio". | |
235 static bool DeactivateAudioSession() { | |
236 LOGI() << "DeactivateAudioSession"; | |
237 RTCAudioSession* session = [RTCAudioSession sharedInstance]; | |
238 return ActivateAudioSession(session, false); | |
239 } | |
240 | |
241 #if !defined(NDEBUG) | 69 #if !defined(NDEBUG) |
242 // Helper method for printing out an AudioStreamBasicDescription structure. | 70 // Helper method for printing out an AudioStreamBasicDescription structure. |
243 static void LogABSD(AudioStreamBasicDescription absd) { | 71 static void LogABSD(AudioStreamBasicDescription absd) { |
244 char formatIDString[5]; | 72 char formatIDString[5]; |
245 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); | 73 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); |
246 bcopy(&formatID, formatIDString, 4); | 74 bcopy(&formatID, formatIDString, 4); |
247 formatIDString[4] = '\0'; | 75 formatIDString[4] = '\0'; |
248 LOG(LS_INFO) << "LogABSD"; | 76 LOG(LS_INFO) << "LogABSD"; |
249 LOG(LS_INFO) << " sample rate: " << absd.mSampleRate; | 77 LOG(LS_INFO) << " sample rate: " << absd.mSampleRate; |
250 LOG(LS_INFO) << " format ID: " << formatIDString; | 78 LOG(LS_INFO) << " format ID: " << formatIDString; |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
306 int32_t AudioDeviceIOS::Init() { | 134 int32_t AudioDeviceIOS::Init() { |
307 LOGI() << "Init"; | 135 LOGI() << "Init"; |
308 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 136 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
309 if (initialized_) { | 137 if (initialized_) { |
310 return 0; | 138 return 0; |
311 } | 139 } |
312 #if !defined(NDEBUG) | 140 #if !defined(NDEBUG) |
313 LogDeviceInfo(); | 141 LogDeviceInfo(); |
314 #endif | 142 #endif |
315 // Store the preferred sample rate and preferred number of channels already | 143 // Store the preferred sample rate and preferred number of channels already |
316 // here. They have not been set and confirmed yet since ActivateAudioSession() | 144 // here. They have not been set and confirmed yet since configureWebRTCSession |
317 // is not called until audio is about to start. However, it makes sense to | 145 // is not called until audio is about to start. However, it makes sense to |
318 // store the parameters now and then verify at a later stage. | 146 // store the parameters now and then verify at a later stage. |
319 playout_parameters_.reset(GetPreferredSampleRate(), | 147 RTCAudioSessionConfiguration* config = |
320 kPreferredNumberOfChannels); | 148 [RTCAudioSessionConfiguration webRTCConfiguration]; |
321 record_parameters_.reset(GetPreferredSampleRate(), | 149 playout_parameters_.reset(config.sampleRate, |
322 kPreferredNumberOfChannels); | 150 config.outputNumberOfChannels); |
| 151 record_parameters_.reset(config.sampleRate, |
| 152 config.inputNumberOfChannels); |
323 // Ensure that the audio device buffer (ADB) knows about the internal audio | 153 // Ensure that the audio device buffer (ADB) knows about the internal audio |
324 // parameters. Note that, even if we are unable to get a mono audio session, | 154 // parameters. Note that, even if we are unable to get a mono audio session, |
325 // we will always tell the I/O audio unit to do a channel format conversion | 155 // we will always tell the I/O audio unit to do a channel format conversion |
326 // to guarantee mono on the "input side" of the audio unit. | 156 // to guarantee mono on the "input side" of the audio unit. |
327 UpdateAudioDeviceBuffer(); | 157 UpdateAudioDeviceBuffer(); |
328 initialized_ = true; | 158 initialized_ = true; |
329 return 0; | 159 return 0; |
330 } | 160 } |
331 | 161 |
332 int32_t AudioDeviceIOS::Terminate() { | 162 int32_t AudioDeviceIOS::Terminate() { |
(...skipping 333 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
666 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration; | 496 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration; |
667 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels; | 497 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels; |
668 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels; | 498 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels; |
669 LOG(LS_INFO) << " output latency: " << session.outputLatency; | 499 LOG(LS_INFO) << " output latency: " << session.outputLatency; |
670 LOG(LS_INFO) << " input latency: " << session.inputLatency; | 500 LOG(LS_INFO) << " input latency: " << session.inputLatency; |
671 | 501 |
672 // Log a warning message for the case when we are unable to set the preferred | 502 // Log a warning message for the case when we are unable to set the preferred |
673 // hardware sample rate but continue and use the non-ideal sample rate after | 503 // hardware sample rate but continue and use the non-ideal sample rate after |
674 // reinitializing the audio parameters. Most BT headsets only support 8kHz or | 504 // reinitializing the audio parameters. Most BT headsets only support 8kHz or |
675 // 16kHz. | 505 // 16kHz. |
676 if (session.sampleRate != GetPreferredSampleRate()) { | 506 RTCAudioSessionConfiguration* webRTCConfig = |
| 507 [RTCAudioSessionConfiguration webRTCConfiguration]; |
| 508 if (session.sampleRate != webRTCConfig.sampleRate) { |
677 LOG(LS_WARNING) << "Unable to set the preferred sample rate"; | 509 LOG(LS_WARNING) << "Unable to set the preferred sample rate"; |
678 } | 510 } |
679 | 511 |
680 // At this stage, we also know the exact IO buffer duration and can add | 512 // At this stage, we also know the exact IO buffer duration and can add |
681 // that info to the existing audio parameters where it is converted into | 513 // that info to the existing audio parameters where it is converted into |
682 // number of audio frames. | 514 // number of audio frames. |
683 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz. | 515 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz. |
684 // Hence, 128 is the size we expect to see in upcoming render callbacks. | 516 // Hence, 128 is the size we expect to see in upcoming render callbacks. |
685 playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(), | 517 playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(), |
686 session.IOBufferDuration); | 518 session.IOBufferDuration); |
(...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
784 | 616 |
785 // Set the application formats for input and output: | 617 // Set the application formats for input and output: |
786 // - use same format in both directions | 618 // - use same format in both directions |
787 // - avoid resampling in the I/O unit by using the hardware sample rate | 619 // - avoid resampling in the I/O unit by using the hardware sample rate |
788 // - linear PCM => noncompressed audio data format with one frame per packet | 620 // - linear PCM => noncompressed audio data format with one frame per packet |
789 // - no need to specify interleaving since only mono is supported | 621 // - no need to specify interleaving since only mono is supported |
790 AudioStreamBasicDescription application_format = {0}; | 622 AudioStreamBasicDescription application_format = {0}; |
791 UInt32 size = sizeof(application_format); | 623 UInt32 size = sizeof(application_format); |
792 RTC_DCHECK_EQ(playout_parameters_.sample_rate(), | 624 RTC_DCHECK_EQ(playout_parameters_.sample_rate(), |
793 record_parameters_.sample_rate()); | 625 record_parameters_.sample_rate()); |
794 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); | 626 RTC_DCHECK_EQ(1, kRTCAudioSessionPreferredNumberOfChannels); |
795 application_format.mSampleRate = playout_parameters_.sample_rate(); | 627 application_format.mSampleRate = playout_parameters_.sample_rate(); |
796 application_format.mFormatID = kAudioFormatLinearPCM; | 628 application_format.mFormatID = kAudioFormatLinearPCM; |
797 application_format.mFormatFlags = | 629 application_format.mFormatFlags = |
798 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; | 630 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; |
799 application_format.mBytesPerPacket = kBytesPerSample; | 631 application_format.mBytesPerPacket = kBytesPerSample; |
800 application_format.mFramesPerPacket = 1; // uncompressed | 632 application_format.mFramesPerPacket = 1; // uncompressed |
801 application_format.mBytesPerFrame = kBytesPerSample; | 633 application_format.mBytesPerFrame = kBytesPerSample; |
802 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; | 634 application_format.mChannelsPerFrame = |
| 635 kRTCAudioSessionPreferredNumberOfChannels; |
803 application_format.mBitsPerChannel = 8 * kBytesPerSample; | 636 application_format.mBitsPerChannel = 8 * kBytesPerSample; |
804 // Store the new format. | 637 // Store the new format. |
805 application_format_ = application_format; | 638 application_format_ = application_format; |
806 #if !defined(NDEBUG) | 639 #if !defined(NDEBUG) |
807 LogABSD(application_format_); | 640 LogABSD(application_format_); |
808 #endif | 641 #endif |
809 | 642 |
810 // Set the application format on the output scope of the input element/bus. | 643 // Set the application format on the output scope of the input element/bus. |
811 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | 644 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
812 kAudioUnitScope_Output, input_bus, | 645 kAudioUnitScope_Output, input_bus, |
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
930 | 763 |
931 // Start rendering audio using the new format. | 764 // Start rendering audio using the new format. |
932 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), | 765 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), |
933 "Failed to start the Voice-Processing I/O unit"); | 766 "Failed to start the Voice-Processing I/O unit"); |
934 LOG(LS_INFO) << "Voice-Processing I/O unit is now restarted"; | 767 LOG(LS_INFO) << "Voice-Processing I/O unit is now restarted"; |
935 return true; | 768 return true; |
936 } | 769 } |
937 | 770 |
938 bool AudioDeviceIOS::InitPlayOrRecord() { | 771 bool AudioDeviceIOS::InitPlayOrRecord() { |
939 LOGI() << "InitPlayOrRecord"; | 772 LOGI() << "InitPlayOrRecord"; |
940 // Activate the audio session if not already activated. | 773 |
941 if (!ActivateAudioSession()) { | 774 // Use the correct audio session configuration for WebRTC. |
| 775 // This will attempt to activate the audio session. |
| 776 RTCAudioSession* session = [RTCAudioSession sharedInstance]; |
| 777 [session lockForConfiguration]; |
| 778 NSError* error = nil; |
| 779 if (![session configureWebRTCSession:&error]) { |
| 780 RTCLogError(@"Failed to configure WebRTC session: %@", |
| 781 error.localizedDescription); |
| 782 [session unlockForConfiguration]; |
942 return false; | 783 return false; |
943 } | 784 } |
944 | 785 |
945 // Ensure that the active audio session has the correct category and mode. | |
946 RTCAudioSession* session = [RTCAudioSession sharedInstance]; | |
947 if (!VerifyAudioSession(session)) { | |
948 DeactivateAudioSession(); | |
949 LOG(LS_ERROR) << "Failed to verify audio session category and mode"; | |
950 return false; | |
951 } | |
952 | |
953 // Start observing audio session interruptions and route changes. | 786 // Start observing audio session interruptions and route changes. |
954 RegisterNotificationObservers(); | 787 RegisterNotificationObservers(); |
955 | 788 |
956 // Ensure that we got what we asked for in our active audio session. | 789 // Ensure that we got what we asked for in our active audio session. |
957 SetupAudioBuffersForActiveAudioSession(); | 790 SetupAudioBuffersForActiveAudioSession(); |
958 | 791 |
959 // Create, setup and initialize a new Voice-Processing I/O unit. | 792 // Create, setup and initialize a new Voice-Processing I/O unit. |
960 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { | 793 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { |
961 // Reduce usage count for the audio session and possibly deactivate it if | 794 [session setActive:NO error:nil]; |
962 // this object is the only user. | 795 [session unlockForConfiguration]; |
963 DeactivateAudioSession(); | |
964 return false; | 796 return false; |
965 } | 797 } |
| 798 [session unlockForConfiguration]; |
966 return true; | 799 return true; |
967 } | 800 } |
968 | 801 |
969 void AudioDeviceIOS::ShutdownPlayOrRecord() { | 802 void AudioDeviceIOS::ShutdownPlayOrRecord() { |
970 LOGI() << "ShutdownPlayOrRecord"; | 803 LOGI() << "ShutdownPlayOrRecord"; |
971 // Close and delete the voice-processing I/O unit. | 804 // Close and delete the voice-processing I/O unit. |
972 OSStatus result = -1; | 805 OSStatus result = -1; |
973 if (nullptr != vpio_unit_) { | 806 if (nullptr != vpio_unit_) { |
974 result = AudioOutputUnitStop(vpio_unit_); | 807 result = AudioOutputUnitStop(vpio_unit_); |
975 if (result != noErr) { | 808 if (result != noErr) { |
976 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; | 809 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; |
977 } | 810 } |
978 result = AudioUnitUninitialize(vpio_unit_); | 811 result = AudioUnitUninitialize(vpio_unit_); |
979 if (result != noErr) { | 812 if (result != noErr) { |
980 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; | 813 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; |
981 } | 814 } |
982 DisposeAudioUnit(); | 815 DisposeAudioUnit(); |
983 } | 816 } |
984 | 817 |
985 // Remove audio session notification observers. | 818 // Remove audio session notification observers. |
986 UnregisterNotificationObservers(); | 819 UnregisterNotificationObservers(); |
987 | 820 |
988 // All I/O should be stopped or paused prior to deactivating the audio | 821 // All I/O should be stopped or paused prior to deactivating the audio |
989 // session, hence we deactivate as last action. | 822 // session, hence we deactivate as last action. |
990 DeactivateAudioSession(); | 823 RTCAudioSession* session = [RTCAudioSession sharedInstance]; |
| 824 [session lockForConfiguration]; |
| 825 [session setActive:NO error:nil]; |
| 826 [session unlockForConfiguration]; |
991 } | 827 } |
992 | 828 |
993 void AudioDeviceIOS::DisposeAudioUnit() { | 829 void AudioDeviceIOS::DisposeAudioUnit() { |
994 if (nullptr == vpio_unit_) | 830 if (nullptr == vpio_unit_) |
995 return; | 831 return; |
996 OSStatus result = AudioComponentInstanceDispose(vpio_unit_); | 832 OSStatus result = AudioComponentInstanceDispose(vpio_unit_); |
997 if (result != noErr) { | 833 if (result != noErr) { |
998 LOG(LS_ERROR) << "AudioComponentInstanceDispose failed:" << result; | 834 LOG(LS_ERROR) << "AudioComponentInstanceDispose failed:" << result; |
999 } | 835 } |
1000 vpio_unit_ = nullptr; | 836 vpio_unit_ = nullptr; |
(...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1092 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches | 928 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches |
1093 // the native I/O audio unit) to a preallocated intermediate buffer and | 929 // the native I/O audio unit) to a preallocated intermediate buffer and |
1094 // copy the result to the audio buffer in the |io_data| destination. | 930 // copy the result to the audio buffer in the |io_data| destination. |
1095 SInt8* source = playout_audio_buffer_.get(); | 931 SInt8* source = playout_audio_buffer_.get(); |
1096 fine_audio_buffer_->GetPlayoutData(source); | 932 fine_audio_buffer_->GetPlayoutData(source); |
1097 memcpy(destination, source, dataSizeInBytes); | 933 memcpy(destination, source, dataSizeInBytes); |
1098 return noErr; | 934 return noErr; |
1099 } | 935 } |
1100 | 936 |
1101 } // namespace webrtc | 937 } // namespace webrtc |
OLD | NEW |