OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #if !defined(__has_feature) || !__has_feature(objc_arc) | 11 #if !defined(__has_feature) || !__has_feature(objc_arc) |
12 #error "This file requires ARC support." | 12 #error "This file requires ARC support." |
13 #endif | 13 #endif |
14 | 14 |
15 #import <AVFoundation/AVFoundation.h> | 15 #import <AVFoundation/AVFoundation.h> |
16 #import <Foundation/Foundation.h> | 16 #import <Foundation/Foundation.h> |
17 | 17 |
18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" | 18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" |
19 | 19 |
20 #include "webrtc/base/atomicops.h" | 20 #include "webrtc/base/atomicops.h" |
21 #include "webrtc/base/checks.h" | 21 #include "webrtc/base/checks.h" |
22 #include "webrtc/base/logging.h" | 22 #include "webrtc/base/logging.h" |
23 #include "webrtc/modules/audio_device/fine_audio_buffer.h" | 23 #include "webrtc/modules/audio_device/fine_audio_buffer.h" |
24 #include "webrtc/modules/utility/include/helpers_ios.h" | 24 #include "webrtc/modules/utility/include/helpers_ios.h" |
25 | 25 |
26 namespace webrtc { | 26 namespace webrtc { |
27 | 27 |
28 // Protects |g_audio_session_activation_count|. | |
29 static rtc::GlobalLockPod g_lock; | |
30 | |
31 // Counts number of times setActive:YES has been called on the singleton | |
32 // AVAudioSession instance. Used to ensure that we don't disable an audio | |
33 // session when it is still in use by other instances of this object. | |
34 // Member is static to ensure that the value is counted for all instances | |
35 // and not per instance. | |
36 static int g_audio_session_activation_count GUARDED_BY(g_lock) = 0; | |
37 | |
28 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" | 38 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" |
29 | 39 |
30 #define LOG_AND_RETURN_IF_ERROR(error, message) \ | 40 #define LOG_AND_RETURN_IF_ERROR(error, message) \ |
31 do { \ | 41 do { \ |
32 OSStatus err = error; \ | 42 OSStatus err = error; \ |
33 if (err) { \ | 43 if (err) { \ |
34 LOG(LS_ERROR) << message << ": " << err; \ | 44 LOG(LS_ERROR) << message << ": " << err; \ |
35 return false; \ | 45 return false; \ |
36 } \ | 46 } \ |
37 } while (0) | 47 } while (0) |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
73 // TODO(henrika): these values are not used in combination with built-in AEC. | 83 // TODO(henrika): these values are not used in combination with built-in AEC. |
74 // Can most likely be removed. | 84 // Can most likely be removed. |
75 const UInt16 kFixedPlayoutDelayEstimate = 30; | 85 const UInt16 kFixedPlayoutDelayEstimate = 30; |
76 const UInt16 kFixedRecordDelayEstimate = 30; | 86 const UInt16 kFixedRecordDelayEstimate = 30; |
77 | 87 |
78 using ios::CheckAndLogError; | 88 using ios::CheckAndLogError; |
79 | 89 |
80 // Activates an audio session suitable for full duplex VoIP sessions when | 90 // Activates an audio session suitable for full duplex VoIP sessions when |
81 // |activate| is true. Also sets the preferred sample rate and IO buffer | 91 // |activate| is true. Also sets the preferred sample rate and IO buffer |
82 // duration. Deactivates an active audio session if |activate| is set to false. | 92 // duration. Deactivates an active audio session if |activate| is set to false. |
83 static void ActivateAudioSession(AVAudioSession* session, bool activate) { | 93 static bool ActivateAudioSession(AVAudioSession* session, bool activate) |
94 EXCLUSIVE_LOCKS_REQUIRED(g_lock) { | |
84 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; | 95 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; |
85 @autoreleasepool { | 96 @autoreleasepool { |
86 NSError* error = nil; | 97 NSError* error = nil; |
87 BOOL success = NO; | 98 BOOL success = NO; |
88 | 99 |
89 if (!activate) { | 100 if (!activate) { |
90 // Deactivate the audio session using an extra option and then return. | 101 // Deactivate the audio session using an extra option and then return. |
91 // AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation is used to | 102 // AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation is used to |
92 // ensure that other audio sessions that were interrupted by our session | 103 // ensure that other audio sessions that were interrupted by our session |
93 // can return to their active state. It is recommended for VoIP apps to | 104 // can return to their active state. It is recommended for VoIP apps to |
94 // use this option. | 105 // use this option. |
95 success = [session | 106 success = [session |
96 setActive:NO | 107 setActive:NO |
97 withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation | 108 withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation |
98 error:&error]; | 109 error:&error]; |
99 RTC_DCHECK(CheckAndLogError(success, error)); | 110 return CheckAndLogError(success, error); |
100 return; | |
101 } | 111 } |
102 | 112 |
103 // Go ahead and activate our own audio session since |activate| is true. | 113 // Go ahead and activate our own audio session since |activate| is true. |
104 // Use a category which supports simultaneous recording and playback. | 114 // Use a category which supports simultaneous recording and playback. |
105 // By default, using this category implies that our app’s audio is | 115 // By default, using this category implies that our app’s audio is |
106 // nonmixable, hence activating the session will interrupt any other | 116 // nonmixable, hence activating the session will interrupt any other |
107 // audio sessions which are also nonmixable. | 117 // audio sessions which are also nonmixable. |
108 if (session.category != AVAudioSessionCategoryPlayAndRecord) { | 118 if (session.category != AVAudioSessionCategoryPlayAndRecord) { |
109 error = nil; | 119 error = nil; |
110 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord | 120 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord |
(...skipping 11 matching lines...) Expand all Loading... | |
122 | 132 |
123 // Set the session's sample rate to the hardware sample rate. | 133 // Set the session's sample rate to the hardware sample rate. |
124 // It is essential that we use the same sample rate as stream format | 134 // It is essential that we use the same sample rate as stream format |
125 // to ensure that the I/O unit does not have to do sample rate conversion. | 135 // to ensure that the I/O unit does not have to do sample rate conversion. |
126 error = nil; | 136 error = nil; |
127 success = | 137 success = |
128 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; | 138 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; |
129 RTC_DCHECK(CheckAndLogError(success, error)); | 139 RTC_DCHECK(CheckAndLogError(success, error)); |
130 | 140 |
131 // Set the preferred audio I/O buffer duration, in seconds. | 141 // Set the preferred audio I/O buffer duration, in seconds. |
132 // TODO(henrika): add more comments here. | |
133 error = nil; | 142 error = nil; |
134 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration | 143 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration |
135 error:&error]; | 144 error:&error]; |
136 RTC_DCHECK(CheckAndLogError(success, error)); | 145 RTC_DCHECK(CheckAndLogError(success, error)); |
137 | 146 |
138 // Activate the audio session. Activation can fail if another active audio | 147 // Activate the audio session. Activation can fail if another active audio |
139 // session (e.g. phone call) has higher priority than ours. | 148 // session (e.g. phone call) has higher priority than ours. |
140 error = nil; | 149 error = nil; |
141 success = [session setActive:YES error:&error]; | 150 success = [session setActive:YES error:&error]; |
142 RTC_DCHECK(CheckAndLogError(success, error)); | 151 if (!CheckAndLogError(success, error)) { |
143 RTC_CHECK(session.isInputAvailable) << "No input path is available!"; | 152 return false; |
153 } | |
144 | 154 |
145 // Ensure that category and mode are actually activated. | 155 // Ensure that the device currently supports audio input. |
146 RTC_DCHECK( | 156 if (!session.isInputAvailable) { |
147 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); | 157 LOG(LS_ERROR) << "No audio input path is available!"; |
148 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); | 158 return false; |
159 } | |
160 | |
161 // Ensure that the required category and mode are actually activated. | |
162 if (![session.category | |
163 isEqualToString:AVAudioSessionCategoryPlayAndRecord]) { | |
164 LOG(LS_ERROR) | |
165 << "Failed to set category to AVAudioSessionCategoryPlayAndRecord"; | |
166 return false; | |
167 } | |
168 if (![session.mode isEqualToString:AVAudioSessionModeVoiceChat]) { | |
169 LOG(LS_ERROR) << "Failed to set mode to AVAudioSessionModeVoiceChat"; | |
170 return false; | |
171 } | |
149 | 172 |
150 // Try to set the preferred number of hardware audio channels. These calls | 173 // Try to set the preferred number of hardware audio channels. These calls |
151 // must be done after setting the audio session’s category and mode and | 174 // must be done after setting the audio session’s category and mode and |
152 // activating the session. | 175 // activating the session. |
153 // We try to use mono in both directions to save resources and format | 176 // We try to use mono in both directions to save resources and format |
154 // conversions in the audio unit. Some devices only support stereo; | 177 // conversions in the audio unit. Some devices only support stereo; |
155 // e.g. wired headset on iPhone 6. | 178 // e.g. wired headset on iPhone 6. |
156 // TODO(henrika): add support for stereo if needed. | 179 // TODO(henrika): add support for stereo if needed. |
157 error = nil; | 180 error = nil; |
158 success = | 181 success = |
159 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels | 182 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels |
160 error:&error]; | 183 error:&error]; |
161 RTC_DCHECK(CheckAndLogError(success, error)); | 184 RTC_DCHECK(CheckAndLogError(success, error)); |
162 error = nil; | 185 error = nil; |
163 success = | 186 success = |
164 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels | 187 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels |
165 error:&error]; | 188 error:&error]; |
166 RTC_DCHECK(CheckAndLogError(success, error)); | 189 RTC_DCHECK(CheckAndLogError(success, error)); |
190 return true; | |
167 } | 191 } |
168 } | 192 } |
169 | 193 |
170 #if !defined(NDEBUG) | 194 #if !defined(NDEBUG) |
171 // Helper method for printing out an AudioStreamBasicDescription structure. | 195 // Helper method for printing out an AudioStreamBasicDescription structure. |
172 static void LogABSD(AudioStreamBasicDescription absd) { | 196 static void LogABSD(AudioStreamBasicDescription absd) { |
173 char formatIDString[5]; | 197 char formatIDString[5]; |
174 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); | 198 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); |
175 bcopy(&formatID, formatIDString, 4); | 199 bcopy(&formatID, formatIDString, 4); |
176 formatIDString[4] = '\0'; | 200 formatIDString[4] = '\0'; |
(...skipping 28 matching lines...) Expand all Loading... | |
205 playing_(0), | 229 playing_(0), |
206 initialized_(false), | 230 initialized_(false), |
207 rec_is_initialized_(false), | 231 rec_is_initialized_(false), |
208 play_is_initialized_(false), | 232 play_is_initialized_(false), |
209 audio_interruption_observer_(nullptr), | 233 audio_interruption_observer_(nullptr), |
210 route_change_observer_(nullptr) { | 234 route_change_observer_(nullptr) { |
211 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); | 235 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); |
212 } | 236 } |
213 | 237 |
214 AudioDeviceIOS::~AudioDeviceIOS() { | 238 AudioDeviceIOS::~AudioDeviceIOS() { |
215 LOGI() << "~dtor"; | 239 LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); |
216 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 240 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
217 Terminate(); | 241 Terminate(); |
218 } | 242 } |
219 | 243 |
220 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { | 244 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { |
221 LOGI() << "AttachAudioBuffer"; | 245 LOGI() << "AttachAudioBuffer"; |
222 RTC_DCHECK(audioBuffer); | 246 RTC_DCHECK(audioBuffer); |
223 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 247 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
224 audio_device_buffer_ = audioBuffer; | 248 audio_device_buffer_ = audioBuffer; |
225 } | 249 } |
(...skipping 21 matching lines...) Expand all Loading... | |
247 initialized_ = true; | 271 initialized_ = true; |
248 return 0; | 272 return 0; |
249 } | 273 } |
250 | 274 |
251 int32_t AudioDeviceIOS::Terminate() { | 275 int32_t AudioDeviceIOS::Terminate() { |
252 LOGI() << "Terminate"; | 276 LOGI() << "Terminate"; |
253 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 277 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
254 if (!initialized_) { | 278 if (!initialized_) { |
255 return 0; | 279 return 0; |
256 } | 280 } |
257 ShutdownPlayOrRecord(); | 281 StopPlayout(); |
282 StopRecording(); | |
258 initialized_ = false; | 283 initialized_ = false; |
284 { | |
285 rtc::GlobalLockScope ls(&g_lock); | |
286 if (g_audio_session_activation_count != 0) { | |
287 LOG(LS_WARNING) << "Object is destructed with an active audio session"; | |
288 } | |
289 } | |
259 return 0; | 290 return 0; |
260 } | 291 } |
261 | 292 |
262 int32_t AudioDeviceIOS::InitPlayout() { | 293 int32_t AudioDeviceIOS::InitPlayout() { |
263 LOGI() << "InitPlayout"; | 294 LOGI() << "InitPlayout"; |
264 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 295 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
265 RTC_DCHECK(initialized_); | 296 RTC_DCHECK(initialized_); |
266 RTC_DCHECK(!play_is_initialized_); | 297 RTC_DCHECK(!play_is_initialized_); |
267 RTC_DCHECK(!playing_); | 298 RTC_DCHECK(!playing_); |
268 if (!rec_is_initialized_) { | 299 if (!rec_is_initialized_) { |
269 if (!InitPlayOrRecord()) { | 300 if (!InitPlayOrRecord()) { |
270 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; | 301 LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!"; |
271 return -1; | 302 return -1; |
272 } | 303 } |
273 } | 304 } |
274 play_is_initialized_ = true; | 305 play_is_initialized_ = true; |
275 return 0; | 306 return 0; |
276 } | 307 } |
277 | 308 |
278 int32_t AudioDeviceIOS::InitRecording() { | 309 int32_t AudioDeviceIOS::InitRecording() { |
279 LOGI() << "InitRecording"; | 310 LOGI() << "InitRecording"; |
280 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 311 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
281 RTC_DCHECK(initialized_); | 312 RTC_DCHECK(initialized_); |
282 RTC_DCHECK(!rec_is_initialized_); | 313 RTC_DCHECK(!rec_is_initialized_); |
283 RTC_DCHECK(!recording_); | 314 RTC_DCHECK(!recording_); |
284 if (!play_is_initialized_) { | 315 if (!play_is_initialized_) { |
285 if (!InitPlayOrRecord()) { | 316 if (!InitPlayOrRecord()) { |
286 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; | 317 LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!"; |
287 return -1; | 318 return -1; |
288 } | 319 } |
289 } | 320 } |
290 rec_is_initialized_ = true; | 321 rec_is_initialized_ = true; |
291 return 0; | 322 return 0; |
292 } | 323 } |
293 | 324 |
294 int32_t AudioDeviceIOS::StartPlayout() { | 325 int32_t AudioDeviceIOS::StartPlayout() { |
295 LOGI() << "StartPlayout"; | 326 LOGI() << "StartPlayout"; |
296 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 327 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
297 RTC_DCHECK(play_is_initialized_); | 328 RTC_DCHECK(play_is_initialized_); |
298 RTC_DCHECK(!playing_); | 329 RTC_DCHECK(!playing_); |
299 fine_audio_buffer_->ResetPlayout(); | 330 fine_audio_buffer_->ResetPlayout(); |
300 if (!recording_) { | 331 if (!recording_) { |
301 OSStatus result = AudioOutputUnitStart(vpio_unit_); | 332 OSStatus result = AudioOutputUnitStart(vpio_unit_); |
302 if (result != noErr) { | 333 if (result != noErr) { |
303 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; | 334 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartPlayout: " |
335 << result; | |
304 return -1; | 336 return -1; |
305 } | 337 } |
306 } | 338 } |
307 rtc::AtomicOps::ReleaseStore(&playing_, 1); | 339 rtc::AtomicOps::ReleaseStore(&playing_, 1); |
308 return 0; | 340 return 0; |
309 } | 341 } |
310 | 342 |
311 int32_t AudioDeviceIOS::StopPlayout() { | 343 int32_t AudioDeviceIOS::StopPlayout() { |
312 LOGI() << "StopPlayout"; | 344 LOGI() << "StopPlayout"; |
313 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 345 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
(...skipping 10 matching lines...) Expand all Loading... | |
324 | 356 |
325 int32_t AudioDeviceIOS::StartRecording() { | 357 int32_t AudioDeviceIOS::StartRecording() { |
326 LOGI() << "StartRecording"; | 358 LOGI() << "StartRecording"; |
327 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 359 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
328 RTC_DCHECK(rec_is_initialized_); | 360 RTC_DCHECK(rec_is_initialized_); |
329 RTC_DCHECK(!recording_); | 361 RTC_DCHECK(!recording_); |
330 fine_audio_buffer_->ResetRecord(); | 362 fine_audio_buffer_->ResetRecord(); |
331 if (!playing_) { | 363 if (!playing_) { |
332 OSStatus result = AudioOutputUnitStart(vpio_unit_); | 364 OSStatus result = AudioOutputUnitStart(vpio_unit_); |
333 if (result != noErr) { | 365 if (result != noErr) { |
334 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; | 366 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartRecording: " |
367 << result; | |
335 return -1; | 368 return -1; |
336 } | 369 } |
337 } | 370 } |
338 rtc::AtomicOps::ReleaseStore(&recording_, 1); | 371 rtc::AtomicOps::ReleaseStore(&recording_, 1); |
339 return 0; | 372 return 0; |
340 } | 373 } |
341 | 374 |
342 int32_t AudioDeviceIOS::StopRecording() { | 375 int32_t AudioDeviceIOS::StopRecording() { |
343 LOGI() << "StopRecording"; | 376 LOGI() << "StopRecording"; |
344 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 377 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
420 // just in case. | 453 // just in case. |
421 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; | 454 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; |
422 // Inform the audio device buffer (ADB) about the new audio format. | 455 // Inform the audio device buffer (ADB) about the new audio format. |
423 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); | 456 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); |
424 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); | 457 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); |
425 audio_device_buffer_->SetRecordingSampleRate( | 458 audio_device_buffer_->SetRecordingSampleRate( |
426 record_parameters_.sample_rate()); | 459 record_parameters_.sample_rate()); |
427 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); | 460 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); |
428 } | 461 } |
429 | 462 |
463 bool AudioDeviceIOS::ActivateAudioSession() { | |
pbos-webrtc
2015/11/18 18:51:48
These aren't accessing any members right? This cou
henrika_webrtc
2015/11/18 19:48:21
Missed that. Will fix.
henrika_webrtc
2015/11/19 12:04:19
Done.
| |
464 LOGI() << "ActivateAudioSession"; | |
465 // An application can create more than one ADM and start audio streaming | |
466 // for all of them. It is essential that we only activate the app's audio | |
467 // session once (for the first one) and deactivate it once (for the last). | |
468 rtc::GlobalLockScope ls(&g_lock); | |
469 if (g_audio_session_activation_count == 0) { | |
470 // The system provides an audio session object upon launch of an | |
471 // application. However, we must initialize the session in order to | |
472 // handle interruptions. Implicit initialization occurs when obtaining | |
473 // a reference to the AVAudioSession object. | |
474 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
tkchin_webrtc
2015/11/18 19:25:29
I don't know if there are consequences to managing
henrika_webrtc
2015/11/18 19:48:21
We have done so for a long time now, also done in
| |
475 // Try to activate the audio session and ask for a set of preferred audio | |
476 // parameters. | |
477 if (!webrtc::ActivateAudioSession(session, true)) { | |
478 LOG(LS_ERROR) << "Failed to activate the audio session"; | |
479 return false; | |
480 } | |
481 ++g_audio_session_activation_count; | |
482 LOG(LS_INFO) << "Our audio session is now activated"; | |
483 } | |
484 return true; | |
485 } | |
486 | |
487 bool AudioDeviceIOS::DeactivateAudioSession() { | |
488 LOGI() << "DeactivateAudioSession"; | |
489 // If more than one object is using the audio session, ensure that only the | |
490 // last object deactivates. Apple recommends: "activate your audio session | |
491 // only as needed and deactivate it when you are not using audio". | |
492 rtc::GlobalLockScope ls(&g_lock); | |
493 if (g_audio_session_activation_count == 1) { | |
494 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
495 if (!webrtc::ActivateAudioSession(session, false)) { | |
pbos-webrtc
2015/11/18 18:51:48
You don't need the webrtc:: prefix here or above.
henrika_webrtc
2015/11/18 19:48:21
Acknowledged.
| |
496 LOG(LS_ERROR) << "Failed to deactivate the audio session"; | |
pbos-webrtc
2015/11/18 18:51:48
+ ", this shouldn't happen, the audio session will
tkchin_webrtc
2015/11/18 19:25:29
It will "leak" in the sense that there is mismatch
henrika_webrtc
2015/11/18 19:48:21
Will make comment more clear. It is not a real lea
| |
497 return false; | |
498 } | |
499 --g_audio_session_activation_count; | |
500 LOG(LS_INFO) << "Our audio session is now deactivated"; | |
501 } | |
502 return true; | |
503 } | |
504 | |
430 void AudioDeviceIOS::RegisterNotificationObservers() { | 505 void AudioDeviceIOS::RegisterNotificationObservers() { |
431 LOGI() << "RegisterNotificationObservers"; | 506 LOGI() << "RegisterNotificationObservers"; |
432 // This code block will be called when AVAudioSessionInterruptionNotification | 507 // This code block will be called when AVAudioSessionInterruptionNotification |
433 // is observed. | 508 // is observed. |
434 void (^interrupt_block)(NSNotification*) = ^(NSNotification* notification) { | 509 void (^interrupt_block)(NSNotification*) = ^(NSNotification* notification) { |
435 NSNumber* type_number = | 510 NSNumber* type_number = |
436 notification.userInfo[AVAudioSessionInterruptionTypeKey]; | 511 notification.userInfo[AVAudioSessionInterruptionTypeKey]; |
437 AVAudioSessionInterruptionType type = | 512 AVAudioSessionInterruptionType type = |
438 (AVAudioSessionInterruptionType)type_number.unsignedIntegerValue; | 513 (AVAudioSessionInterruptionType)type_number.unsignedIntegerValue; |
439 LOG(LS_INFO) << "Audio session interruption:"; | 514 LOG(LS_INFO) << "Audio session interruption:"; |
(...skipping 192 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
632 record_audio_buffer_.reset(new SInt8[data_byte_size]); | 707 record_audio_buffer_.reset(new SInt8[data_byte_size]); |
633 audio_record_buffer_list_.mNumberBuffers = 1; | 708 audio_record_buffer_list_.mNumberBuffers = 1; |
634 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; | 709 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; |
635 audio_buffer->mNumberChannels = record_parameters_.channels(); | 710 audio_buffer->mNumberChannels = record_parameters_.channels(); |
636 audio_buffer->mDataByteSize = data_byte_size; | 711 audio_buffer->mDataByteSize = data_byte_size; |
637 audio_buffer->mData = record_audio_buffer_.get(); | 712 audio_buffer->mData = record_audio_buffer_.get(); |
638 } | 713 } |
639 | 714 |
640 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { | 715 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { |
641 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; | 716 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; |
642 RTC_DCHECK(!vpio_unit_); | 717 RTC_DCHECK(!vpio_unit_) << "VoiceProcessingIO audio unit already exists"; |
643 // Create an audio component description to identify the Voice-Processing | 718 // Create an audio component description to identify the Voice-Processing |
644 // I/O audio unit. | 719 // I/O audio unit. |
645 AudioComponentDescription vpio_unit_description; | 720 AudioComponentDescription vpio_unit_description; |
646 vpio_unit_description.componentType = kAudioUnitType_Output; | 721 vpio_unit_description.componentType = kAudioUnitType_Output; |
647 vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO; | 722 vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO; |
648 vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple; | 723 vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple; |
649 vpio_unit_description.componentFlags = 0; | 724 vpio_unit_description.componentFlags = 0; |
650 vpio_unit_description.componentFlagsMask = 0; | 725 vpio_unit_description.componentFlagsMask = 0; |
651 // Obtain an audio unit instance given the description. | 726 // Obtain an audio unit instance given the description. |
652 AudioComponent found_vpio_unit_ref = | 727 AudioComponent found_vpio_unit_ref = |
653 AudioComponentFindNext(nullptr, &vpio_unit_description); | 728 AudioComponentFindNext(nullptr, &vpio_unit_description); |
654 | 729 |
655 // Create a Voice-Processing IO audio unit. | 730 // Create a Voice-Processing IO audio unit. |
656 LOG_AND_RETURN_IF_ERROR( | 731 OSStatus result = noErr; |
657 AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_), | 732 result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_); |
658 "Failed to create a VoiceProcessingIO audio unit"); | 733 if (result != noErr) { |
734 vpio_unit_ = nullptr; | |
735 LOG(LS_ERROR) << "AudioComponentInstanceNew failed: " << result; | |
736 return false; | |
737 } | |
659 | 738 |
660 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable | 739 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable |
661 // input on the input scope of the input element. | 740 // input on the input scope of the input element. |
662 AudioUnitElement input_bus = 1; | 741 AudioUnitElement input_bus = 1; |
663 UInt32 enable_input = 1; | 742 UInt32 enable_input = 1; |
664 LOG_AND_RETURN_IF_ERROR( | 743 result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, |
665 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, | 744 kAudioUnitScope_Input, input_bus, &enable_input, |
666 kAudioUnitScope_Input, input_bus, &enable_input, | 745 sizeof(enable_input)); |
667 sizeof(enable_input)), | 746 if (result != noErr) { |
668 "Failed to enable input on input scope of input element"); | 747 DisposeAudioUnit(); |
748 LOG(LS_ERROR) << "Failed to enable input on input scope of input element: " | |
749 << result; | |
750 return false; | |
751 } | |
669 | 752 |
670 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable | 753 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable |
671 // output on the output scope of the output element. | 754 // output on the output scope of the output element. |
672 AudioUnitElement output_bus = 0; | 755 AudioUnitElement output_bus = 0; |
673 UInt32 enable_output = 1; | 756 UInt32 enable_output = 1; |
674 LOG_AND_RETURN_IF_ERROR( | 757 result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, |
675 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, | 758 kAudioUnitScope_Output, output_bus, |
676 kAudioUnitScope_Output, output_bus, &enable_output, | 759 &enable_output, sizeof(enable_output)); |
677 sizeof(enable_output)), | 760 if (result != noErr) { |
678 "Failed to enable output on output scope of output element"); | 761 DisposeAudioUnit(); |
762 LOG(LS_ERROR) | |
763 << "Failed to enable output on output scope of output element: " | |
764 << result; | |
765 return false; | |
766 } | |
679 | 767 |
680 // Set the application formats for input and output: | 768 // Set the application formats for input and output: |
681 // - use same format in both directions | 769 // - use same format in both directions |
682 // - avoid resampling in the I/O unit by using the hardware sample rate | 770 // - avoid resampling in the I/O unit by using the hardware sample rate |
683 // - linear PCM => noncompressed audio data format with one frame per packet | 771 // - linear PCM => noncompressed audio data format with one frame per packet |
684 // - no need to specify interleaving since only mono is supported | 772 // - no need to specify interleaving since only mono is supported |
685 AudioStreamBasicDescription application_format = {0}; | 773 AudioStreamBasicDescription application_format = {0}; |
686 UInt32 size = sizeof(application_format); | 774 UInt32 size = sizeof(application_format); |
687 RTC_DCHECK_EQ(playout_parameters_.sample_rate(), | 775 RTC_DCHECK_EQ(playout_parameters_.sample_rate(), |
688 record_parameters_.sample_rate()); | 776 record_parameters_.sample_rate()); |
689 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); | 777 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); |
690 application_format.mSampleRate = playout_parameters_.sample_rate(); | 778 application_format.mSampleRate = playout_parameters_.sample_rate(); |
691 application_format.mFormatID = kAudioFormatLinearPCM; | 779 application_format.mFormatID = kAudioFormatLinearPCM; |
692 application_format.mFormatFlags = | 780 application_format.mFormatFlags = |
693 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; | 781 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; |
694 application_format.mBytesPerPacket = kBytesPerSample; | 782 application_format.mBytesPerPacket = kBytesPerSample; |
695 application_format.mFramesPerPacket = 1; // uncompressed | 783 application_format.mFramesPerPacket = 1; // uncompressed |
696 application_format.mBytesPerFrame = kBytesPerSample; | 784 application_format.mBytesPerFrame = kBytesPerSample; |
697 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; | 785 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; |
698 application_format.mBitsPerChannel = 8 * kBytesPerSample; | 786 application_format.mBitsPerChannel = 8 * kBytesPerSample; |
699 // Store the new format. | 787 // Store the new format. |
700 application_format_ = application_format; | 788 application_format_ = application_format; |
701 #if !defined(NDEBUG) | 789 #if !defined(NDEBUG) |
702 LogABSD(application_format_); | 790 LogABSD(application_format_); |
703 #endif | 791 #endif |
704 | 792 |
705 // Set the application format on the output scope of the input element/bus. | 793 // Set the application format on the output scope of the input element/bus. |
706 LOG_AND_RETURN_IF_ERROR( | 794 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
707 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | 795 kAudioUnitScope_Output, input_bus, |
708 kAudioUnitScope_Output, input_bus, | 796 &application_format, size); |
709 &application_format, size), | 797 if (result != noErr) { |
710 "Failed to set application format on output scope of input element"); | 798 DisposeAudioUnit(); |
799 LOG(LS_ERROR) | |
800 << "Failed to set application format on output scope of input bus: " | |
801 << result; | |
802 return false; | |
803 } | |
711 | 804 |
712 // Set the application format on the input scope of the output element/bus. | 805 // Set the application format on the input scope of the output element/bus. |
713 LOG_AND_RETURN_IF_ERROR( | 806 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
714 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | 807 kAudioUnitScope_Input, output_bus, |
715 kAudioUnitScope_Input, output_bus, | 808 &application_format, size); |
716 &application_format, size), | 809 if (result != noErr) { |
717 "Failed to set application format on input scope of output element"); | 810 DisposeAudioUnit(); |
811 LOG(LS_ERROR) | |
812 << "Failed to set application format on input scope of output bus: " | |
813 << result; | |
814 return false; | |
815 } | |
718 | 816 |
719 // Specify the callback function that provides audio samples to the audio | 817 // Specify the callback function that provides audio samples to the audio |
720 // unit. | 818 // unit. |
721 AURenderCallbackStruct render_callback; | 819 AURenderCallbackStruct render_callback; |
722 render_callback.inputProc = GetPlayoutData; | 820 render_callback.inputProc = GetPlayoutData; |
723 render_callback.inputProcRefCon = this; | 821 render_callback.inputProcRefCon = this; |
724 LOG_AND_RETURN_IF_ERROR( | 822 result = AudioUnitSetProperty( |
725 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_SetRenderCallback, | 823 vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, |
726 kAudioUnitScope_Input, output_bus, &render_callback, | 824 output_bus, &render_callback, sizeof(render_callback)); |
727 sizeof(render_callback)), | 825 if (result != noErr) { |
728 "Failed to specify the render callback on the output element"); | 826 DisposeAudioUnit(); |
827 LOG(LS_ERROR) << "Failed to specify the render callback on the output bus: " | |
828 << result; | |
829 return false; | |
830 } | |
729 | 831 |
730 // Disable AU buffer allocation for the recorder, we allocate our own. | 832 // Disable AU buffer allocation for the recorder, we allocate our own. |
731 // TODO(henrika): not sure that it actually saves resource to make this call. | 833 // TODO(henrika): not sure that it actually saves resource to make this call. |
732 UInt32 flag = 0; | 834 UInt32 flag = 0; |
733 LOG_AND_RETURN_IF_ERROR( | 835 result = AudioUnitSetProperty( |
734 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, | 836 vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, |
735 kAudioUnitScope_Output, input_bus, &flag, | 837 kAudioUnitScope_Output, input_bus, &flag, sizeof(flag)); |
736 sizeof(flag)), | 838 if (result != noErr) { |
737 "Failed to disable buffer allocation on the input element"); | 839 DisposeAudioUnit(); |
840 LOG(LS_ERROR) << "Failed to disable buffer allocation on the input bus: " | |
841 << result; | |
842 } | |
738 | 843 |
739 // Specify the callback to be called by the I/O thread to us when input audio | 844 // Specify the callback to be called by the I/O thread to us when input audio |
740 // is available. The recorded samples can then be obtained by calling the | 845 // is available. The recorded samples can then be obtained by calling the |
741 // AudioUnitRender() method. | 846 // AudioUnitRender() method. |
742 AURenderCallbackStruct input_callback; | 847 AURenderCallbackStruct input_callback; |
743 input_callback.inputProc = RecordedDataIsAvailable; | 848 input_callback.inputProc = RecordedDataIsAvailable; |
744 input_callback.inputProcRefCon = this; | 849 input_callback.inputProcRefCon = this; |
745 LOG_AND_RETURN_IF_ERROR( | 850 result = AudioUnitSetProperty(vpio_unit_, |
746 AudioUnitSetProperty(vpio_unit_, | 851 kAudioOutputUnitProperty_SetInputCallback, |
747 kAudioOutputUnitProperty_SetInputCallback, | 852 kAudioUnitScope_Global, input_bus, |
748 kAudioUnitScope_Global, input_bus, &input_callback, | 853 &input_callback, sizeof(input_callback)); |
749 sizeof(input_callback)), | 854 if (result != noErr) { |
750 "Failed to specify the input callback on the input element"); | 855 DisposeAudioUnit(); |
856 LOG(LS_ERROR) << "Failed to specify the input callback on the input bus: " | |
857 << result; | |
858 } | |
751 | 859 |
752 // Initialize the Voice-Processing I/O unit instance. | 860 // Initialize the Voice-Processing I/O unit instance. |
753 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), | 861 result = AudioUnitInitialize(vpio_unit_); |
754 "Failed to initialize the Voice-Processing I/O unit"); | 862 if (result != noErr) { |
863 result = AudioUnitUninitialize(vpio_unit_); | |
864 if (result != noErr) { | |
865 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; | |
866 } | |
867 DisposeAudioUnit(); | |
868 LOG(LS_ERROR) << "Failed to initialize the Voice-Processing I/O unit: " | |
869 << result; | |
870 return false; | |
871 } | |
755 return true; | 872 return true; |
756 } | 873 } |
757 | 874 |
758 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) { | 875 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) { |
759 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")"; | 876 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")"; |
760 // Stop the active audio unit. | 877 // Stop the active audio unit. |
761 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_), | 878 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_), |
762 "Failed to stop the the Voice-Processing I/O unit"); | 879 "Failed to stop the the Voice-Processing I/O unit"); |
763 | 880 |
764 // The stream format is about to be changed and it requires that we first | 881 // The stream format is about to be changed and it requires that we first |
(...skipping 18 matching lines...) Expand all Loading... | |
783 "Failed to initialize the Voice-Processing I/O unit"); | 900 "Failed to initialize the Voice-Processing I/O unit"); |
784 | 901 |
785 // Start rendering audio using the new format. | 902 // Start rendering audio using the new format. |
786 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), | 903 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), |
787 "Failed to start the Voice-Processing I/O unit"); | 904 "Failed to start the Voice-Processing I/O unit"); |
788 return true; | 905 return true; |
789 } | 906 } |
790 | 907 |
791 bool AudioDeviceIOS::InitPlayOrRecord() { | 908 bool AudioDeviceIOS::InitPlayOrRecord() { |
792 LOGI() << "InitPlayOrRecord"; | 909 LOGI() << "InitPlayOrRecord"; |
793 AVAudioSession* session = [AVAudioSession sharedInstance]; | 910 // Activate our own audio session if not already activated. |
pbos-webrtc
2015/11/18 18:51:48
-our own, it makes it sound like we own it.
henrika_webrtc
2015/11/18 19:48:21
Will fix.
| |
794 // Activate the audio session and ask for a set of preferred audio parameters. | 911 if (!ActivateAudioSession()) { |
795 ActivateAudioSession(session, true); | 912 return false; |
913 } | |
796 | 914 |
797 // Start observing audio session interruptions and route changes. | 915 // Start observing audio session interruptions and route changes. |
798 RegisterNotificationObservers(); | 916 RegisterNotificationObservers(); |
799 | 917 |
800 // Ensure that we got what what we asked for in our active audio session. | 918 // Ensure that we got what what we asked for in our active audio session. |
801 SetupAudioBuffersForActiveAudioSession(); | 919 SetupAudioBuffersForActiveAudioSession(); |
802 | 920 |
803 // Create, setup and initialize a new Voice-Processing I/O unit. | 921 // Create, setup and initialize a new Voice-Processing I/O unit. |
804 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { | 922 // TODO(henrika): remove CHECK when we are sure that we no longer see |
pbos-webrtc
2015/11/18 18:51:48
Are you sure? Seems like a good CHECK to keep. May
tkchin_webrtc
2015/11/18 19:25:29
Any of the iOS audio APIs can fail. Crashing due t
henrika_webrtc
2015/11/18 19:48:21
Will try to avoid CHECK. I know that we have discu
pbos-webrtc
2015/11/20 12:29:50
Maybe a DCHECK?
henrika_webrtc
2015/11/20 12:53:48
Would like to avoid DCHECK as well since the curre
| |
805 return false; | 923 // issues with audio unit initialization. |
806 } | 924 // See https://bugs.chromium.org/p/webrtc/issues/detail?id=5166 for details. |
925 RTC_CHECK(SetupAndInitializeVoiceProcessingAudioUnit()); | |
807 return true; | 926 return true; |
808 } | 927 } |
809 | 928 |
810 bool AudioDeviceIOS::ShutdownPlayOrRecord() { | 929 void AudioDeviceIOS::ShutdownPlayOrRecord() { |
811 LOGI() << "ShutdownPlayOrRecord"; | 930 LOGI() << "ShutdownPlayOrRecord"; |
812 // Remove audio session notification observers. | |
813 UnregisterNotificationObservers(); | |
814 | |
815 // Close and delete the voice-processing I/O unit. | 931 // Close and delete the voice-processing I/O unit. |
816 OSStatus result = -1; | 932 OSStatus result = -1; |
817 if (nullptr != vpio_unit_) { | 933 if (nullptr != vpio_unit_) { |
pbos-webrtc
2015/11/18 18:51:48
Did you wanna abort early if this wasn't the case?
henrika_webrtc
2015/11/18 19:48:21
No, if ActivateAudiSession has failed, we will nev
| |
818 result = AudioOutputUnitStop(vpio_unit_); | 934 result = AudioOutputUnitStop(vpio_unit_); |
819 if (result != noErr) { | 935 if (result != noErr) { |
820 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; | 936 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; |
821 } | 937 } |
822 result = AudioUnitUninitialize(vpio_unit_); | 938 result = AudioUnitUninitialize(vpio_unit_); |
823 if (result != noErr) { | 939 if (result != noErr) { |
824 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; | 940 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; |
825 } | 941 } |
826 result = AudioComponentInstanceDispose(vpio_unit_); | 942 DisposeAudioUnit(); |
827 if (result != noErr) { | |
828 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; | |
829 } | |
830 vpio_unit_ = nullptr; | |
831 } | 943 } |
832 | 944 |
945 // Remove audio session notification observers. | |
946 UnregisterNotificationObservers(); | |
947 | |
833 // All I/O should be stopped or paused prior to deactivating the audio | 948 // All I/O should be stopped or paused prior to deactivating the audio |
834 // session, hence we deactivate as last action. | 949 // session, hence we deactivate as last action. |
835 AVAudioSession* session = [AVAudioSession sharedInstance]; | 950 DeactivateAudioSession(); |
pbos-webrtc
2015/11/20 12:29:50
But this can be called even if vpio_unit_ == nullp
henrika_webrtc
2015/11/20 12:53:48
Yep. That is perfectly OK and the preferred way.
| |
836 ActivateAudioSession(session, false); | 951 } |
837 return true; | 952 |
953 void AudioDeviceIOS::DisposeAudioUnit() { | |
954 if (nullptr == vpio_unit_) | |
955 return; | |
956 OSStatus result = AudioComponentInstanceDispose(vpio_unit_); | |
957 if (result != noErr) { | |
958 LOG(LS_ERROR) << "AudioComponentInstanceDispose failed:" << result; | |
959 } | |
960 vpio_unit_ = nullptr; | |
838 } | 961 } |
839 | 962 |
840 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( | 963 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( |
841 void* in_ref_con, | 964 void* in_ref_con, |
842 AudioUnitRenderActionFlags* io_action_flags, | 965 AudioUnitRenderActionFlags* io_action_flags, |
843 const AudioTimeStamp* in_time_stamp, | 966 const AudioTimeStamp* in_time_stamp, |
844 UInt32 in_bus_number, | 967 UInt32 in_bus_number, |
845 UInt32 in_number_frames, | 968 UInt32 in_number_frames, |
846 AudioBufferList* io_data) { | 969 AudioBufferList* io_data) { |
847 RTC_DCHECK_EQ(1u, in_bus_number); | 970 RTC_DCHECK_EQ(1u, in_bus_number); |
(...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
926 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches | 1049 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches |
927 // the native I/O audio unit) to a preallocated intermediate buffer and | 1050 // the native I/O audio unit) to a preallocated intermediate buffer and |
928 // copy the result to the audio buffer in the |io_data| destination. | 1051 // copy the result to the audio buffer in the |io_data| destination. |
929 SInt8* source = playout_audio_buffer_.get(); | 1052 SInt8* source = playout_audio_buffer_.get(); |
930 fine_audio_buffer_->GetPlayoutData(source); | 1053 fine_audio_buffer_->GetPlayoutData(source); |
931 memcpy(destination, source, dataSizeInBytes); | 1054 memcpy(destination, source, dataSizeInBytes); |
932 return noErr; | 1055 return noErr; |
933 } | 1056 } |
934 | 1057 |
935 } // namespace webrtc | 1058 } // namespace webrtc |
OLD | NEW |