OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #if !defined(__has_feature) || !__has_feature(objc_arc) | 11 #if !defined(__has_feature) || !__has_feature(objc_arc) |
12 #error "This file requires ARC support." | 12 #error "This file requires ARC support." |
13 #endif | 13 #endif |
14 | 14 |
15 #import <AVFoundation/AVFoundation.h> | 15 #import <AVFoundation/AVFoundation.h> |
16 #import <Foundation/Foundation.h> | 16 #import <Foundation/Foundation.h> |
17 | 17 |
18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" | 18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" |
19 | 19 |
20 #include "webrtc/base/atomicops.h" | 20 #include "webrtc/base/atomicops.h" |
21 #include "webrtc/base/checks.h" | 21 #include "webrtc/base/checks.h" |
22 #include "webrtc/base/criticalsection.h" | |
22 #include "webrtc/base/logging.h" | 23 #include "webrtc/base/logging.h" |
24 #include "webrtc/base/thread_annotations.h" | |
23 #include "webrtc/modules/audio_device/fine_audio_buffer.h" | 25 #include "webrtc/modules/audio_device/fine_audio_buffer.h" |
24 #include "webrtc/modules/utility/include/helpers_ios.h" | 26 #include "webrtc/modules/utility/include/helpers_ios.h" |
25 | 27 |
26 namespace webrtc { | 28 namespace webrtc { |
27 | 29 |
30 // Protects |g_audio_session_users|. | |
31 static rtc::GlobalLockPod g_lock; | |
32 | |
33 // Counts number of users (=instances of this object) who need an active | 
34 // audio session. This variable is used to ensure that we only activate an audio | |
35 // session for the first user and deactivate it for the last. | |
36 // Member is static to ensure that the value is counted for all instances | |
37 // and not per instance. | |
38 static int g_audio_session_users GUARDED_BY(g_lock) = 0; | |
39 | |
28 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" | 40 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" |
29 | 41 |
30 #define LOG_AND_RETURN_IF_ERROR(error, message) \ | 42 #define LOG_AND_RETURN_IF_ERROR(error, message) \ |
31 do { \ | 43 do { \ |
32 OSStatus err = error; \ | 44 OSStatus err = error; \ |
33 if (err) { \ | 45 if (err) { \ |
34 LOG(LS_ERROR) << message << ": " << err; \ | 46 LOG(LS_ERROR) << message << ": " << err; \ |
35 return false; \ | 47 return false; \ |
36 } \ | 48 } \ |
37 } while (0) | 49 } while (0) |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
70 // Number of bytes per audio sample for 16-bit signed integer representation. | 82 // Number of bytes per audio sample for 16-bit signed integer representation. |
71 const UInt32 kBytesPerSample = 2; | 83 const UInt32 kBytesPerSample = 2; |
72 // Hardcoded delay estimates based on real measurements. | 84 // Hardcoded delay estimates based on real measurements. |
73 // TODO(henrika): these values are not used in combination with built-in AEC. | 85 // TODO(henrika): these values are not used in combination with built-in AEC. |
74 // Can most likely be removed. | 86 // Can most likely be removed. |
75 const UInt16 kFixedPlayoutDelayEstimate = 30; | 87 const UInt16 kFixedPlayoutDelayEstimate = 30; |
76 const UInt16 kFixedRecordDelayEstimate = 30; | 88 const UInt16 kFixedRecordDelayEstimate = 30; |
77 | 89 |
78 using ios::CheckAndLogError; | 90 using ios::CheckAndLogError; |
79 | 91 |
92 // Verifies that the current audio session supports input audio and that the | |
93 // required category and mode are enabled. | |
94 static bool VerifyAudioSession(AVAudioSession* session) { | |
95 LOG(LS_INFO) << "VerifyAudioSession"; | |
96 // Ensure that the device currently supports audio input. | |
97 if (!session.isInputAvailable) { | |
98 LOG(LS_ERROR) << "No audio input path is available!"; | |
99 return false; | |
100 } | |
101 | |
102 // Ensure that the required category and mode are actually activated. | |
103 if (![session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) { | |
104 LOG(LS_ERROR) | |
105 << "Failed to set category to AVAudioSessionCategoryPlayAndRecord"; | |
106 return false; | |
107 } | |
108 if (![session.mode isEqualToString:AVAudioSessionModeVoiceChat]) { | |
109 LOG(LS_ERROR) << "Failed to set mode to AVAudioSessionModeVoiceChat"; | |
110 return false; | |
111 } | |
112 return true; | |
113 } | |
114 | |
80 // Activates an audio session suitable for full duplex VoIP sessions when | 115 // Activates an audio session suitable for full duplex VoIP sessions when |
81 // |activate| is true. Also sets the preferred sample rate and IO buffer | 116 // |activate| is true. Also sets the preferred sample rate and IO buffer |
82 // duration. Deactivates an active audio session if |activate| is set to false. | 117 // duration. Deactivates an active audio session if |activate| is set to false. |
83 static void ActivateAudioSession(AVAudioSession* session, bool activate) { | 118 static bool ActivateAudioSession(AVAudioSession* session, bool activate) |
119 EXCLUSIVE_LOCKS_REQUIRED(g_lock) { | |
84 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; | 120 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; |
85 @autoreleasepool { | 121 @autoreleasepool { |
86 NSError* error = nil; | 122 NSError* error = nil; |
87 BOOL success = NO; | 123 BOOL success = NO; |
88 | 124 |
89 if (!activate) { | 125 if (!activate) { |
90 // Deactivate the audio session using an extra option and then return. | 126 // Deactivate the audio session using an extra option and then return. |
91 // AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation is used to | 127 // AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation is used to |
92 // ensure that other audio sessions that were interrupted by our session | 128 // ensure that other audio sessions that were interrupted by our session |
93 // can return to their active state. It is recommended for VoIP apps to | 129 // can return to their active state. It is recommended for VoIP apps to |
94 // use this option. | 130 // use this option. |
95 success = [session | 131 success = [session |
96 setActive:NO | 132 setActive:NO |
97 withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation | 133 withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation |
98 error:&error]; | 134 error:&error]; |
99 RTC_DCHECK(CheckAndLogError(success, error)); | 135 return CheckAndLogError(success, error); |
100 return; | |
101 } | 136 } |
102 | 137 |
103 // Go ahead and activate our own audio session since |activate| is true. | 138 // Go ahead and activate our own audio session since |activate| is true. |
104 // Use a category which supports simultaneous recording and playback. | 139 // Use a category which supports simultaneous recording and playback. |
105 // By default, using this category implies that our app’s audio is | 140 // By default, using this category implies that our app’s audio is |
106 // nonmixable, hence activating the session will interrupt any other | 141 // nonmixable, hence activating the session will interrupt any other |
107 // audio sessions which are also nonmixable. | 142 // audio sessions which are also nonmixable. |
108 if (session.category != AVAudioSessionCategoryPlayAndRecord) { | 143 if (session.category != AVAudioSessionCategoryPlayAndRecord) { |
109 error = nil; | 144 error = nil; |
110 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord | 145 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord |
(...skipping 11 matching lines...) Expand all Loading... | |
122 | 157 |
123 // Set the session's sample rate or the hardware sample rate. | 158 // Set the session's sample rate or the hardware sample rate. |
124 // It is essential that we use the same sample rate as stream format | 159 // It is essential that we use the same sample rate as stream format |
125 // to ensure that the I/O unit does not have to do sample rate conversion. | 160 // to ensure that the I/O unit does not have to do sample rate conversion. |
126 error = nil; | 161 error = nil; |
127 success = | 162 success = |
128 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; | 163 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; |
129 RTC_DCHECK(CheckAndLogError(success, error)); | 164 RTC_DCHECK(CheckAndLogError(success, error)); |
130 | 165 |
131 // Set the preferred audio I/O buffer duration, in seconds. | 166 // Set the preferred audio I/O buffer duration, in seconds. |
132 // TODO(henrika): add more comments here. | |
133 error = nil; | 167 error = nil; |
134 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration | 168 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration |
135 error:&error]; | 169 error:&error]; |
136 RTC_DCHECK(CheckAndLogError(success, error)); | 170 RTC_DCHECK(CheckAndLogError(success, error)); |
137 | 171 |
138 // Activate the audio session. Activation can fail if another active audio | 172 // Activate the audio session. Activation can fail if another active audio |
139 // session (e.g. phone call) has higher priority than ours. | 173 // session (e.g. phone call) has higher priority than ours. |
140 error = nil; | 174 error = nil; |
141 success = [session setActive:YES error:&error]; | 175 success = [session setActive:YES error:&error]; |
142 RTC_DCHECK(CheckAndLogError(success, error)); | 176 if (!CheckAndLogError(success, error)) { |
143 RTC_CHECK(session.isInputAvailable) << "No input path is available!"; | 177 return false; |
178 } | |
144 | 179 |
145 // Ensure that category and mode are actually activated. | 180 // Ensure that the active audio session has the correct category and mode. |
146 RTC_DCHECK( | 181 if (!VerifyAudioSession(session)) { |
147 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); | 182 LOG(LS_ERROR) << "Failed to verify audio session category and mode"; |
148 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); | 183 return false; |
184 } | |
149 | 185 |
150 // Try to set the preferred number of hardware audio channels. These calls | 186 // Try to set the preferred number of hardware audio channels. These calls |
151 // must be done after setting the audio session’s category and mode and | 187 // must be done after setting the audio session’s category and mode and |
152 // activating the session. | 188 // activating the session. |
153 // We try to use mono in both directions to save resources and format | 189 // We try to use mono in both directions to save resources and format |
154 // conversions in the audio unit. Some devices only support stereo; | 190 // conversions in the audio unit. Some devices only support stereo; |
155 // e.g. wired headset on iPhone 6. | 191 // e.g. wired headset on iPhone 6. |
156 // TODO(henrika): add support for stereo if needed. | 192 // TODO(henrika): add support for stereo if needed. |
157 error = nil; | 193 error = nil; |
158 success = | 194 success = |
159 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels | 195 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels |
160 error:&error]; | 196 error:&error]; |
161 RTC_DCHECK(CheckAndLogError(success, error)); | 197 RTC_DCHECK(CheckAndLogError(success, error)); |
162 error = nil; | 198 error = nil; |
163 success = | 199 success = |
164 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels | 200 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels |
165 error:&error]; | 201 error:&error]; |
166 RTC_DCHECK(CheckAndLogError(success, error)); | 202 RTC_DCHECK(CheckAndLogError(success, error)); |
203 return true; | |
167 } | 204 } |
168 } | 205 } |
169 | 206 |
207 // An application can create more than one ADM and start audio streaming | |
208 // for all of them. It is essential that we only activate the app's audio | |
209 // session once (for the first one) and deactivate it once (for the last). | |
210 static bool ActivateAudioSessionWithLock() { | |
pbos-webrtc
2015/11/20 12:29:50
Don't need WithLock in the name here.
henrika_webrtc
2015/11/20 12:53:48
Acknowledged.
| |
211 LOGI() << "ActivateAudioSessionWithLock"; | |
212 rtc::GlobalLockScope ls(&g_lock); | |
213 if (g_audio_session_users == 0) { | |
214 // The system provides an audio session object upon launch of an | |
215 // application. However, we must initialize the session in order to | |
216 // handle interruptions. Implicit initialization occurs when obtaining | |
217 // a reference to the AVAudioSession object. | |
218 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
219 // Try to activate the audio session and ask for a set of preferred audio | |
220 // parameters. | |
221 if (!ActivateAudioSession(session, true)) { | |
222 LOG(LS_ERROR) << "Failed to activate the audio session"; | |
223 return false; | |
224 } | |
225 LOG(LS_INFO) << "The audio session is now activated"; | |
226 } | |
227 ++g_audio_session_users; | |
228 LOG(LS_INFO) << "Number of audio session users: " << g_audio_session_users; | |
229 return true; | |
230 } | |
231 | |
232 // If more than one object is using the audio session, ensure that only the | |
233 // last object deactivates. Apple recommends: "activate your audio session | |
234 // only as needed and deactivate it when you are not using audio". | |
235 static bool DeactivateAudioSessionWithLock() { | |
pbos-webrtc
2015/11/20 12:29:50
Same here
henrika_webrtc
2015/11/20 12:53:48
Acknowledged.
| |
236 LOGI() << "DeactivateAudioSessionWithLock"; | |
237 rtc::GlobalLockScope ls(&g_lock); | |
238 if (g_audio_session_users == 1) { | |
239 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
240 if (!ActivateAudioSession(session, false)) { | |
241 LOG(LS_ERROR) << "Failed to deactivate the audio session"; | |
242 return false; | |
243 } | |
244 LOG(LS_INFO) << "Our audio session is now deactivated"; | |
245 } | |
246 --g_audio_session_users; | |
247 LOG(LS_INFO) << "Number of audio session users: " << g_audio_session_users; | |
248 return true; | |
249 } | |
250 | |
170 #if !defined(NDEBUG) | 251 #if !defined(NDEBUG) |
171 // Helper method for printing out an AudioStreamBasicDescription structure. | 252 // Helper method for printing out an AudioStreamBasicDescription structure. |
172 static void LogABSD(AudioStreamBasicDescription absd) { | 253 static void LogABSD(AudioStreamBasicDescription absd) { |
173 char formatIDString[5]; | 254 char formatIDString[5]; |
174 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); | 255 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); |
175 bcopy(&formatID, formatIDString, 4); | 256 bcopy(&formatID, formatIDString, 4); |
176 formatIDString[4] = '\0'; | 257 formatIDString[4] = '\0'; |
177 LOG(LS_INFO) << "LogABSD"; | 258 LOG(LS_INFO) << "LogABSD"; |
178 LOG(LS_INFO) << " sample rate: " << absd.mSampleRate; | 259 LOG(LS_INFO) << " sample rate: " << absd.mSampleRate; |
179 LOG(LS_INFO) << " format ID: " << formatIDString; | 260 LOG(LS_INFO) << " format ID: " << formatIDString; |
(...skipping 25 matching lines...) Expand all Loading... | |
205 playing_(0), | 286 playing_(0), |
206 initialized_(false), | 287 initialized_(false), |
207 rec_is_initialized_(false), | 288 rec_is_initialized_(false), |
208 play_is_initialized_(false), | 289 play_is_initialized_(false), |
209 audio_interruption_observer_(nullptr), | 290 audio_interruption_observer_(nullptr), |
210 route_change_observer_(nullptr) { | 291 route_change_observer_(nullptr) { |
211 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); | 292 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); |
212 } | 293 } |
213 | 294 |
214 AudioDeviceIOS::~AudioDeviceIOS() { | 295 AudioDeviceIOS::~AudioDeviceIOS() { |
215 LOGI() << "~dtor"; | 296 LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); |
216 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 297 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
217 Terminate(); | 298 Terminate(); |
218 } | 299 } |
219 | 300 |
220 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { | 301 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { |
221 LOGI() << "AttachAudioBuffer"; | 302 LOGI() << "AttachAudioBuffer"; |
222 RTC_DCHECK(audioBuffer); | 303 RTC_DCHECK(audioBuffer); |
223 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 304 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
224 audio_device_buffer_ = audioBuffer; | 305 audio_device_buffer_ = audioBuffer; |
225 } | 306 } |
(...skipping 21 matching lines...) Expand all Loading... | |
247 initialized_ = true; | 328 initialized_ = true; |
248 return 0; | 329 return 0; |
249 } | 330 } |
250 | 331 |
251 int32_t AudioDeviceIOS::Terminate() { | 332 int32_t AudioDeviceIOS::Terminate() { |
252 LOGI() << "Terminate"; | 333 LOGI() << "Terminate"; |
253 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 334 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
254 if (!initialized_) { | 335 if (!initialized_) { |
255 return 0; | 336 return 0; |
256 } | 337 } |
257 ShutdownPlayOrRecord(); | 338 StopPlayout(); |
339 StopRecording(); | |
258 initialized_ = false; | 340 initialized_ = false; |
341 { | |
pbos-webrtc
2015/11/20 12:29:50
mismatched {, this can't compile
henrika_webrtc
2015/11/20 12:53:48
Ooops. Fixed.
| |
342 rtc::GlobalLockScope ls(&g_lock); | |
343 if (g_audio_session_users != 0) { | |
344 LOG(LS_WARNING) << "Object is destructed with an active audio session"; | |
345 } | |
346 RTC_DCHECK_GE(g_audio_session_users, 0); | |
259 return 0; | 347 return 0; |
260 } | 348 } |
261 | 349 |
262 int32_t AudioDeviceIOS::InitPlayout() { | 350 int32_t AudioDeviceIOS::InitPlayout() { |
263 LOGI() << "InitPlayout"; | 351 LOGI() << "InitPlayout"; |
264 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 352 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
265 RTC_DCHECK(initialized_); | 353 RTC_DCHECK(initialized_); |
266 RTC_DCHECK(!play_is_initialized_); | 354 RTC_DCHECK(!play_is_initialized_); |
267 RTC_DCHECK(!playing_); | 355 RTC_DCHECK(!playing_); |
268 if (!rec_is_initialized_) { | 356 if (!rec_is_initialized_) { |
269 if (!InitPlayOrRecord()) { | 357 if (!InitPlayOrRecord()) { |
270 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; | 358 LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!"; |
271 return -1; | 359 return -1; |
272 } | 360 } |
273 } | 361 } |
274 play_is_initialized_ = true; | 362 play_is_initialized_ = true; |
275 return 0; | 363 return 0; |
276 } | 364 } |
277 | 365 |
278 int32_t AudioDeviceIOS::InitRecording() { | 366 int32_t AudioDeviceIOS::InitRecording() { |
279 LOGI() << "InitRecording"; | 367 LOGI() << "InitRecording"; |
280 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 368 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
281 RTC_DCHECK(initialized_); | 369 RTC_DCHECK(initialized_); |
282 RTC_DCHECK(!rec_is_initialized_); | 370 RTC_DCHECK(!rec_is_initialized_); |
283 RTC_DCHECK(!recording_); | 371 RTC_DCHECK(!recording_); |
284 if (!play_is_initialized_) { | 372 if (!play_is_initialized_) { |
285 if (!InitPlayOrRecord()) { | 373 if (!InitPlayOrRecord()) { |
286 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; | 374 LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!"; |
287 return -1; | 375 return -1; |
288 } | 376 } |
289 } | 377 } |
290 rec_is_initialized_ = true; | 378 rec_is_initialized_ = true; |
291 return 0; | 379 return 0; |
292 } | 380 } |
293 | 381 |
294 int32_t AudioDeviceIOS::StartPlayout() { | 382 int32_t AudioDeviceIOS::StartPlayout() { |
295 LOGI() << "StartPlayout"; | 383 LOGI() << "StartPlayout"; |
296 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 384 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
297 RTC_DCHECK(play_is_initialized_); | 385 RTC_DCHECK(play_is_initialized_); |
298 RTC_DCHECK(!playing_); | 386 RTC_DCHECK(!playing_); |
299 fine_audio_buffer_->ResetPlayout(); | 387 fine_audio_buffer_->ResetPlayout(); |
300 if (!recording_) { | 388 if (!recording_) { |
301 OSStatus result = AudioOutputUnitStart(vpio_unit_); | 389 OSStatus result = AudioOutputUnitStart(vpio_unit_); |
302 if (result != noErr) { | 390 if (result != noErr) { |
303 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; | 391 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartPlayout: " |
392 << result; | |
304 return -1; | 393 return -1; |
305 } | 394 } |
395 LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started"; | |
306 } | 396 } |
307 rtc::AtomicOps::ReleaseStore(&playing_, 1); | 397 rtc::AtomicOps::ReleaseStore(&playing_, 1); |
308 return 0; | 398 return 0; |
309 } | 399 } |
310 | 400 |
311 int32_t AudioDeviceIOS::StopPlayout() { | 401 int32_t AudioDeviceIOS::StopPlayout() { |
312 LOGI() << "StopPlayout"; | 402 LOGI() << "StopPlayout"; |
313 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 403 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
314 if (!play_is_initialized_ || !playing_) { | 404 if (!play_is_initialized_ || !playing_) { |
315 return 0; | 405 return 0; |
316 } | 406 } |
317 if (!recording_) { | 407 if (!recording_) { |
318 ShutdownPlayOrRecord(); | 408 ShutdownPlayOrRecord(); |
319 } | 409 } |
320 play_is_initialized_ = false; | 410 play_is_initialized_ = false; |
321 rtc::AtomicOps::ReleaseStore(&playing_, 0); | 411 rtc::AtomicOps::ReleaseStore(&playing_, 0); |
322 return 0; | 412 return 0; |
323 } | 413 } |
324 | 414 |
325 int32_t AudioDeviceIOS::StartRecording() { | 415 int32_t AudioDeviceIOS::StartRecording() { |
326 LOGI() << "StartRecording"; | 416 LOGI() << "StartRecording"; |
327 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 417 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
328 RTC_DCHECK(rec_is_initialized_); | 418 RTC_DCHECK(rec_is_initialized_); |
329 RTC_DCHECK(!recording_); | 419 RTC_DCHECK(!recording_); |
330 fine_audio_buffer_->ResetRecord(); | 420 fine_audio_buffer_->ResetRecord(); |
331 if (!playing_) { | 421 if (!playing_) { |
332 OSStatus result = AudioOutputUnitStart(vpio_unit_); | 422 OSStatus result = AudioOutputUnitStart(vpio_unit_); |
333 if (result != noErr) { | 423 if (result != noErr) { |
334 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; | 424 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartRecording: " |
425 << result; | |
335 return -1; | 426 return -1; |
336 } | 427 } |
428 LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started"; | |
pbos-webrtc
2015/11/20 12:29:50
stopped?
henrika_webrtc
2015/11/20 12:53:48
No, started. Maybe I am missing something but the
| |
337 } | 429 } |
338 rtc::AtomicOps::ReleaseStore(&recording_, 1); | 430 rtc::AtomicOps::ReleaseStore(&recording_, 1); |
339 return 0; | 431 return 0; |
340 } | 432 } |
341 | 433 |
342 int32_t AudioDeviceIOS::StopRecording() { | 434 int32_t AudioDeviceIOS::StopRecording() { |
343 LOGI() << "StopRecording"; | 435 LOGI() << "StopRecording"; |
344 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 436 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
345 if (!rec_is_initialized_ || !recording_) { | 437 if (!rec_is_initialized_ || !recording_) { |
346 return 0; | 438 return 0; |
(...skipping 285 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
632 record_audio_buffer_.reset(new SInt8[data_byte_size]); | 724 record_audio_buffer_.reset(new SInt8[data_byte_size]); |
633 audio_record_buffer_list_.mNumberBuffers = 1; | 725 audio_record_buffer_list_.mNumberBuffers = 1; |
634 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; | 726 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; |
635 audio_buffer->mNumberChannels = record_parameters_.channels(); | 727 audio_buffer->mNumberChannels = record_parameters_.channels(); |
636 audio_buffer->mDataByteSize = data_byte_size; | 728 audio_buffer->mDataByteSize = data_byte_size; |
637 audio_buffer->mData = record_audio_buffer_.get(); | 729 audio_buffer->mData = record_audio_buffer_.get(); |
638 } | 730 } |
639 | 731 |
640 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { | 732 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { |
641 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; | 733 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; |
642 RTC_DCHECK(!vpio_unit_); | 734 RTC_DCHECK(!vpio_unit_) << "VoiceProcessingIO audio unit already exists"; |
643 // Create an audio component description to identify the Voice-Processing | 735 // Create an audio component description to identify the Voice-Processing |
644 // I/O audio unit. | 736 // I/O audio unit. |
645 AudioComponentDescription vpio_unit_description; | 737 AudioComponentDescription vpio_unit_description; |
646 vpio_unit_description.componentType = kAudioUnitType_Output; | 738 vpio_unit_description.componentType = kAudioUnitType_Output; |
647 vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO; | 739 vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO; |
648 vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple; | 740 vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple; |
649 vpio_unit_description.componentFlags = 0; | 741 vpio_unit_description.componentFlags = 0; |
650 vpio_unit_description.componentFlagsMask = 0; | 742 vpio_unit_description.componentFlagsMask = 0; |
651 // Obtain an audio unit instance given the description. | 743 // Obtain an audio unit instance given the description. |
652 AudioComponent found_vpio_unit_ref = | 744 AudioComponent found_vpio_unit_ref = |
653 AudioComponentFindNext(nullptr, &vpio_unit_description); | 745 AudioComponentFindNext(nullptr, &vpio_unit_description); |
654 | 746 |
655 // Create a Voice-Processing IO audio unit. | 747 // Create a Voice-Processing IO audio unit. |
656 LOG_AND_RETURN_IF_ERROR( | 748 OSStatus result = noErr; |
657 AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_), | 749 result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_); |
658 "Failed to create a VoiceProcessingIO audio unit"); | 750 if (result != noErr) { |
751 vpio_unit_ = nullptr; | |
752 LOG(LS_ERROR) << "AudioComponentInstanceNew failed: " << result; | |
753 return false; | |
754 } | |
659 | 755 |
660 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable | 756 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable |
661 // input on the input scope of the input element. | 757 // input on the input scope of the input element. |
662 AudioUnitElement input_bus = 1; | 758 AudioUnitElement input_bus = 1; |
663 UInt32 enable_input = 1; | 759 UInt32 enable_input = 1; |
664 LOG_AND_RETURN_IF_ERROR( | 760 result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, |
665 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, | 761 kAudioUnitScope_Input, input_bus, &enable_input, |
666 kAudioUnitScope_Input, input_bus, &enable_input, | 762 sizeof(enable_input)); |
667 sizeof(enable_input)), | 763 if (result != noErr) { |
668 "Failed to enable input on input scope of input element"); | 764 DisposeAudioUnit(); |
765 LOG(LS_ERROR) << "Failed to enable input on input scope of input element: " | |
766 << result; | |
767 return false; | |
768 } | |
669 | 769 |
670 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable | 770 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable |
671 // output on the output scope of the output element. | 771 // output on the output scope of the output element. |
672 AudioUnitElement output_bus = 0; | 772 AudioUnitElement output_bus = 0; |
673 UInt32 enable_output = 1; | 773 UInt32 enable_output = 1; |
674 LOG_AND_RETURN_IF_ERROR( | 774 result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, |
675 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, | 775 kAudioUnitScope_Output, output_bus, |
676 kAudioUnitScope_Output, output_bus, &enable_output, | 776 &enable_output, sizeof(enable_output)); |
677 sizeof(enable_output)), | 777 if (result != noErr) { |
678 "Failed to enable output on output scope of output element"); | 778 DisposeAudioUnit(); |
779 LOG(LS_ERROR) | |
780 << "Failed to enable output on output scope of output element: " | |
781 << result; | |
782 return false; | |
783 } | |
679 | 784 |
680 // Set the application formats for input and output: | 785 // Set the application formats for input and output: |
681 // - use same format in both directions | 786 // - use same format in both directions |
682 // - avoid resampling in the I/O unit by using the hardware sample rate | 787 // - avoid resampling in the I/O unit by using the hardware sample rate |
683 // - linear PCM => noncompressed audio data format with one frame per packet | 788 // - linear PCM => noncompressed audio data format with one frame per packet |
684 // - no need to specify interleaving since only mono is supported | 789 // - no need to specify interleaving since only mono is supported |
685 AudioStreamBasicDescription application_format = {0}; | 790 AudioStreamBasicDescription application_format = {0}; |
686 UInt32 size = sizeof(application_format); | 791 UInt32 size = sizeof(application_format); |
687 RTC_DCHECK_EQ(playout_parameters_.sample_rate(), | 792 RTC_DCHECK_EQ(playout_parameters_.sample_rate(), |
688 record_parameters_.sample_rate()); | 793 record_parameters_.sample_rate()); |
689 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); | 794 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); |
690 application_format.mSampleRate = playout_parameters_.sample_rate(); | 795 application_format.mSampleRate = playout_parameters_.sample_rate(); |
691 application_format.mFormatID = kAudioFormatLinearPCM; | 796 application_format.mFormatID = kAudioFormatLinearPCM; |
692 application_format.mFormatFlags = | 797 application_format.mFormatFlags = |
693 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; | 798 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; |
694 application_format.mBytesPerPacket = kBytesPerSample; | 799 application_format.mBytesPerPacket = kBytesPerSample; |
695 application_format.mFramesPerPacket = 1; // uncompressed | 800 application_format.mFramesPerPacket = 1; // uncompressed |
696 application_format.mBytesPerFrame = kBytesPerSample; | 801 application_format.mBytesPerFrame = kBytesPerSample; |
697 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; | 802 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; |
698 application_format.mBitsPerChannel = 8 * kBytesPerSample; | 803 application_format.mBitsPerChannel = 8 * kBytesPerSample; |
699 // Store the new format. | 804 // Store the new format. |
700 application_format_ = application_format; | 805 application_format_ = application_format; |
701 #if !defined(NDEBUG) | 806 #if !defined(NDEBUG) |
702 LogABSD(application_format_); | 807 LogABSD(application_format_); |
703 #endif | 808 #endif |
704 | 809 |
705 // Set the application format on the output scope of the input element/bus. | 810 // Set the application format on the output scope of the input element/bus. |
706 LOG_AND_RETURN_IF_ERROR( | 811 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
707 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | 812 kAudioUnitScope_Output, input_bus, |
708 kAudioUnitScope_Output, input_bus, | 813 &application_format, size); |
709 &application_format, size), | 814 if (result != noErr) { |
710 "Failed to set application format on output scope of input element"); | 815 DisposeAudioUnit(); |
816 LOG(LS_ERROR) | |
817 << "Failed to set application format on output scope of input bus: " | |
818 << result; | |
819 return false; | |
820 } | |
711 | 821 |
712 // Set the application format on the input scope of the output element/bus. | 822 // Set the application format on the input scope of the output element/bus. |
713 LOG_AND_RETURN_IF_ERROR( | 823 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
714 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | 824 kAudioUnitScope_Input, output_bus, |
715 kAudioUnitScope_Input, output_bus, | 825 &application_format, size); |
716 &application_format, size), | 826 if (result != noErr) { |
717 "Failed to set application format on input scope of output element"); | 827 DisposeAudioUnit(); |
828 LOG(LS_ERROR) | |
829 << "Failed to set application format on input scope of output bus: " | |
830 << result; | |
831 return false; | |
832 } | |
718 | 833 |
719 // Specify the callback function that provides audio samples to the audio | 834 // Specify the callback function that provides audio samples to the audio |
720 // unit. | 835 // unit. |
721 AURenderCallbackStruct render_callback; | 836 AURenderCallbackStruct render_callback; |
722 render_callback.inputProc = GetPlayoutData; | 837 render_callback.inputProc = GetPlayoutData; |
723 render_callback.inputProcRefCon = this; | 838 render_callback.inputProcRefCon = this; |
724 LOG_AND_RETURN_IF_ERROR( | 839 result = AudioUnitSetProperty( |
725 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_SetRenderCallback, | 840 vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, |
726 kAudioUnitScope_Input, output_bus, &render_callback, | 841 output_bus, &render_callback, sizeof(render_callback)); |
727 sizeof(render_callback)), | 842 if (result != noErr) { |
728 "Failed to specify the render callback on the output element"); | 843 DisposeAudioUnit(); |
844 LOG(LS_ERROR) << "Failed to specify the render callback on the output bus: " | |
845 << result; | |
846 return false; | |
847 } | |
729 | 848 |
730 // Disable AU buffer allocation for the recorder, we allocate our own. | 849 // Disable AU buffer allocation for the recorder, we allocate our own. |
731 // TODO(henrika): not sure that it actually saves resource to make this call. | 850 // TODO(henrika): not sure that it actually saves resource to make this call. |
732 UInt32 flag = 0; | 851 UInt32 flag = 0; |
733 LOG_AND_RETURN_IF_ERROR( | 852 result = AudioUnitSetProperty( |
734 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, | 853 vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, |
735 kAudioUnitScope_Output, input_bus, &flag, | 854 kAudioUnitScope_Output, input_bus, &flag, sizeof(flag)); |
736 sizeof(flag)), | 855 if (result != noErr) { |
737 "Failed to disable buffer allocation on the input element"); | 856 DisposeAudioUnit(); |
857 LOG(LS_ERROR) << "Failed to disable buffer allocation on the input bus: " | |
858 << result; | |
859 } | |
738 | 860 |
739 // Specify the callback to be called by the I/O thread to us when input audio | 861 // Specify the callback to be called by the I/O thread to us when input audio |
740 // is available. The recorded samples can then be obtained by calling the | 862 // is available. The recorded samples can then be obtained by calling the |
741 // AudioUnitRender() method. | 863 // AudioUnitRender() method. |
742 AURenderCallbackStruct input_callback; | 864 AURenderCallbackStruct input_callback; |
743 input_callback.inputProc = RecordedDataIsAvailable; | 865 input_callback.inputProc = RecordedDataIsAvailable; |
744 input_callback.inputProcRefCon = this; | 866 input_callback.inputProcRefCon = this; |
745 LOG_AND_RETURN_IF_ERROR( | 867 result = AudioUnitSetProperty(vpio_unit_, |
746 AudioUnitSetProperty(vpio_unit_, | 868 kAudioOutputUnitProperty_SetInputCallback, |
747 kAudioOutputUnitProperty_SetInputCallback, | 869 kAudioUnitScope_Global, input_bus, |
748 kAudioUnitScope_Global, input_bus, &input_callback, | 870 &input_callback, sizeof(input_callback)); |
749 sizeof(input_callback)), | 871 if (result != noErr) { |
750 "Failed to specify the input callback on the input element"); | 872 DisposeAudioUnit(); |
873 LOG(LS_ERROR) << "Failed to specify the input callback on the input bus: " | |
874 << result; | |
875 } | |
751 | 876 |
752 // Initialize the Voice-Processing I/O unit instance. | 877 // Initialize the Voice-Processing I/O unit instance. |
753 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), | 878 result = AudioUnitInitialize(vpio_unit_); |
754 "Failed to initialize the Voice-Processing I/O unit"); | 879 if (result != noErr) { |
880 result = AudioUnitUninitialize(vpio_unit_); | |
881 if (result != noErr) { | |
882 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; | |
883 } | |
884 DisposeAudioUnit(); | |
885 LOG(LS_ERROR) << "Failed to initialize the Voice-Processing I/O unit: " | |
886 << result; | |
887 return false; | |
888 } | |
755 return true; | 889 return true; |
756 } | 890 } |
757 | 891 |
758 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) { | 892 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) { |
759 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")"; | 893 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")"; |
760 // Stop the active audio unit. | 894 // Stop the active audio unit. |
761 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_), | 895 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_), |
762 "Failed to stop the the Voice-Processing I/O unit"); | 896 "Failed to stop the the Voice-Processing I/O unit"); |
763 | 897 |
764 // The stream format is about to be changed and it requires that we first | 898 // The stream format is about to be changed and it requires that we first |
(...skipping 18 matching lines...) Expand all Loading... | |
783 "Failed to initialize the Voice-Processing I/O unit"); | 917 "Failed to initialize the Voice-Processing I/O unit"); |
784 | 918 |
785 // Start rendering audio using the new format. | 919 // Start rendering audio using the new format. |
786 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), | 920 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), |
787 "Failed to start the Voice-Processing I/O unit"); | 921 "Failed to start the Voice-Processing I/O unit"); |
788 return true; | 922 return true; |
789 } | 923 } |
790 | 924 |
791 bool AudioDeviceIOS::InitPlayOrRecord() { | 925 bool AudioDeviceIOS::InitPlayOrRecord() { |
792 LOGI() << "InitPlayOrRecord"; | 926 LOGI() << "InitPlayOrRecord"; |
927 // Activate the audio session if not already activated. | |
928 if (!ActivateAudioSessionWithLock()) { | |
929 return false; | |
930 } | |
931 | |
932 // Ensure that the active audio session has the correct category and mode. | |
793 AVAudioSession* session = [AVAudioSession sharedInstance]; | 933 AVAudioSession* session = [AVAudioSession sharedInstance]; |
794 // Activate the audio session and ask for a set of preferred audio parameters. | 934 if (!VerifyAudioSession(session)) { |
795 ActivateAudioSession(session, true); | 935 DeactivateAudioSessionWithLock(); |
936 LOG(LS_ERROR) << "Failed to verify audio session category and mode"; | |
937 return false; | |
938 } | |
796 | 939 |
797 // Start observing audio session interruptions and route changes. | 940 // Start observing audio session interruptions and route changes. |
798 RegisterNotificationObservers(); | 941 RegisterNotificationObservers(); |
799 | 942 |
800 // Ensure that we got what what we asked for in our active audio session. | 943 // Ensure that we got what what we asked for in our active audio session. |
801 SetupAudioBuffersForActiveAudioSession(); | 944 SetupAudioBuffersForActiveAudioSession(); |
802 | 945 |
803 // Create, setup and initialize a new Voice-Processing I/O unit. | 946 // Create, setup and initialize a new Voice-Processing I/O unit. |
804 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { | 947 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { |
948 // Reduce usage count for the audio session and possibly deactivate it if | |
949 // this object is the only user. | |
950 DeactivateAudioSessionWithLock(); | |
805 return false; | 951 return false; |
806 } | 952 } |
807 return true; | 953 return true; |
808 } | 954 } |
809 | 955 |
810 bool AudioDeviceIOS::ShutdownPlayOrRecord() { | 956 void AudioDeviceIOS::ShutdownPlayOrRecord() { |
811 LOGI() << "ShutdownPlayOrRecord"; | 957 LOGI() << "ShutdownPlayOrRecord"; |
812 // Remove audio session notification observers. | |
813 UnregisterNotificationObservers(); | |
814 | |
815 // Close and delete the voice-processing I/O unit. | 958 // Close and delete the voice-processing I/O unit. |
816 OSStatus result = -1; | 959 OSStatus result = -1; |
817 if (nullptr != vpio_unit_) { | 960 if (nullptr != vpio_unit_) { |
818 result = AudioOutputUnitStop(vpio_unit_); | 961 result = AudioOutputUnitStop(vpio_unit_); |
819 if (result != noErr) { | 962 if (result != noErr) { |
820 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; | 963 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; |
821 } | 964 } |
822 result = AudioUnitUninitialize(vpio_unit_); | 965 result = AudioUnitUninitialize(vpio_unit_); |
823 if (result != noErr) { | 966 if (result != noErr) { |
824 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; | 967 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; |
825 } | 968 } |
826 result = AudioComponentInstanceDispose(vpio_unit_); | 969 DisposeAudioUnit(); |
827 if (result != noErr) { | |
828 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; | |
829 } | |
830 vpio_unit_ = nullptr; | |
831 } | 970 } |
832 | 971 |
972 // Remove audio session notification observers. | |
973 UnregisterNotificationObservers(); | |
974 | |
833 // All I/O should be stopped or paused prior to deactivating the audio | 975 // All I/O should be stopped or paused prior to deactivating the audio |
834 // session, hence we deactivate as last action. | 976 // session, hence we deactivate as last action. |
835 AVAudioSession* session = [AVAudioSession sharedInstance]; | 977 DeactivateAudioSessionWithLock(); |
836 ActivateAudioSession(session, false); | 978 } |
837 return true; | 979 |
980 void AudioDeviceIOS::DisposeAudioUnit() { | |
981 if (nullptr == vpio_unit_) | |
982 return; | |
983 OSStatus result = AudioComponentInstanceDispose(vpio_unit_); | |
984 if (result != noErr) { | |
985 LOG(LS_ERROR) << "AudioComponentInstanceDispose failed:" << result; | |
986 } | |
987 vpio_unit_ = nullptr; | |
838 } | 988 } |
839 | 989 |
840 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( | 990 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( |
841 void* in_ref_con, | 991 void* in_ref_con, |
842 AudioUnitRenderActionFlags* io_action_flags, | 992 AudioUnitRenderActionFlags* io_action_flags, |
843 const AudioTimeStamp* in_time_stamp, | 993 const AudioTimeStamp* in_time_stamp, |
844 UInt32 in_bus_number, | 994 UInt32 in_bus_number, |
845 UInt32 in_number_frames, | 995 UInt32 in_number_frames, |
846 AudioBufferList* io_data) { | 996 AudioBufferList* io_data) { |
847 RTC_DCHECK_EQ(1u, in_bus_number); | 997 RTC_DCHECK_EQ(1u, in_bus_number); |
(...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
926 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches | 1076 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches |
927 // the native I/O audio unit) to a preallocated intermediate buffer and | 1077 // the native I/O audio unit) to a preallocated intermediate buffer and |
928 // copy the result to the audio buffer in the |io_data| destination. | 1078 // copy the result to the audio buffer in the |io_data| destination. |
929 SInt8* source = playout_audio_buffer_.get(); | 1079 SInt8* source = playout_audio_buffer_.get(); |
930 fine_audio_buffer_->GetPlayoutData(source); | 1080 fine_audio_buffer_->GetPlayoutData(source); |
931 memcpy(destination, source, dataSizeInBytes); | 1081 memcpy(destination, source, dataSizeInBytes); |
932 return noErr; | 1082 return noErr; |
933 } | 1083 } |
934 | 1084 |
935 } // namespace webrtc | 1085 } // namespace webrtc |
OLD | NEW |