OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
48 | 48 |
49 #define LOG_IF_ERROR(error, message) \ | 49 #define LOG_IF_ERROR(error, message) \ |
50 do { \ | 50 do { \ |
51 OSStatus err = error; \ | 51 OSStatus err = error; \ |
52 if (err) { \ | 52 if (err) { \ |
53 LOG(LS_ERROR) << message << ": " << err; \ | 53 LOG(LS_ERROR) << message << ": " << err; \ |
54 } \ | 54 } \ |
55 } while (0) | 55 } while (0) |
56 | 56 |
57 | 57 |
58 // Number of bytes per audio sample for 16-bit signed integer representation. | |
59 const UInt32 kBytesPerSample = 2; | |
60 // Hardcoded delay estimates based on real measurements. | 58 // Hardcoded delay estimates based on real measurements. |
61 // TODO(henrika): these values are not used in combination with built-in AEC. | 59 // TODO(henrika): these values are not used in combination with built-in AEC. |
62 // Can most likely be removed. | 60 // Can most likely be removed. |
63 const UInt16 kFixedPlayoutDelayEstimate = 30; | 61 const UInt16 kFixedPlayoutDelayEstimate = 30; |
64 const UInt16 kFixedRecordDelayEstimate = 30; | 62 const UInt16 kFixedRecordDelayEstimate = 30; |
65 // Calls to AudioUnitInitialize() can fail if called back-to-back on different | |
66 // ADM instances. A fall-back solution is to allow multiple sequential calls | |
67 // with as small delay between each. This factor sets the max number of allowed | |
68 // initialization attempts. | |
69 const int kMaxNumberOfAudioUnitInitializeAttempts = 5; | |
70 | 63 |
71 using ios::CheckAndLogError; | 64 using ios::CheckAndLogError; |
72 | 65 |
73 #if !defined(NDEBUG) | 66 #if !defined(NDEBUG) |
74 // Helper method for printing out an AudioStreamBasicDescription structure. | |
75 static void LogABSD(AudioStreamBasicDescription absd) { | |
76 char formatIDString[5]; | |
77 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID); | |
78 bcopy(&formatID, formatIDString, 4); | |
79 formatIDString[4] = '\0'; | |
80 LOG(LS_INFO) << "LogABSD"; | |
81 LOG(LS_INFO) << " sample rate: " << absd.mSampleRate; | |
82 LOG(LS_INFO) << " format ID: " << formatIDString; | |
83 LOG(LS_INFO) << " format flags: " << std::hex << absd.mFormatFlags; | |
84 LOG(LS_INFO) << " bytes per packet: " << absd.mBytesPerPacket; | |
85 LOG(LS_INFO) << " frames per packet: " << absd.mFramesPerPacket; | |
86 LOG(LS_INFO) << " bytes per frame: " << absd.mBytesPerFrame; | |
87 LOG(LS_INFO) << " channels per packet: " << absd.mChannelsPerFrame; | |
88 LOG(LS_INFO) << " bits per channel: " << absd.mBitsPerChannel; | |
89 LOG(LS_INFO) << " reserved: " << absd.mReserved; | |
90 } | |
91 | |
92 // Helper method that logs essential device information strings. | 67 // Helper method that logs essential device information strings. |
93 static void LogDeviceInfo() { | 68 static void LogDeviceInfo() { |
94 LOG(LS_INFO) << "LogDeviceInfo"; | 69 LOG(LS_INFO) << "LogDeviceInfo"; |
95 @autoreleasepool { | 70 @autoreleasepool { |
96 LOG(LS_INFO) << " system name: " << ios::GetSystemName(); | 71 LOG(LS_INFO) << " system name: " << ios::GetSystemName(); |
97 LOG(LS_INFO) << " system version 1(2): " << ios::GetSystemVersionAsString(); | 72 LOG(LS_INFO) << " system version 1(2): " << ios::GetSystemVersionAsString(); |
98 LOG(LS_INFO) << " system version 2(2): " << ios::GetSystemVersion(); | 73 LOG(LS_INFO) << " system version 2(2): " << ios::GetSystemVersion(); |
99 LOG(LS_INFO) << " device type: " << ios::GetDeviceType(); | 74 LOG(LS_INFO) << " device type: " << ios::GetDeviceType(); |
100 LOG(LS_INFO) << " device name: " << ios::GetDeviceName(); | 75 LOG(LS_INFO) << " device name: " << ios::GetDeviceName(); |
101 LOG(LS_INFO) << " process name: " << ios::GetProcessName(); | 76 LOG(LS_INFO) << " process name: " << ios::GetProcessName(); |
102 LOG(LS_INFO) << " process ID: " << ios::GetProcessID(); | 77 LOG(LS_INFO) << " process ID: " << ios::GetProcessID(); |
103 LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString(); | 78 LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString(); |
104 LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount(); | 79 LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount(); |
105 #if defined(__IPHONE_9_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0 | 80 #if defined(__IPHONE_9_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0 |
106 LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled(); | 81 LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled(); |
107 #endif | 82 #endif |
108 } | 83 } |
109 } | 84 } |
110 #endif // !defined(NDEBUG) | 85 #endif // !defined(NDEBUG) |
111 | 86 |
112 AudioDeviceIOS::AudioDeviceIOS() | 87 AudioDeviceIOS::AudioDeviceIOS() |
113 : async_invoker_(new rtc::AsyncInvoker()), | 88 : async_invoker_(new rtc::AsyncInvoker()), |
114 audio_device_buffer_(nullptr), | 89 audio_device_buffer_(nullptr), |
115 vpio_unit_(nullptr), | 90 audio_unit_(nullptr), |
116 recording_(0), | 91 recording_(0), |
117 playing_(0), | 92 playing_(0), |
118 initialized_(false), | 93 initialized_(false), |
119 rec_is_initialized_(false), | 94 rec_is_initialized_(false), |
120 play_is_initialized_(false), | 95 play_is_initialized_(false), |
121 is_interrupted_(false) { | 96 is_interrupted_(false) { |
122 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); | 97 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); |
123 thread_ = rtc::Thread::Current(); | 98 thread_ = rtc::Thread::Current(); |
124 audio_session_observer_ = | 99 audio_session_observer_ = |
125 [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this]; | 100 [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this]; |
126 } | 101 } |
127 | 102 |
128 AudioDeviceIOS::~AudioDeviceIOS() { | 103 AudioDeviceIOS::~AudioDeviceIOS() { |
129 LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); | 104 LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); |
130 audio_session_observer_ = nil; | 105 audio_session_observer_ = nil; |
131 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 106 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
211 return 0; | 186 return 0; |
212 } | 187 } |
213 | 188 |
214 int32_t AudioDeviceIOS::StartPlayout() { | 189 int32_t AudioDeviceIOS::StartPlayout() { |
215 LOGI() << "StartPlayout"; | 190 LOGI() << "StartPlayout"; |
216 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 191 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
217 RTC_DCHECK(play_is_initialized_); | 192 RTC_DCHECK(play_is_initialized_); |
218 RTC_DCHECK(!playing_); | 193 RTC_DCHECK(!playing_); |
219 fine_audio_buffer_->ResetPlayout(); | 194 fine_audio_buffer_->ResetPlayout(); |
220 if (!recording_) { | 195 if (!recording_) { |
221 OSStatus result = AudioOutputUnitStart(vpio_unit_); | 196 if (!audio_unit_->Start()) { |
222 if (result != noErr) { | 197 RTCLogError(@"StartPlayout failed to start audio unit."); |
223 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartPlayout: " | |
224 << result; | |
225 return -1; | 198 return -1; |
226 } | 199 } |
227 LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started"; | 200 LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started"; |
228 } | 201 } |
229 rtc::AtomicOps::ReleaseStore(&playing_, 1); | 202 rtc::AtomicOps::ReleaseStore(&playing_, 1); |
230 return 0; | 203 return 0; |
231 } | 204 } |
232 | 205 |
233 int32_t AudioDeviceIOS::StopPlayout() { | 206 int32_t AudioDeviceIOS::StopPlayout() { |
234 LOGI() << "StopPlayout"; | 207 LOGI() << "StopPlayout"; |
235 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 208 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
236 if (!play_is_initialized_ || !playing_) { | 209 if (!play_is_initialized_ || !playing_) { |
237 return 0; | 210 return 0; |
238 } | 211 } |
239 if (!recording_) { | 212 if (!recording_) { |
240 ShutdownPlayOrRecord(); | 213 ShutdownPlayOrRecord(); |
241 } | 214 } |
242 play_is_initialized_ = false; | 215 play_is_initialized_ = false; |
243 rtc::AtomicOps::ReleaseStore(&playing_, 0); | 216 rtc::AtomicOps::ReleaseStore(&playing_, 0); |
244 return 0; | 217 return 0; |
245 } | 218 } |
246 | 219 |
247 int32_t AudioDeviceIOS::StartRecording() { | 220 int32_t AudioDeviceIOS::StartRecording() { |
248 LOGI() << "StartRecording"; | 221 LOGI() << "StartRecording"; |
249 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 222 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
250 RTC_DCHECK(rec_is_initialized_); | 223 RTC_DCHECK(rec_is_initialized_); |
251 RTC_DCHECK(!recording_); | 224 RTC_DCHECK(!recording_); |
252 fine_audio_buffer_->ResetRecord(); | 225 fine_audio_buffer_->ResetRecord(); |
253 if (!playing_) { | 226 if (!playing_) { |
254 OSStatus result = AudioOutputUnitStart(vpio_unit_); | 227 if (!audio_unit_->Start()) { |
255 if (result != noErr) { | 228 RTCLogError(@"StartRecording failed to start audio unit."); |
256 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartRecording: " | |
257 << result; | |
258 return -1; | 229 return -1; |
259 } | 230 } |
260 LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started"; | 231 LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started"; |
261 } | 232 } |
262 rtc::AtomicOps::ReleaseStore(&recording_, 1); | 233 rtc::AtomicOps::ReleaseStore(&recording_, 1); |
263 return 0; | 234 return 0; |
264 } | 235 } |
265 | 236 |
266 int32_t AudioDeviceIOS::StopRecording() { | 237 int32_t AudioDeviceIOS::StopRecording() { |
267 LOGI() << "StopRecording"; | 238 LOGI() << "StopRecording"; |
(...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
369 RTC_DCHECK(thread_); | 340 RTC_DCHECK(thread_); |
370 if (thread_->IsCurrent()) { | 341 if (thread_->IsCurrent()) { |
371 HandleValidRouteChange(); | 342 HandleValidRouteChange(); |
372 return; | 343 return; |
373 } | 344 } |
374 async_invoker_->AsyncInvoke<void>( | 345 async_invoker_->AsyncInvoke<void>( |
375 thread_, | 346 thread_, |
376 rtc::Bind(&webrtc::AudioDeviceIOS::HandleValidRouteChange, this)); | 347 rtc::Bind(&webrtc::AudioDeviceIOS::HandleValidRouteChange, this)); |
377 } | 348 } |
378 | 349 |
| 350 OSStatus AudioDeviceIOS::OnDeliverRecordedData( |
| 351 AudioUnitRenderActionFlags* flags, |
| 352 const AudioTimeStamp* time_stamp, |
| 353 UInt32 bus_number, |
| 354 UInt32 num_frames, |
| 355 AudioBufferList* /* io_data */) { |
| 356 OSStatus result = noErr; |
| 357 // Simply return if recording is not enabled. |
| 358 if (!rtc::AtomicOps::AcquireLoad(&recording_)) |
| 359 return result; |
| 360 |
| 361 size_t frames_per_buffer = record_parameters_.frames_per_buffer(); |
| 362 if (num_frames != frames_per_buffer) { |
| 363 // We have seen short bursts (1-2 frames) where |num_frames| changes. |
| 364 // Add a log to keep track of longer sequences if that should ever happen. |
| 365 // Also return since calling AudioUnitRender in this state will only result |
| 366 // in kAudio_ParamError (-50) anyhow. |
| 367 RTCLogWarning(@"Expected %u frames but got %u", |
| 368 static_cast<unsigned int>(frames_per_buffer), |
| 369 static_cast<unsigned int>(num_frames)); |
| 370 return result; |
| 371 } |
| 372 |
| 373 // Obtain the recorded audio samples by initiating a rendering cycle. |
| 374 // Since it happens on the input bus, the |io_data| parameter is a reference |
| 375 // to the preallocated audio buffer list that the audio unit renders into. |
| 376 // We can make the audio unit provide a buffer instead in io_data, but we |
| 377 // currently just use our own. |
| 378 // TODO(henrika): should error handling be improved? |
| 379 AudioBufferList* io_data = &audio_record_buffer_list_; |
| 380 result = |
| 381 audio_unit_->Render(flags, time_stamp, bus_number, num_frames, io_data); |
| 382 if (result != noErr) { |
| 383 RTCLogError(@"Failed to render audio."); |
| 384 return result; |
| 385 } |
| 386 |
| 387 // Get a pointer to the recorded audio and send it to the WebRTC ADB. |
| 388 // Use the FineAudioBuffer instance to convert between native buffer size |
| 389 // and the 10ms buffer size used by WebRTC. |
| 390 AudioBuffer* audio_buffer = &io_data->mBuffers[0]; |
| 391 const size_t size_in_bytes = audio_buffer->mDataByteSize; |
| 392 RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, |
| 393 num_frames); |
| 394 int8_t* data = static_cast<int8_t*>(audio_buffer->mData); |
| 395 fine_audio_buffer_->DeliverRecordedData(data, size_in_bytes, |
| 396 kFixedPlayoutDelayEstimate, |
| 397 kFixedRecordDelayEstimate); |
| 398 return noErr; |
| 399 } |
| 400 |
| 401 OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags, |
| 402 const AudioTimeStamp* time_stamp, |
| 403 UInt32 bus_number, |
| 404 UInt32 num_frames, |
| 405 AudioBufferList* io_data) { |
| 406 // Verify 16-bit, noninterleaved mono PCM signal format. |
| 407 RTC_DCHECK_EQ(1u, io_data->mNumberBuffers); |
| 408 AudioBuffer* audio_buffer = &io_data->mBuffers[0]; |
| 409 RTC_DCHECK_EQ(1u, audio_buffer->mNumberChannels); |
| 410 // Get pointer to internal audio buffer to which new audio data shall be |
| 411 // written. |
| 412 const size_t size_in_bytes = audio_buffer->mDataByteSize; |
| 413 RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, |
| 414 num_frames); |
| 415 int8_t* destination = reinterpret_cast<int8_t*>(audio_buffer->mData); |
| 416 // Produce silence and give audio unit a hint about it if playout is not |
| 417 // activated. |
| 418 if (!rtc::AtomicOps::AcquireLoad(&playing_)) { |
| 419 *flags |= kAudioUnitRenderAction_OutputIsSilence; |
| 420 memset(destination, 0, size_in_bytes); |
| 421 return noErr; |
| 422 } |
| 423 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches |
| 424 // the native I/O audio unit) to a preallocated intermediate buffer and |
| 425 // copy the result to the audio buffer in the |io_data| destination. |
| 426 int8_t* source = playout_audio_buffer_.get(); |
| 427 fine_audio_buffer_->GetPlayoutData(source); |
| 428 memcpy(destination, source, size_in_bytes); |
| 429 return noErr; |
| 430 } |
| 431 |
379 void AudioDeviceIOS::HandleInterruptionBegin() { | 432 void AudioDeviceIOS::HandleInterruptionBegin() { |
380 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 433 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
381 RTCLog(@"Stopping the audio unit due to interruption begin."); | 434 RTCLog(@"Stopping the audio unit due to interruption begin."); |
382 LOG_IF_ERROR(AudioOutputUnitStop(vpio_unit_), | 435 if (!audio_unit_->Stop()) { |
383 "Failed to stop the the Voice-Processing I/O unit"); | 436 RTCLogError(@"Failed to stop the audio unit."); |
| 437 } |
384 is_interrupted_ = true; | 438 is_interrupted_ = true; |
385 } | 439 } |
386 | 440 |
387 void AudioDeviceIOS::HandleInterruptionEnd() { | 441 void AudioDeviceIOS::HandleInterruptionEnd() { |
388 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 442 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
389 RTCLog(@"Starting the audio unit due to interruption end."); | 443 RTCLog(@"Starting the audio unit due to interruption end."); |
390 LOG_IF_ERROR(AudioOutputUnitStart(vpio_unit_), | 444 if (!audio_unit_->Start()) { |
391 "Failed to start the the Voice-Processing I/O unit"); | 445 RTCLogError(@"Failed to start the audio unit."); |
| 446 } |
392 is_interrupted_ = false; | 447 is_interrupted_ = false; |
393 } | 448 } |
394 | 449 |
395 void AudioDeviceIOS::HandleValidRouteChange() { | 450 void AudioDeviceIOS::HandleValidRouteChange() { |
396 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 451 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
397 | 452 |
398 // Don't do anything if we're interrupted. | 453 // Don't do anything if we're interrupted. |
399 if (is_interrupted_) { | 454 if (is_interrupted_) { |
400 return; | 455 return; |
401 } | 456 } |
402 | 457 |
403 // Only restart audio for a valid route change if the session sample rate | 458 // Only restart audio for a valid route change if the session sample rate |
404 // has changed. | 459 // has changed. |
405 RTCAudioSession* session = [RTCAudioSession sharedInstance]; | 460 RTCAudioSession* session = [RTCAudioSession sharedInstance]; |
406 const double current_sample_rate = playout_parameters_.sample_rate(); | 461 const double current_sample_rate = playout_parameters_.sample_rate(); |
407 const double session_sample_rate = session.sampleRate; | 462 const double session_sample_rate = session.sampleRate; |
408 if (current_sample_rate != session_sample_rate) { | 463 if (current_sample_rate != session_sample_rate) { |
409 RTCLog(@"Route changed caused sample rate to change from %f to %f. " | 464 RTCLog(@"Route changed caused sample rate to change from %f to %f. " |
410 "Restarting audio unit.", current_sample_rate, session_sample_rate); | 465 "Restarting audio unit.", current_sample_rate, session_sample_rate); |
411 if (!RestartAudioUnitWithNewFormat(session_sample_rate)) { | 466 if (!RestartAudioUnit(session_sample_rate)) { |
412 RTCLogError(@"Audio restart failed."); | 467 RTCLogError(@"Audio restart failed."); |
413 } | 468 } |
414 } | 469 } |
415 } | 470 } |
416 | 471 |
417 void AudioDeviceIOS::UpdateAudioDeviceBuffer() { | 472 void AudioDeviceIOS::UpdateAudioDeviceBuffer() { |
418 LOGI() << "UpdateAudioDevicebuffer"; | 473 LOGI() << "UpdateAudioDevicebuffer"; |
419 // AttachAudioBuffer() is called at construction by the main class but check | 474 // AttachAudioBuffer() is called at construction by the main class but check |
420 // just in case. | 475 // just in case. |
421 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; | 476 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; |
422 // Inform the audio device buffer (ADB) about the new audio format. | 477 // Inform the audio device buffer (ADB) about the new audio format. |
423 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); | 478 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); |
424 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); | 479 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); |
425 audio_device_buffer_->SetRecordingSampleRate( | 480 audio_device_buffer_->SetRecordingSampleRate( |
426 record_parameters_.sample_rate()); | 481 record_parameters_.sample_rate()); |
427 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); | 482 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); |
428 } | 483 } |
429 | 484 |
430 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { | 485 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { |
431 LOGI() << "SetupAudioBuffersForActiveAudioSession"; | 486 LOGI() << "SetupAudioBuffersForActiveAudioSession"; |
432 // Verify the current values once the audio session has been activated. | 487 // Verify the current values once the audio session has been activated. |
433 RTCAudioSession* session = [RTCAudioSession sharedInstance]; | 488 RTCAudioSession* session = [RTCAudioSession sharedInstance]; |
434 double sample_rate = session.sampleRate; | 489 double sample_rate = session.sampleRate; |
435 NSTimeInterval io_buffer_duration = session.IOBufferDuration; | 490 NSTimeInterval io_buffer_duration = session.IOBufferDuration; |
436 LOG(LS_INFO) << " sample rate: " << sample_rate; | 491 RTCLog(@"%@", session); |
437 LOG(LS_INFO) << " IO buffer duration: " << io_buffer_duration; | |
438 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels; | |
439 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels; | |
440 LOG(LS_INFO) << " output latency: " << session.outputLatency; | |
441 LOG(LS_INFO) << " input latency: " << session.inputLatency; | |
442 | 492 |
443 // Log a warning message for the case when we are unable to set the preferred | 493 // Log a warning message for the case when we are unable to set the preferred |
444 // hardware sample rate but continue and use the non-ideal sample rate after | 494 // hardware sample rate but continue and use the non-ideal sample rate after |
445 // reinitializing the audio parameters. Most BT headsets only support 8kHz or | 495 // reinitializing the audio parameters. Most BT headsets only support 8kHz or |
446 // 16kHz. | 496 // 16kHz. |
447 RTCAudioSessionConfiguration* webRTCConfig = | 497 RTCAudioSessionConfiguration* webRTCConfig = |
448 [RTCAudioSessionConfiguration webRTCConfiguration]; | 498 [RTCAudioSessionConfiguration webRTCConfiguration]; |
449 if (sample_rate != webRTCConfig.sampleRate) { | 499 if (sample_rate != webRTCConfig.sampleRate) { |
450 LOG(LS_WARNING) << "Unable to set the preferred sample rate"; | 500 LOG(LS_WARNING) << "Unable to set the preferred sample rate"; |
451 } | 501 } |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
494 // at each input callback when calling AudioUnitRender(). | 544 // at each input callback when calling AudioUnitRender(). |
495 const int data_byte_size = record_parameters_.GetBytesPerBuffer(); | 545 const int data_byte_size = record_parameters_.GetBytesPerBuffer(); |
496 record_audio_buffer_.reset(new SInt8[data_byte_size]); | 546 record_audio_buffer_.reset(new SInt8[data_byte_size]); |
497 audio_record_buffer_list_.mNumberBuffers = 1; | 547 audio_record_buffer_list_.mNumberBuffers = 1; |
498 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; | 548 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; |
499 audio_buffer->mNumberChannels = record_parameters_.channels(); | 549 audio_buffer->mNumberChannels = record_parameters_.channels(); |
500 audio_buffer->mDataByteSize = data_byte_size; | 550 audio_buffer->mDataByteSize = data_byte_size; |
501 audio_buffer->mData = record_audio_buffer_.get(); | 551 audio_buffer->mData = record_audio_buffer_.get(); |
502 } | 552 } |
503 | 553 |
504 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { | 554 bool AudioDeviceIOS::CreateAudioUnit() { |
505 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; | 555 RTC_DCHECK(!audio_unit_); |
506 RTC_DCHECK(!vpio_unit_) << "VoiceProcessingIO audio unit already exists"; | |
507 // Create an audio component description to identify the Voice-Processing | |
508 // I/O audio unit. | |
509 AudioComponentDescription vpio_unit_description; | |
510 vpio_unit_description.componentType = kAudioUnitType_Output; | |
511 vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO; | |
512 vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple; | |
513 vpio_unit_description.componentFlags = 0; | |
514 vpio_unit_description.componentFlagsMask = 0; | |
515 | 556 |
516 // Obtain an audio unit instance given the description. | 557 audio_unit_.reset(new VoiceProcessingAudioUnit(this)); |
517 AudioComponent found_vpio_unit_ref = | 558 if (!audio_unit_->Init()) { |
518 AudioComponentFindNext(nullptr, &vpio_unit_description); | 559 audio_unit_.reset(); |
519 | |
520 // Create a Voice-Processing IO audio unit. | |
521 OSStatus result = noErr; | |
522 result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_); | |
523 if (result != noErr) { | |
524 vpio_unit_ = nullptr; | |
525 LOG(LS_ERROR) << "AudioComponentInstanceNew failed: " << result; | |
526 return false; | 560 return false; |
527 } | 561 } |
528 | 562 |
529 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable | 563 return true; |
530 // input on the input scope of the input element. | 564 } |
531 AudioUnitElement input_bus = 1; | 565 |
532 UInt32 enable_input = 1; | 566 bool AudioDeviceIOS::RestartAudioUnit(float sample_rate) { |
533 result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, | 567 RTCLog(@"Restarting audio unit with new sample rate: %f", sample_rate); |
534 kAudioUnitScope_Input, input_bus, &enable_input, | 568 |
535 sizeof(enable_input)); | 569 // Stop the active audio unit. |
536 if (result != noErr) { | 570 if (!audio_unit_->Stop()) { |
537 DisposeAudioUnit(); | 571 RTCLogError(@"Failed to stop the audio unit."); |
538 LOG(LS_ERROR) << "Failed to enable input on input scope of input element: " | |
539 << result; | |
540 return false; | 572 return false; |
541 } | 573 } |
542 | 574 |
543 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable | 575 // The stream format is about to be changed and it requires that we first |
544 // output on the output scope of the output element. | 576 // uninitialize it to deallocate its resources. |
545 AudioUnitElement output_bus = 0; | 577 if (!audio_unit_->Uninitialize()) { |
546 UInt32 enable_output = 1; | 578 RTCLogError(@"Failed to uninitialize the audio unit."); |
547 result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO, | |
548 kAudioUnitScope_Output, output_bus, | |
549 &enable_output, sizeof(enable_output)); | |
550 if (result != noErr) { | |
551 DisposeAudioUnit(); | |
552 LOG(LS_ERROR) | |
553 << "Failed to enable output on output scope of output element: " | |
554 << result; | |
555 return false; | 579 return false; |
556 } | 580 } |
557 | 581 |
558 // Set the application formats for input and output: | |
559 // - use same format in both directions | |
560 // - avoid resampling in the I/O unit by using the hardware sample rate | |
561 // - linear PCM => noncompressed audio data format with one frame per packet | |
562 // - no need to specify interleaving since only mono is supported | |
563 AudioStreamBasicDescription application_format = {0}; | |
564 UInt32 size = sizeof(application_format); | |
565 RTC_DCHECK_EQ(playout_parameters_.sample_rate(), | |
566 record_parameters_.sample_rate()); | |
567 RTC_DCHECK_EQ(1, kRTCAudioSessionPreferredNumberOfChannels); | |
568 application_format.mSampleRate = playout_parameters_.sample_rate(); | |
569 application_format.mFormatID = kAudioFormatLinearPCM; | |
570 application_format.mFormatFlags = | |
571 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; | |
572 application_format.mBytesPerPacket = kBytesPerSample; | |
573 application_format.mFramesPerPacket = 1; // uncompressed | |
574 application_format.mBytesPerFrame = kBytesPerSample; | |
575 application_format.mChannelsPerFrame = | |
576 kRTCAudioSessionPreferredNumberOfChannels; | |
577 application_format.mBitsPerChannel = 8 * kBytesPerSample; | |
578 // Store the new format. | |
579 application_format_ = application_format; | |
580 #if !defined(NDEBUG) | |
581 LogABSD(application_format_); | |
582 #endif | |
583 | |
584 // Set the application format on the output scope of the input element/bus. | |
585 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | |
586 kAudioUnitScope_Output, input_bus, | |
587 &application_format, size); | |
588 if (result != noErr) { | |
589 DisposeAudioUnit(); | |
590 LOG(LS_ERROR) | |
591 << "Failed to set application format on output scope of input bus: " | |
592 << result; | |
593 return false; | |
594 } | |
595 | |
596 // Set the application format on the input scope of the output element/bus. | |
597 result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | |
598 kAudioUnitScope_Input, output_bus, | |
599 &application_format, size); | |
600 if (result != noErr) { | |
601 DisposeAudioUnit(); | |
602 LOG(LS_ERROR) | |
603 << "Failed to set application format on input scope of output bus: " | |
604 << result; | |
605 return false; | |
606 } | |
607 | |
608 // Specify the callback function that provides audio samples to the audio | |
609 // unit. | |
610 AURenderCallbackStruct render_callback; | |
611 render_callback.inputProc = GetPlayoutData; | |
612 render_callback.inputProcRefCon = this; | |
613 result = AudioUnitSetProperty( | |
614 vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, | |
615 output_bus, &render_callback, sizeof(render_callback)); | |
616 if (result != noErr) { | |
617 DisposeAudioUnit(); | |
618 LOG(LS_ERROR) << "Failed to specify the render callback on the output bus: " | |
619 << result; | |
620 return false; | |
621 } | |
622 | |
623 // Disable AU buffer allocation for the recorder, we allocate our own. | |
624 // TODO(henrika): not sure that it actually saves resource to make this call. | |
625 UInt32 flag = 0; | |
626 result = AudioUnitSetProperty( | |
627 vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer, | |
628 kAudioUnitScope_Output, input_bus, &flag, sizeof(flag)); | |
629 if (result != noErr) { | |
630 DisposeAudioUnit(); | |
631 LOG(LS_ERROR) << "Failed to disable buffer allocation on the input bus: " | |
632 << result; | |
633 } | |
634 | |
635 // Specify the callback to be called by the I/O thread to us when input audio | |
636 // is available. The recorded samples can then be obtained by calling the | |
637 // AudioUnitRender() method. | |
638 AURenderCallbackStruct input_callback; | |
639 input_callback.inputProc = RecordedDataIsAvailable; | |
640 input_callback.inputProcRefCon = this; | |
641 result = AudioUnitSetProperty(vpio_unit_, | |
642 kAudioOutputUnitProperty_SetInputCallback, | |
643 kAudioUnitScope_Global, input_bus, | |
644 &input_callback, sizeof(input_callback)); | |
645 if (result != noErr) { | |
646 DisposeAudioUnit(); | |
647 LOG(LS_ERROR) << "Failed to specify the input callback on the input bus: " | |
648 << result; | |
649 } | |
650 | |
651 // Initialize the Voice-Processing I/O unit instance. | |
652 // Calls to AudioUnitInitialize() can fail if called back-to-back on | |
653 // different ADM instances. The error message in this case is -66635 which is | |
654 // undocumented. Tests have shown that calling AudioUnitInitialize a second | |
655 // time, after a short sleep, avoids this issue. | |
656 // See webrtc:5166 for details. | |
657 int failed_initalize_attempts = 0; | |
658 result = AudioUnitInitialize(vpio_unit_); | |
659 while (result != noErr) { | |
660 LOG(LS_ERROR) << "Failed to initialize the Voice-Processing I/O unit: " | |
661 << result; | |
662 ++failed_initalize_attempts; | |
663 if (failed_initalize_attempts == kMaxNumberOfAudioUnitInitializeAttempts) { | |
664 // Max number of initialization attempts exceeded, hence abort. | |
665 LOG(LS_WARNING) << "Too many initialization attempts"; | |
666 DisposeAudioUnit(); | |
667 return false; | |
668 } | |
669 LOG(LS_INFO) << "pause 100ms and try audio unit initialization again..."; | |
670 [NSThread sleepForTimeInterval:0.1f]; | |
671 result = AudioUnitInitialize(vpio_unit_); | |
672 } | |
673 LOG(LS_INFO) << "Voice-Processing I/O unit is now initialized"; | |
674 return true; | |
675 } | |
676 | |
// Restarts the Voice-Processing I/O unit after an external sample-rate
// change (e.g. a route change reported by the OS). Stops and uninitializes
// the unit, reallocates buffers for the now-active session parameters,
// applies the updated stream format on both I/O scopes, then reinitializes
// and restarts the unit. Returns false if any Core Audio call fails.
bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) {
  LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")";
  // Stop the active audio unit.
  LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
                          "Failed to stop the Voice-Processing I/O unit");

  // The stream format is about to be changed and it requires that we first
  // uninitialize it to deallocate its resources.
  LOG_AND_RETURN_IF_ERROR(
      AudioUnitUninitialize(vpio_unit_),
      "Failed to uninitialize the Voice-Processing I/O unit");

  // Allocate new buffers given the new stream format.
  SetupAudioBuffersForActiveAudioSession();

  // Update the existing application format using the new sample rate.
  application_format_.mSampleRate = playout_parameters_.sample_rate();
  UInt32 size = sizeof(application_format_);
  // Apply the updated format on the output scope of the input bus (bus 1,
  // recorded audio) and the input scope of the output bus (bus 0, playout).
  // These calls previously ignored their OSStatus results; check them so a
  // failed format change is reported instead of silently continuing with a
  // misconfigured unit.
  LOG_AND_RETURN_IF_ERROR(
      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Output, 1, &application_format_,
                           size),
      "Failed to set stream format on the output scope of the input bus");
  LOG_AND_RETURN_IF_ERROR(
      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &application_format_,
                           size),
      "Failed to set stream format on the input scope of the output bus");

  // Prepare the audio unit to render audio again.
  LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
                          "Failed to initialize the Voice-Processing I/O unit");
  LOG(LS_INFO) << "Voice-Processing I/O unit is now reinitialized";

  // Start rendering audio using the new format.
  LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
                          "Failed to start the Voice-Processing I/O unit");
  LOG(LS_INFO) << "Voice-Processing I/O unit is now restarted";
  return true;
}
711 | 602 |
// Activates and configures the shared audio session for WebRTC use,
// registers the audio session observer, sizes the audio buffers for the
// active session and creates/initializes the Voice-Processing I/O unit.
// Returns false (leaving the session deactivated) on any failure.
bool AudioDeviceIOS::InitPlayOrRecord() {
  LOGI() << "InitPlayOrRecord";

  // Use the correct audio session configuration for WebRTC.
  // This will attempt to activate the audio session.
  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  [session lockForConfiguration];
  NSError* error = nil;
  if (![session configureWebRTCSession:&error]) {
    RTCLogError(@"Failed to configure WebRTC session: %@",
                error.localizedDescription);
    [session unlockForConfiguration];
    return false;
  }

  // Start observing audio session interruptions and route changes.
  [session pushDelegate:audio_session_observer_];

  // Ensure that we got what we asked for in our active audio session.
  SetupAudioBuffersForActiveAudioSession();

  // Create, setup and initialize a new Voice-Processing I/O unit.
  if (!SetupAndInitializeVoiceProcessingAudioUnit()) {
    // Undo the delegate registration above so a failed initialization does
    // not leave a dangling observer attached to the shared session (callers
    // are not expected to invoke ShutdownPlayOrRecord after a failed init).
    [session removeDelegate:audio_session_observer_];
    [session setActive:NO error:nil];
    [session unlockForConfiguration];
    return false;
  }
  [session unlockForConfiguration];
  return true;
}
742 | 636 |
// Tears down audio I/O: stops, uninitializes and disposes the
// voice-processing I/O unit (if one exists), detaches the audio session
// observer and finally deactivates the shared audio session.
void AudioDeviceIOS::ShutdownPlayOrRecord() {
  LOGI() << "ShutdownPlayOrRecord";
  // Close and delete the voice-processing I/O unit, if one was created.
  if (vpio_unit_ != nullptr) {
    OSStatus status = AudioOutputUnitStop(vpio_unit_);
    if (status != noErr) {
      LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << status;
    }
    status = AudioUnitUninitialize(vpio_unit_);
    if (status != noErr) {
      LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << status;
    }
    DisposeAudioUnit();
  }

  // Stop receiving audio session interruption and route-change callbacks.
  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  [session removeDelegate:audio_session_observer_];

  // All I/O should be stopped or paused prior to deactivating the audio
  // session, hence we deactivate as last action.
  [session lockForConfiguration];
  [session setActive:NO error:nil];
  [session unlockForConfiguration];
}
769 | 657 |
// Releases the Voice-Processing I/O unit instance and clears the handle.
// Safe to call when no unit exists; logs (but does not propagate) a
// disposal failure.
void AudioDeviceIOS::DisposeAudioUnit() {
  if (vpio_unit_ != nullptr) {
    const OSStatus status = AudioComponentInstanceDispose(vpio_unit_);
    if (status != noErr) {
      LOG(LS_ERROR) << "AudioComponentInstanceDispose failed:" << status;
    }
    vpio_unit_ = nullptr;
  }
}
779 | |
// Static AURenderCallback trampoline for the input bus. |in_ref_con| holds
// the AudioDeviceIOS instance registered with the callback; forward the
// notification to its member implementation.
OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
    void* in_ref_con,
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* in_time_stamp,
    UInt32 in_bus_number,
    UInt32 in_number_frames,
    AudioBufferList* io_data) {
  // Recording happens on the input bus (bus 1) only.
  RTC_DCHECK_EQ(1u, in_bus_number);
  // No buffer should be allocated for input at this stage.
  RTC_DCHECK(!io_data);
  auto* audio_device = static_cast<AudioDeviceIOS*>(in_ref_con);
  return audio_device->OnRecordedDataIsAvailable(
      io_action_flags, in_time_stamp, in_bus_number, in_number_frames);
}
794 | |
// Pulls the freshly recorded audio out of the audio unit via
// AudioUnitRender and forwards it to the WebRTC audio device buffer.
// Runs on the Core Audio I/O thread.
OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable(
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* in_time_stamp,
    UInt32 in_bus_number,
    UInt32 in_number_frames) {
  // Nothing to do unless recording has been enabled.
  if (!rtc::AtomicOps::AcquireLoad(&recording_)) {
    return noErr;
  }
  const UInt32 expected_frames = record_parameters_.frames_per_buffer();
  if (in_number_frames != expected_frames) {
    // We have seen short bursts (1-2 frames) where |in_number_frames| changes.
    // Add a log to keep track of longer sequences if that should ever happen.
    // Also return since calling AudioUnitRender in this state will only result
    // in kAudio_ParamError (-50) anyhow.
    LOG(LS_WARNING) << "in_number_frames (" << in_number_frames
                    << ") != " << expected_frames;
    return noErr;
  }
  // Obtain the recorded audio samples by initiating a rendering cycle.
  // Since it happens on the input bus, the |io_data| parameter is a reference
  // to the preallocated audio buffer list that the audio unit renders into.
  // TODO(henrika): should error handling be improved?
  AudioBufferList* buffer_list = &audio_record_buffer_list_;
  const OSStatus status =
      AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp,
                      in_bus_number, in_number_frames, buffer_list);
  if (status != noErr) {
    LOG_F(LS_ERROR) << "AudioUnitRender failed: " << status;
    return status;
  }
  // Hand the recorded 16-bit samples over to the WebRTC ADB. The
  // FineAudioBuffer instance converts between the native buffer size and the
  // 10ms buffer size used by WebRTC.
  const UInt32 size_in_bytes = buffer_list->mBuffers[0].mDataByteSize;
  RTC_CHECK_EQ(size_in_bytes / kBytesPerSample, in_number_frames);
  SInt8* recorded_data = static_cast<SInt8*>(buffer_list->mBuffers[0].mData);
  fine_audio_buffer_->DeliverRecordedData(recorded_data, size_in_bytes,
                                          kFixedPlayoutDelayEstimate,
                                          kFixedRecordDelayEstimate);
  return noErr;
}
835 | |
// Static render-callback trampoline for the output bus (bus 0). Forwards
// the request for playout data to the member implementation on the
// AudioDeviceIOS instance stored in |in_ref_con|.
OSStatus AudioDeviceIOS::GetPlayoutData(
    void* in_ref_con,
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* in_time_stamp,
    UInt32 in_bus_number,
    UInt32 in_number_frames,
    AudioBufferList* io_data) {
  RTC_DCHECK_EQ(0u, in_bus_number);
  RTC_DCHECK(io_data);
  auto* audio_device = static_cast<AudioDeviceIOS*>(in_ref_con);
  return audio_device->OnGetPlayoutData(io_action_flags, in_number_frames,
                                        io_data);
}
849 | |
// Fills |io_data| with decoded playout samples from WebRTC, or with
// silence when playout is not active. Runs on the Core Audio I/O thread.
OSStatus AudioDeviceIOS::OnGetPlayoutData(
    AudioUnitRenderActionFlags* io_action_flags,
    UInt32 in_number_frames,
    AudioBufferList* io_data) {
  // Verify 16-bit, noninterleaved mono PCM signal format.
  RTC_DCHECK_EQ(1u, io_data->mNumberBuffers);
  RTC_DCHECK_EQ(1u, io_data->mBuffers[0].mNumberChannels);
  // Locate the internal audio buffer that new audio data shall be written to.
  const UInt32 size_in_bytes = io_data->mBuffers[0].mDataByteSize;
  RTC_CHECK_EQ(size_in_bytes / kBytesPerSample, in_number_frames);
  SInt8* destination = static_cast<SInt8*>(io_data->mBuffers[0].mData);
  // When playout is not activated, produce silence and give the audio unit a
  // hint about it via the render action flags.
  if (!rtc::AtomicOps::AcquireLoad(&playing_)) {
    *io_action_flags |= kAudioUnitRenderAction_OutputIsSilence;
    memset(destination, 0, size_in_bytes);
    return noErr;
  }
  // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
  // the native I/O audio unit) into the preallocated intermediate buffer,
  // then copy the result into the |io_data| destination.
  SInt8* source = playout_audio_buffer_.get();
  fine_audio_buffer_->GetPlayoutData(source);
  memcpy(destination, source, size_in_bytes);
  return noErr;
}
877 | |
878 } // namespace webrtc | 658 } // namespace webrtc |
OLD | NEW |