| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 54 } \ | 54 } \ |
| 55 } while (0) | 55 } while (0) |
| 56 | 56 |
| 57 | 57 |
| 58 // Hardcoded delay estimates based on real measurements. | 58 // Hardcoded delay estimates based on real measurements. |
| 59 // TODO(henrika): these values are not used in combination with built-in AEC. | 59 // TODO(henrika): these values are not used in combination with built-in AEC. |
| 60 // Can most likely be removed. | 60 // Can most likely be removed. |
| 61 const UInt16 kFixedPlayoutDelayEstimate = 30; | 61 const UInt16 kFixedPlayoutDelayEstimate = 30; |
| 62 const UInt16 kFixedRecordDelayEstimate = 30; | 62 const UInt16 kFixedRecordDelayEstimate = 30; |
| 63 | 63 |
// Message ids posted from the RTCAudioSession notification callbacks (which
// may fire on arbitrary threads) to |thread_|, and dispatched in
// AudioDeviceIOS::OnMessage().
enum AudioDeviceMessageType : uint32_t {
  kMessageTypeInterruptionBegin,
  kMessageTypeInterruptionEnd,
  kMessageTypeValidRouteChange,
  kMessageTypeCanPlayOrRecordChange,
};
| 70 |
| 64 using ios::CheckAndLogError; | 71 using ios::CheckAndLogError; |
| 65 | 72 |
#if !defined(NDEBUG)
// Helper method that logs essential device information strings.
// Debug-only: dumps static system/device facts once for diagnostics.
static void LogDeviceInfo() {
  LOG(LS_INFO) << "LogDeviceInfo";
  // The ios:: helpers below create autoreleased NSString objects; the pool
  // bounds their lifetime so this function leaves no residue behind.
  @autoreleasepool {
    LOG(LS_INFO) << " system name: " << ios::GetSystemName();
    LOG(LS_INFO) << " system version 1(2): " << ios::GetSystemVersionAsString();
    LOG(LS_INFO) << " system version 2(2): " << ios::GetSystemVersion();
    LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
    LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
    LOG(LS_INFO) << " process name: " << ios::GetProcessName();
    LOG(LS_INFO) << " process ID: " << ios::GetProcessID();
    LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
    LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
// Low-power-mode query requires the iOS 9 SDK at compile time.
#if defined(__IPHONE_9_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
    LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
#endif
  }
}
#endif  // !defined(NDEBUG)
| 86 | 93 |
AudioDeviceIOS::AudioDeviceIOS()
    : audio_device_buffer_(nullptr),
      audio_unit_(nullptr),
      recording_(0),
      playing_(0),
      initialized_(false),
      rec_is_initialized_(false),
      play_is_initialized_(false),
      is_interrupted_(false),
      has_configured_session_(false) {
  LOGI() << "ctor" << ios::GetCurrentThreadDescription();
  // Remember the construction thread; audio session notifications are
  // marshaled back onto it via thread_->Post() (see OnMessage()).
  thread_ = rtc::Thread::Current();
  // Adapter object that forwards RTCAudioSession delegate callbacks to
  // |this| (the On*() observer methods below).
  audio_session_observer_ =
      [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];
}
| 102 | 109 |
| 103 AudioDeviceIOS::~AudioDeviceIOS() { | 110 AudioDeviceIOS::~AudioDeviceIOS() { |
| 104 LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); | 111 LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); |
| 105 audio_session_observer_ = nil; | 112 audio_session_observer_ = nil; |
| 106 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 113 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| (...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 184 } | 191 } |
| 185 rec_is_initialized_ = true; | 192 rec_is_initialized_ = true; |
| 186 return 0; | 193 return 0; |
| 187 } | 194 } |
| 188 | 195 |
// Starts audio playout. The audio unit is only started here when recording
// is not already keeping it running; otherwise we merely flip the |playing_|
// flag that the real-time render callback observes.
int32_t AudioDeviceIOS::StartPlayout() {
  LOGI() << "StartPlayout";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(play_is_initialized_);
  RTC_DCHECK(!playing_);
  RTC_DCHECK(audio_unit_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetPlayout();
  }
  const bool unit_is_idle =
      !recording_ &&
      audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized;
  if (unit_is_idle) {
    if (!audio_unit_->Start()) {
      RTCLogError(@"StartPlayout failed to start audio unit.");
      return -1;
    }
    LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
  }
  rtc::AtomicOps::ReleaseStore(&playing_, 1);
  return 0;
}
| 208 | 216 |
// Stops audio playout. Tears the audio unit down only when recording is not
// using it as well. Safe to call when playout was never initialized/started.
int32_t AudioDeviceIOS::StopPlayout() {
  LOGI() << "StopPlayout";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (!play_is_initialized_) {
    return 0;
  }
  if (playing_) {
    // Recording may still need the audio unit; only shut down when idle.
    if (!recording_) {
      ShutdownPlayOrRecord();
    }
    rtc::AtomicOps::ReleaseStore(&playing_, 0);
  }
  play_is_initialized_ = false;
  return 0;
}
| 222 | 234 |
// Starts audio capture. Mirrors StartPlayout(): the audio unit is only
// started here when playout is not already keeping it running.
int32_t AudioDeviceIOS::StartRecording() {
  LOGI() << "StartRecording";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(rec_is_initialized_);
  RTC_DCHECK(!recording_);
  RTC_DCHECK(audio_unit_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetRecord();
  }
  const bool unit_is_idle =
      !playing_ &&
      audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized;
  if (unit_is_idle) {
    if (!audio_unit_->Start()) {
      RTCLogError(@"StartRecording failed to start audio unit.");
      return -1;
    }
    LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
  }
  rtc::AtomicOps::ReleaseStore(&recording_, 1);
  return 0;
}
| 242 | 255 |
// Stops audio capture. Mirrors StopPlayout(): the audio unit is torn down
// only when playout is not using it. Safe to call when never started.
int32_t AudioDeviceIOS::StopRecording() {
  LOGI() << "StopRecording";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (!rec_is_initialized_) {
    return 0;
  }
  if (recording_) {
    // Playout may still need the audio unit; only shut down when idle.
    if (!playing_) {
      ShutdownPlayOrRecord();
    }
    rtc::AtomicOps::ReleaseStore(&recording_, 0);
  }
  rec_is_initialized_ = false;
  return 0;
}
| 256 | 273 |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 311 | 328 |
// Copies the cached record parameters into |params|. Only valid after the
// parameters have been set up (DCHECKed via record_parameters_.is_valid()).
int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
  LOGI() << "GetRecordAudioParameters";
  RTC_DCHECK(record_parameters_.is_valid());
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  *params = record_parameters_;
  return 0;
}
| 319 | 336 |
// Called by the audio session observer, possibly on an arbitrary thread.
// Marshals the event onto |thread_|, where OnMessage() dispatches it to
// HandleInterruptionBegin().
void AudioDeviceIOS::OnInterruptionBegin() {
  RTC_DCHECK(thread_);
  thread_->Post(this, kMessageTypeInterruptionBegin);
}
| 331 | 341 |
// Called by the audio session observer, possibly on an arbitrary thread.
// Marshals the event onto |thread_|, where OnMessage() dispatches it to
// HandleInterruptionEnd().
void AudioDeviceIOS::OnInterruptionEnd() {
  RTC_DCHECK(thread_);
  thread_->Post(this, kMessageTypeInterruptionEnd);
}
| 343 | 346 |
// Called by the audio session observer, possibly on an arbitrary thread.
// Marshals the event onto |thread_|, where OnMessage() dispatches it to
// HandleValidRouteChange().
void AudioDeviceIOS::OnValidRouteChange() {
  RTC_DCHECK(thread_);
  thread_->Post(this, kMessageTypeValidRouteChange);
}
| 355 | 351 |
// Called by the audio session observer, possibly on an arbitrary thread.
// Posts to |thread_|; ownership of the heap-allocated bool payload is
// transferred to OnMessage(), which deletes it after dispatch.
void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) {
  RTC_DCHECK(thread_);
  thread_->Post(this, kMessageTypeCanPlayOrRecordChange,
                new rtc::TypedMessageData<bool>(can_play_or_record));
}
| 367 | 357 |
| 368 OSStatus AudioDeviceIOS::OnDeliverRecordedData( | 358 OSStatus AudioDeviceIOS::OnDeliverRecordedData( |
| 369 AudioUnitRenderActionFlags* flags, | 359 AudioUnitRenderActionFlags* flags, |
| 370 const AudioTimeStamp* time_stamp, | 360 const AudioTimeStamp* time_stamp, |
| 371 UInt32 bus_number, | 361 UInt32 bus_number, |
| 372 UInt32 num_frames, | 362 UInt32 num_frames, |
| 373 AudioBufferList* /* io_data */) { | 363 AudioBufferList* /* io_data */) { |
| 374 OSStatus result = noErr; | 364 OSStatus result = noErr; |
| 375 // Simply return if recording is not enabled. | 365 // Simply return if recording is not enabled. |
| 376 if (!rtc::AtomicOps::AcquireLoad(&recording_)) | 366 if (!rtc::AtomicOps::AcquireLoad(&recording_)) |
| 377 return result; | 367 return result; |
| 378 | 368 |
| 379 size_t frames_per_buffer = record_parameters_.frames_per_buffer(); | 369 size_t frames_per_buffer = record_parameters_.frames_per_buffer(); |
| 380 if (num_frames != frames_per_buffer) { | 370 if (num_frames != frames_per_buffer) { |
| 381 // We have seen short bursts (1-2 frames) where |in_number_frames| changes. | 371 // We have seen short bursts (1-2 frames) where |in_number_frames| changes. |
| 382 // Add a log to keep track of longer sequences if that should ever happen. | 372 // Add a log to keep track of longer sequences if that should ever happen. |
| 383 // Also return since calling AudioUnitRender in this state will only result | 373 // Also return since calling AudioUnitRender in this state will only result |
| 384 // in kAudio_ParamError (-50) anyhow. | 374 // in kAudio_ParamError (-50) anyhow. |
| 385 RTCLogWarning(@"Expected %u frames but got %u", | 375 RTCLogWarning(@"Expected %u frames but got %u", |
| 386 static_cast<unsigned int>(frames_per_buffer), | 376 static_cast<unsigned int>(frames_per_buffer), |
| 387 static_cast<unsigned int>(num_frames)); | 377 static_cast<unsigned int>(num_frames)); |
| 378 |
| 379 RTCAudioSession *session = [RTCAudioSession sharedInstance]; |
| 380 RTCLogWarning(@"Session:\n %@", session); |
| 388 return result; | 381 return result; |
| 389 } | 382 } |
| 390 | 383 |
| 391 // Obtain the recorded audio samples by initiating a rendering cycle. | 384 // Obtain the recorded audio samples by initiating a rendering cycle. |
| 392 // Since it happens on the input bus, the |io_data| parameter is a reference | 385 // Since it happens on the input bus, the |io_data| parameter is a reference |
| 393 // to the preallocated audio buffer list that the audio unit renders into. | 386 // to the preallocated audio buffer list that the audio unit renders into. |
| 394 // We can make the audio unit provide a buffer instead in io_data, but we | 387 // We can make the audio unit provide a buffer instead in io_data, but we |
| 395 // currently just use our own. | 388 // currently just use our own. |
| 396 // TODO(henrika): should error handling be improved? | 389 // TODO(henrika): should error handling be improved? |
| 397 AudioBufferList* io_data = &audio_record_buffer_list_; | 390 AudioBufferList* io_data = &audio_record_buffer_list_; |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 440 } | 433 } |
| 441 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches | 434 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches |
| 442 // the native I/O audio unit) to a preallocated intermediate buffer and | 435 // the native I/O audio unit) to a preallocated intermediate buffer and |
| 443 // copy the result to the audio buffer in the |io_data| destination. | 436 // copy the result to the audio buffer in the |io_data| destination. |
| 444 int8_t* source = playout_audio_buffer_.get(); | 437 int8_t* source = playout_audio_buffer_.get(); |
| 445 fine_audio_buffer_->GetPlayoutData(source); | 438 fine_audio_buffer_->GetPlayoutData(source); |
| 446 memcpy(destination, source, size_in_bytes); | 439 memcpy(destination, source, size_in_bytes); |
| 447 return noErr; | 440 return noErr; |
| 448 } | 441 } |
| 449 | 442 |
// Dispatches messages previously posted from the audio session notification
// callbacks. Runs on |thread_|, which is also where the Handle*() methods
// expect to execute.
void AudioDeviceIOS::OnMessage(rtc::Message *msg) {
  const uint32_t id = msg->message_id;
  if (id == kMessageTypeInterruptionBegin) {
    HandleInterruptionBegin();
  } else if (id == kMessageTypeInterruptionEnd) {
    HandleInterruptionEnd();
  } else if (id == kMessageTypeValidRouteChange) {
    HandleValidRouteChange();
  } else if (id == kMessageTypeCanPlayOrRecordChange) {
    // The payload was allocated in OnCanPlayOrRecordChange(); we own it here.
    rtc::TypedMessageData<bool>* payload =
        static_cast<rtc::TypedMessageData<bool>*>(msg->pdata);
    HandleCanPlayOrRecordChange(payload->data());
    delete payload;
  }
}
| 463 |
// Invoked on |thread_| when a system audio interruption begins (e.g. an
// incoming phone call). Stops the audio unit if it is running and marks the
// interrupted state so later route/sample-rate events are ignored until the
// interruption ends.
void AudioDeviceIOS::HandleInterruptionBegin() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());

  // Only a unit in the kStarted state needs stopping; in other states there
  // is nothing running to stop.
  if (audio_unit_ &&
      audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
    RTCLog(@"Stopping the audio unit due to interruption begin.");
    if (!audio_unit_->Stop()) {
      RTCLogError(@"Failed to stop the audio unit for interruption begin.");
    }
  }
  is_interrupted_ = true;
}
| 459 | 476 |
// Invoked on |thread_| when the system audio interruption ends. Clears the
// interruption flag first, then re-evaluates the audio unit state, since the
// session's capabilities may have changed while we were interrupted.
void AudioDeviceIOS::HandleInterruptionEnd() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());

  is_interrupted_ = false;
  RTCLog(@"Interruption ended. Updating audio unit state.");
  UpdateAudioUnit([RTCAudioSession sharedInstance].canPlayOrRecord);
}
| 469 | 484 |
// Invoked on |thread_| after a valid audio route change (e.g. headset
// plugged in/out). A route change may alter the hardware sample rate, so
// defer to the sample-rate handler with the session's current rate.
void AudioDeviceIOS::HandleValidRouteChange() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());

  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  HandleSampleRateChange(session.sampleRate);
}
| 491 |
// Invoked on |thread_| when the session's ability to play or record changes.
// Simply re-runs the audio unit state machine with the new capability.
void AudioDeviceIOS::HandleCanPlayOrRecordChange(bool can_play_or_record) {
  // Consistency fix: every other Handle*() method asserts the thread it runs
  // on; this handler runs on |thread_| via OnMessage() as well.
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTCLog(@"Handling CanPlayOrRecord change to: %d", can_play_or_record);
  UpdateAudioUnit(can_play_or_record);
}
| 496 |
// Invoked on |thread_| when the hardware sample rate may have changed (route
// changes end up here too). Reallocates buffers and reinitializes the audio
// unit only if the session's sample rate or I/O buffer size actually differs
// from the ADM's current configuration.
// Fixes vs previous revision: stray ";;" after the RTCLog call removed, and
// abs() replaced with fabs() — abs() truncates the double difference to an
// integer, masking sub-integer sample rate drift.
void AudioDeviceIOS::HandleSampleRateChange(float sample_rate) {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTCLog(@"Handling sample rate change to %f.", sample_rate);

  // Don't do anything if we're interrupted.
  if (is_interrupted_) {
    RTCLog(@"Ignoring sample rate change to %f due to interruption.",
           sample_rate);
    return;
  }

  // If we don't have an audio unit yet, or the audio unit is uninitialized,
  // there is no work to do.
  if (!audio_unit_ ||
      audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
    return;
  }

  // The audio unit is already initialized or started.
  // Check to see if the sample rate or buffer size has changed.
  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  const double session_sample_rate = session.sampleRate;
  const NSTimeInterval session_buffer_duration = session.IOBufferDuration;
  const size_t session_frames_per_buffer =
      static_cast<size_t>(session_sample_rate * session_buffer_duration + .5);
  const double current_sample_rate = playout_parameters_.sample_rate();
  const size_t current_frames_per_buffer =
      playout_parameters_.frames_per_buffer();
  RTCLog(@"Handling playout sample rate change to: %f\n"
          "  Session sample rate: %f frames_per_buffer: %lu\n"
          "  ADM sample rate: %f frames_per_buffer: %lu",
         sample_rate,
         session_sample_rate, (unsigned long)session_frames_per_buffer,
         current_sample_rate, (unsigned long)current_frames_per_buffer);

  // Sample rate and buffer size are the same, no work to do.
  if (fabs(current_sample_rate - session_sample_rate) <= DBL_EPSILON &&
      current_frames_per_buffer == session_frames_per_buffer) {
    return;
  }

  // We need to adjust our format and buffer sizes.
  // The stream format is about to be changed and it requires that we first
  // stop and uninitialize the audio unit to deallocate its resources.
  RTCLog(@"Stopping and uninitializing audio unit to adjust buffers.");
  bool restart_audio_unit = false;
  if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
    audio_unit_->Stop();
    restart_audio_unit = true;
  }
  if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
    audio_unit_->Uninitialize();
  }

  // Allocate new buffers given the new stream format.
  SetupAudioBuffersForActiveAudioSession();

  // Initialize the audio unit again with the new sample rate.
  RTC_DCHECK_EQ(playout_parameters_.sample_rate(), session_sample_rate);
  if (!audio_unit_->Initialize(session_sample_rate)) {
    RTCLogError(@"Failed to initialize the audio unit with sample rate: %f",
                session_sample_rate);
    return;
  }

  // Restart the audio unit if it was already running.
  if (restart_audio_unit && !audio_unit_->Start()) {
    RTCLogError(@"Failed to start audio unit with sample rate: %f",
                session_sample_rate);
    return;
  }
  RTCLog(@"Successfully handled sample rate change.");
}
| 524 | 570 |
| 525 void AudioDeviceIOS::UpdateAudioDeviceBuffer() { | 571 void AudioDeviceIOS::UpdateAudioDeviceBuffer() { |
| 526 LOGI() << "UpdateAudioDevicebuffer"; | 572 LOGI() << "UpdateAudioDevicebuffer"; |
| 527 // AttachAudioBuffer() is called at construction by the main class but check | 573 // AttachAudioBuffer() is called at construction by the main class but check |
| 528 // just in case. | 574 // just in case. |
| 529 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; | 575 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; |
| 530 // Inform the audio device buffer (ADB) about the new audio format. | 576 // Inform the audio device buffer (ADB) about the new audio format. |
| 531 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); | 577 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); |
| 532 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); | 578 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 590 << required_playout_buffer_size; | 636 << required_playout_buffer_size; |
| 591 playout_audio_buffer_.reset(new SInt8[required_playout_buffer_size]); | 637 playout_audio_buffer_.reset(new SInt8[required_playout_buffer_size]); |
| 592 | 638 |
| 593 // Allocate AudioBuffers to be used as storage for the received audio. | 639 // Allocate AudioBuffers to be used as storage for the received audio. |
| 594 // The AudioBufferList structure works as a placeholder for the | 640 // The AudioBufferList structure works as a placeholder for the |
| 595 // AudioBuffer structure, which holds a pointer to the actual data buffer | 641 // AudioBuffer structure, which holds a pointer to the actual data buffer |
| 596 // in |record_audio_buffer_|. Recorded audio will be rendered into this memory | 642 // in |record_audio_buffer_|. Recorded audio will be rendered into this memory |
| 597 // at each input callback when calling AudioUnitRender(). | 643 // at each input callback when calling AudioUnitRender(). |
| 598 const int data_byte_size = record_parameters_.GetBytesPerBuffer(); | 644 const int data_byte_size = record_parameters_.GetBytesPerBuffer(); |
| 599 record_audio_buffer_.reset(new SInt8[data_byte_size]); | 645 record_audio_buffer_.reset(new SInt8[data_byte_size]); |
| 646 memset(record_audio_buffer_.get(), 0, data_byte_size); |
| 600 audio_record_buffer_list_.mNumberBuffers = 1; | 647 audio_record_buffer_list_.mNumberBuffers = 1; |
| 601 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; | 648 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0]; |
| 602 audio_buffer->mNumberChannels = record_parameters_.channels(); | 649 audio_buffer->mNumberChannels = record_parameters_.channels(); |
| 603 audio_buffer->mDataByteSize = data_byte_size; | 650 audio_buffer->mDataByteSize = data_byte_size; |
| 604 audio_buffer->mData = record_audio_buffer_.get(); | 651 audio_buffer->mData = record_audio_buffer_.get(); |
| 605 } | 652 } |
| 606 | 653 |
// Creates and initializes the Voice-Processing I/O audio unit. On failure
// the partially constructed unit is released so a later call can retry from
// a clean state.
bool AudioDeviceIOS::CreateAudioUnit() {
  RTC_DCHECK(!audio_unit_);
  audio_unit_.reset(new VoiceProcessingAudioUnit(this));
  const bool init_ok = audio_unit_->Init();
  if (!init_ok) {
    audio_unit_.reset();
  }
  return init_ok;
}
| 618 | 665 |
// Brings the audio unit's lifecycle state in sync with the session's current
// capability (|can_play_or_record|) and the client's play/record intent.
// Runs on |thread_|. The switch below decides, for each possible audio unit
// state, whether the unit must be initialized, started, stopped and/or
// uninitialized; the actions are then applied in that fixed order.
void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d",
         can_play_or_record, is_interrupted_);

  // During an interruption the session is inactive; HandleInterruptionEnd()
  // will re-run this method when the interruption is over.
  if (is_interrupted_) {
    RTCLog(@"Ignoring audio unit update due to interruption.");
    return;
  }

  // If we're not initialized we don't need to do anything. Audio unit will
  // be initialized on initialization.
  if (!rec_is_initialized_ && !play_is_initialized_)
    return;

  // If we're initialized, we must have an audio unit.
  RTC_DCHECK(audio_unit_);

  bool should_initialize_audio_unit = false;
  bool should_uninitialize_audio_unit = false;
  bool should_start_audio_unit = false;
  bool should_stop_audio_unit = false;

  switch (audio_unit_->GetState()) {
    case VoiceProcessingAudioUnit::kInitRequired:
      // CreateAudioUnit() always calls Init(); this state is never observed.
      RTC_NOTREACHED();
      break;
    case VoiceProcessingAudioUnit::kUninitialized:
      should_initialize_audio_unit = can_play_or_record;
      should_start_audio_unit = should_initialize_audio_unit &&
          (playing_ || recording_);
      break;
    case VoiceProcessingAudioUnit::kInitialized:
      should_start_audio_unit =
          can_play_or_record && (playing_ || recording_);
      should_uninitialize_audio_unit = !can_play_or_record;
      break;
    case VoiceProcessingAudioUnit::kStarted:
      RTC_DCHECK(playing_ || recording_);
      should_stop_audio_unit = !can_play_or_record;
      should_uninitialize_audio_unit = should_stop_audio_unit;
      break;
  }

  if (should_initialize_audio_unit) {
    RTCLog(@"Initializing audio unit for UpdateAudioUnit");
    // Configure the session and size the buffers before the unit first
    // touches the hardware format.
    ConfigureAudioSession();
    SetupAudioBuffersForActiveAudioSession();
    if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
      RTCLogError(@"Failed to initialize audio unit.");
      return;
    }
  }

  if (should_start_audio_unit) {
    RTCLog(@"Starting audio unit for UpdateAudioUnit");
    if (!audio_unit_->Start()) {
      RTCLogError(@"Failed to start audio unit.");
      return;
    }
  }

  if (should_stop_audio_unit) {
    RTCLog(@"Stopping audio unit for UpdateAudioUnit");
    if (!audio_unit_->Stop()) {
      RTCLogError(@"Failed to stop audio unit.");
      return;
    }
  }

  if (should_uninitialize_audio_unit) {
    RTCLog(@"Uninitializing audio unit for UpdateAudioUnit");
    audio_unit_->Uninitialize();
    UnconfigureAudioSession();
  }
}
| 742 |
// Configures the shared audio session for WebRTC use (category, mode,
// preferred parameters) so that an audio unit can subsequently be
// initialized. Must be called on the construction thread. No-op (with a
// warning) if this instance has already configured the session.
void AudioDeviceIOS::ConfigureAudioSession() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTCLog(@"Configuring audio session.");
  if (has_configured_session_) {
    RTCLogWarning(@"Audio session already configured.");
    return;
  }
  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  [session lockForConfiguration];
  NSError* error = nil;
  // Previously the return value was ignored (`configureWebRTCSession:nil`);
  // surface failures in the log so misconfiguration is not silent.
  if (![session configureWebRTCSession:&error]) {
    RTCLogError(@"Failed to configure WebRTC session: %@",
                error.localizedDescription);
  }
  [session unlockForConfiguration];
  has_configured_session_ = true;
  RTCLog(@"Configured audio session.");
}
| 757 |
// Reverts the WebRTC-specific audio session configuration applied by
// ConfigureAudioSession(). Must be called on the construction thread.
// No-op (with a warning) if the session is not currently configured by
// this instance.
void AudioDeviceIOS::UnconfigureAudioSession() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTCLog(@"Unconfiguring audio session.");
  if (!has_configured_session_) {
    RTCLogWarning(@"Audio session already unconfigured.");
    return;
  }
  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  [session lockForConfiguration];
  NSError* error = nil;
  // Previously the return value was ignored (`unconfigureWebRTCSession:nil`);
  // surface failures in the log so teardown problems are not silent.
  if (![session unconfigureWebRTCSession:&error]) {
    RTCLogError(@"Failed to unconfigure WebRTC session: %@",
                error.localizedDescription);
  }
  [session unlockForConfiguration];
  has_configured_session_ = false;
  RTCLog(@"Unconfigured audio session.");
}
| 655 | 772 |
// Creates the audio unit, subscribes to audio session events and marks the
// start of a WebRTC session. If the session is already capable of playout
// or recording, the audio unit is initialized immediately; otherwise
// initialization is deferred until the session becomes usable (handled by
// UpdateAudioUnit()).
// Returns false if the audio unit could not be created or the WebRTC
// session could not be started.
bool AudioDeviceIOS::InitPlayOrRecord() {
  LOGI() << "InitPlayOrRecord";

  // There should be no audio unit at this point.
  if (!CreateAudioUnit()) {
    return false;
  }

  RTCAudioSession* session = [RTCAudioSession sharedInstance];
  // Subscribe to audio session events.
  [session pushDelegate:audio_session_observer_];

  // Lock the session to make configuration changes.
  [session lockForConfiguration];
  NSError* error = nil;
  if (![session beginWebRTCSession:&error]) {
    [session unlockForConfiguration];
    RTCLogError(@"Failed to begin WebRTC session: %@",
                error.localizedDescription);
    return false;
  }

  // If we are ready to play or record, initialize the audio unit.
  if (session.canPlayOrRecord) {
    ConfigureAudioSession();
    SetupAudioBuffersForActiveAudioSession();
    // The result used to be discarded; log failures so a bad sample-rate or
    // stream-format setup is visible here instead of failing silently later.
    if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
      RTCLogError(@"Failed to initialize audio unit with sample rate: %d",
                  playout_parameters_.sample_rate());
    }
  }

  // Release the lock.
  [session unlockForConfiguration];

  return true;
}
| 692 | 807 |
// Tears audio down in the reverse order of InitPlayOrRecord(): the
// voice-processing I/O unit is destroyed first, then this object stops
// observing session events, and finally the session itself is unconfigured
// and the WebRTC session ended.
void AudioDeviceIOS::ShutdownPlayOrRecord() {
  LOGI() << "ShutdownPlayOrRecord";

  // Destroying the audio unit stops any in-flight I/O before the session
  // is touched below.
  audio_unit_.reset();

  // Stop listening for audio session notifications.
  RTCAudioSession* audio_session = [RTCAudioSession sharedInstance];
  [audio_session removeDelegate:audio_session_observer_];

  // Deactivation must be the last action, after all I/O has stopped, so
  // unconfigure and end the session while holding the configuration lock.
  [audio_session lockForConfiguration];
  UnconfigureAudioSession();
  [audio_session endWebRTCSession:nil];
  [audio_session unlockForConfiguration];
}
| 711 | 825 |
| 712 } // namespace webrtc | 826 } // namespace webrtc |
| OLD | NEW |