OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 18 matching lines...) Expand all Loading... |
29 | 29 |
30 #define LOG_AND_RETURN_IF_ERROR(error, message) \ | 30 #define LOG_AND_RETURN_IF_ERROR(error, message) \ |
31 do { \ | 31 do { \ |
32 OSStatus err = error; \ | 32 OSStatus err = error; \ |
33 if (err) { \ | 33 if (err) { \ |
34 LOG(LS_ERROR) << message << ": " << err; \ | 34 LOG(LS_ERROR) << message << ": " << err; \ |
35 return false; \ | 35 return false; \ |
36 } \ | 36 } \ |
37 } while (0) | 37 } while (0) |
38 | 38 |
| 39 #define LOG_IF_ERROR(error, message) \ |
| 40 do { \ |
| 41 OSStatus err = error; \ |
| 42 if (err) { \ |
| 43 LOG(LS_ERROR) << message << ": " << err; \ |
| 44 } \ |
| 45 } while (0) |
| 46 |
39 // Preferred hardware sample rate (unit is in Hertz). The client sample rate | 47 // Preferred hardware sample rate (unit is in Hertz). The client sample rate |
40 // will be set to this value as well to avoid resampling in the audio unit's | 48 // will be set to this value as well to avoid resampling in the audio unit's |
41 // format converter. Note that some devices, e.g. BT headsets, only support | 49 // format converter. Note that some devices, e.g. BT headsets, only support |
42 // 8000Hz as native sample rate. | 50 // 8000Hz as native sample rate. |
43 const double kPreferredSampleRate = 48000.0; | 51 const double kPreferredSampleRate = 48000.0; |
44 // Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms | 52 // Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms |
45 // size used by WebRTC. The exact actual size will differ between devices. | 53 // size used by WebRTC. The exact actual size will differ between devices. |
46 // Example: using 48kHz on iPhone 6 results in a native buffer size of | 54 // Example: using 48kHz on iPhone 6 results in a native buffer size of |
47 // ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will | 55 // ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will |
48 // take care of any buffering required to convert between native buffers and | 56 // take care of any buffering required to convert between native buffers and |
(...skipping 21 matching lines...) Expand all Loading... |
70 using ios::CheckAndLogError; | 78 using ios::CheckAndLogError; |
71 | 79 |
72 // Activates an audio session suitable for full duplex VoIP sessions when | 80 // Activates an audio session suitable for full duplex VoIP sessions when |
73 // |activate| is true. Also sets the preferred sample rate and IO buffer | 81 // |activate| is true. Also sets the preferred sample rate and IO buffer |
74 // duration. Deactivates an active audio session if |activate| is set to false. | 82 // duration. Deactivates an active audio session if |activate| is set to false. |
75 static void ActivateAudioSession(AVAudioSession* session, bool activate) { | 83 static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
76 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; | 84 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; |
77 @autoreleasepool { | 85 @autoreleasepool { |
78 NSError* error = nil; | 86 NSError* error = nil; |
79 BOOL success = NO; | 87 BOOL success = NO; |
| 88 |
80 // Deactivate the audio session and return if |activate| is false. | 89 // Deactivate the audio session and return if |activate| is false. |
81 if (!activate) { | 90 if (!activate) { |
82 success = [session setActive:NO error:&error]; | 91 success = [session setActive:NO error:&error]; |
83 RTC_DCHECK(CheckAndLogError(success, error)); | 92 RTC_DCHECK(CheckAndLogError(success, error)); |
84 return; | 93 return; |
85 } | 94 } |
| 95 |
86 // Use a category which supports simultaneous recording and playback. | 96 // Use a category which supports simultaneous recording and playback. |
87 // By default, using this category implies that our app’s audio is | 97 // By default, using this category implies that our app’s audio is |
88 // nonmixable, hence activating the session will interrupt any other | 98 // nonmixable, hence activating the session will interrupt any other |
89 // audio sessions which are also nonmixable. | 99 // audio sessions which are also nonmixable. |
90 if (session.category != AVAudioSessionCategoryPlayAndRecord) { | 100 if (session.category != AVAudioSessionCategoryPlayAndRecord) { |
91 error = nil; | 101 error = nil; |
92 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord | 102 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord |
| 103 withOptions:AVAudioSessionCategoryOptionAllowBluetooth |
93 error:&error]; | 104 error:&error]; |
94 RTC_DCHECK(CheckAndLogError(success, error)); | 105 RTC_DCHECK(CheckAndLogError(success, error)); |
95 } | 106 } |
| 107 |
96 // Specify mode for two-way voice communication (e.g. VoIP). | 108 // Specify mode for two-way voice communication (e.g. VoIP). |
97 if (session.mode != AVAudioSessionModeVoiceChat) { | 109 if (session.mode != AVAudioSessionModeVoiceChat) { |
98 error = nil; | 110 error = nil; |
99 success = [session setMode:AVAudioSessionModeVoiceChat error:&error]; | 111 success = [session setMode:AVAudioSessionModeVoiceChat error:&error]; |
100 RTC_DCHECK(CheckAndLogError(success, error)); | 112 RTC_DCHECK(CheckAndLogError(success, error)); |
101 } | 113 } |
| 114 |
102 // Set the session's sample rate or the hardware sample rate. | 115 // Set the session's sample rate or the hardware sample rate. |
103 // It is essential that we use the same sample rate as stream format | 116 // It is essential that we use the same sample rate as stream format |
104 // to ensure that the I/O unit does not have to do sample rate conversion. | 117 // to ensure that the I/O unit does not have to do sample rate conversion. |
105 error = nil; | 118 error = nil; |
106 success = | 119 success = |
107 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; | 120 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; |
108 RTC_DCHECK(CheckAndLogError(success, error)); | 121 RTC_DCHECK(CheckAndLogError(success, error)); |
| 122 |
109 // Set the preferred audio I/O buffer duration, in seconds. | 123 // Set the preferred audio I/O buffer duration, in seconds. |
110 // TODO(henrika): add more comments here. | 124 // TODO(henrika): add more comments here. |
111 error = nil; | 125 error = nil; |
112 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration | 126 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration |
113 error:&error]; | 127 error:&error]; |
114 RTC_DCHECK(CheckAndLogError(success, error)); | 128 RTC_DCHECK(CheckAndLogError(success, error)); |
115 | 129 |
116 // TODO(henrika): add observers here... | |
117 | |
118 // Activate the audio session. Activation can fail if another active audio | 130 // Activate the audio session. Activation can fail if another active audio |
119 // session (e.g. phone call) has higher priority than ours. | 131 // session (e.g. phone call) has higher priority than ours. |
120 error = nil; | 132 error = nil; |
121 success = [session setActive:YES error:&error]; | 133 success = [session setActive:YES error:&error]; |
122 RTC_DCHECK(CheckAndLogError(success, error)); | 134 RTC_DCHECK(CheckAndLogError(success, error)); |
123 RTC_CHECK(session.isInputAvailable) << "No input path is available!"; | 135 RTC_CHECK(session.isInputAvailable) << "No input path is available!"; |
| 136 |
124 // Ensure that category and mode are actually activated. | 137 // Ensure that category and mode are actually activated. |
125 RTC_DCHECK( | 138 RTC_DCHECK( |
126 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); | 139 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); |
127 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); | 140 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); |
| 141 |
128 // Try to set the preferred number of hardware audio channels. These calls | 142 // Try to set the preferred number of hardware audio channels. These calls |
129 // must be done after setting the audio session’s category and mode and | 143 // must be done after setting the audio session’s category and mode and |
130 // activating the session. | 144 // activating the session. |
131 // We try to use mono in both directions to save resources and format | 145 // We try to use mono in both directions to save resources and format |
132 // conversions in the audio unit. Some devices only support stereo; | 146 // conversions in the audio unit. Some devices only support stereo; |
133 // e.g. wired headset on iPhone 6. | 147 // e.g. wired headset on iPhone 6. |
134 // TODO(henrika): add support for stereo if needed. | 148 // TODO(henrika): add support for stereo if needed. |
135 error = nil; | 149 error = nil; |
136 success = | 150 success = |
137 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels | 151 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels |
(...skipping 259 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
397 // just in case. | 411 // just in case. |
398 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; | 412 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; |
399 // Inform the audio device buffer (ADB) about the new audio format. | 413 // Inform the audio device buffer (ADB) about the new audio format. |
400 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); | 414 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); |
401 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); | 415 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); |
402 audio_device_buffer_->SetRecordingSampleRate( | 416 audio_device_buffer_->SetRecordingSampleRate( |
403 record_parameters_.sample_rate()); | 417 record_parameters_.sample_rate()); |
404 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); | 418 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); |
405 } | 419 } |
406 | 420 |
| 421 void AudioDeviceIOS::RegisterNotificationObservers() { |
| 422 LOGI() << "RegisterNotificationObservers"; |
| 423 // This code block will be called when AVAudioSessionInterruptionNotification |
| 424 // is observed. |
| 425 void (^interrupt_block)(NSNotification*) = ^(NSNotification* notification) { |
| 426 NSNumber* type_number = |
| 427 notification.userInfo[AVAudioSessionInterruptionTypeKey]; |
| 428 AVAudioSessionInterruptionType type = |
| 429 (AVAudioSessionInterruptionType)type_number.unsignedIntegerValue; |
| 430 LOG(LS_INFO) << "Audio session interruption:"; |
| 431 switch (type) { |
| 432 case AVAudioSessionInterruptionTypeBegan: |
| 433 // The system has deactivated our audio session. |
| 434 // Stop the active audio unit. |
| 435 LOG(LS_INFO) << " Began => stopping the audio unit"; |
| 436 LOG_IF_ERROR(AudioOutputUnitStop(vpio_unit_), |
| 437 "Failed to stop the the Voice-Processing I/O unit"); |
| 438 break; |
| 439 case AVAudioSessionInterruptionTypeEnded: |
| 440 // The interruption has ended. Restart the audio session and start the |
| 441 // initialized audio unit again. |
| 442 LOG(LS_INFO) << " Ended => restarting audio session and audio unit"; |
| 443 NSError* error = nil; |
| 444 BOOL success = NO; |
| 445 AVAudioSession* session = [AVAudioSession sharedInstance]; |
| 446 success = [session setActive:YES error:&error]; |
| 447 if (CheckAndLogError(success, error)) { |
| 448 LOG_IF_ERROR(AudioOutputUnitStart(vpio_unit_), |
| 449 "Failed to start the the Voice-Processing I/O unit"); |
| 450 } |
| 451 break; |
| 452 } |
| 453 }; |
| 454 |
| 455 // This code block will be called when AVAudioSessionRouteChangeNotification |
| 456 // is observed. |
| 457 void (^route_change_block)(NSNotification*) = |
| 458 ^(NSNotification* notification) { |
| 459 // Get reason for current route change. |
| 460 NSNumber* reason_number = |
| 461 notification.userInfo[AVAudioSessionRouteChangeReasonKey]; |
| 462 AVAudioSessionRouteChangeReason reason = |
| 463 (AVAudioSessionRouteChangeReason)reason_number.unsignedIntegerValue; |
| 464 bool valid_route_change = true; |
| 465 LOG(LS_INFO) << "Route change:"; |
| 466 switch (reason) { |
| 467 case AVAudioSessionRouteChangeReasonUnknown: |
| 468 LOG(LS_INFO) << " ReasonUnknown"; |
| 469 break; |
| 470 case AVAudioSessionRouteChangeReasonNewDeviceAvailable: |
| 471 LOG(LS_INFO) << " NewDeviceAvailable"; |
| 472 break; |
| 473 case AVAudioSessionRouteChangeReasonOldDeviceUnavailable: |
| 474 LOG(LS_INFO) << " OldDeviceUnavailable"; |
| 475 break; |
| 476 case AVAudioSessionRouteChangeReasonCategoryChange: |
| 477 LOG(LS_INFO) << " CategoryChange"; |
| 478 LOG(LS_INFO) << " New category: " << ios::GetAudioSessionCategory(); |
| 479 // Don't see this as route change since it can be triggered in |
| 480 // combination with session interruptions as well. |
| 481 valid_route_change = false; |
| 482 break; |
| 483 case AVAudioSessionRouteChangeReasonOverride: |
| 484 LOG(LS_INFO) << " Override"; |
| 485 break; |
| 486 case AVAudioSessionRouteChangeReasonWakeFromSleep: |
| 487 LOG(LS_INFO) << " WakeFromSleep"; |
| 488 break; |
| 489 case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory: |
| 490 LOG(LS_INFO) << " NoSuitableRouteForCategory"; |
| 491 break; |
| 492 case AVAudioSessionRouteChangeReasonRouteConfigurationChange: |
| 493 // Ignore this type of route change since we are focusing |
| 494 // on detecting headset changes. |
| 495 LOG(LS_INFO) << " RouteConfigurationChange"; |
| 496 valid_route_change = false; |
| 497 break; |
| 498 } |
| 499 |
| 500 if (valid_route_change) { |
| 501 // Log previous route configuration. |
| 502 AVAudioSessionRouteDescription* prev_route = |
| 503 notification.userInfo[AVAudioSessionRouteChangePreviousRouteKey]; |
| 504 LOG(LS_INFO) << "Previous route:"; |
| 505 LOG(LS_INFO) << ios::StdStringFromNSString( |
| 506 [NSString stringWithFormat:@"%@", prev_route]); |
| 507 |
| 508 // Only restart audio for a valid route change and if the |
| 509 // session sample rate has changed. |
| 510 AVAudioSession* session = [AVAudioSession sharedInstance]; |
| 511 const double session_sample_rate = session.sampleRate; |
| 512 LOG(LS_INFO) << "session sample rate: " << session_sample_rate; |
| 513 if (playout_parameters_.sample_rate() != session_sample_rate) { |
| 514 if (!RestartAudioUnitWithNewFormat(session_sample_rate)) { |
| 515 LOG(LS_ERROR) << "Audio restart failed"; |
| 516 } |
| 517 } |
| 518 } |
| 519 }; |
| 520 |
| 521 // Get the default notification center of the current process. |
| 522 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; |
| 523 |
| 524 // Add AVAudioSessionInterruptionNotification observer. |
| 525 id interruption_observer = |
| 526 [center addObserverForName:AVAudioSessionInterruptionNotification |
| 527 object:nil |
| 528 queue:[NSOperationQueue mainQueue] |
| 529 usingBlock:interrupt_block]; |
| 530 // Add AVAudioSessionRouteChangeNotification observer. |
| 531 id route_change_observer = |
| 532 [center addObserverForName:AVAudioSessionRouteChangeNotification |
| 533 object:nil |
| 534 queue:[NSOperationQueue mainQueue] |
| 535 usingBlock:route_change_block]; |
| 536 |
| 537 // Increment refcount on observers using ARC bridge. Instance variable is a |
| 538 // void* instead of an id because header is included in other pure C++ |
| 539 // files. |
| 540 audio_interruption_observer_ = (__bridge_retained void*)interruption_observer; |
| 541 route_change_observer_ = (__bridge_retained void*)route_change_observer; |
| 542 } |
| 543 |
| 544 void AudioDeviceIOS::UnregisterNotificationObservers() { |
| 545 LOGI() << "UnregisterNotificationObservers"; |
| 546 // Transfer ownership of observer back to ARC, which will deallocate the |
| 547 // observer once it exits this scope. |
| 548 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; |
| 549 if (audio_interruption_observer_ != nullptr) { |
| 550 id observer = (__bridge_transfer id)audio_interruption_observer_; |
| 551 [center removeObserver:observer]; |
| 552 audio_interruption_observer_ = nullptr; |
| 553 } |
| 554 if (route_change_observer_ != nullptr) { |
| 555 id observer = (__bridge_transfer id)route_change_observer_; |
| 556 [center removeObserver:observer]; |
| 557 route_change_observer_ = nullptr; |
| 558 } |
| 559 } |
| 560 |
407 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { | 561 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { |
408 LOGI() << "SetupAudioBuffersForActiveAudioSession"; | 562 LOGI() << "SetupAudioBuffersForActiveAudioSession"; |
| 563 // Verify the current values once the audio session has been activated. |
409 AVAudioSession* session = [AVAudioSession sharedInstance]; | 564 AVAudioSession* session = [AVAudioSession sharedInstance]; |
410 // Verify the current values once the audio session has been activated. | |
411 LOG(LS_INFO) << " sample rate: " << session.sampleRate; | 565 LOG(LS_INFO) << " sample rate: " << session.sampleRate; |
412 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration; | 566 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration; |
413 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels; | 567 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels; |
414 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels; | 568 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels; |
415 LOG(LS_INFO) << " output latency: " << session.outputLatency; | 569 LOG(LS_INFO) << " output latency: " << session.outputLatency; |
416 LOG(LS_INFO) << " input latency: " << session.inputLatency; | 570 LOG(LS_INFO) << " input latency: " << session.inputLatency; |
| 571 |
417 // Log a warning message for the case when we are unable to set the preferred | 572 // Log a warning message for the case when we are unable to set the preferred |
418 // hardware sample rate but continue and use the non-ideal sample rate after | 573 // hardware sample rate but continue and use the non-ideal sample rate after |
419 // reinitializing the audio parameters. | 574 // reinitializing the audio parameters. Most BT headsets only support 8kHz or |
420 if (session.sampleRate != playout_parameters_.sample_rate()) { | 575 // 16kHz. |
421 LOG(LS_WARNING) | 576 if (session.sampleRate != kPreferredSampleRate) { |
422 << "Failed to enable an audio session with the preferred sample rate!"; | 577 LOG(LS_WARNING) << "Unable to set the preferred sample rate"; |
423 } | 578 } |
424 | 579 |
425 // At this stage, we also know the exact IO buffer duration and can add | 580 // At this stage, we also know the exact IO buffer duration and can add |
426 // that info to the existing audio parameters where it is converted into | 581 // that info to the existing audio parameters where it is converted into |
427 // number of audio frames. | 582 // number of audio frames. |
428 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz. | 583 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz. |
429 // Hence, 128 is the size we expect to see in upcoming render callbacks. | 584 // Hence, 128 is the size we expect to see in upcoming render callbacks. |
430 playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(), | 585 playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(), |
431 session.IOBufferDuration); | 586 session.IOBufferDuration); |
432 RTC_DCHECK(playout_parameters_.is_complete()); | 587 RTC_DCHECK(playout_parameters_.is_complete()); |
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
525 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); | 680 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); |
526 application_format.mSampleRate = playout_parameters_.sample_rate(); | 681 application_format.mSampleRate = playout_parameters_.sample_rate(); |
527 application_format.mFormatID = kAudioFormatLinearPCM; | 682 application_format.mFormatID = kAudioFormatLinearPCM; |
528 application_format.mFormatFlags = | 683 application_format.mFormatFlags = |
529 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; | 684 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; |
530 application_format.mBytesPerPacket = kBytesPerSample; | 685 application_format.mBytesPerPacket = kBytesPerSample; |
531 application_format.mFramesPerPacket = 1; // uncompressed | 686 application_format.mFramesPerPacket = 1; // uncompressed |
532 application_format.mBytesPerFrame = kBytesPerSample; | 687 application_format.mBytesPerFrame = kBytesPerSample; |
533 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; | 688 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; |
534 application_format.mBitsPerChannel = 8 * kBytesPerSample; | 689 application_format.mBitsPerChannel = 8 * kBytesPerSample; |
| 690 // Store the new format. |
| 691 application_format_ = application_format; |
535 #if !defined(NDEBUG) | 692 #if !defined(NDEBUG) |
536 LogABSD(application_format); | 693 LogABSD(application_format_); |
537 #endif | 694 #endif |
538 | 695 |
539 // Set the application format on the output scope of the input element/bus. | 696 // Set the application format on the output scope of the input element/bus. |
540 LOG_AND_RETURN_IF_ERROR( | 697 LOG_AND_RETURN_IF_ERROR( |
541 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, | 698 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
542 kAudioUnitScope_Output, input_bus, | 699 kAudioUnitScope_Output, input_bus, |
543 &application_format, size), | 700 &application_format, size), |
544 "Failed to set application format on output scope of input element"); | 701 "Failed to set application format on output scope of input element"); |
545 | 702 |
546 // Set the application format on the input scope of the output element/bus. | 703 // Set the application format on the input scope of the output element/bus. |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
582 kAudioUnitScope_Global, input_bus, &input_callback, | 739 kAudioUnitScope_Global, input_bus, &input_callback, |
583 sizeof(input_callback)), | 740 sizeof(input_callback)), |
584 "Failed to specify the input callback on the input element"); | 741 "Failed to specify the input callback on the input element"); |
585 | 742 |
586 // Initialize the Voice-Processing I/O unit instance. | 743 // Initialize the Voice-Processing I/O unit instance. |
587 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), | 744 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), |
588 "Failed to initialize the Voice-Processing I/O unit"); | 745 "Failed to initialize the Voice-Processing I/O unit"); |
589 return true; | 746 return true; |
590 } | 747 } |
591 | 748 |
| 749 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) { |
| 750 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")"; |
| 751 // Stop the active audio unit. |
| 752 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_), |
| 753 "Failed to stop the the Voice-Processing I/O unit"); |
| 754 |
| 755 // The stream format is about to be changed and it requires that we first |
| 756 // uninitialize it to deallocate its resources. |
| 757 LOG_AND_RETURN_IF_ERROR( |
| 758 AudioUnitUninitialize(vpio_unit_), |
| 759 "Failed to uninitialize the the Voice-Processing I/O unit"); |
| 760 |
| 761 // Allocate new buffers given the new stream format. |
| 762 SetupAudioBuffersForActiveAudioSession(); |
| 763 |
| 764 // Update the existing application format using the new sample rate. |
| 765 application_format_.mSampleRate = playout_parameters_.sample_rate(); |
| 766 UInt32 size = sizeof(application_format_); |
| 767 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
| 768 kAudioUnitScope_Output, 1, &application_format_, size); |
| 769 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, |
| 770 kAudioUnitScope_Input, 0, &application_format_, size); |
| 771 |
| 772 // Prepare the audio unit to render audio again. |
| 773 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), |
| 774 "Failed to initialize the Voice-Processing I/O unit"); |
| 775 |
| 776 // Start rendering audio using the new format. |
| 777 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_), |
| 778 "Failed to start the Voice-Processing I/O unit"); |
| 779 return true; |
| 780 } |
| 781 |
592 bool AudioDeviceIOS::InitPlayOrRecord() { | 782 bool AudioDeviceIOS::InitPlayOrRecord() { |
593 LOGI() << "InitPlayOrRecord"; | 783 LOGI() << "InitPlayOrRecord"; |
594 AVAudioSession* session = [AVAudioSession sharedInstance]; | 784 AVAudioSession* session = [AVAudioSession sharedInstance]; |
595 // Activate the audio session and ask for a set of preferred audio parameters. | 785 // Activate the audio session and ask for a set of preferred audio parameters. |
596 ActivateAudioSession(session, true); | 786 ActivateAudioSession(session, true); |
597 | 787 |
| 788 // Start observing audio session interruptions and route changes. |
| 789 RegisterNotificationObservers(); |
| 790 |
598 // Ensure that we got what we asked for in our active audio session. | 791 // Ensure that we got what we asked for in our active audio session. |
599 SetupAudioBuffersForActiveAudioSession(); | 792 SetupAudioBuffersForActiveAudioSession(); |
600 | 793 |
601 // Create, setup and initialize a new Voice-Processing I/O unit. | 794 // Create, setup and initialize a new Voice-Processing I/O unit. |
602 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { | 795 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { |
603 return false; | 796 return false; |
604 } | 797 } |
605 | |
606 // Listen to audio interruptions. | |
607 // TODO(henrika): learn this area better. | |
608 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; | |
609 id observer = [center | |
610 addObserverForName:AVAudioSessionInterruptionNotification | |
611 object:nil | |
612 queue:[NSOperationQueue mainQueue] | |
613 usingBlock:^(NSNotification* notification) { | |
614 NSNumber* typeNumber = | |
615 [notification userInfo][AVAudioSessionInterruptionTypeKey]; | |
616 AVAudioSessionInterruptionType type = | |
617 (AVAudioSessionInterruptionType)[typeNumber | |
618 unsignedIntegerValue]; | |
619 switch (type) { | |
620 case AVAudioSessionInterruptionTypeBegan: | |
621 // At this point our audio session has been deactivated and | |
622 // the audio unit render callbacks no longer occur. | |
623 // Nothing to do. | |
624 break; | |
625 case AVAudioSessionInterruptionTypeEnded: { | |
626 NSError* error = nil; | |
627 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
628 [session setActive:YES error:&error]; | |
629 if (error != nil) { | |
630 LOG_F(LS_ERROR) << "Failed to active audio session"; | |
631 } | |
632 // Post interruption the audio unit render callbacks don't | |
633 // automatically continue, so we restart the unit manually | |
634 // here. | |
635 AudioOutputUnitStop(vpio_unit_); | |
636 AudioOutputUnitStart(vpio_unit_); | |
637 break; | |
638 } | |
639 } | |
640 }]; | |
641 // Increment refcount on observer using ARC bridge. Instance variable is a | |
642 // void* instead of an id because header is included in other pure C++ | |
643 // files. | |
644 audio_interruption_observer_ = (__bridge_retained void*)observer; | |
645 return true; | 798 return true; |
646 } | 799 } |
647 | 800 |
648 bool AudioDeviceIOS::ShutdownPlayOrRecord() { | 801 bool AudioDeviceIOS::ShutdownPlayOrRecord() { |
649 LOGI() << "ShutdownPlayOrRecord"; | 802 LOGI() << "ShutdownPlayOrRecord"; |
650 if (audio_interruption_observer_ != nullptr) { | 803 // Remove audio session notification observers. |
651 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; | 804 UnregisterNotificationObservers(); |
652 // Transfer ownership of observer back to ARC, which will dealloc the | 805 |
653 // observer once it exits this scope. | |
654 id observer = (__bridge_transfer id)audio_interruption_observer_; | |
655 [center removeObserver:observer]; | |
656 audio_interruption_observer_ = nullptr; | |
657 } | |
658 // Close and delete the voice-processing I/O unit. | 806 // Close and delete the voice-processing I/O unit. |
659 OSStatus result = -1; | 807 OSStatus result = -1; |
660 if (nullptr != vpio_unit_) { | 808 if (nullptr != vpio_unit_) { |
661 result = AudioOutputUnitStop(vpio_unit_); | 809 result = AudioOutputUnitStop(vpio_unit_); |
662 if (result != noErr) { | 810 if (result != noErr) { |
663 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; | 811 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; |
664 } | 812 } |
| 813 result = AudioUnitUninitialize(vpio_unit_); |
| 814 if (result != noErr) { |
| 815 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; |
| 816 } |
665 result = AudioComponentInstanceDispose(vpio_unit_); | 817 result = AudioComponentInstanceDispose(vpio_unit_); |
666 if (result != noErr) { | 818 if (result != noErr) { |
667 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; | 819 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; |
668 } | 820 } |
669 vpio_unit_ = nullptr; | 821 vpio_unit_ = nullptr; |
670 } | 822 } |
| 823 |
671 // All I/O should be stopped or paused prior to deactivating the audio | 824 // All I/O should be stopped or paused prior to deactivating the audio |
672 // session, hence we deactivate as last action. | 825 // session, hence we deactivate as last action. |
673 AVAudioSession* session = [AVAudioSession sharedInstance]; | 826 AVAudioSession* session = [AVAudioSession sharedInstance]; |
674 ActivateAudioSession(session, false); | 827 ActivateAudioSession(session, false); |
675 return true; | 828 return true; |
676 } | 829 } |
677 | 830 |
678 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( | 831 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( |
679 void* in_ref_con, | 832 void* in_ref_con, |
680 AudioUnitRenderActionFlags* io_action_flags, | 833 AudioUnitRenderActionFlags* io_action_flags, |
681 const AudioTimeStamp* in_time_stamp, | 834 const AudioTimeStamp* in_time_stamp, |
682 UInt32 in_bus_number, | 835 UInt32 in_bus_number, |
683 UInt32 in_number_frames, | 836 UInt32 in_number_frames, |
684 AudioBufferList* io_data) { | 837 AudioBufferList* io_data) { |
685 RTC_DCHECK_EQ(1u, in_bus_number); | 838 RTC_DCHECK_EQ(1u, in_bus_number); |
686 RTC_DCHECK( | 839 RTC_DCHECK( |
687 !io_data); // no buffer should be allocated for input at this stage | 840 !io_data); // no buffer should be allocated for input at this stage |
688 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(in_ref_con); | 841 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(in_ref_con); |
689 return audio_device_ios->OnRecordedDataIsAvailable( | 842 return audio_device_ios->OnRecordedDataIsAvailable( |
690 io_action_flags, in_time_stamp, in_bus_number, in_number_frames); | 843 io_action_flags, in_time_stamp, in_bus_number, in_number_frames); |
691 } | 844 } |
692 | 845 |
693 OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable( | 846 OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable( |
694 AudioUnitRenderActionFlags* io_action_flags, | 847 AudioUnitRenderActionFlags* io_action_flags, |
695 const AudioTimeStamp* in_time_stamp, | 848 const AudioTimeStamp* in_time_stamp, |
696 UInt32 in_bus_number, | 849 UInt32 in_bus_number, |
697 UInt32 in_number_frames) { | 850 UInt32 in_number_frames) { |
698 RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames); | |
699 OSStatus result = noErr; | 851 OSStatus result = noErr; |
700 // Simply return if recording is not enabled. | 852 // Simply return if recording is not enabled. |
701 if (!rtc::AtomicOps::AcquireLoad(&recording_)) | 853 if (!rtc::AtomicOps::AcquireLoad(&recording_)) |
702 return result; | 854 return result; |
703 RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames); | 855 if (in_number_frames != record_parameters_.frames_per_buffer()) { |
| 856 // We have seen short bursts (1-2 frames) where |in_number_frames| changes. |
| 857 // Add a log to keep track of longer sequences if that should ever happen. |
| 858 LOG(LS_WARNING) << "in_number_frames (" << in_number_frames |
| 859 << ") != " << record_parameters_.frames_per_buffer(); |
| 860 } |
704 // Obtain the recorded audio samples by initiating a rendering cycle. | 861 // Obtain the recorded audio samples by initiating a rendering cycle. |
705 // Since it happens on the input bus, the |io_data| parameter is a reference | 862 // Since it happens on the input bus, the |io_data| parameter is a reference |
706 // to the preallocated audio buffer list that the audio unit renders into. | 863 // to the preallocated audio buffer list that the audio unit renders into. |
707 // TODO(henrika): should error handling be improved? | 864 // TODO(henrika): should error handling be improved? |
708 AudioBufferList* io_data = &audio_record_buffer_list_; | 865 AudioBufferList* io_data = &audio_record_buffer_list_; |
709 result = AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp, | 866 result = AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp, |
710 in_bus_number, in_number_frames, io_data); | 867 in_bus_number, in_number_frames, io_data); |
711 if (result != noErr) { | 868 if (result != noErr) { |
712 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; | 869 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; |
713 return result; | 870 return result; |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
760 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches | 917 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches |
761 // the native I/O audio unit) to a preallocated intermediate buffer and | 918 // the native I/O audio unit) to a preallocated intermediate buffer and |
762 // copy the result to the audio buffer in the |io_data| destination. | 919 // copy the result to the audio buffer in the |io_data| destination. |
763 SInt8* source = playout_audio_buffer_.get(); | 920 SInt8* source = playout_audio_buffer_.get(); |
764 fine_audio_buffer_->GetPlayoutData(source); | 921 fine_audio_buffer_->GetPlayoutData(source); |
765 memcpy(destination, source, dataSizeInBytes); | 922 memcpy(destination, source, dataSizeInBytes); |
766 return noErr; | 923 return noErr; |
767 } | 924 } |
768 | 925 |
769 } // namespace webrtc | 926 } // namespace webrtc |
OLD | NEW |