Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_device/ios/audio_device_ios.mm

Issue 1401963002: Adds support for Bluetooth headsets to the iOS audio layer (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 5 years, 2 months ago
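
For context on what Bluetooth support means at the AVAudioSession level: a headset connecting over Bluetooth surfaces as a route change, typically together with a drop in the hardware sample rate (most BT headsets only support 8 kHz or 16 kHz, as noted in the patch below). The sketch that follows is illustrative only and is not part of this CL; the helper name IsBluetoothRouteActive is hypothetical and merely shows one way the active route could be inspected directly. The CL itself reacts to such transitions through the route-change observer added in RegisterNotificationObservers().

#import <AVFoundation/AVFoundation.h>

// Hypothetical helper (illustration only): returns true if the currently
// active AVAudioSession route uses a Bluetooth input or output port.
static bool IsBluetoothRouteActive() {
  AVAudioSessionRouteDescription* route =
      [[AVAudioSession sharedInstance] currentRoute];
  for (AVAudioSessionPortDescription* port in route.inputs) {
    if ([port.portType isEqualToString:AVAudioSessionPortBluetoothHFP])
      return true;
  }
  for (AVAudioSessionPortDescription* port in route.outputs) {
    if ([port.portType isEqualToString:AVAudioSessionPortBluetoothHFP] ||
        [port.portType isEqualToString:AVAudioSessionPortBluetoothA2DP])
      return true;
  }
  return false;
}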
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 59 matching lines...)
70 using ios::CheckAndLogError; 70 using ios::CheckAndLogError;
71 71
72 // Activates an audio session suitable for full duplex VoIP sessions when 72 // Activates an audio session suitable for full duplex VoIP sessions when
73 // |activate| is true. Also sets the preferred sample rate and IO buffer 73 // |activate| is true. Also sets the preferred sample rate and IO buffer
74 // duration. Deactivates an active audio session if |activate| is set to false. 74 // duration. Deactivates an active audio session if |activate| is set to false.
75 static void ActivateAudioSession(AVAudioSession* session, bool activate) { 75 static void ActivateAudioSession(AVAudioSession* session, bool activate) {
76 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; 76 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
77 @autoreleasepool { 77 @autoreleasepool {
78 NSError* error = nil; 78 NSError* error = nil;
79 BOOL success = NO; 79 BOOL success = NO;
80
80 // Deactivate the audio session and return if |activate| is false. 81 // Deactivate the audio session and return if |activate| is false.
81 if (!activate) { 82 if (!activate) {
82 success = [session setActive:NO error:&error]; 83 success = [session setActive:NO error:&error];
83 RTC_DCHECK(CheckAndLogError(success, error)); 84 RTC_DCHECK(CheckAndLogError(success, error));
84 return; 85 return;
85 } 86 }
87
86 // Use a category which supports simultaneous recording and playback. 88 // Use a category which supports simultaneous recording and playback.
87 // By default, using this category implies that our app’s audio is 89 // By default, using this category implies that our app’s audio is
88 // nonmixable, hence activating the session will interrupt any other 90 // nonmixable, hence activating the session will interrupt any other
89 // audio sessions which are also nonmixable. 91 // audio sessions which are also nonmixable.
90 if (session.category != AVAudioSessionCategoryPlayAndRecord) { 92 if (session.category != AVAudioSessionCategoryPlayAndRecord) {
91 error = nil; 93 error = nil;
92 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord 94 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
93 error:&error]; 95 error:&error];
94 RTC_DCHECK(CheckAndLogError(success, error)); 96 RTC_DCHECK(CheckAndLogError(success, error));
95 } 97 }
98
96 // Specify mode for two-way voice communication (e.g. VoIP). 99 // Specify mode for two-way voice communication (e.g. VoIP).
97 if (session.mode != AVAudioSessionModeVoiceChat) { 100 if (session.mode != AVAudioSessionModeVoiceChat) {
98 error = nil; 101 error = nil;
99 success = [session setMode:AVAudioSessionModeVoiceChat error:&error]; 102 success = [session setMode:AVAudioSessionModeVoiceChat error:&error];
100 RTC_DCHECK(CheckAndLogError(success, error)); 103 RTC_DCHECK(CheckAndLogError(success, error));
101 } 104 }
105
102 // Set the session's preferred sample rate (i.e. the hardware sample rate). 106 // Set the session's preferred sample rate (i.e. the hardware sample rate).
103 // It is essential that we use the same sample rate as the stream format 107 // It is essential that we use the same sample rate as the stream format
104 // to ensure that the I/O unit does not have to do sample rate conversion. 108 // to ensure that the I/O unit does not have to do sample rate conversion.
105 error = nil; 109 error = nil;
106 success = 110 success =
107 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; 111 [session setPreferredSampleRate:kPreferredSampleRate error:&error];
108 RTC_DCHECK(CheckAndLogError(success, error)); 112 RTC_DCHECK(CheckAndLogError(success, error));
113
109 // Set the preferred audio I/O buffer duration, in seconds. 114 // Set the preferred audio I/O buffer duration, in seconds.
110 // TODO(henrika): add more comments here. 115 // TODO(henrika): add more comments here.
111 error = nil; 116 error = nil;
112 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration 117 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration
113 error:&error]; 118 error:&error];
114 RTC_DCHECK(CheckAndLogError(success, error)); 119 RTC_DCHECK(CheckAndLogError(success, error));
115 120
116 // TODO(henrika): add observers here...
117
118 // Activate the audio session. Activation can fail if another active audio 121 // Activate the audio session. Activation can fail if another active audio
119 // session (e.g. phone call) has higher priority than ours. 122 // session (e.g. phone call) has higher priority than ours.
120 error = nil; 123 error = nil;
121 success = [session setActive:YES error:&error]; 124 success = [session setActive:YES error:&error];
122 RTC_DCHECK(CheckAndLogError(success, error)); 125 RTC_DCHECK(CheckAndLogError(success, error));
123 RTC_CHECK(session.isInputAvailable) << "No input path is available!"; 126 RTC_CHECK(session.isInputAvailable) << "No input path is available!";
127
124 // Ensure that category and mode are actually activated. 128 // Ensure that category and mode are actually activated.
125 RTC_DCHECK( 129 RTC_DCHECK(
126 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); 130 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]);
127 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); 131 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]);
132
128 // Try to set the preferred number of hardware audio channels. These calls 133 // Try to set the preferred number of hardware audio channels. These calls
129 // must be done after setting the audio session’s category and mode and 134 // must be done after setting the audio session’s category and mode and
130 // activating the session. 135 // activating the session.
131 // We try to use mono in both directions to save resources and format 136 // We try to use mono in both directions to save resources and format
132 // conversions in the audio unit. Some devices only support stereo; 137 // conversions in the audio unit. Some devices only support stereo;
133 // e.g. wired headset on iPhone 6. 138 // e.g. wired headset on iPhone 6.
134 // TODO(henrika): add support for stereo if needed. 139 // TODO(henrika): add support for stereo if needed.
135 error = nil; 140 error = nil;
136 success = 141 success =
137 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels 142 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels
(...skipping 259 matching lines...)
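
Distilled from the chunk above, here is a minimal plain-AVFoundation sketch of the same configure-and-activate sequence. The WebRTC macros and the CheckAndLogError helper are dropped, and 16 kHz / 10 ms are assumed stand-ins for kPreferredSampleRate and kPreferredIOBufferDuration, whose actual values are outside this diff.

#import <AVFoundation/AVFoundation.h>

// Sketch only: configure and activate a full-duplex VoIP audio session.
// Error handling is reduced to a single log for brevity.
static void ConfigureVoipSession() {
  AVAudioSession* session = [AVAudioSession sharedInstance];
  NSError* error = nil;
  // Category that allows simultaneous recording and playback.
  [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
  // Mode tuned for two-way voice communication.
  [session setMode:AVAudioSessionModeVoiceChat error:&error];
  // Assumed preferred values; the real constants are defined elsewhere.
  [session setPreferredSampleRate:16000.0 error:&error];
  [session setPreferredIOBufferDuration:0.01 error:&error];
  if (![session setActive:YES error:&error]) {
    NSLog(@"Failed to activate audio session: %@", error);
  }
}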
397 // just in case. 402 // just in case.
398 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; 403 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
399 // Inform the audio device buffer (ADB) about the new audio format. 404 // Inform the audio device buffer (ADB) about the new audio format.
400 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); 405 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
401 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); 406 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
402 audio_device_buffer_->SetRecordingSampleRate( 407 audio_device_buffer_->SetRecordingSampleRate(
403 record_parameters_.sample_rate()); 408 record_parameters_.sample_rate());
404 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); 409 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
405 } 410 }
406 411
412 void AudioDeviceIOS::RegisterNotificationObservers() {
413 LOGI() << "RegisterNotificationObservers";
414 // Get the default notification center of the current process.
415 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
416
417 // Add AVAudioSessionInterruptionNotification observer.
418 id interruption_observer = [center
419 addObserverForName:AVAudioSessionInterruptionNotification
420 object:nil
421 queue:[NSOperationQueue mainQueue]
422 usingBlock:^(NSNotification* notification) {
423 NSNumber* typeNumber =
424 [notification userInfo][AVAudioSessionInterruptionTypeKey];
425 AVAudioSessionInterruptionType type =
426 (AVAudioSessionInterruptionType)[typeNumber
427 unsignedIntegerValue];
428 switch (type) {
429 case AVAudioSessionInterruptionTypeBegan:
430 // At this point our audio session has been deactivated and
431 // the audio unit render callbacks no longer occur.
432 // Nothing to do.
433 break;
434 case AVAudioSessionInterruptionTypeEnded: {
435 NSError* error = nil;
436 AVAudioSession* session = [AVAudioSession sharedInstance];
437 [session setActive:YES error:&error];
438 if (error != nil) {
439 LOG_F(LS_ERROR) << "Failed to activate audio session";
440 }
441 // Post interruption the audio unit render callbacks don't
442 // automatically continue, so we restart the unit manually
443 // here.
444 AudioOutputUnitStop(vpio_unit_);
445 AudioOutputUnitStart(vpio_unit_);
446 break;
447 }
448 }
449 }];
450
451 // Add AVAudioSessionRouteChangeNotification observer.
452 id route_change_observer = [center
453 addObserverForName:AVAudioSessionRouteChangeNotification
454 object:nil
455 queue:[NSOperationQueue mainQueue]
456 usingBlock:^(NSNotification* notification) {
457 // Get reason for current route change.
458 NSUInteger reason_value = [[notification.userInfo
459 valueForKey:AVAudioSessionRouteChangeReasonKey]
460 unsignedIntegerValue];
461 bool valid_route_change = true;
462 LOG(LS_INFO) << "Route change:";
463 switch (reason_value) {
464 case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
465 LOG(LS_INFO) << " NewDeviceAvailable";
466 break;
467 case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
468 LOG(LS_INFO) << " OldDeviceUnavailable";
469 break;
470 case AVAudioSessionRouteChangeReasonCategoryChange:
471 LOG(LS_INFO) << " CategoryChange";
472 LOG(LS_INFO) << " New category: "
473 << ios::GetAudioSessionCategory();
474 break;
475 case AVAudioSessionRouteChangeReasonOverride:
476 LOG(LS_INFO) << " Override";
477 break;
478 case AVAudioSessionRouteChangeReasonWakeFromSleep:
479 LOG(LS_INFO) << " WakeFromSleep";
480 break;
481 case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
482 // Ignore this type of route change since we are focusing
483 // on detecting headset changes.
484 LOG(LS_INFO) << " RouteConfigurationChange";
485 valid_route_change = false;
486 break;
487 default:
488 LOG(LS_INFO) << " ReasonUnknown";
489 }
490
491 if (valid_route_change) {
492 // Log previous route configuration.
493 AVAudioSessionRouteDescription* prev_route = [notification
494 userInfo][AVAudioSessionRouteChangePreviousRouteKey];
495 LOG(LS_INFO) << "Previous route:";
496 LOG(LS_INFO) << ios::StdStringFromNSString(
497 [NSString stringWithFormat:@"%@", prev_route]);
498
499 // Only restart audio for a valid route change and if the
500 // session sample rate has changed.
501 const double session_sample_rate =
502 [[AVAudioSession sharedInstance] sampleRate];
503 if (playout_parameters_.sample_rate() !=
504 session_sample_rate) {
505 if (!RestartAudioUnitWithNewFormat(session_sample_rate)) {
506 LOG(LS_ERROR) << "Audio restart failed";
507 }
508 }
509 }
510
511 }];
512
513 // Increment refcount on observers using ARC bridge. Instance variable is a
514 // void* instead of an id because the header is included in other pure C++
515 // files.
516 audio_interruption_observer_ = (__bridge_retained void*)interruption_observer;
517 route_change_observer_ = (__bridge_retained void*)route_change_observer;
518 }
519
520 void AudioDeviceIOS::UnregisterNotificationObservers() {
521 LOGI() << "UnregisterNotificationObservers";
522 // Transfer ownership of observer back to ARC, which will deallocate the
523 // observer once it exits this scope.
524 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
525 if (audio_interruption_observer_ != nullptr) {
526 id observer = (__bridge_transfer id)audio_interruption_observer_;
527 [center removeObserver:observer];
528 audio_interruption_observer_ = nullptr;
529 }
530 if (route_change_observer_ != nullptr) {
531 id observer = (__bridge_transfer id)route_change_observer_;
532 [center removeObserver:observer];
533 route_change_observer_ = nullptr;
534 }
535 }
536
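The observers are stored in void* members (audio_interruption_observer_ and route_change_observer_) so that the class header can stay pure C++, and the two functions above move ownership across the ARC boundary with bridge casts. A minimal sketch of that idiom, using a hypothetical observer_ variable purely for illustration:

#import <Foundation/Foundation.h>

// Sketch of the ARC bridging idiom used above.
void* observer_ = nullptr;  // stand-in for the void* instance variable

static void RetainObserver(id observer) {
  // __bridge_retained: ARC hands out a +1 reference that we now own
  // manually through the void* pointer.
  observer_ = (__bridge_retained void*)observer;
}

static void ReleaseObserver() {
  if (observer_ != nullptr) {
    // __bridge_transfer: ownership moves back to ARC, which releases the
    // object when |observer| goes out of scope.
    id observer = (__bridge_transfer id)observer_;
    (void)observer;
    observer_ = nullptr;
  }
}
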
407 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { 537 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
408 LOGI() << "SetupAudioBuffersForActiveAudioSession"; 538 LOGI() << "SetupAudioBuffersForActiveAudioSession";
539 // Verify the current values once the audio session has been activated.
409 AVAudioSession* session = [AVAudioSession sharedInstance]; 540 AVAudioSession* session = [AVAudioSession sharedInstance];
410 // Verify the current values once the audio session has been activated.
411 LOG(LS_INFO) << " sample rate: " << session.sampleRate; 541 LOG(LS_INFO) << " sample rate: " << session.sampleRate;
412 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration; 542 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration;
413 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels; 543 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels;
414 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels; 544 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels;
415 LOG(LS_INFO) << " output latency: " << session.outputLatency; 545 LOG(LS_INFO) << " output latency: " << session.outputLatency;
416 LOG(LS_INFO) << " input latency: " << session.inputLatency; 546 LOG(LS_INFO) << " input latency: " << session.inputLatency;
547
417 // Log a warning message for the case when we are unable to set the preferred 548 // Log a warning message for the case when we are unable to set the preferred
418 // hardware sample rate but continue and use the non-ideal sample rate after 549 // hardware sample rate but continue and use the non-ideal sample rate after
419 // reinitializing the audio parameters. 550 // reinitializing the audio parameters. Most BT headsets only support 8kHz or
420 if (session.sampleRate != playout_parameters_.sample_rate()) { 551 // 16kHz.
421 LOG(LS_WARNING) 552 if (session.sampleRate != kPreferredSampleRate) {
422 << "Failed to enable an audio session with the preferred sample rate!"; 553 LOG(LS_WARNING) << "Unable to set the preferred sample rate";
423 } 554 }
424 555
425 // At this stage, we also know the exact IO buffer duration and can add 556 // At this stage, we also know the exact IO buffer duration and can add
426 // that info to the existing audio parameters where it is converted into 557 // that info to the existing audio parameters where it is converted into
427 // number of audio frames. 558 // number of audio frames.
428 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz. 559 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
429 // Hence, 128 is the size we expect to see in upcoming render callbacks. 560 // Hence, 128 is the size we expect to see in upcoming render callbacks.
430 playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(), 561 playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(),
431 session.IOBufferDuration); 562 session.IOBufferDuration);
432 RTC_DCHECK(playout_parameters_.is_complete()); 563 RTC_DCHECK(playout_parameters_.is_complete());
(...skipping 92 matching lines...)
525 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); 656 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels);
526 application_format.mSampleRate = playout_parameters_.sample_rate(); 657 application_format.mSampleRate = playout_parameters_.sample_rate();
527 application_format.mFormatID = kAudioFormatLinearPCM; 658 application_format.mFormatID = kAudioFormatLinearPCM;
528 application_format.mFormatFlags = 659 application_format.mFormatFlags =
529 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; 660 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
530 application_format.mBytesPerPacket = kBytesPerSample; 661 application_format.mBytesPerPacket = kBytesPerSample;
531 application_format.mFramesPerPacket = 1; // uncompressed 662 application_format.mFramesPerPacket = 1; // uncompressed
532 application_format.mBytesPerFrame = kBytesPerSample; 663 application_format.mBytesPerFrame = kBytesPerSample;
533 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; 664 application_format.mChannelsPerFrame = kPreferredNumberOfChannels;
534 application_format.mBitsPerChannel = 8 * kBytesPerSample; 665 application_format.mBitsPerChannel = 8 * kBytesPerSample;
666 // Store the new format.
667 application_format_ = application_format;
535 #if !defined(NDEBUG) 668 #if !defined(NDEBUG)
536 LogABSD(application_format); 669 LogABSD(application_format_);
537 #endif 670 #endif
538 671
539 // Set the application format on the output scope of the input element/bus. 672 // Set the application format on the output scope of the input element/bus.
540 LOG_AND_RETURN_IF_ERROR( 673 LOG_AND_RETURN_IF_ERROR(
541 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, 674 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
542 kAudioUnitScope_Output, input_bus, 675 kAudioUnitScope_Output, input_bus,
543 &application_format, size), 676 &application_format, size),
544 "Failed to set application format on output scope of input element"); 677 "Failed to set application format on output scope of input element");
545 678
546 // Set the application format on the input scope of the output element/bus. 679 // Set the application format on the input scope of the output element/bus.
(...skipping 35 matching lines...)
582 kAudioUnitScope_Global, input_bus, &input_callback, 715 kAudioUnitScope_Global, input_bus, &input_callback,
583 sizeof(input_callback)), 716 sizeof(input_callback)),
584 "Failed to specify the input callback on the input element"); 717 "Failed to specify the input callback on the input element");
585 718
586 // Initialize the Voice-Processing I/O unit instance. 719 // Initialize the Voice-Processing I/O unit instance.
587 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), 720 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
588 "Failed to initialize the Voice-Processing I/O unit"); 721 "Failed to initialize the Voice-Processing I/O unit");
589 return true; 722 return true;
590 } 723 }
591 724
725 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) {
726 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")";
727 // Stop the active audio unit.
728 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
729 "Failed to stop the the Voice-Processing I/O unit");
730
731 // The stream format is about to be changed and it requires that we first
732 // uninitialize it to deallocate its resources.
733 LOG_AND_RETURN_IF_ERROR(
734 AudioUnitUninitialize(vpio_unit_),
735 "Failed to uninitialize the the Voice-Processing I/O unit");
736
737 // Allocate new buffers given the new stream format.
738 SetupAudioBuffersForActiveAudioSession();
739
740 // Update the existing application format using the new sample rate.
741 application_format_.mSampleRate = playout_parameters_.sample_rate();
742 UInt32 size = sizeof(application_format_);
743 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
744 kAudioUnitScope_Output, 1, &application_format_, size);
745 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
746 kAudioUnitScope_Input, 0, &application_format_, size);
747
748 // Prepare the audio unit to render audio again.
749 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
750 "Failed to initialize the Voice-Processing I/O unit");
751
752 // Start rendering audio using the new format.
753 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
754 "Failed to start the Voice-Processing I/O unit");
755 return true;
756 }
757
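One thing RestartAudioUnitWithNewFormat() does not do is verify that the unit actually accepted the new rate; the two AudioUnitSetProperty() calls above are also not error-checked. A possible verification sketch (not part of this CL), reusing vpio_unit_ and the sample_rate argument from the function above, reads the stream format back once AudioUnitInitialize() has succeeded:

// Sketch only: confirm the new sample rate took effect on the output scope
// of the input element (bus 1), mirroring where it was set above.
AudioStreamBasicDescription actual_format = {};
UInt32 property_size = sizeof(actual_format);
OSStatus status = AudioUnitGetProperty(
    vpio_unit_, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
    1 /* input element */, &actual_format, &property_size);
if (status == noErr && actual_format.mSampleRate != sample_rate) {
  LOG(LS_WARNING) << "Audio unit still reports sample rate "
                  << actual_format.mSampleRate;
}
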
592 bool AudioDeviceIOS::InitPlayOrRecord() { 758 bool AudioDeviceIOS::InitPlayOrRecord() {
593 LOGI() << "InitPlayOrRecord"; 759 LOGI() << "InitPlayOrRecord";
594 AVAudioSession* session = [AVAudioSession sharedInstance]; 760 AVAudioSession* session = [AVAudioSession sharedInstance];
595 // Activate the audio session and ask for a set of preferred audio parameters. 761 // Activate the audio session and ask for a set of preferred audio parameters.
596 ActivateAudioSession(session, true); 762 ActivateAudioSession(session, true);
597 763
764 // Start observing audio session interruptions and route changes.
765 RegisterNotificationObservers();
766
598 // Ensure that we got what we asked for in our active audio session. 767 // Ensure that we got what we asked for in our active audio session.
599 SetupAudioBuffersForActiveAudioSession(); 768 SetupAudioBuffersForActiveAudioSession();
600 769
601 // Create, setup and initialize a new Voice-Processing I/O unit. 770 // Create, setup and initialize a new Voice-Processing I/O unit.
602 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { 771 if (!SetupAndInitializeVoiceProcessingAudioUnit()) {
603 return false; 772 return false;
604 } 773 }
605
606 // Listen to audio interruptions.
607 // TODO(henrika): learn this area better.
608 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
609 id observer = [center
610 addObserverForName:AVAudioSessionInterruptionNotification
611 object:nil
612 queue:[NSOperationQueue mainQueue]
613 usingBlock:^(NSNotification* notification) {
614 NSNumber* typeNumber =
615 [notification userInfo][AVAudioSessionInterruptionTypeKey];
616 AVAudioSessionInterruptionType type =
617 (AVAudioSessionInterruptionType)[typeNumber
618 unsignedIntegerValue];
619 switch (type) {
620 case AVAudioSessionInterruptionTypeBegan:
621 // At this point our audio session has been deactivated and
622 // the audio unit render callbacks no longer occur.
623 // Nothing to do.
624 break;
625 case AVAudioSessionInterruptionTypeEnded: {
626 NSError* error = nil;
627 AVAudioSession* session = [AVAudioSession sharedInstance];
628 [session setActive:YES error:&error];
629 if (error != nil) {
630 LOG_F(LS_ERROR) << "Failed to activate audio session";
631 }
632 // Post interruption the audio unit render callbacks don't
633 // automatically continue, so we restart the unit manually
634 // here.
635 AudioOutputUnitStop(vpio_unit_);
636 AudioOutputUnitStart(vpio_unit_);
637 break;
638 }
639 }
640 }];
641 // Increment refcount on observer using ARC bridge. Instance variable is a
642 // void* instead of an id because header is included in other pure C++
643 // files.
644 audio_interruption_observer_ = (__bridge_retained void*)observer;
645 return true; 774 return true;
646 } 775 }
647 776
648 bool AudioDeviceIOS::ShutdownPlayOrRecord() { 777 bool AudioDeviceIOS::ShutdownPlayOrRecord() {
649 LOGI() << "ShutdownPlayOrRecord"; 778 LOGI() << "ShutdownPlayOrRecord";
650 if (audio_interruption_observer_ != nullptr) { 779 // Remove audio session notification observers.
651 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; 780 UnregisterNotificationObservers();
652 // Transfer ownership of observer back to ARC, which will dealloc the 781
653 // observer once it exits this scope.
654 id observer = (__bridge_transfer id)audio_interruption_observer_;
655 [center removeObserver:observer];
656 audio_interruption_observer_ = nullptr;
657 }
658 // Close and delete the voice-processing I/O unit. 782 // Close and delete the voice-processing I/O unit.
659 OSStatus result = -1; 783 OSStatus result = -1;
660 if (nullptr != vpio_unit_) { 784 if (nullptr != vpio_unit_) {
661 result = AudioOutputUnitStop(vpio_unit_); 785 result = AudioOutputUnitStop(vpio_unit_);
662 if (result != noErr) { 786 if (result != noErr) {
663 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; 787 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
664 } 788 }
789 result = AudioUnitUninitialize(vpio_unit_);
790 if (result != noErr) {
791 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result;
792 }
665 result = AudioComponentInstanceDispose(vpio_unit_); 793 result = AudioComponentInstanceDispose(vpio_unit_);
666 if (result != noErr) { 794 if (result != noErr) {
667 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; 795 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
668 } 796 }
669 vpio_unit_ = nullptr; 797 vpio_unit_ = nullptr;
670 } 798 }
799
671 // All I/O should be stopped or paused prior to deactivating the audio 800 // All I/O should be stopped or paused prior to deactivating the audio
672 // session, hence we deactivate as last action. 801 // session, hence we deactivate as last action.
673 AVAudioSession* session = [AVAudioSession sharedInstance]; 802 AVAudioSession* session = [AVAudioSession sharedInstance];
674 ActivateAudioSession(session, false); 803 ActivateAudioSession(session, false);
675 return true; 804 return true;
676 } 805 }
677 806
678 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( 807 OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
679 void* in_ref_con, 808 void* in_ref_con,
680 AudioUnitRenderActionFlags* io_action_flags, 809 AudioUnitRenderActionFlags* io_action_flags,
681 const AudioTimeStamp* in_time_stamp, 810 const AudioTimeStamp* in_time_stamp,
682 UInt32 in_bus_number, 811 UInt32 in_bus_number,
683 UInt32 in_number_frames, 812 UInt32 in_number_frames,
684 AudioBufferList* io_data) { 813 AudioBufferList* io_data) {
685 RTC_DCHECK_EQ(1u, in_bus_number); 814 RTC_DCHECK_EQ(1u, in_bus_number);
686 RTC_DCHECK( 815 RTC_DCHECK(
687 !io_data); // no buffer should be allocated for input at this stage 816 !io_data); // no buffer should be allocated for input at this stage
688 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(in_ref_con); 817 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(in_ref_con);
689 return audio_device_ios->OnRecordedDataIsAvailable( 818 return audio_device_ios->OnRecordedDataIsAvailable(
690 io_action_flags, in_time_stamp, in_bus_number, in_number_frames); 819 io_action_flags, in_time_stamp, in_bus_number, in_number_frames);
691 } 820 }
692 821
693 OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable( 822 OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable(
694 AudioUnitRenderActionFlags* io_action_flags, 823 AudioUnitRenderActionFlags* io_action_flags,
695 const AudioTimeStamp* in_time_stamp, 824 const AudioTimeStamp* in_time_stamp,
696 UInt32 in_bus_number, 825 UInt32 in_bus_number,
697 UInt32 in_number_frames) { 826 UInt32 in_number_frames) {
698 RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames);
699 OSStatus result = noErr; 827 OSStatus result = noErr;
700 // Simply return if recording is not enabled. 828 // Simply return if recording is not enabled.
701 if (!rtc::AtomicOps::AcquireLoad(&recording_)) 829 if (!rtc::AtomicOps::AcquireLoad(&recording_))
702 return result; 830 return result;
831 // LOG(LS_INFO) << "+ in_number_frames: " << in_number_frames;
703 RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames); 832 RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames);
704 // Obtain the recorded audio samples by initiating a rendering cycle. 833 // Obtain the recorded audio samples by initiating a rendering cycle.
705 // Since it happens on the input bus, the |io_data| parameter is a reference 834 // Since it happens on the input bus, the |io_data| parameter is a reference
706 // to the preallocated audio buffer list that the audio unit renders into. 835 // to the preallocated audio buffer list that the audio unit renders into.
707 // TODO(henrika): should error handling be improved? 836 // TODO(henrika): should error handling be improved?
708 AudioBufferList* io_data = &audio_record_buffer_list_; 837 AudioBufferList* io_data = &audio_record_buffer_list_;
709 result = AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp, 838 result = AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp,
710 in_bus_number, in_number_frames, io_data); 839 in_bus_number, in_number_frames, io_data);
711 if (result != noErr) { 840 if (result != noErr) {
712 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; 841 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
760 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches 889 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
761 // the native I/O audio unit) to a preallocated intermediate buffer and 890 // the native I/O audio unit) to a preallocated intermediate buffer and
762 // copy the result to the audio buffer in the |io_data| destination. 891 // copy the result to the audio buffer in the |io_data| destination.
763 SInt8* source = playout_audio_buffer_.get(); 892 SInt8* source = playout_audio_buffer_.get();
764 fine_audio_buffer_->GetPlayoutData(source); 893 fine_audio_buffer_->GetPlayoutData(source);
765 memcpy(destination, source, dataSizeInBytes); 894 memcpy(destination, source, dataSizeInBytes);
766 return noErr; 895 return noErr;
767 } 896 }
768 897
769 } // namespace webrtc 898 } // namespace webrtc