Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_device/ios/audio_device_ios.mm

Issue 1401963002: Adds support for Bluetooth headsets to the iOS audio layer (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Fixed interruption handling (created 5 years, 2 months ago)
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 18 matching lines...)
29 29
30 #define LOG_AND_RETURN_IF_ERROR(error, message) \ 30 #define LOG_AND_RETURN_IF_ERROR(error, message) \
31 do { \ 31 do { \
32 OSStatus err = error; \ 32 OSStatus err = error; \
33 if (err) { \ 33 if (err) { \
34 LOG(LS_ERROR) << message << ": " << err; \ 34 LOG(LS_ERROR) << message << ": " << err; \
35 return false; \ 35 return false; \
36 } \ 36 } \
37 } while (0) 37 } while (0)
38 38
39 #define LOG_IF_ERROR(error, message) \
40 do { \
41 OSStatus err = error; \
42 if (err) { \
43 LOG(LS_ERROR) << message << ": " << err; \
44 } \
45 } while (0)
46
39 // Preferred hardware sample rate (unit is in Hertz). The client sample rate 47 // Preferred hardware sample rate (unit is in Hertz). The client sample rate
40 // will be set to this value as well to avoid resampling in the audio unit's 48 // will be set to this value as well to avoid resampling in the audio unit's
41 // format converter. Note that some devices, e.g. BT headsets, only support 49 // format converter. Note that some devices, e.g. BT headsets, only support
42 // 8000 Hz as their native sample rate. 50 // 8000 Hz as their native sample rate.
43 const double kPreferredSampleRate = 48000.0; 51 const double kPreferredSampleRate = 48000.0;
44 // Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms 52 // Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
45 // size used by WebRTC. The actual size will differ between devices. 53 // size used by WebRTC. The actual size will differ between devices.
46 // Example: using 48kHz on iPhone 6 results in a native buffer size of 54 // Example: using 48kHz on iPhone 6 results in a native buffer size of
47 // ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will 55 // ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
48 // take care of any buffering required to convert between native buffers and 56 // take care of any buffering required to convert between native buffers and
(...skipping 21 matching lines...)
70 using ios::CheckAndLogError; 78 using ios::CheckAndLogError;
71 79
72 // Activates an audio session suitable for full duplex VoIP sessions when 80 // Activates an audio session suitable for full duplex VoIP sessions when
73 // |activate| is true. Also sets the preferred sample rate and IO buffer 81 // |activate| is true. Also sets the preferred sample rate and IO buffer
74 // duration. Deactivates an active audio session if |activate| is set to false. 82 // duration. Deactivates an active audio session if |activate| is set to false.
75 static void ActivateAudioSession(AVAudioSession* session, bool activate) { 83 static void ActivateAudioSession(AVAudioSession* session, bool activate) {
76 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; 84 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
77 @autoreleasepool { 85 @autoreleasepool {
78 NSError* error = nil; 86 NSError* error = nil;
79 BOOL success = NO; 87 BOOL success = NO;
88
80 // Deactivate the audio session and return if |activate| is false. 89 // Deactivate the audio session and return if |activate| is false.
81 if (!activate) { 90 if (!activate) {
82 success = [session setActive:NO error:&error]; 91 success = [session setActive:NO error:&error];
83 RTC_DCHECK(CheckAndLogError(success, error)); 92 RTC_DCHECK(CheckAndLogError(success, error));
84 return; 93 return;
85 } 94 }
95
86 // Use a category which supports simultaneous recording and playback. 96 // Use a category which supports simultaneous recording and playback.
87 // By default, using this category implies that our app’s audio is 97 // By default, using this category implies that our app’s audio is
88 // nonmixable, hence activating the session will interrupt any other 98 // nonmixable, hence activating the session will interrupt any other
89 // audio sessions which are also nonmixable. 99 // audio sessions which are also nonmixable.
90 if (session.category != AVAudioSessionCategoryPlayAndRecord) { 100 if (session.category != AVAudioSessionCategoryPlayAndRecord) {
91 error = nil; 101 error = nil;
92 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord 102 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
103 withOptions:AVAudioSessionCategoryOptionAllowBluetooth
93 error:&error]; 104 error:&error];
94 RTC_DCHECK(CheckAndLogError(success, error)); 105 RTC_DCHECK(CheckAndLogError(success, error));
95 } 106 }
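The AVAudioSessionCategoryOptionAllowBluetooth option added above is what lets the session route audio over a paired Bluetooth HFP headset. Below is a minimal sketch of the same configuration in isolation, assuming ARC; EnableBluetoothRouting() is a hypothetical helper and not part of this patch.

// Sketch: configure a full-duplex session that may route to Bluetooth (HFP).
static BOOL EnableBluetoothRouting() {
  AVAudioSession* session = [AVAudioSession sharedInstance];
  NSError* error = nil;
  BOOL success =
      [session setCategory:AVAudioSessionCategoryPlayAndRecord
               withOptions:AVAudioSessionCategoryOptionAllowBluetooth
                     error:&error];
  if (!success) {
    NSLog(@"setCategory failed: %@", error.localizedDescription);
  }
  return success;
}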
107
96 // Specify mode for two-way voice communication (e.g. VoIP). 108 // Specify mode for two-way voice communication (e.g. VoIP).
97 if (session.mode != AVAudioSessionModeVoiceChat) { 109 if (session.mode != AVAudioSessionModeVoiceChat) {
98 error = nil; 110 error = nil;
99 success = [session setMode:AVAudioSessionModeVoiceChat error:&error]; 111 success = [session setMode:AVAudioSessionModeVoiceChat error:&error];
100 RTC_DCHECK(CheckAndLogError(success, error)); 112 RTC_DCHECK(CheckAndLogError(success, error));
101 } 113 }
114
102 // Set the session's preferred (hardware) sample rate. 115 // Set the session's preferred (hardware) sample rate.
103 // It is essential that we use the same sample rate as the stream format 116 // It is essential that we use the same sample rate as the stream format
104 // to ensure that the I/O unit does not have to do sample rate conversion. 117 // to ensure that the I/O unit does not have to do sample rate conversion.
105 error = nil; 118 error = nil;
106 success = 119 success =
107 [session setPreferredSampleRate:kPreferredSampleRate error:&error]; 120 [session setPreferredSampleRate:kPreferredSampleRate error:&error];
108 RTC_DCHECK(CheckAndLogError(success, error)); 121 RTC_DCHECK(CheckAndLogError(success, error));
122
109 // Set the preferred audio I/O buffer duration, in seconds. 123 // Set the preferred audio I/O buffer duration, in seconds.
110 // TODO(henrika): add more comments here. 124 // TODO(henrika): add more comments here.
111 error = nil; 125 error = nil;
112 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration 126 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration
113 error:&error]; 127 error:&error];
114 RTC_DCHECK(CheckAndLogError(success, error)); 128 RTC_DCHECK(CheckAndLogError(success, error));
115 129
116 // TODO(henrika): add observers here...
117
118 // Activate the audio session. Activation can fail if another active audio 130 // Activate the audio session. Activation can fail if another active audio
119 // session (e.g. phone call) has higher priority than ours. 131 // session (e.g. phone call) has higher priority than ours.
120 error = nil; 132 error = nil;
121 success = [session setActive:YES error:&error]; 133 success = [session setActive:YES error:&error];
122 RTC_DCHECK(CheckAndLogError(success, error)); 134 RTC_DCHECK(CheckAndLogError(success, error));
123 RTC_CHECK(session.isInputAvailable) << "No input path is available!"; 135 RTC_CHECK(session.isInputAvailable) << "No input path is available!";
136
124 // Ensure that category and mode are actually activated. 137 // Ensure that category and mode are actually activated.
125 RTC_DCHECK( 138 RTC_DCHECK(
126 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); 139 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]);
127 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); 140 RTC_DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]);
141
128 // Try to set the preferred number of hardware audio channels. These calls 142 // Try to set the preferred number of hardware audio channels. These calls
129 // must be done after setting the audio session’s category and mode and 143 // must be done after setting the audio session’s category and mode and
130 // activating the session. 144 // activating the session.
131 // We try to use mono in both directions to save resources and avoid format 145 // We try to use mono in both directions to save resources and avoid format
132 // conversions in the audio unit. Some devices only support stereo, 146 // conversions in the audio unit. Some devices only support stereo,
133 // e.g. a wired headset on an iPhone 6. 147 // e.g. a wired headset on an iPhone 6.
134 // TODO(henrika): add support for stereo if needed. 148 // TODO(henrika): add support for stereo if needed.
135 error = nil; 149 error = nil;
136 success = 150 success =
137 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels 151 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels
(...skipping 259 matching lines...)
397 // just in case. 411 // just in case.
398 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; 412 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
399 // Inform the audio device buffer (ADB) about the new audio format. 413 // Inform the audio device buffer (ADB) about the new audio format.
400 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); 414 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
401 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); 415 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
402 audio_device_buffer_->SetRecordingSampleRate( 416 audio_device_buffer_->SetRecordingSampleRate(
403 record_parameters_.sample_rate()); 417 record_parameters_.sample_rate());
404 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); 418 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
405 } 419 }
406 420
421 void AudioDeviceIOS::RegisterNotificationObservers() {
422 LOGI() << "RegisterNotificationObservers";
423 // This code block will be called when AVAudioSessionInterruptionNotification
424 // is observed.
425 void (^interrupt_block)(NSNotification*) = ^(NSNotification* notification) {
426 NSNumber* type_number =
427 notification.userInfo[AVAudioSessionInterruptionTypeKey];
428 AVAudioSessionInterruptionType type =
429 (AVAudioSessionInterruptionType)type_number.unsignedIntegerValue;
430 LOG(LS_INFO) << "Audio session interruption:";
431 switch (type) {
432 case AVAudioSessionInterruptionTypeBegan:
433 LOG(LS_INFO) << " Began";
434 // The system has deactivated our audio session.
435 // Stop the active audio unit.
436 LOG_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
437 "Failed to stop the the Voice-Processing I/O unit");
438 break;
439 case AVAudioSessionInterruptionTypeEnded:
440 LOG(LS_INFO) << " Ended";
441 // The interruption has ended. Restart the audio session and start the
442 // initialized audio unit again.
443 NSError* error = nil;
444 BOOL success = NO;
445 AVAudioSession* session = [AVAudioSession sharedInstance];
446 success = [session setActive:YES error:&error];
447 RTC_DCHECK(CheckAndLogError(success, error));
tkchin_webrtc 2015/10/21 02:21:51 why a dcheck? This is something that can genuinely
henrika_webrtc 2015/10/21 08:28:45 Removed DCHECK and instead log any error and only
448 LOG_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
449 "Failed to start the the Voice-Processing I/O unit");
450 break;
451 }
452 };
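Following the review exchange above (replace the DCHECK on reactivation with logging), here is a sketch of how the interruption-ended path could look; HandleInterruptionEnded() is a hypothetical helper and |vpio_unit| stands in for the vpio_unit_ member.

// Sketch: log a failed reactivation instead of DCHECKing, and only restart
// the audio unit if the session could actually be reactivated.
static void HandleInterruptionEnded(AudioUnit vpio_unit) {
  AVAudioSession* session = [AVAudioSession sharedInstance];
  NSError* error = nil;
  if (![session setActive:YES error:&error]) {
    LOG(LS_ERROR) << "Failed to reactivate audio session: " << (long)error.code;
    return;
  }
  LOG_IF_ERROR(AudioOutputUnitStart(vpio_unit),
               "Failed to start the Voice-Processing I/O unit");
}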
453
454 // This code block will be called when AVAudioSessionRouteChangeNotification
455 // is observed.
456 void (^route_change_block)(NSNotification*) =
457 ^(NSNotification* notification) {
458 // Get reason for current route change.
459 NSNumber* reason_number =
460 notification.userInfo[AVAudioSessionRouteChangeReasonKey];
461 AVAudioSessionRouteChangeReason reason =
462 (AVAudioSessionRouteChangeReason)reason_number.unsignedIntegerValue;
463 bool valid_route_change = true;
464 LOG(LS_INFO) << "Route change:";
465 switch (reason) {
466 case AVAudioSessionRouteChangeReasonUnknown:
467 LOG(LS_INFO) << " ReasonUnknown";
468 break;
469 case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
470 LOG(LS_INFO) << " NewDeviceAvailable";
471 break;
472 case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
473 LOG(LS_INFO) << " OldDeviceUnavailable";
474 break;
475 case AVAudioSessionRouteChangeReasonCategoryChange:
476 LOG(LS_INFO) << " CategoryChange";
477 LOG(LS_INFO) << " New category: " << ios::GetAudioSessionCategory();
478 // Don't treat this as a route change since it can be triggered in
479 // combination with session interruptions as well.
480 valid_route_change = false;
481 break;
482 case AVAudioSessionRouteChangeReasonOverride:
483 LOG(LS_INFO) << " Override";
484 break;
485 case AVAudioSessionRouteChangeReasonWakeFromSleep:
486 LOG(LS_INFO) << " WakeFromSleep";
487 break;
488 case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
489 LOG(LS_INFO) << " NoSuitableRouteForCategory";
490 break;
491 case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
492 // Ignore this type of route change since we are focusing
493 // on detecting headset changes.
494 LOG(LS_INFO) << " RouteConfigurationChange";
495 valid_route_change = false;
496 break;
497 }
498
499 if (valid_route_change) {
500 // Log previous route configuration.
501 AVAudioSessionRouteDescription* prev_route =
502 notification.userInfo[AVAudioSessionRouteChangePreviousRouteKey];
503 LOG(LS_INFO) << "Previous route:";
504 LOG(LS_INFO) << ios::StdStringFromNSString(
505 [NSString stringWithFormat:@"%@", prev_route]);
506
507 // Only restart audio for a valid route change and if the
508 // session sample rate has changed.
509 const double session_sample_rate =
510 AVAudioSession.sharedInstance.sampleRate;
511 LOG(LS_INFO) << "session sample rate: " << session_sample_rate;
512 if (playout_parameters_.sample_rate() != session_sample_rate) {
513 if (!RestartAudioUnitWithNewFormat(session_sample_rate)) {
514 LOG(LS_ERROR) << "Audio restart failed";
515 }
516 }
517 }
518 };
519
520 // Get the default notification center of the current process.
521 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
522
523 // Add AVAudioSessionInterruptionNotification observer.
524 id interruption_observer =
525 [center addObserverForName:AVAudioSessionInterruptionNotification
526 object:nil
527 queue:[NSOperationQueue mainQueue]
528 usingBlock:interrupt_block];
529 // Add AVAudioSessionRouteChangeNotification observer.
530 id route_change_observer =
531 [center addObserverForName:AVAudioSessionRouteChangeNotification
532 object:nil
533 queue:[NSOperationQueue mainQueue]
534 usingBlock:route_change_block];
535
536 // Increment refcount on observers using ARC bridge. Instance variable is a
537 // void* instead of an id because header is included in other pure C++
538 // files.
539 audio_interruption_observer_ = (__bridge_retained void*)interruption_observer;
540 route_change_observer_ = (__bridge_retained void*)route_change_observer;
541 }
542
543 void AudioDeviceIOS::UnregisterNotificationObservers() {
544 LOGI() << "UnregisterNotificationObservers";
545 // Transfer ownership of observer back to ARC, which will deallocate the
546 // observer once it exits this scope.
547 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
548 if (audio_interruption_observer_ != nullptr) {
549 id observer = (__bridge_transfer id)audio_interruption_observer_;
550 [center removeObserver:observer];
551 audio_interruption_observer_ = nullptr;
552 }
553 if (route_change_observer_ != nullptr) {
554 id observer = (__bridge_transfer id)route_change_observer_;
555 [center removeObserver:observer];
556 route_change_observer_ = nullptr;
557 }
558 }
559
407 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { 560 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
408 LOGI() << "SetupAudioBuffersForActiveAudioSession"; 561 LOGI() << "SetupAudioBuffersForActiveAudioSession";
562 // Verify the current values once the audio session has been activated.
409 AVAudioSession* session = [AVAudioSession sharedInstance]; 563 AVAudioSession* session = [AVAudioSession sharedInstance];
410 // Verify the current values once the audio session has been activated.
411 LOG(LS_INFO) << " sample rate: " << session.sampleRate; 564 LOG(LS_INFO) << " sample rate: " << session.sampleRate;
412 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration; 565 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration;
413 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels; 566 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels;
414 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels; 567 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels;
415 LOG(LS_INFO) << " output latency: " << session.outputLatency; 568 LOG(LS_INFO) << " output latency: " << session.outputLatency;
416 LOG(LS_INFO) << " input latency: " << session.inputLatency; 569 LOG(LS_INFO) << " input latency: " << session.inputLatency;
570
417 // Log a warning message for the case when we are unable to set the preferred 571 // Log a warning message for the case when we are unable to set the preferred
418 // hardware sample rate but continue and use the non-ideal sample rate after 572 // hardware sample rate but continue and use the non-ideal sample rate after
419 // reinitializing the audio parameters. 573 // reinitializing the audio parameters. Most BT headsets only support 8kHz or
420 if (session.sampleRate != playout_parameters_.sample_rate()) { 574 // 16kHz.
421 LOG(LS_WARNING) 575 if (session.sampleRate != kPreferredSampleRate) {
422 << "Failed to enable an audio session with the preferred sample rate!"; 576 LOG(LS_WARNING) << "Unable to set the preferred sample rate";
423 } 577 }
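Bluetooth HFP routes are the usual reason the session sample rate ends up at 8 or 16 kHz instead of kPreferredSampleRate (see the comments above). A sketch of how such a route can be detected via the current route's input ports; IsBluetoothInputActive() is a hypothetical helper and not part of this patch.

// Sketch: report whether the active input port is a Bluetooth HFP device.
static bool IsBluetoothInputActive() {
  AVAudioSession* session = [AVAudioSession sharedInstance];
  for (AVAudioSessionPortDescription* input in session.currentRoute.inputs) {
    if ([input.portType isEqualToString:AVAudioSessionPortBluetoothHFP]) {
      return true;
    }
  }
  return false;
}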
424 578
425 // At this stage, we also know the exact IO buffer duration and can add 579 // At this stage, we also know the exact IO buffer duration and can add
426 // that info to the existing audio parameters where it is converted into 580 // that info to the existing audio parameters where it is converted into
427 // number of audio frames. 581 // number of audio frames.
428 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz. 582 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
429 // Hence, 128 is the size we expect to see in upcoming render callbacks. 583 // Hence, 128 is the size we expect to see in upcoming render callbacks.
430 playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(), 584 playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(),
431 session.IOBufferDuration); 585 session.IOBufferDuration);
432 RTC_DCHECK(playout_parameters_.is_complete()); 586 RTC_DCHECK(playout_parameters_.is_complete());
(...skipping 92 matching lines...)
525 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); 679 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels);
526 application_format.mSampleRate = playout_parameters_.sample_rate(); 680 application_format.mSampleRate = playout_parameters_.sample_rate();
527 application_format.mFormatID = kAudioFormatLinearPCM; 681 application_format.mFormatID = kAudioFormatLinearPCM;
528 application_format.mFormatFlags = 682 application_format.mFormatFlags =
529 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; 683 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
530 application_format.mBytesPerPacket = kBytesPerSample; 684 application_format.mBytesPerPacket = kBytesPerSample;
531 application_format.mFramesPerPacket = 1; // uncompressed 685 application_format.mFramesPerPacket = 1; // uncompressed
532 application_format.mBytesPerFrame = kBytesPerSample; 686 application_format.mBytesPerFrame = kBytesPerSample;
533 application_format.mChannelsPerFrame = kPreferredNumberOfChannels; 687 application_format.mChannelsPerFrame = kPreferredNumberOfChannels;
534 application_format.mBitsPerChannel = 8 * kBytesPerSample; 688 application_format.mBitsPerChannel = 8 * kBytesPerSample;
689 // Store the new format.
690 application_format_ = application_format;
535 #if !defined(NDEBUG) 691 #if !defined(NDEBUG)
536 LogABSD(application_format); 692 LogABSD(application_format_);
537 #endif 693 #endif
538 694
539 // Set the application format on the output scope of the input element/bus. 695 // Set the application format on the output scope of the input element/bus.
540 LOG_AND_RETURN_IF_ERROR( 696 LOG_AND_RETURN_IF_ERROR(
541 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat, 697 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
542 kAudioUnitScope_Output, input_bus, 698 kAudioUnitScope_Output, input_bus,
543 &application_format, size), 699 &application_format, size),
544 "Failed to set application format on output scope of input element"); 700 "Failed to set application format on output scope of input element");
545 701
546 // Set the application format on the input scope of the output element/bus. 702 // Set the application format on the input scope of the output element/bus.
(...skipping 35 matching lines...)
582 kAudioUnitScope_Global, input_bus, &input_callback, 738 kAudioUnitScope_Global, input_bus, &input_callback,
583 sizeof(input_callback)), 739 sizeof(input_callback)),
584 "Failed to specify the input callback on the input element"); 740 "Failed to specify the input callback on the input element");
585 741
586 // Initialize the Voice-Processing I/O unit instance. 742 // Initialize the Voice-Processing I/O unit instance.
587 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_), 743 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
588 "Failed to initialize the Voice-Processing I/O unit"); 744 "Failed to initialize the Voice-Processing I/O unit");
589 return true; 745 return true;
590 } 746 }
591 747
748 bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) {
749 LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")";
750 // Stop the active audio unit.
751 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
752 "Failed to stop the the Voice-Processing I/O unit");
753
754 // The stream format is about to change, which requires that we first
755 // uninitialize the audio unit to deallocate its resources.
756 LOG_AND_RETURN_IF_ERROR(
757 AudioUnitUninitialize(vpio_unit_),
758 "Failed to uninitialize the the Voice-Processing I/O unit");
759
760 // Allocate new buffers given the new stream format.
761 SetupAudioBuffersForActiveAudioSession();
762
763 // Update the existing application format using the new sample rate.
764 application_format_.mSampleRate = playout_parameters_.sample_rate();
765 UInt32 size = sizeof(application_format_);
766 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
767 kAudioUnitScope_Output, 1, &application_format_, size);
768 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
769 kAudioUnitScope_Input, 0, &application_format_, size);
770
771 // Prepare the audio unit to render audio again.
772 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
773 "Failed to initialize the Voice-Processing I/O unit");
774
775 // Start rendering audio using the new format.
776 LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
777 "Failed to start the Voice-Processing I/O unit");
778 return true;
779 }
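The two AudioUnitSetProperty calls in RestartAudioUnitWithNewFormat above discard their OSStatus results. A sketch of the same calls with the results checked through the LOG_AND_RETURN_IF_ERROR macro defined earlier in this file; the bus indices (1 for the input element, 0 for the output element) follow the calls above.

// Sketch: stream-format updates with error checking (caller returns bool).
LOG_AND_RETURN_IF_ERROR(
    AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Output, 1, &application_format_, size),
    "Failed to set stream format on the output scope of the input element");
LOG_AND_RETURN_IF_ERROR(
    AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                         kAudioUnitScope_Input, 0, &application_format_, size),
    "Failed to set stream format on the input scope of the output element");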
780
592 bool AudioDeviceIOS::InitPlayOrRecord() { 781 bool AudioDeviceIOS::InitPlayOrRecord() {
593 LOGI() << "InitPlayOrRecord"; 782 LOGI() << "InitPlayOrRecord";
594 AVAudioSession* session = [AVAudioSession sharedInstance]; 783 AVAudioSession* session = [AVAudioSession sharedInstance];
595 // Activate the audio session and ask for a set of preferred audio parameters. 784 // Activate the audio session and ask for a set of preferred audio parameters.
596 ActivateAudioSession(session, true); 785 ActivateAudioSession(session, true);
597 786
787 // Start observing audio session interruptions and route changes.
788 RegisterNotificationObservers();
789
598 // Ensure that we got what we asked for in our active audio session. 790 // Ensure that we got what we asked for in our active audio session.
599 SetupAudioBuffersForActiveAudioSession(); 791 SetupAudioBuffersForActiveAudioSession();
600 792
601 // Create, setup and initialize a new Voice-Processing I/O unit. 793 // Create, setup and initialize a new Voice-Processing I/O unit.
602 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { 794 if (!SetupAndInitializeVoiceProcessingAudioUnit()) {
603 return false; 795 return false;
604 } 796 }
605
606 // Listen to audio interruptions.
607 // TODO(henrika): learn this area better.
608 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
609 id observer = [center
610 addObserverForName:AVAudioSessionInterruptionNotification
611 object:nil
612 queue:[NSOperationQueue mainQueue]
613 usingBlock:^(NSNotification* notification) {
614 NSNumber* typeNumber =
615 [notification userInfo][AVAudioSessionInterruptionTypeKey];
616 AVAudioSessionInterruptionType type =
617 (AVAudioSessionInterruptionType)[typeNumber
618 unsignedIntegerValue];
619 switch (type) {
620 case AVAudioSessionInterruptionTypeBegan:
621 // At this point our audio session has been deactivated and
622 // the audio unit render callbacks no longer occur.
623 // Nothing to do.
624 break;
625 case AVAudioSessionInterruptionTypeEnded: {
626 NSError* error = nil;
627 AVAudioSession* session = [AVAudioSession sharedInstance];
628 [session setActive:YES error:&error];
629 if (error != nil) {
630 LOG_F(LS_ERROR) << "Failed to active audio session";
631 }
632 // Post interruption the audio unit render callbacks don't
633 // automatically continue, so we restart the unit manually
634 // here.
635 AudioOutputUnitStop(vpio_unit_);
636 AudioOutputUnitStart(vpio_unit_);
637 break;
638 }
639 }
640 }];
641 // Increment refcount on observer using ARC bridge. Instance variable is a
642 // void* instead of an id because header is included in other pure C++
643 // files.
644 audio_interruption_observer_ = (__bridge_retained void*)observer;
645 return true; 797 return true;
646 } 798 }
647 799
648 bool AudioDeviceIOS::ShutdownPlayOrRecord() { 800 bool AudioDeviceIOS::ShutdownPlayOrRecord() {
649 LOGI() << "ShutdownPlayOrRecord"; 801 LOGI() << "ShutdownPlayOrRecord";
650 if (audio_interruption_observer_ != nullptr) { 802 // Remove audio session notification observers.
651 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; 803 UnregisterNotificationObservers();
652 // Transfer ownership of observer back to ARC, which will dealloc the 804
653 // observer once it exits this scope.
654 id observer = (__bridge_transfer id)audio_interruption_observer_;
655 [center removeObserver:observer];
656 audio_interruption_observer_ = nullptr;
657 }
658 // Close and delete the voice-processing I/O unit. 805 // Close and delete the voice-processing I/O unit.
659 OSStatus result = -1; 806 OSStatus result = -1;
660 if (nullptr != vpio_unit_) { 807 if (nullptr != vpio_unit_) {
661 result = AudioOutputUnitStop(vpio_unit_); 808 result = AudioOutputUnitStop(vpio_unit_);
662 if (result != noErr) { 809 if (result != noErr) {
663 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; 810 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
664 } 811 }
812 result = AudioUnitUninitialize(vpio_unit_);
813 if (result != noErr) {
814 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result;
815 }
665 result = AudioComponentInstanceDispose(vpio_unit_); 816 result = AudioComponentInstanceDispose(vpio_unit_);
666 if (result != noErr) { 817 if (result != noErr) {
667 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; 818 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
668 } 819 }
669 vpio_unit_ = nullptr; 820 vpio_unit_ = nullptr;
670 } 821 }
822
671 // All I/O should be stopped or paused prior to deactivating the audio 823 // All I/O should be stopped or paused prior to deactivating the audio
672 // session, hence we deactivate as last action. 824 // session, hence we deactivate as last action.
673 AVAudioSession* session = [AVAudioSession sharedInstance]; 825 AVAudioSession* session = [AVAudioSession sharedInstance];
674 ActivateAudioSession(session, false); 826 ActivateAudioSession(session, false);
675 return true; 827 return true;
676 } 828 }
677 829
678 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( 830 OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
679 void* in_ref_con, 831 void* in_ref_con,
680 AudioUnitRenderActionFlags* io_action_flags, 832 AudioUnitRenderActionFlags* io_action_flags,
681 const AudioTimeStamp* in_time_stamp, 833 const AudioTimeStamp* in_time_stamp,
682 UInt32 in_bus_number, 834 UInt32 in_bus_number,
683 UInt32 in_number_frames, 835 UInt32 in_number_frames,
684 AudioBufferList* io_data) { 836 AudioBufferList* io_data) {
685 RTC_DCHECK_EQ(1u, in_bus_number); 837 RTC_DCHECK_EQ(1u, in_bus_number);
686 RTC_DCHECK( 838 RTC_DCHECK(
687 !io_data); // no buffer should be allocated for input at this stage 839 !io_data); // no buffer should be allocated for input at this stage
688 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(in_ref_con); 840 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(in_ref_con);
689 return audio_device_ios->OnRecordedDataIsAvailable( 841 return audio_device_ios->OnRecordedDataIsAvailable(
690 io_action_flags, in_time_stamp, in_bus_number, in_number_frames); 842 io_action_flags, in_time_stamp, in_bus_number, in_number_frames);
691 } 843 }
692 844
693 OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable( 845 OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable(
694 AudioUnitRenderActionFlags* io_action_flags, 846 AudioUnitRenderActionFlags* io_action_flags,
695 const AudioTimeStamp* in_time_stamp, 847 const AudioTimeStamp* in_time_stamp,
696 UInt32 in_bus_number, 848 UInt32 in_bus_number,
697 UInt32 in_number_frames) { 849 UInt32 in_number_frames) {
698 RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames);
699 OSStatus result = noErr; 850 OSStatus result = noErr;
700 // Simply return if recording is not enabled. 851 // Simply return if recording is not enabled.
701 if (!rtc::AtomicOps::AcquireLoad(&recording_)) 852 if (!rtc::AtomicOps::AcquireLoad(&recording_))
702 return result; 853 return result;
703 RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames); 854 if (in_number_frames != record_parameters_.frames_per_buffer()) {
855 // We have seen short bursts (1-2 frames) where |in_number_frames| changes.
856 // Add a log to keep track of longer sequences if that should ever happen.
857 LOG(LS_WARNING) << "in_number_frames (" << in_number_frames
858 << ") != " << record_parameters_.frames_per_buffer();
859 }
704 // Obtain the recorded audio samples by initiating a rendering cycle. 860 // Obtain the recorded audio samples by initiating a rendering cycle.
705 // Since it happens on the input bus, the |io_data| parameter is a reference 861 // Since it happens on the input bus, the |io_data| parameter is a reference
706 // to the preallocated audio buffer list that the audio unit renders into. 862 // to the preallocated audio buffer list that the audio unit renders into.
707 // TODO(henrika): should error handling be improved? 863 // TODO(henrika): should error handling be improved?
708 AudioBufferList* io_data = &audio_record_buffer_list_; 864 AudioBufferList* io_data = &audio_record_buffer_list_;
709 result = AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp, 865 result = AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp,
710 in_bus_number, in_number_frames, io_data); 866 in_bus_number, in_number_frames, io_data);
711 if (result != noErr) { 867 if (result != noErr) {
712 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; 868 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
713 return result; 869 return result;
(...skipping 46 matching lines...)
760 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches 916 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
761 // the native I/O audio unit) to a preallocated intermediate buffer and 917 // the native I/O audio unit) to a preallocated intermediate buffer and
762 // copy the result to the audio buffer in the |io_data| destination. 918 // copy the result to the audio buffer in the |io_data| destination.
763 SInt8* source = playout_audio_buffer_.get(); 919 SInt8* source = playout_audio_buffer_.get();
764 fine_audio_buffer_->GetPlayoutData(source); 920 fine_audio_buffer_->GetPlayoutData(source);
765 memcpy(destination, source, dataSizeInBytes); 921 memcpy(destination, source, dataSizeInBytes);
766 return noErr; 922 return noErr;
767 } 923 }
768 924
769 } // namespace webrtc 925 } // namespace webrtc