Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_device/ios/audio_device_ios.mm

Issue 1796983004: Use RTCAudioSessionDelegate in AudioDeviceIOS. (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Some nits. Created 4 years, 9 months ago
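At a glance, the patch replaces the NSNotificationCenter observer blocks below with an RTCAudioSessionDelegate-based adapter that forwards audio session events to the C++ side. The following is an illustrative sketch only, not part of the patch: the real observer interface lives in webrtc/modules/audio_device/ios/audio_session_observer.h (not shown here), so its exact shape is inferred from the handlers added in this file.

// Sketch only (assumed interface), inferred from the callbacks implemented
// below: OnInterruptionBegin, OnInterruptionEnd, OnValidRouteChange.
class AudioSessionObserver {
 public:
  virtual void OnInterruptionBegin() = 0;
  virtual void OnInterruptionEnd() = 0;
  virtual void OnValidRouteChange() = 0;

 protected:
  virtual ~AudioSessionObserver() {}
};

// The Objective-C adapter bridges RTCAudioSessionDelegate callbacks into the
// C++ observer; the patch wires it up like this:
//   audio_session_observer_ =
//       [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];  // in the constructor
//   [session pushDelegate:audio_session_observer_];     // when audio starts
//   [session removeDelegate:audio_session_observer_];   // when audio shuts down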
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

 #if !defined(__has_feature) || !__has_feature(objc_arc)
 #error "This file requires ARC support."
 #endif

 #import <AVFoundation/AVFoundation.h>
 #import <Foundation/Foundation.h>

 #include "webrtc/modules/audio_device/ios/audio_device_ios.h"

 #include "webrtc/base/atomicops.h"
+#include "webrtc/base/bind.h"
 #include "webrtc/base/checks.h"
 #include "webrtc/base/criticalsection.h"
 #include "webrtc/base/logging.h"
+#include "webrtc/base/thread.h"
 #include "webrtc/base/thread_annotations.h"
 #include "webrtc/modules/audio_device/fine_audio_buffer.h"
 #include "webrtc/modules/utility/include/helpers_ios.h"

 #import "webrtc/base/objc/RTCLogging.h"
 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"
+#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession+Private.h"
 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"
+#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.h"

 namespace webrtc {

 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"

 #define LOG_AND_RETURN_IF_ERROR(error, message) \
   do {                                          \
     OSStatus err = error;                       \
     if (err) {                                  \
       LOG(LS_ERROR) << message << ": " << err;  \
(...skipping 58 matching lines...)
     LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
     LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
 #if defined(__IPHONE_9_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
     LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
 #endif
   }
 }
 #endif  // !defined(NDEBUG)

 AudioDeviceIOS::AudioDeviceIOS()
-    : audio_device_buffer_(nullptr),
-      vpio_unit_(nullptr),
-      recording_(0),
-      playing_(0),
-      initialized_(false),
-      rec_is_initialized_(false),
-      play_is_initialized_(false),
-      audio_interruption_observer_(nullptr),
-      route_change_observer_(nullptr) {
+    : async_invoker_(new rtc::AsyncInvoker()),
+      audio_device_buffer_(nullptr),
+      vpio_unit_(nullptr),
+      recording_(0),
+      playing_(0),
+      initialized_(false),
+      rec_is_initialized_(false),
+      play_is_initialized_(false),
+      is_interrupted_(false) {
   LOGI() << "ctor" << ios::GetCurrentThreadDescription();
+  thread_ = rtc::Thread::Current();
+  audio_session_observer_ =
+      [[RTCAudioSessionDelegateAdapter alloc] initWithObserver:this];
 }

 AudioDeviceIOS::~AudioDeviceIOS() {
   LOGI() << "~dtor" << ios::GetCurrentThreadDescription();
+  audio_session_observer_ = nil;
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   Terminate();
 }

 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
   LOGI() << "AttachAudioBuffer";
   RTC_DCHECK(audioBuffer);
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   audio_device_buffer_ = audioBuffer;
 }
(...skipping 192 matching lines...)
 }

 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
   LOGI() << "GetRecordAudioParameters";
   RTC_DCHECK(record_parameters_.is_valid());
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   *params = record_parameters_;
   return 0;
 }

+void AudioDeviceIOS::OnInterruptionBegin() {
+  RTC_DCHECK(async_invoker_);
+  RTC_DCHECK(thread_);
+  if (thread_->IsCurrent()) {
+    HandleInterruptionBegin();
+    return;
+  }
+  async_invoker_->AsyncInvoke<void>(
+      thread_,
+      rtc::Bind(&webrtc::AudioDeviceIOS::HandleInterruptionBegin, this));
+}
+
+void AudioDeviceIOS::OnInterruptionEnd() {
+  RTC_DCHECK(async_invoker_);
+  RTC_DCHECK(thread_);
+  if (thread_->IsCurrent()) {
+    HandleInterruptionEnd();
+    return;
+  }
+  async_invoker_->AsyncInvoke<void>(
+      thread_,
+      rtc::Bind(&webrtc::AudioDeviceIOS::HandleInterruptionEnd, this));
+}
+
+void AudioDeviceIOS::OnValidRouteChange() {
+  RTC_DCHECK(async_invoker_);
+  RTC_DCHECK(thread_);
+  if (thread_->IsCurrent()) {
+    HandleValidRouteChange();
+    return;
+  }
+  async_invoker_->AsyncInvoke<void>(
+      thread_,
+      rtc::Bind(&webrtc::AudioDeviceIOS::HandleValidRouteChange, this));
+}
+
+void AudioDeviceIOS::HandleInterruptionBegin() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTCLog(@"Stopping the audio unit due to interruption begin.");
+  LOG_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
+               "Failed to stop the Voice-Processing I/O unit");
+  is_interrupted_ = true;
+}
+
+void AudioDeviceIOS::HandleInterruptionEnd() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+  RTCLog(@"Starting the audio unit due to interruption end.");
+  LOG_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
+               "Failed to start the Voice-Processing I/O unit");
+  is_interrupted_ = false;
+}
+
+void AudioDeviceIOS::HandleValidRouteChange() {
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Don't do anything if we're interrupted.
+  if (is_interrupted_) {
+    return;
+  }
+
+  // Only restart audio for a valid route change if the session sample rate
+  // has changed.
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  const double current_sample_rate = playout_parameters_.sample_rate();
+  const double session_sample_rate = session.sampleRate;
+  if (current_sample_rate != session_sample_rate) {
+    RTCLog(@"Route change caused sample rate to change from %f to %f. "
+            "Restarting audio unit.", current_sample_rate, session_sample_rate);
+    if (!RestartAudioUnitWithNewFormat(session_sample_rate)) {
+      RTCLogError(@"Audio restart failed.");
+    }
+  }
+}
+
 void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
   LOGI() << "UpdateAudioDevicebuffer";
   // AttachAudioBuffer() is called at construction by the main class but check
   // just in case.
   RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
   // Inform the audio device buffer (ADB) about the new audio format.
   audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
   audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
   audio_device_buffer_->SetRecordingSampleRate(
       record_parameters_.sample_rate());
   audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
 }

-void AudioDeviceIOS::RegisterNotificationObservers() {
-  LOGI() << "RegisterNotificationObservers";
-  // This code block will be called when AVAudioSessionInterruptionNotification
-  // is observed.
-  void (^interrupt_block)(NSNotification*) = ^(NSNotification* notification) {
-    NSNumber* type_number =
-        notification.userInfo[AVAudioSessionInterruptionTypeKey];
-    AVAudioSessionInterruptionType type =
-        (AVAudioSessionInterruptionType)type_number.unsignedIntegerValue;
-    LOG(LS_INFO) << "Audio session interruption:";
-    switch (type) {
-      case AVAudioSessionInterruptionTypeBegan:
-        // The system has deactivated our audio session.
-        // Stop the active audio unit.
-        LOG(LS_INFO) << " Began => stopping the audio unit";
-        LOG_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
-                     "Failed to stop the the Voice-Processing I/O unit");
-        break;
-      case AVAudioSessionInterruptionTypeEnded:
-        // The interruption has ended. Restart the audio session and start the
-        // initialized audio unit again.
-        LOG(LS_INFO) << " Ended => restarting audio session and audio unit";
-        NSError* error = nil;
-        BOOL success = NO;
-        AVAudioSession* session = [AVAudioSession sharedInstance];
-        success = [session setActive:YES error:&error];
-        if (CheckAndLogError(success, error)) {
-          LOG_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
-                       "Failed to start the the Voice-Processing I/O unit");
-        }
-        break;
-    }
-  };
-
-  // This code block will be called when AVAudioSessionRouteChangeNotification
-  // is observed.
-  void (^route_change_block)(NSNotification*) =
-      ^(NSNotification* notification) {
-        // Get reason for current route change.
-        NSNumber* reason_number =
-            notification.userInfo[AVAudioSessionRouteChangeReasonKey];
-        AVAudioSessionRouteChangeReason reason =
-            (AVAudioSessionRouteChangeReason)reason_number.unsignedIntegerValue;
-        bool valid_route_change = true;
-        LOG(LS_INFO) << "Route change:";
-        switch (reason) {
-          case AVAudioSessionRouteChangeReasonUnknown:
-            LOG(LS_INFO) << " ReasonUnknown";
-            break;
-          case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
-            LOG(LS_INFO) << " NewDeviceAvailable";
-            break;
-          case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
-            LOG(LS_INFO) << " OldDeviceUnavailable";
-            break;
-          case AVAudioSessionRouteChangeReasonCategoryChange:
-            // It turns out that we see this notification (at least in iOS 9.2)
-            // when making a switch from a BT device to e.g. Speaker using the
-            // iOS Control Center and that we therefore must check if the sample
-            // rate has changed. And if so is the case, restart the audio unit.
-            LOG(LS_INFO) << " CategoryChange";
-            LOG(LS_INFO) << " New category: " << ios::GetAudioSessionCategory();
-            break;
-          case AVAudioSessionRouteChangeReasonOverride:
-            LOG(LS_INFO) << " Override";
-            break;
-          case AVAudioSessionRouteChangeReasonWakeFromSleep:
-            LOG(LS_INFO) << " WakeFromSleep";
-            break;
-          case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
-            LOG(LS_INFO) << " NoSuitableRouteForCategory";
-            break;
-          case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
-            // The set of input and output ports has not changed, but their
-            // configuration has, e.g., a port’s selected data source has
-            // changed. Ignore this type of route change since we are focusing
-            // on detecting headset changes.
-            LOG(LS_INFO) << " RouteConfigurationChange (ignored)";
-            valid_route_change = false;
-            break;
-        }
-
-        if (valid_route_change) {
-          // Log previous route configuration.
-          AVAudioSessionRouteDescription* prev_route =
-              notification.userInfo[AVAudioSessionRouteChangePreviousRouteKey];
-          LOG(LS_INFO) << "Previous route:";
-          LOG(LS_INFO) << ios::StdStringFromNSString(
-              [NSString stringWithFormat:@"%@", prev_route]);
-
-          // Only restart audio for a valid route change and if the
-          // session sample rate has changed.
-          RTCAudioSession* session = [RTCAudioSession sharedInstance];
-          const double session_sample_rate = session.sampleRate;
-          LOG(LS_INFO) << "session sample rate: " << session_sample_rate;
-          if (playout_parameters_.sample_rate() != session_sample_rate) {
-            if (!RestartAudioUnitWithNewFormat(session_sample_rate)) {
-              LOG(LS_ERROR) << "Audio restart failed";
-            }
-          }
-        }
-      };
-
-  // Get the default notification center of the current process.
-  NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
-
-  // Add AVAudioSessionInterruptionNotification observer.
-  id interruption_observer =
-      [center addObserverForName:AVAudioSessionInterruptionNotification
-                          object:nil
-                           queue:[NSOperationQueue mainQueue]
-                      usingBlock:interrupt_block];
-  // Add AVAudioSessionRouteChangeNotification observer.
-  id route_change_observer =
-      [center addObserverForName:AVAudioSessionRouteChangeNotification
-                          object:nil
-                           queue:[NSOperationQueue mainQueue]
-                      usingBlock:route_change_block];
-
-  // Increment refcount on observers using ARC bridge. Instance variable is a
-  // void* instead of an id because header is included in other pure C++
-  // files.
-  audio_interruption_observer_ = (__bridge_retained void*)interruption_observer;
-  route_change_observer_ = (__bridge_retained void*)route_change_observer;
-}
-
-void AudioDeviceIOS::UnregisterNotificationObservers() {
-  LOGI() << "UnregisterNotificationObservers";
-  // Transfer ownership of observer back to ARC, which will deallocate the
-  // observer once it exits this scope.
-  NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
-  if (audio_interruption_observer_ != nullptr) {
-    id observer = (__bridge_transfer id)audio_interruption_observer_;
-    [center removeObserver:observer];
-    audio_interruption_observer_ = nullptr;
-  }
-  if (route_change_observer_ != nullptr) {
-    id observer = (__bridge_transfer id)route_change_observer_;
-    [center removeObserver:observer];
-    route_change_observer_ = nullptr;
-  }
-}
-
 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
   LOGI() << "SetupAudioBuffersForActiveAudioSession";
   // Verify the current values once the audio session has been activated.
   RTCAudioSession* session = [RTCAudioSession sharedInstance];
-  LOG(LS_INFO) << " sample rate: " << session.sampleRate;
-  LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration;
+  double sample_rate = session.sampleRate;
+  NSTimeInterval io_buffer_duration = session.IOBufferDuration;
+  LOG(LS_INFO) << " sample rate: " << sample_rate;
+  LOG(LS_INFO) << " IO buffer duration: " << io_buffer_duration;
   LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels;
   LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels;
   LOG(LS_INFO) << " output latency: " << session.outputLatency;
   LOG(LS_INFO) << " input latency: " << session.inputLatency;

   // Log a warning message for the case when we are unable to set the preferred
   // hardware sample rate but continue and use the non-ideal sample rate after
   // reinitializing the audio parameters. Most BT headsets only support 8kHz or
   // 16kHz.
   RTCAudioSessionConfiguration* webRTCConfig =
       [RTCAudioSessionConfiguration webRTCConfiguration];
-  if (session.sampleRate != webRTCConfig.sampleRate) {
+  if (sample_rate != webRTCConfig.sampleRate) {
     LOG(LS_WARNING) << "Unable to set the preferred sample rate";
   }

   // At this stage, we also know the exact IO buffer duration and can add
   // that info to the existing audio parameters where it is converted into
   // number of audio frames.
   // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
   // Hence, 128 is the size we expect to see in upcoming render callbacks.
-  playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(),
-                            session.IOBufferDuration);
+  playout_parameters_.reset(sample_rate, playout_parameters_.channels(),
+                            io_buffer_duration);
   RTC_DCHECK(playout_parameters_.is_complete());
-  record_parameters_.reset(session.sampleRate, record_parameters_.channels(),
-                           session.IOBufferDuration);
+  record_parameters_.reset(sample_rate, record_parameters_.channels(),
+                           io_buffer_duration);
   RTC_DCHECK(record_parameters_.is_complete());
   LOG(LS_INFO) << " frames per I/O buffer: "
                << playout_parameters_.frames_per_buffer();
   LOG(LS_INFO) << " bytes per I/O buffer: "
                << playout_parameters_.GetBytesPerBuffer();
   RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(),
                 record_parameters_.GetBytesPerBuffer());

   // Update the ADB parameters since the sample rate might have changed.
   UpdateAudioDeviceBuffer();
(...skipping 245 matching lines...)
   [session lockForConfiguration];
   NSError* error = nil;
   if (![session configureWebRTCSession:&error]) {
     RTCLogError(@"Failed to configure WebRTC session: %@",
                 error.localizedDescription);
     [session unlockForConfiguration];
     return false;
   }

   // Start observing audio session interruptions and route changes.
-  RegisterNotificationObservers();
+  [session pushDelegate:audio_session_observer_];

   // Ensure that we got what we asked for in our active audio session.
   SetupAudioBuffersForActiveAudioSession();

   // Create, setup and initialize a new Voice-Processing I/O unit.
   if (!SetupAndInitializeVoiceProcessingAudioUnit()) {
     [session setActive:NO error:nil];
     [session unlockForConfiguration];
     return false;
   }
(...skipping 11 matching lines...)
       LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
     }
     result = AudioUnitUninitialize(vpio_unit_);
     if (result != noErr) {
       LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result;
     }
     DisposeAudioUnit();
   }

   // Remove audio session notification observers.
-  UnregisterNotificationObservers();
+  RTCAudioSession* session = [RTCAudioSession sharedInstance];
+  [session removeDelegate:audio_session_observer_];

   // All I/O should be stopped or paused prior to deactivating the audio
   // session, hence we deactivate as last action.
-  RTCAudioSession* session = [RTCAudioSession sharedInstance];
   [session lockForConfiguration];
   [session setActive:NO error:nil];
   [session unlockForConfiguration];
 }

 void AudioDeviceIOS::DisposeAudioUnit() {
   if (nullptr == vpio_unit_)
     return;
   OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
   if (result != noErr) {
(...skipping 94 matching lines...)
   // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
   // the native I/O audio unit) to a preallocated intermediate buffer and
   // copy the result to the audio buffer in the |io_data| destination.
   SInt8* source = playout_audio_buffer_.get();
   fine_audio_buffer_->GetPlayoutData(source);
   memcpy(destination, source, dataSizeInBytes);
   return noErr;
 }

 }  // namespace webrtc
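A note on threading, since it is the main subtlety in the new code: RTCAudioSession delegate callbacks may arrive on an arbitrary thread, so the new OnInterruptionBegin(), OnInterruptionEnd(), and OnValidRouteChange() entry points re-post their work to the thread that created AudioDeviceIOS using rtc::AsyncInvoke<void> and rtc::Bind, exactly as shown in the diff. Below is a minimal, self-contained sketch of that pattern. It is illustrative only, not part of the patch, and it assumes rtc::AsyncInvoker is declared in webrtc/base/asyncinvoker.h (this file picks it up indirectly via its header).

#include "webrtc/base/asyncinvoker.h"
#include "webrtc/base/bind.h"
#include "webrtc/base/thread.h"

// Illustrative only: marshal a callback that can fire on any thread back
// onto the thread that owns the object, mirroring what
// AudioDeviceIOS::OnInterruptionBegin() does in the patch above.
class ThreadBoundHandler {
 public:
  // Remember the thread the object was created on.
  ThreadBoundHandler() : thread_(rtc::Thread::Current()) {}

  // Safe to call from any thread.
  void OnEvent() {
    if (thread_->IsCurrent()) {
      HandleEvent();
      return;
    }
    // Post the call to the owning thread via |invoker_|.
    invoker_.AsyncInvoke<void>(
        thread_, rtc::Bind(&ThreadBoundHandler::HandleEvent, this));
  }

 private:
  void HandleEvent() {
    // Runs on |thread_|, so members can be touched without extra locking.
  }

  rtc::Thread* const thread_;
  rtc::AsyncInvoker invoker_;
};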
