Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_device/ios/audio_device_ios.mm

Issue 1796983004: Use RTCAudioSessionDelegate in AudioDeviceIOS. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 4 years, 9 months ago
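At a high level, this CL removes the NSNotificationCenter-based interruption and route-change observers from AudioDeviceIOS and instead registers an RTCAudioSessionDelegateAdapter with RTCAudioSession; the adapter forwards session events back into the C++ class via OnInterruptionBegin(), OnInterruptionEnd() and OnValidRouteChange(). The snippet below is only a rough sketch of that adapter pattern, written against plain AVFoundation notifications rather than the real RTCAudioSession/RTCAudioSessionDelegateAdapter API; the class name AudioSessionObserverSketch and its direct call-through are illustrative assumptions, not the code under review.

// Hypothetical sketch only: the actual adapter in this CL is
// RTCAudioSessionDelegateAdapter and it registers with RTCAudioSession,
// not directly with NSNotificationCenter.
#import <AVFoundation/AVFoundation.h>
#include "webrtc/modules/audio_device/ios/audio_device_ios.h"

@interface AudioSessionObserverSketch : NSObject
- (instancetype)initWithAudioDevice:(webrtc::AudioDeviceIOS*)audioDevice;
@end

@implementation AudioSessionObserverSketch {
  webrtc::AudioDeviceIOS* _audioDevice;  // Not owned.
}

- (instancetype)initWithAudioDevice:(webrtc::AudioDeviceIOS*)audioDevice {
  self = [super init];
  if (self) {
    _audioDevice = audioDevice;
    [[NSNotificationCenter defaultCenter]
        addObserver:self
           selector:@selector(handleInterruption:)
               name:AVAudioSessionInterruptionNotification
             object:nil];
  }
  return self;
}

- (void)dealloc {
  [[NSNotificationCenter defaultCenter] removeObserver:self];
}

- (void)handleInterruption:(NSNotification*)notification {
  NSNumber* typeNumber =
      notification.userInfo[AVAudioSessionInterruptionTypeKey];
  // This sketch calls straight through on the notification thread for
  // brevity; the real adapter marshals these calls onto the rtc::Thread
  // captured at construction, which is why the C++ handlers can DCHECK
  // the calling thread.
  if (typeNumber.unsignedIntegerValue == AVAudioSessionInterruptionTypeBegan) {
    _audioDevice->OnInterruptionBegin();
  } else {
    _audioDevice->OnInterruptionEnd();
  }
}

@end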
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #if !defined(__has_feature) || !__has_feature(objc_arc) 11 #if !defined(__has_feature) || !__has_feature(objc_arc)
12 #error "This file requires ARC support." 12 #error "This file requires ARC support."
13 #endif 13 #endif
14 14
15 #import <AVFoundation/AVFoundation.h> 15 #import <AVFoundation/AVFoundation.h>
16 #import <Foundation/Foundation.h> 16 #import <Foundation/Foundation.h>
17 17
18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" 18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h"
19 19
20 #include "webrtc/base/atomicops.h" 20 #include "webrtc/base/atomicops.h"
21 #include "webrtc/base/checks.h" 21 #include "webrtc/base/checks.h"
22 #include "webrtc/base/criticalsection.h" 22 #include "webrtc/base/criticalsection.h"
23 #include "webrtc/base/logging.h" 23 #include "webrtc/base/logging.h"
24 #include "webrtc/base/thread.h"
24 #include "webrtc/base/thread_annotations.h" 25 #include "webrtc/base/thread_annotations.h"
25 #include "webrtc/modules/audio_device/fine_audio_buffer.h" 26 #include "webrtc/modules/audio_device/fine_audio_buffer.h"
26 #include "webrtc/modules/utility/include/helpers_ios.h" 27 #include "webrtc/modules/utility/include/helpers_ios.h"
27 28
28 #import "webrtc/base/objc/RTCLogging.h" 29 #import "webrtc/base/objc/RTCLogging.h"
29 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h" 30 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"
31 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession+Private.h"
30 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h" 32 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"
33 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionDelegateAdapter.h"
31 34
32 namespace webrtc { 35 namespace webrtc {
33 36
34 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" 37 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
35 38
36 #define LOG_AND_RETURN_IF_ERROR(error, message) \ 39 #define LOG_AND_RETURN_IF_ERROR(error, message) \
37 do { \ 40 do { \
38 OSStatus err = error; \ 41 OSStatus err = error; \
39 if (err) { \ 42 if (err) { \
40 LOG(LS_ERROR) << message << ": " << err; \ 43 LOG(LS_ERROR) << message << ": " << err; \
(...skipping 65 matching lines...)
106 #endif // !defined(NDEBUG) 109 #endif // !defined(NDEBUG)
107 110
108 AudioDeviceIOS::AudioDeviceIOS() 111 AudioDeviceIOS::AudioDeviceIOS()
109 : audio_device_buffer_(nullptr), 112 : audio_device_buffer_(nullptr),
110 vpio_unit_(nullptr), 113 vpio_unit_(nullptr),
111 recording_(0), 114 recording_(0),
112 playing_(0), 115 playing_(0),
113 initialized_(false), 116 initialized_(false),
114 rec_is_initialized_(false), 117 rec_is_initialized_(false),
115 play_is_initialized_(false), 118 play_is_initialized_(false),
116 audio_interruption_observer_(nullptr), 119 is_interrupted_(false) {
117 route_change_observer_(nullptr) {
118 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); 120 LOGI() << "ctor" << ios::GetCurrentThreadDescription();
121 rtc::Thread *thread = rtc::Thread::Current();
122 audio_session_observer_ =
henrika_webrtc 2016/03/15 08:53:44 Nice!
tkchin_webrtc 2016/03/15 20:14:58 Acknowledged.
123 [[RTCAudioSessionDelegateAdapter alloc] initWithAudioDevice:this
124 callbackThread:thread];
119 } 125 }
120 126
121 AudioDeviceIOS::~AudioDeviceIOS() { 127 AudioDeviceIOS::~AudioDeviceIOS() {
122 LOGI() << "~dtor" << ios::GetCurrentThreadDescription(); 128 LOGI() << "~dtor" << ios::GetCurrentThreadDescription();
129 audio_session_observer_ = nil;
123 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 130 RTC_DCHECK(thread_checker_.CalledOnValidThread());
124 Terminate(); 131 Terminate();
125 } 132 }
126 133
127 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { 134 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
128 LOGI() << "AttachAudioBuffer"; 135 LOGI() << "AttachAudioBuffer";
129 RTC_DCHECK(audioBuffer); 136 RTC_DCHECK(audioBuffer);
130 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 137 RTC_DCHECK(thread_checker_.CalledOnValidThread());
131 audio_device_buffer_ = audioBuffer; 138 audio_device_buffer_ = audioBuffer;
132 } 139 }
(...skipping 192 matching lines...)
325 } 332 }
326 333
327 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const { 334 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
328 LOGI() << "GetRecordAudioParameters"; 335 LOGI() << "GetRecordAudioParameters";
329 RTC_DCHECK(record_parameters_.is_valid()); 336 RTC_DCHECK(record_parameters_.is_valid());
330 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 337 RTC_DCHECK(thread_checker_.CalledOnValidThread());
331 *params = record_parameters_; 338 *params = record_parameters_;
332 return 0; 339 return 0;
333 } 340 }
334 341
342 void AudioDeviceIOS::OnInterruptionBegin() {
343 RTC_DCHECK(thread_checker_.CalledOnValidThread());
344 LOG(LS_INFO) << " Began => stopping the audio unit";
345 LOG_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
346 "Failed to stop the the Voice-Processing I/O unit");
347 is_interrupted_ = true;
348 }
349
350 void AudioDeviceIOS::OnInterruptionEnd() {
351 RTC_DCHECK(thread_checker_.CalledOnValidThread());
352 LOG_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
Chuck 2016/03/15 14:33:21 Do you want a similar log line to 344 here?
tkchin_webrtc 2016/03/15 20:14:58 Done.
353 "Failed to start the the Voice-Processing I/O unit");
354 is_interrupted_ = false;
355 }
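Regarding the review exchange above (a log line mirroring the one in OnInterruptionBegin, answered with "Done."): the follow-up patch set is not part of this diff, so the version below is only a hedged guess at what the updated method might look like.

void AudioDeviceIOS::OnInterruptionEnd() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  // Hypothetical added log line, mirroring OnInterruptionBegin().
  LOG(LS_INFO) << " Ended => starting the audio unit";
  LOG_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
               "Failed to start the Voice-Processing I/O unit");
  is_interrupted_ = false;
}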
356
357 void AudioDeviceIOS::OnValidRouteChange() {
358 RTC_DCHECK(thread_checker_.CalledOnValidThread());
359
360 // Don't do anything if we're interrupted.
361 if (is_interrupted_) {
362 return;
363 }
364
365 // Only restart audio for a valid route change if the session sample rate
366 // has changed.
367 RTCAudioSession* session = [RTCAudioSession sharedInstance];
368 const double session_sample_rate = session.sampleRate;
369 RTCLog("Session sample rate: %f", session_sample_rate);
henrika_webrtc 2016/03/15 08:53:44 Thanks for adding these logs. Very useful when ext
Chuck 2016/03/15 14:33:21 Is this descriptive enough to know what happened f
tkchin_webrtc 2016/03/15 20:14:58 This was there before already actually :) Made it
370 if (playout_parameters_.sample_rate() != session_sample_rate) {
371 if (!RestartAudioUnitWithNewFormat(session_sample_rate)) {
372 RTCLogError(@"Audio restart failed.");
373 }
374 }
375 }
376
335 void AudioDeviceIOS::UpdateAudioDeviceBuffer() { 377 void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
336 LOGI() << "UpdateAudioDevicebuffer"; 378 LOGI() << "UpdateAudioDevicebuffer";
337 // AttachAudioBuffer() is called at construction by the main class but check 379 // AttachAudioBuffer() is called at construction by the main class but check
338 // just in case. 380 // just in case.
339 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; 381 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
340 // Inform the audio device buffer (ADB) about the new audio format. 382 // Inform the audio device buffer (ADB) about the new audio format.
341 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); 383 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
342 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); 384 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
343 audio_device_buffer_->SetRecordingSampleRate( 385 audio_device_buffer_->SetRecordingSampleRate(
344 record_parameters_.sample_rate()); 386 record_parameters_.sample_rate());
345 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); 387 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
346 } 388 }
347 389
348 void AudioDeviceIOS::RegisterNotificationObservers() {
349 LOGI() << "RegisterNotificationObservers";
350 // This code block will be called when AVAudioSessionInterruptionNotification
351 // is observed.
352 void (^interrupt_block)(NSNotification*) = ^(NSNotification* notification) {
353 NSNumber* type_number =
354 notification.userInfo[AVAudioSessionInterruptionTypeKey];
355 AVAudioSessionInterruptionType type =
356 (AVAudioSessionInterruptionType)type_number.unsignedIntegerValue;
357 LOG(LS_INFO) << "Audio session interruption:";
358 switch (type) {
359 case AVAudioSessionInterruptionTypeBegan:
360 // The system has deactivated our audio session.
361 // Stop the active audio unit.
362 LOG(LS_INFO) << " Began => stopping the audio unit";
363 LOG_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
364 "Failed to stop the the Voice-Processing I/O unit");
365 break;
366 case AVAudioSessionInterruptionTypeEnded:
367 // The interruption has ended. Restart the audio session and start the
368 // initialized audio unit again.
369 LOG(LS_INFO) << " Ended => restarting audio session and audio unit";
370 NSError* error = nil;
371 BOOL success = NO;
372 AVAudioSession* session = [AVAudioSession sharedInstance];
373 success = [session setActive:YES error:&error];
374 if (CheckAndLogError(success, error)) {
375 LOG_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
376 "Failed to start the the Voice-Processing I/O unit");
377 }
378 break;
379 }
380 };
381
382 // This code block will be called when AVAudioSessionRouteChangeNotification
383 // is observed.
384 void (^route_change_block)(NSNotification*) =
385 ^(NSNotification* notification) {
386 // Get reason for current route change.
387 NSNumber* reason_number =
388 notification.userInfo[AVAudioSessionRouteChangeReasonKey];
389 AVAudioSessionRouteChangeReason reason =
390 (AVAudioSessionRouteChangeReason)reason_number.unsignedIntegerValue;
391 bool valid_route_change = true;
392 LOG(LS_INFO) << "Route change:";
393 switch (reason) {
394 case AVAudioSessionRouteChangeReasonUnknown:
395 LOG(LS_INFO) << " ReasonUnknown";
396 break;
397 case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
398 LOG(LS_INFO) << " NewDeviceAvailable";
399 break;
400 case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
401 LOG(LS_INFO) << " OldDeviceUnavailable";
402 break;
403 case AVAudioSessionRouteChangeReasonCategoryChange:
404 // It turns out that we see this notification (at least in iOS 9.2)
405 // when making a switch from a BT device to e.g. Speaker using the
406 // iOS Control Center and that we therefore must check if the sample
407 // rate has changed. If that is the case, restart the audio unit.
408 LOG(LS_INFO) << " CategoryChange";
409 LOG(LS_INFO) << " New category: " << ios::GetAudioSessionCategory();
410 break;
411 case AVAudioSessionRouteChangeReasonOverride:
412 LOG(LS_INFO) << " Override";
413 break;
414 case AVAudioSessionRouteChangeReasonWakeFromSleep:
415 LOG(LS_INFO) << " WakeFromSleep";
416 break;
417 case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
418 LOG(LS_INFO) << " NoSuitableRouteForCategory";
419 break;
420 case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
421 // The set of input and output ports has not changed, but their
422 // configuration has, e.g., a port’s selected data source has
423 // changed. Ignore this type of route change since we are focusing
424 // on detecting headset changes.
425 LOG(LS_INFO) << " RouteConfigurationChange (ignored)";
426 valid_route_change = false;
427 break;
428 }
429
430 if (valid_route_change) {
431 // Log previous route configuration.
432 AVAudioSessionRouteDescription* prev_route =
433 notification.userInfo[AVAudioSessionRouteChangePreviousRouteKey];
434 LOG(LS_INFO) << "Previous route:";
435 LOG(LS_INFO) << ios::StdStringFromNSString(
436 [NSString stringWithFormat:@"%@", prev_route]);
437
438 // Only restart audio for a valid route change and if the
439 // session sample rate has changed.
440 RTCAudioSession* session = [RTCAudioSession sharedInstance];
441 const double session_sample_rate = session.sampleRate;
442 LOG(LS_INFO) << "session sample rate: " << session_sample_rate;
443 if (playout_parameters_.sample_rate() != session_sample_rate) {
444 if (!RestartAudioUnitWithNewFormat(session_sample_rate)) {
445 LOG(LS_ERROR) << "Audio restart failed";
446 }
447 }
448 }
449 };
450
451 // Get the default notification center of the current process.
452 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
453
454 // Add AVAudioSessionInterruptionNotification observer.
455 id interruption_observer =
456 [center addObserverForName:AVAudioSessionInterruptionNotification
457 object:nil
458 queue:[NSOperationQueue mainQueue]
459 usingBlock:interrupt_block];
460 // Add AVAudioSessionRouteChangeNotification observer.
461 id route_change_observer =
462 [center addObserverForName:AVAudioSessionRouteChangeNotification
463 object:nil
464 queue:[NSOperationQueue mainQueue]
465 usingBlock:route_change_block];
466
467 // Increment refcount on observers using ARC bridge. Instance variable is a
468 // void* instead of an id because header is included in other pure C++
469 // files.
470 audio_interruption_observer_ = (__bridge_retained void*)interruption_observer;
471 route_change_observer_ = (__bridge_retained void*)route_change_observer;
472 }
473
474 void AudioDeviceIOS::UnregisterNotificationObservers() {
475 LOGI() << "UnregisterNotificationObservers";
476 // Transfer ownership of observer back to ARC, which will deallocate the
477 // observer once it exits this scope.
478 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
479 if (audio_interruption_observer_ != nullptr) {
480 id observer = (__bridge_transfer id)audio_interruption_observer_;
481 [center removeObserver:observer];
482 audio_interruption_observer_ = nullptr;
483 }
484 if (route_change_observer_ != nullptr) {
485 id observer = (__bridge_transfer id)route_change_observer_;
486 [center removeObserver:observer];
487 route_change_observer_ = nullptr;
488 }
489 }
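The removed observer bookkeeping above uses the standard ARC bridge casts to park an Objective-C object behind a void* member, which keeps the header includable from pure C++ translation units. A minimal, self-contained sketch of that pattern (ObserverHolder is a hypothetical name, not a class in this CL):

#import <Foundation/Foundation.h>

class ObserverHolder {
 public:
  void Store(id observer) {
    // __bridge_retained: ARC hands ownership over; the object now carries an
    // extra retain that we are responsible for balancing later.
    observer_ = (__bridge_retained void*)observer;
  }
  void Release() {
    if (observer_ != nullptr) {
      // __bridge_transfer: ownership moves back to ARC, which releases the
      // object when |observer| goes out of scope.
      id observer = (__bridge_transfer id)observer_;
      (void)observer;
      observer_ = nullptr;
    }
  }

 private:
  void* observer_ = nullptr;  // id stored as void* so the header stays C++.
};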
490
491 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { 390 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
492 LOGI() << "SetupAudioBuffersForActiveAudioSession"; 391 LOGI() << "SetupAudioBuffersForActiveAudioSession";
493 // Verify the current values once the audio session has been activated. 392 // Verify the current values once the audio session has been activated.
494 RTCAudioSession* session = [RTCAudioSession sharedInstance]; 393 RTCAudioSession* session = [RTCAudioSession sharedInstance];
495 LOG(LS_INFO) << " sample rate: " << session.sampleRate; 394 double sample_rate = session.sampleRate;
496 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration; 395 NSTimeInterval io_buffer_duration = session.IOBufferDuration;
396 LOG(LS_INFO) << " sample rate: " << sample_rate;
397 LOG(LS_INFO) << " IO buffer duration: " << io_buffer_duration;
497 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels; 398 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels;
498 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels; 399 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels;
499 LOG(LS_INFO) << " output latency: " << session.outputLatency; 400 LOG(LS_INFO) << " output latency: " << session.outputLatency;
500 LOG(LS_INFO) << " input latency: " << session.inputLatency; 401 LOG(LS_INFO) << " input latency: " << session.inputLatency;
501 402
502 // Log a warning message for the case when we are unable to set the preferred 403 // Log a warning message for the case when we are unable to set the preferred
503 // hardware sample rate but continue and use the non-ideal sample rate after 404 // hardware sample rate but continue and use the non-ideal sample rate after
504 // reinitializing the audio parameters. Most BT headsets only support 8kHz or 405 // reinitializing the audio parameters. Most BT headsets only support 8kHz or
505 // 16kHz. 406 // 16kHz.
506 RTCAudioSessionConfiguration* webRTCConfig = 407 RTCAudioSessionConfiguration* webRTCConfig =
507 [RTCAudioSessionConfiguration webRTCConfiguration]; 408 [RTCAudioSessionConfiguration webRTCConfiguration];
508 if (session.sampleRate != webRTCConfig.sampleRate) { 409 if (sample_rate != webRTCConfig.sampleRate) {
509 LOG(LS_WARNING) << "Unable to set the preferred sample rate"; 410 LOG(LS_WARNING) << "Unable to set the preferred sample rate";
510 } 411 }
511 412
512 // At this stage, we also know the exact IO buffer duration and can add 413 // At this stage, we also know the exact IO buffer duration and can add
513 // that info to the existing audio parameters where it is converted into 414 // that info to the existing audio parameters where it is converted into
514 // number of audio frames. 415 // number of audio frames.
515 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz. 416 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
516 // Hence, 128 is the size we expect to see in upcoming render callbacks. 417 // Hence, 128 is the size we expect to see in upcoming render callbacks.
517 playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(), 418 playout_parameters_.reset(sample_rate, playout_parameters_.channels(),
518 session.IOBufferDuration); 419 io_buffer_duration);
519 RTC_DCHECK(playout_parameters_.is_complete()); 420 RTC_DCHECK(playout_parameters_.is_complete());
520 record_parameters_.reset(session.sampleRate, record_parameters_.channels(), 421 record_parameters_.reset(sample_rate, record_parameters_.channels(),
521 session.IOBufferDuration); 422 io_buffer_duration);
522 RTC_DCHECK(record_parameters_.is_complete()); 423 RTC_DCHECK(record_parameters_.is_complete());
523 LOG(LS_INFO) << " frames per I/O buffer: " 424 LOG(LS_INFO) << " frames per I/O buffer: "
524 << playout_parameters_.frames_per_buffer(); 425 << playout_parameters_.frames_per_buffer();
525 LOG(LS_INFO) << " bytes per I/O buffer: " 426 LOG(LS_INFO) << " bytes per I/O buffer: "
526 << playout_parameters_.GetBytesPerBuffer(); 427 << playout_parameters_.GetBytesPerBuffer();
527 RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), 428 RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(),
528 record_parameters_.GetBytesPerBuffer()); 429 record_parameters_.GetBytesPerBuffer());
529 430
530 // Update the ADB parameters since the sample rate might have changed. 431 // Update the ADB parameters since the sample rate might have changed.
531 UpdateAudioDeviceBuffer(); 432 UpdateAudioDeviceBuffer();
(...skipping 245 matching lines...)
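As a quick sanity check of the conversion described in the comment above, frames per buffer is simply the sample rate multiplied by the I/O buffer duration; a tiny standalone snippet:

#include <cstdio>

int main() {
  const double sample_rate = 16000.0;       // Hz
  const double io_buffer_duration = 0.008;  // seconds
  // 0.008 s of audio at 16 kHz is 128 frames, matching the example above.
  const int frames_per_buffer =
      static_cast<int>(sample_rate * io_buffer_duration + 0.5);
  std::printf("frames per I/O buffer: %d\n", frames_per_buffer);
  return 0;
}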
777 [session lockForConfiguration]; 678 [session lockForConfiguration];
778 NSError* error = nil; 679 NSError* error = nil;
779 if (![session configureWebRTCSession:&error]) { 680 if (![session configureWebRTCSession:&error]) {
780 RTCLogError(@"Failed to configure WebRTC session: %@", 681 RTCLogError(@"Failed to configure WebRTC session: %@",
781 error.localizedDescription); 682 error.localizedDescription);
782 [session unlockForConfiguration]; 683 [session unlockForConfiguration];
783 return false; 684 return false;
784 } 685 }
785 686
786 // Start observing audio session interruptions and route changes. 687 // Start observing audio session interruptions and route changes.
787 RegisterNotificationObservers(); 688 [session pushDelegate:audio_session_observer_];
788 689
789 // Ensure that we got what we asked for in our active audio session. 690
790 SetupAudioBuffersForActiveAudioSession(); 691 SetupAudioBuffersForActiveAudioSession();
791 692
792 // Create, setup and initialize a new Voice-Processing I/O unit. 693 // Create, setup and initialize a new Voice-Processing I/O unit.
793 if (!SetupAndInitializeVoiceProcessingAudioUnit()) { 694 if (!SetupAndInitializeVoiceProcessingAudioUnit()) {
794 [session setActive:NO error:nil]; 695 [session setActive:NO error:nil];
795 [session unlockForConfiguration]; 696 [session unlockForConfiguration];
796 return false; 697 return false;
797 } 698 }
(...skipping 11 matching lines...)
809 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; 710 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
810 } 711 }
811 result = AudioUnitUninitialize(vpio_unit_); 712 result = AudioUnitUninitialize(vpio_unit_);
812 if (result != noErr) { 713 if (result != noErr) {
813 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result; 714 LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result;
814 } 715 }
815 DisposeAudioUnit(); 716 DisposeAudioUnit();
816 } 717 }
817 718
818 // Remove audio session notification observers. 719 // Remove audio session notification observers.
819 UnregisterNotificationObservers(); 720 RTCAudioSession* session = [RTCAudioSession sharedInstance];
721 [session removeDelegate:audio_session_observer_];
820 722
821 // All I/O should be stopped or paused prior to deactivating the audio 723 // All I/O should be stopped or paused prior to deactivating the audio
822 // session, hence we deactivate as last action. 724 // session, hence we deactivate as last action.
823 RTCAudioSession* session = [RTCAudioSession sharedInstance];
824 [session lockForConfiguration]; 725 [session lockForConfiguration];
825 [session setActive:NO error:nil]; 726 [session setActive:NO error:nil];
826 [session unlockForConfiguration]; 727 [session unlockForConfiguration];
827 } 728 }
828 729
829 void AudioDeviceIOS::DisposeAudioUnit() { 730 void AudioDeviceIOS::DisposeAudioUnit() {
830 if (nullptr == vpio_unit_) 731 if (nullptr == vpio_unit_)
831 return; 732 return;
832 OSStatus result = AudioComponentInstanceDispose(vpio_unit_); 733 OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
833 if (result != noErr) { 734 if (result != noErr) {
(...skipping 94 matching lines...)
928 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches 829 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
929 // the native I/O audio unit) to a preallocated intermediate buffer and 830 // the native I/O audio unit) to a preallocated intermediate buffer and
930 // copy the result to the audio buffer in the |io_data| destination. 831 // copy the result to the audio buffer in the |io_data| destination.
931 SInt8* source = playout_audio_buffer_.get(); 832 SInt8* source = playout_audio_buffer_.get();
932 fine_audio_buffer_->GetPlayoutData(source); 833 fine_audio_buffer_->GetPlayoutData(source);
933 memcpy(destination, source, dataSizeInBytes); 834 memcpy(destination, source, dataSizeInBytes);
934 return noErr; 835 return noErr;
935 } 836 }
936 837
937 } // namespace webrtc 838 } // namespace webrtc