| OLD | NEW |
| (Empty) |
| 1 /* | |
| 2 * Copyright 2016 The WebRTC Project Authors. All rights reserved. | |
| 3 * | |
| 4 * Use of this source code is governed by a BSD-style license | |
| 5 * that can be found in the LICENSE file in the root of the source | |
| 6 * tree. An additional intellectual property rights grant can be found | |
| 7 * in the file PATENTS. All contributing project authors may | |
| 8 * be found in the AUTHORS file in the root of the source tree. | |
| 9 */ | |
| 10 | |
| 11 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h" | |
| 12 | |
| 13 #import "WebRTC/RTCDispatcher.h" | |
| 14 #import "WebRTC/UIDevice+RTCDevice.h" | |
| 15 | |
| 16 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h" | |
| 17 | |
// Try to use mono to save resources. Also avoids channel format conversion
// in the I/O audio unit. Initial tests have shown that it is possible to use
// mono natively for built-in microphones and for BT headsets but not for
// wired headsets. Wired headsets only support stereo as native channel format
// but it is a low cost operation to do a format conversion to mono in the
// audio unit. Hence, we will not hit a RTC_CHECK in
// VerifyAudioParametersForActiveAudioSession() for a mismatch between the
// preferred number of channels and the actual number of channels.
const int kRTCAudioSessionPreferredNumberOfChannels = 1;

// Preferred hardware sample rate (unit is in Hertz). The client sample rate
// will be set to this value as well to avoid resampling in the audio unit's
// format converter. Note that, some devices, e.g. BT headsets, only support
// 8000Hz as native sample rate.
const double kRTCAudioSessionHighPerformanceSampleRate = 48000.0;

// A lower sample rate will be used for devices with only one core
// (e.g. iPhone 4). The goal is to reduce the CPU load of the application.
const double kRTCAudioSessionLowComplexitySampleRate = 16000.0;

// Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
// size used by WebRTC. The exact actual size will differ between devices.
// Example: using 48kHz on iPhone 6 results in a native buffer size of
// ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
// take care of any buffering required to convert between native buffers and
// buffers used by WebRTC. It is beneficial for the performance if the native
// size is as close to 10ms as possible since it results in a "clean" callback
// sequence without bursts of callbacks back to back.
const double kRTCAudioSessionHighPerformanceIOBufferDuration = 0.01;

// Use a larger buffer size on devices with only one core (e.g. iPhone 4).
// It will result in a lower CPU consumption at the cost of a larger latency.
// The size of 60ms is based on instrumentation that shows a significant
// reduction in CPU load compared with 10ms on low-end devices.
// TODO(henrika): monitor this size and determine if it should be modified.
const double kRTCAudioSessionLowComplexityIOBufferDuration = 0.06;

// Process-wide WebRTC configuration singleton. Created in +initialize and
// replaceable by clients via +setWebRTCConfiguration:. All access is guarded
// by @synchronized(self) on the class object.
static RTCAudioSessionConfiguration *gWebRTCConfiguration = nil;
| 56 | |
@implementation RTCAudioSessionConfiguration

@synthesize category = _category;
@synthesize categoryOptions = _categoryOptions;
@synthesize mode = _mode;
@synthesize sampleRate = _sampleRate;
@synthesize ioBufferDuration = _ioBufferDuration;
@synthesize inputNumberOfChannels = _inputNumberOfChannels;
@synthesize outputNumberOfChannels = _outputNumberOfChannels;

// Designated initializer. Produces WebRTC's preferred audio session settings,
// scaled to the device's capabilities (sample rate and I/O buffer duration
// are reduced on single-core devices and the iPhone 4S).
- (instancetype)init {
  if (self = [super init]) {
    // Use a category which supports simultaneous recording and playback.
    // By default, using this category implies that our app's audio is
    // nonmixable, hence activating the session will interrupt any other
    // audio sessions which are also nonmixable.
    _category = AVAudioSessionCategoryPlayAndRecord;
    _categoryOptions = AVAudioSessionCategoryOptionAllowBluetooth;

    // Specify mode for two-way voice communication (e.g. VoIP).
    _mode = AVAudioSessionModeVoiceChat;

    // Set the session's sample rate or the hardware sample rate.
    // It is essential that we use the same sample rate as stream format
    // to ensure that the I/O unit does not have to do sample rate conversion.
    // Set the preferred audio I/O buffer duration, in seconds.
    NSUInteger processorCount = [NSProcessInfo processInfo].processorCount;
    // Use best sample rate and buffer duration if the CPU has more than one
    // core.
    if (processorCount > 1 && [UIDevice deviceType] != RTCDeviceTypeIPhone4S) {
      _sampleRate = kRTCAudioSessionHighPerformanceSampleRate;
      _ioBufferDuration = kRTCAudioSessionHighPerformanceIOBufferDuration;
    } else {
      _sampleRate = kRTCAudioSessionLowComplexitySampleRate;
      _ioBufferDuration = kRTCAudioSessionLowComplexityIOBufferDuration;
    }

    // We try to use mono in both directions to save resources and format
    // conversions in the audio unit. Some devices only support stereo;
    // e.g. wired headset on iPhone 6.
    // TODO(henrika): add support for stereo if needed.
    _inputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
    _outputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
  }
  return self;
}

+ (void)initialize {
  // +initialize is invoked once per class, *including* subclasses that do not
  // override it (with |self| bound to the subclass). Without this guard, the
  // first message sent to a subclass would re-run this method and clobber a
  // configuration previously installed via +setWebRTCConfiguration:.
  if (self == [RTCAudioSessionConfiguration class]) {
    gWebRTCConfiguration = [[self alloc] init];
  }
}

// Returns a snapshot of the audio session's current settings, read from the
// shared RTCAudioSession instance at call time.
+ (instancetype)currentConfiguration {
  RTCAudioSession *session = [RTCAudioSession sharedInstance];
  RTCAudioSessionConfiguration *config =
      [[RTCAudioSessionConfiguration alloc] init];
  config.category = session.category;
  config.categoryOptions = session.categoryOptions;
  config.mode = session.mode;
  config.sampleRate = session.sampleRate;
  config.ioBufferDuration = session.IOBufferDuration;
  config.inputNumberOfChannels = session.inputNumberOfChannels;
  config.outputNumberOfChannels = session.outputNumberOfChannels;
  return config;
}

// Returns the process-wide configuration WebRTC should apply. Thread-safe;
// synchronized on the class object, matching +setWebRTCConfiguration:.
+ (instancetype)webRTCConfiguration {
  @synchronized(self) {
    return (RTCAudioSessionConfiguration *)gWebRTCConfiguration;
  }
}

// Replaces the process-wide WebRTC configuration. Thread-safe.
+ (void)setWebRTCConfiguration:(RTCAudioSessionConfiguration *)configuration {
  @synchronized(self) {
    gWebRTCConfiguration = configuration;
  }
}

@end
| OLD | NEW |