OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2016 The WebRTC Project Authors. All rights reserved. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ |
| 10 |
| 11 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h" |
| 12 |
| 13 #import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h" |
| 14 |
// Try to use mono to save resources. Mono also avoids a channel-format
// conversion in the I/O audio unit. Initial tests have shown that mono can be
// used natively for built-in microphones and for BT headsets, but not for
// wired headsets. Wired headsets only support stereo as their native channel
// format, but converting to mono in the audio unit is a low-cost operation.
// Hence, we will not hit a RTC_CHECK in
// VerifyAudioParametersForActiveAudioSession() for a mismatch between the
// preferred number of channels and the actual number of channels.
const int kRTCAudioSessionPreferredNumberOfChannels = 1;

// Preferred hardware sample rate (unit is in Hertz). The client sample rate
// will be set to the same value to avoid resampling in the audio unit's
// format converter. Note that some devices, e.g. BT headsets, only support
// 8000Hz as their native sample rate.
const double kRTCAudioSessionHighPerformanceSampleRate = 48000.0;

// A lower sample rate will be used for devices with only one core
// (e.g. iPhone 4). The goal is to reduce the CPU load of the application.
const double kRTCAudioSessionLowComplexitySampleRate = 16000.0;

// Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
// size used by WebRTC. The exact actual size will differ between devices.
// Example: using 48kHz on iPhone 6 results in a native buffer size of
// ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
// take care of any buffering required to convert between native buffers and
// buffers used by WebRTC. It is beneficial for the performance if the native
// size is as close to 10ms as possible since it results in a "clean" callback
// sequence without bursts of callbacks back to back.
const double kRTCAudioSessionHighPerformanceIOBufferDuration = 0.01;

// Use a larger buffer size on devices with only one core (e.g. iPhone 4).
// It will result in a lower CPU consumption at the cost of a larger latency.
// The size of 60ms is based on instrumentation that shows a significant
// reduction in CPU load compared with 10ms on low-end devices.
// TODO(henrika): monitor this size and determine if it should be modified.
const double kRTCAudioSessionLowComplexityIOBufferDuration = 0.06;
| 51 |
@implementation RTCAudioSessionConfiguration

@synthesize category = _category;
@synthesize categoryOptions = _categoryOptions;
@synthesize mode = _mode;
@synthesize sampleRate = _sampleRate;
@synthesize ioBufferDuration = _ioBufferDuration;
@synthesize inputNumberOfChannels = _inputNumberOfChannels;
@synthesize outputNumberOfChannels = _outputNumberOfChannels;

// Designated initializer. Populates the configuration with the values WebRTC
// prefers for two-way, low-latency voice communication. The sample rate and
// I/O buffer duration are chosen based on the number of CPU cores so that
// low-end single-core devices (e.g. iPhone 4) trade latency for CPU load.
- (instancetype)init {
  if (self = [super init]) {
    // Use a category which supports simultaneous recording and playback.
    // By default, using this category implies that our app's audio is
    // nonmixable, hence activating the session will interrupt any other
    // audio sessions which are also nonmixable.
    _category = AVAudioSessionCategoryPlayAndRecord;
    _categoryOptions = AVAudioSessionCategoryOptionAllowBluetooth;

    // Specify mode for two-way voice communication (e.g. VoIP).
    _mode = AVAudioSessionModeVoiceChat;

    // Set the session's sample rate or the hardware sample rate.
    // It is essential that we use the same sample rate as stream format
    // to ensure that the I/O unit does not have to do sample rate conversion.
    // Set the preferred audio I/O buffer duration, in seconds.
    NSUInteger processorCount = [NSProcessInfo processInfo].processorCount;
    // Use best sample rate and buffer duration if the CPU has more than one
    // core.
    if (processorCount > 1) {
      _sampleRate = kRTCAudioSessionHighPerformanceSampleRate;
      _ioBufferDuration = kRTCAudioSessionHighPerformanceIOBufferDuration;
    } else {
      _sampleRate = kRTCAudioSessionLowComplexitySampleRate;
      _ioBufferDuration = kRTCAudioSessionLowComplexityIOBufferDuration;
    }

    // We try to use mono in both directions to save resources and format
    // conversions in the audio unit. Some devices only support stereo;
    // e.g. wired headset on iPhone 6.
    // TODO(henrika): add support for stereo if needed.
    _inputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
    _outputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
  }
  return self;
}

// Returns a snapshot of the shared audio session's CURRENT state. Every
// property that this class exposes is read back from the session; previously
// the category and categoryOptions were not copied, so the returned object
// silently reported the WebRTC defaults (PlayAndRecord / AllowBluetooth)
// instead of whatever category the session was actually configured with.
+ (instancetype)currentConfiguration {
  RTCAudioSession *session = [RTCAudioSession sharedInstance];
  RTCAudioSessionConfiguration *config =
      [[RTCAudioSessionConfiguration alloc] init];
  config.category = session.category;
  config.categoryOptions = session.categoryOptions;
  config.mode = session.mode;
  config.sampleRate = session.sampleRate;
  config.ioBufferDuration = session.IOBufferDuration;
  config.inputNumberOfChannels = session.inputNumberOfChannels;
  config.outputNumberOfChannels = session.outputNumberOfChannels;
  return config;
}

// Returns the configuration WebRTC prefers (the init defaults).
+ (instancetype)webRTCConfiguration {
  return [[self alloc] init];
}

@end
OLD | NEW |