/*
 * Copyright 2016 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import "webrtc/modules/audio_device/ios/objc/RTCAudioSessionConfiguration.h"

#import "WebRTC/RTCDispatcher.h"

#import "webrtc/modules/audio_device/ios/objc/RTCAudioSession.h"

// Try to use mono to save resources. Also avoids channel format conversion
// in the I/O audio unit. Initial tests have shown that it is possible to use
// mono natively for built-in microphones and for BT headsets but not for
// wired headsets. Wired headsets only support stereo as the native channel
// format, but it is a low-cost operation to do a format conversion to mono in
// the audio unit. Hence, we will not hit an RTC_CHECK in
// VerifyAudioParametersForActiveAudioSession() for a mismatch between the
// preferred number of channels and the actual number of channels.
const int kRTCAudioSessionPreferredNumberOfChannels = 1;

// Preferred hardware sample rate (unit is in Hertz). The client sample rate
// will be set to this value as well to avoid resampling in the audio unit's
// format converter. Note that some devices, e.g. BT headsets, only support
// 8000 Hz as the native sample rate.
const double kRTCAudioSessionHighPerformanceSampleRate = 48000.0;

// A lower sample rate will be used for devices with only one core
// (e.g. iPhone 4). The goal is to reduce the CPU load of the application.
const double kRTCAudioSessionLowComplexitySampleRate = 16000.0;

// Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
// size used by WebRTC. The actual size will differ between devices.
// Example: using 48kHz on iPhone 6 results in a native buffer size of
// ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
// take care of any buffering required to convert between native buffers and
// buffers used by WebRTC. It is beneficial for performance if the native size
// is as close to 10ms as possible since it results in a "clean" callback
// sequence without bursts of back-to-back callbacks.
const double kRTCAudioSessionHighPerformanceIOBufferDuration = 0.01;
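// For reference, the ~10.6667ms figure above follows from arithmetic on the
// requested duration: 0.01 s * 48000 Hz = 480 frames, and the hardware appears
// to round the I/O buffer up to the next power-of-two frame count (an observed
// behavior, not something the API guarantees), giving
// 512 frames / 48000 Hz ~= 0.0106667 s per callback.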

// Use a larger buffer size on devices with only one core (e.g. iPhone 4).
// It will result in lower CPU consumption at the cost of higher latency.
// The size of 60ms is based on instrumentation that shows a significant
// reduction in CPU load compared with 10ms on low-end devices.
// TODO(henrika): monitor this size and determine if it should be modified.
const double kRTCAudioSessionLowComplexityIOBufferDuration = 0.06;

static RTCAudioSessionConfiguration *gWebRTCConfiguration = nil;

@implementation RTCAudioSessionConfiguration

@synthesize category = _category;
@synthesize categoryOptions = _categoryOptions;
@synthesize mode = _mode;
@synthesize sampleRate = _sampleRate;
@synthesize ioBufferDuration = _ioBufferDuration;
@synthesize inputNumberOfChannels = _inputNumberOfChannels;
@synthesize outputNumberOfChannels = _outputNumberOfChannels;

- (instancetype)init {
  if (self = [super init]) {
    // Use a category which supports simultaneous recording and playback.
    // By default, using this category implies that our app's audio is
    // nonmixable, hence activating the session will interrupt any other
    // audio sessions which are also nonmixable.
    _category = AVAudioSessionCategoryPlayAndRecord;
    _categoryOptions = AVAudioSessionCategoryOptionAllowBluetooth;

    // Specify mode for two-way voice communication (e.g. VoIP).
    _mode = AVAudioSessionModeVoiceChat;

    // Set the session's preferred sample rate to match the hardware sample
    // rate. It is essential that we use the same sample rate as the stream
    // format to ensure that the I/O unit does not have to do sample rate
    // conversion.
    // Set the preferred audio I/O buffer duration, in seconds.
    NSUInteger processorCount = [NSProcessInfo processInfo].processorCount;
    // Use the best sample rate and buffer duration if the CPU has more than
    // one core.
    if (processorCount > 1) {
      _sampleRate = kRTCAudioSessionHighPerformanceSampleRate;
      _ioBufferDuration = kRTCAudioSessionHighPerformanceIOBufferDuration;
    } else {
      _sampleRate = kRTCAudioSessionLowComplexitySampleRate;
      _ioBufferDuration = kRTCAudioSessionLowComplexityIOBufferDuration;
    }

    // We try to use mono in both directions to save resources and avoid
    // format conversions in the audio unit. Some devices only support stereo,
    // e.g. a wired headset on iPhone 6.
    // TODO(henrika): add support for stereo if needed.
    _inputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
    _outputNumberOfChannels = kRTCAudioSessionPreferredNumberOfChannels;
  }
  return self;
}

+ (void)initialize {
  gWebRTCConfiguration = [[self alloc] init];
}

+ (instancetype)currentConfiguration {
  RTCAudioSession *session = [RTCAudioSession sharedInstance];
  RTCAudioSessionConfiguration *config =
      [[RTCAudioSessionConfiguration alloc] init];
  config.category = session.category;
  config.categoryOptions = session.categoryOptions;
  config.mode = session.mode;
  config.sampleRate = session.sampleRate;
  config.ioBufferDuration = session.IOBufferDuration;
  config.inputNumberOfChannels = session.inputNumberOfChannels;
  config.outputNumberOfChannels = session.outputNumberOfChannels;
  return config;
}

+ (instancetype)webRTCConfiguration {
  @synchronized(self) {
    return (RTCAudioSessionConfiguration *)gWebRTCConfiguration;
  }
}

+ (void)setWebRTCConfiguration:(RTCAudioSessionConfiguration *)configuration {
  @synchronized(self) {
    gWebRTCConfiguration = configuration;
  }
}

@end
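
// Usage sketch (not part of this file's API; a minimal example of how a
// client might apply this configuration). It assumes that this build's
// RTCAudioSession exposes -lockForConfiguration, -unlockForConfiguration and
// -setConfiguration:active:error: from its configuration category:
//
//   RTCAudioSessionConfiguration *config =
//       [RTCAudioSessionConfiguration webRTCConfiguration];
//   RTCAudioSession *session = [RTCAudioSession sharedInstance];
//   [session lockForConfiguration];
//   NSError *error = nil;
//   // Applies the preferred category, mode, sample rate, buffer duration and
//   // channel counts, and activates the session.
//   if (![session setConfiguration:config active:YES error:&error]) {
//     NSLog(@"Failed to configure audio session: %@", error);
//   }
//   [session unlockForConfiguration];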