Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_device/ios/audio_device_ios.mm

Issue 1254883002: Refactor the AudioDevice for iOS and improve the performance and stability (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Rebased and cleaned up. Created 5 years, 3 months ago.
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #if !defined(__has_feature) || !__has_feature(objc_arc) 11 #if !defined(__has_feature) || !__has_feature(objc_arc)
12 #error "This file requires ARC support." 12 #error "This file requires ARC support."
13 #endif 13 #endif
14 14
15 #import <AVFoundation/AVFoundation.h> 15 #import <AVFoundation/AVFoundation.h>
16 #import <Foundation/Foundation.h> 16 #import <Foundation/Foundation.h>
17 17
18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" 18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h"
19 #include "webrtc/modules/utility/interface/helpers_ios.h"
20 19
20 #include "webrtc/base/atomicops.h"
21 #include "webrtc/base/checks.h" 21 #include "webrtc/base/checks.h"
22 #include "webrtc/base/logging.h" 22 #include "webrtc/base/logging.h"
23 #include "webrtc/system_wrappers/interface/trace.h" 23 #include "webrtc/modules/audio_device/fine_audio_buffer.h"
24 #include "webrtc/modules/utility/interface/helpers_ios.h"
24 25
25 namespace webrtc { 26 namespace webrtc {
26 27
27 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" 28 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
28 29
30 #define LOG_AND_RETURN_IF_ERROR(error, message) \
31 do { \
32 OSStatus err = error; \
33 if (err) { \
34 LOG(LS_ERROR) << message << ": " << err; \
35 return false; \
36 } \
37 } while (0)
38
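A minimal usage sketch for this macro, assuming a bool-returning helper in the style of the setup code added below (the property call itself is only illustrative):

  bool DisableInputIO(AudioUnit unit) {
    UInt32 disable = 0;
    LOG_AND_RETURN_IF_ERROR(
        AudioUnitSetProperty(unit, kAudioOutputUnitProperty_EnableIO,
                             kAudioUnitScope_Input, 1, &disable,
                             sizeof(disable)),
        "Failed to disable input on the input element");
    return true;
  }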
39 // Preferred hardware sample rate (unit is in Hertz). The client sample rate
40 // will be set to this value as well to avoid resampling in the audio unit's
41 // format converter. Note that some devices, e.g. BT headsets, only support
42 // 8000Hz as their native sample rate.
43 const double kPreferredSampleRate = 48000.0;
44 // Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
45 // size used by WebRTC. The actual size will differ between devices.
46 // Example: using 48kHz on iPhone 6 results in a native buffer size of
47 // ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
48 // take care of any buffering required to convert between native buffers and
49 // buffers used by WebRTC. It is beneficial for performance if the native
50 // size is as close to 10ms as possible, since that results in a "clean"
51 // callback sequence without bursts of back-to-back callbacks.
52 const double kPreferredIOBufferDuration = 0.01;
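A quick sanity check of the iPhone 6 example above; the hardware rounds the requested duration to the nearest supported frame count (a power of two here):

  // Requested: 0.01 s * 48000 Hz = 480 frames per buffer.
  // Granted:   512 frames => 512 / 48000 Hz ~= 0.0106667 s (~10.6667 ms).
  const double kRequestedFramesPerBuffer =
      kPreferredIOBufferDuration * kPreferredSampleRate;
  const double kGrantedIOBufferDuration = 512.0 / kPreferredSampleRate;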
53 // Try to use mono to save resources. Also avoids channel format conversion
54 // in the I/O audio unit. Initial tests have shown that it is possible to use
55 // mono natively for built-in microphones and for BT headsets but not for
56 // wired headsets. Wired headsets only support stereo as native channel format
57 // but it is a low-cost operation to do a format conversion to mono in the
58 // audio unit. Hence, we will not hit a CHECK in
59 // VerifyAudioParametersForActiveAudioSession() for a mismatch between the
60 // preferred number of channels and the actual number of channels.
61 const int kPreferredNumberOfChannels = 1;
62 // Number of bytes per audio sample for 16-bit signed integer representation.
63 const UInt32 kBytesPerSample = 2;
64 // Hardcoded delay estimates based on real measurements.
65 // TODO(henrika): these values are not used in combination with built-in AEC.
66 // They can most likely be removed.
67 const UInt16 kFixedPlayoutDelayEstimate = 30;
68 const UInt16 kFixedRecordDelayEstimate = 30;
69
29 using ios::CheckAndLogError; 70 using ios::CheckAndLogError;
30 71
72 // Activates an audio session suitable for full duplex VoIP sessions when
73 // |activate| is true. Also sets the preferred sample rate and IO buffer
74 // duration. Deactivates an active audio session if |activate| is set to false.
31 static void ActivateAudioSession(AVAudioSession* session, bool activate) { 75 static void ActivateAudioSession(AVAudioSession* session, bool activate) {
32 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; 76 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
33 @autoreleasepool { 77 @autoreleasepool {
34 NSError* error = nil; 78 NSError* error = nil;
35 BOOL success = NO; 79 BOOL success = NO;
80 // Deactivate the audio session and return if |activate| is false.
36 if (!activate) { 81 if (!activate) {
37 // Deactivate the audio session.
38 success = [session setActive:NO error:&error]; 82 success = [session setActive:NO error:&error];
39 DCHECK(CheckAndLogError(success, error)); 83 DCHECK(CheckAndLogError(success, error));
40 return; 84 return;
41 } 85 }
42 // Activate an audio session and set category and mode. Only make changes 86 // Use a category which supports simultaneous recording and playback.
43 // if needed since setting them to the value they already have will clear 87 // By default, using this category implies that our app’s audio is
44 // transient properties (such as PortOverride) that some other component 88 // nonmixable, hence activating the session will interrupt any other
45 // has set up. 89 // audio sessions which are also nonmixable.
46 if (session.category != AVAudioSessionCategoryPlayAndRecord) { 90 if (session.category != AVAudioSessionCategoryPlayAndRecord) {
47 error = nil; 91 error = nil;
48 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord 92 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
49 error:&error]; 93 error:&error];
50 DCHECK(CheckAndLogError(success, error)); 94 DCHECK(CheckAndLogError(success, error));
51 } 95 }
96 // Specify mode for two-way voice communication (e.g. VoIP).
52 if (session.mode != AVAudioSessionModeVoiceChat) { 97 if (session.mode != AVAudioSessionModeVoiceChat) {
53 error = nil; 98 error = nil;
54 success = [session setMode:AVAudioSessionModeVoiceChat error:&error]; 99 success = [session setMode:AVAudioSessionModeVoiceChat error:&error];
55 DCHECK(CheckAndLogError(success, error)); 100 DCHECK(CheckAndLogError(success, error));
56 } 101 }
102 // Set the session's preferred sample rate. It is essential that we use the
103 // same sample rate as the stream format to ensure that the I/O unit does
104 // not have to do sample rate conversion.
105 error = nil;
106 success =
107 [session setPreferredSampleRate:kPreferredSampleRate error:&error];
108 DCHECK(CheckAndLogError(success, error));
109 // Set the preferred audio I/O buffer duration, in seconds.
110 // TODO(henrika): add more comments here.
111 error = nil;
112 success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration
113 error:&error];
114 DCHECK(CheckAndLogError(success, error));
115
116 // TODO(henrika): add observers here...
117
118 // Activate the audio session. Activation can fail if another active audio
119 // session (e.g. phone call) has higher priority than ours.
57 error = nil; 120 error = nil;
58 success = [session setActive:YES error:&error]; 121 success = [session setActive:YES error:&error];
59 DCHECK(CheckAndLogError(success, error)); 122 DCHECK(CheckAndLogError(success, error));
123 CHECK(session.isInputAvailable) << "No input path is available!";
60 // Ensure that category and mode are actually activated. 124 // Ensure that category and mode are actually activated.
61 DCHECK( 125 DCHECK(
62 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); 126 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]);
63 DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); 127 DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]);
64 } 128 // Try to set the preferred number of hardware audio channels. These calls
65 } 129 // must be done after setting the audio session’s category and mode and
66 130 // activating the session.
67 // Query hardware characteristics, such as input and output latency, input and 131 // We try to use mono in both directions to save resources and format
68 // output channel count, hardware sample rate, hardware volume setting, and 132 // conversions in the audio unit. Some devices only support stereo;
69 // whether audio input is available. To obtain meaningful values for hardware 133 // e.g. wired headset on iPhone 6.
70 // characteristics, the audio session must be initialized and active before we 134 // TODO(henrika): add support for stereo if needed.
71 // query the values. 135 error = nil;
72 // TODO(henrika): Note that these characteristics can change at runtime. For 136 success =
73 // instance, input sample rate may change when a user plugs in a headset. 137 [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels
74 static void GetHardwareAudioParameters(AudioParameters* playout_parameters, 138 error:&error];
75 AudioParameters* record_parameters) { 139 DCHECK(CheckAndLogError(success, error));
76 LOG(LS_INFO) << "GetHardwareAudioParameters"; 140 error = nil;
77 @autoreleasepool { 141 success =
78 // Implicit initialization happens when we obtain a reference to the 142 [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels
79 // AVAudioSession object. 143 error:&error];
80 AVAudioSession* session = [AVAudioSession sharedInstance]; 144 DCHECK(CheckAndLogError(success, error));
81 // Always get values when the audio session is active.
82 ActivateAudioSession(session, true);
83 CHECK(session.isInputAvailable) << "No input path is available!";
84 // Get current hardware parameters.
85 double sample_rate = (double)session.sampleRate;
86 double io_buffer_duration = (double)session.IOBufferDuration;
87 int output_channels = (int)session.outputNumberOfChannels;
88 int input_channels = (int)session.inputNumberOfChannels;
89 size_t frames_per_buffer =
90 static_cast<size_t>(sample_rate * io_buffer_duration + 0.5);
91 // Copy hardware parameters to output parameters.
92 playout_parameters->reset(sample_rate, output_channels, frames_per_buffer);
93 record_parameters->reset(sample_rate, input_channels, frames_per_buffer);
94 // Add logging for debugging purposes.
95 LOG(LS_INFO) << " sample rate: " << sample_rate;
96 LOG(LS_INFO) << " IO buffer duration: " << io_buffer_duration;
97 LOG(LS_INFO) << " frames_per_buffer: " << frames_per_buffer;
98 LOG(LS_INFO) << " output channels: " << output_channels;
99 LOG(LS_INFO) << " input channels: " << input_channels;
100 LOG(LS_INFO) << " output latency: " << (double)session.outputLatency;
101 LOG(LS_INFO) << " input latency: " << (double)session.inputLatency;
102 // Don't keep the audio session active. Instead, deactivate when needed.
103 ActivateAudioSession(session, false);
104 // TODO(henrika): to be extra safe, we can do more here. E.g., set
105 // preferred values for sample rate, channels etc., re-activate an audio
106 // session and verify the actual values again. Then we know for sure that
107 // the current values will in fact be correct. Or, we can skip all this
108 // and check settings when audio is started. Probably better.
109 } 145 }
110 } 146 }
111 147
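For orientation, the intended call pattern for this helper, mirroring how InitPlayOrRecord() and ShutdownPlayOrRecord() use it further down in this file:

  AVAudioSession* session = [AVAudioSession sharedInstance];
  ActivateAudioSession(session, true);   // before setting up the audio unit
  // ... create, initialize and run the VoiceProcessingIO unit ...
  ActivateAudioSession(session, false);  // after all I/O has stopped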
112 #if !defined(NDEBUG) 148 #if !defined(NDEBUG)
149 // Helper method for printing out an AudioStreamBasicDescription structure.
150 static void LogABSD(AudioStreamBasicDescription absd) {
151 char formatIDString[5];
152 UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID);
153 bcopy(&formatID, formatIDString, 4);
154 formatIDString[4] = '\0';
155 LOG(LS_INFO) << "LogABSD";
156 LOG(LS_INFO) << " sample rate: " << absd.mSampleRate;
157 LOG(LS_INFO) << " format ID: " << formatIDString;
158 LOG(LS_INFO) << " format flags: " << std::hex << absd.mFormatFlags;
159 LOG(LS_INFO) << " bytes per packet: " << absd.mBytesPerPacket;
160 LOG(LS_INFO) << " frames per packet: " << absd.mFramesPerPacket;
161 LOG(LS_INFO) << " bytes per frame: " << absd.mBytesPerFrame;
162 LOG(LS_INFO) << " channels per frame: " << absd.mChannelsPerFrame;
163 LOG(LS_INFO) << " bits per channel: " << absd.mBitsPerChannel;
164 LOG(LS_INFO) << " reserved: " << absd.mReserved;
165 }
166
167 // Helper method that logs essential device information strings.
113 static void LogDeviceInfo() { 168 static void LogDeviceInfo() {
114 LOG(LS_INFO) << "LogDeviceInfo"; 169 LOG(LS_INFO) << "LogDeviceInfo";
115 @autoreleasepool { 170 @autoreleasepool {
116 LOG(LS_INFO) << " system name: " << ios::GetSystemName(); 171 LOG(LS_INFO) << " system name: " << ios::GetSystemName();
117 LOG(LS_INFO) << " system version: " << ios::GetSystemVersion(); 172 LOG(LS_INFO) << " system version: " << ios::GetSystemVersion();
118 LOG(LS_INFO) << " device type: " << ios::GetDeviceType(); 173 LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
119 LOG(LS_INFO) << " device name: " << ios::GetDeviceName(); 174 LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
120 } 175 }
121 } 176 }
122 #endif 177 #endif // !defined(NDEBUG)
123 178
124 AudioDeviceIOS::AudioDeviceIOS() 179 AudioDeviceIOS::AudioDeviceIOS()
125 : audio_device_buffer_(nullptr), 180 : _audioDeviceBuffer(nullptr),
126 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), 181 _vpioUnit(nullptr),
127 _auVoiceProcessing(nullptr), 182 _recording(0),
128 _audioInterruptionObserver(nullptr), 183 _playing(0),
129 _initialized(false), 184 _initialized(false),
130 _isShutDown(false),
131 _recording(false),
132 _playing(false),
133 _recIsInitialized(false), 185 _recIsInitialized(false),
134 _playIsInitialized(false), 186 _playIsInitialized(false),
135 _adbSampFreq(0), 187 _audioInterruptionObserver(nullptr) {
136 _recordingDelay(0),
137 _playoutDelay(0),
138 _playoutDelayMeasurementCounter(9999),
139 _recordingDelayHWAndOS(0),
140 _recordingDelayMeasurementCounter(9999),
141 _playoutBufferUsed(0),
142 _recordingCurrentSeq(0),
143 _recordingBufferTotalSize(0) {
144 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); 188 LOGI() << "ctor" << ios::GetCurrentThreadDescription();
145 memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
146 memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
147 memset(_recordingLength, 0, sizeof(_recordingLength));
148 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
149 } 189 }
150 190
151 AudioDeviceIOS::~AudioDeviceIOS() { 191 AudioDeviceIOS::~AudioDeviceIOS() {
152 LOGI() << "~dtor"; 192 LOGI() << "~dtor";
153 DCHECK(thread_checker_.CalledOnValidThread()); 193 DCHECK(_threadChecker.CalledOnValidThread());
154 Terminate(); 194 Terminate();
155 delete &_critSect;
156 } 195 }
157 196
158 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { 197 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
159 LOGI() << "AttachAudioBuffer"; 198 LOGI() << "AttachAudioBuffer";
160 DCHECK(audioBuffer); 199 DCHECK(audioBuffer);
161 DCHECK(thread_checker_.CalledOnValidThread()); 200 DCHECK(_threadChecker.CalledOnValidThread());
162 audio_device_buffer_ = audioBuffer; 201 _audioDeviceBuffer = audioBuffer;
163 } 202 }
164 203
165 int32_t AudioDeviceIOS::Init() { 204 int32_t AudioDeviceIOS::Init() {
166 LOGI() << "Init"; 205 LOGI() << "Init";
167 DCHECK(thread_checker_.CalledOnValidThread()); 206 DCHECK(_threadChecker.CalledOnValidThread());
168 if (_initialized) { 207 if (_initialized) {
169 return 0; 208 return 0;
170 } 209 }
171 #if !defined(NDEBUG) 210 #if !defined(NDEBUG)
172 LogDeviceInfo(); 211 LogDeviceInfo();
173 #endif 212 #endif
174 // Query hardware audio parameters and cache the results. These parameters 213 // Store the preferred sample rate and preferred number of channels here.
175 // will be used as preferred values later when streaming starts. 214 // They have not been set and confirmed yet since ActivateAudioSession()
176 // Note that I override these "optimal" values below since I don't want to 215 // is not called until audio is about to start. However, it makes sense to
177 // modify the existing behavior yet. 216 // store the parameters now and verify them at a later stage.
178 GetHardwareAudioParameters(&playout_parameters_, &record_parameters_); 217 _playoutParameters.reset(kPreferredSampleRate, kPreferredNumberOfChannels);
179 // TODO(henrika): these parameters are currently hard coded to match the 218 _recordParameters.reset(kPreferredSampleRate, kPreferredNumberOfChannels);
180 // existing implementation where we always use 16kHz as preferred sample 219 // Ensure that the audio device buffer (ADB) knows about the internal audio
181 // rate and mono only. Goal is to improve this scheme and make it more 220 // parameters. Note that, even if we are unable to get a mono audio session,
182 // flexible. In addition, a better native buffer size shall be derived. 221 // we will always tell the I/O audio unit to do a channel format conversion
183 // Using 10ms as default here (only used by unit test so far). 222 // to guarantee mono on the "input side" of the audio unit.
184 // We should also implement observers for notification of any change in 223 UpdateAudioDeviceBuffer();
185 // these parameters.
186 playout_parameters_.reset(16000, 1, 160);
187 record_parameters_.reset(16000, 1, 160);
188
189 // AttachAudioBuffer() is called at construction by the main class but check
190 // just in case.
191 DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
192 // Inform the audio device buffer (ADB) about the new audio format.
193 // TODO(henrika): try to improve this section.
194 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
195 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
196 audio_device_buffer_->SetRecordingSampleRate(
197 record_parameters_.sample_rate());
198 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
199
200 DCHECK(!_captureWorkerThread);
201 // Create and start the capture thread.
202 // TODO(henrika): do we need this thread?
203 _isShutDown = false;
204 _captureWorkerThread =
205 ThreadWrapper::CreateThread(RunCapture, this, "CaptureWorkerThread");
206 if (!_captureWorkerThread->Start()) {
207 LOG_F(LS_ERROR) << "Failed to start CaptureWorkerThread!";
208 return -1;
209 }
210 _captureWorkerThread->SetPriority(kRealtimePriority);
211 _initialized = true; 224 _initialized = true;
212 return 0; 225 return 0;
213 } 226 }
214 227
215 int32_t AudioDeviceIOS::Terminate() { 228 int32_t AudioDeviceIOS::Terminate() {
216 LOGI() << "Terminate"; 229 LOGI() << "Terminate";
217 DCHECK(thread_checker_.CalledOnValidThread()); 230 DCHECK(_threadChecker.CalledOnValidThread());
218 if (!_initialized) { 231 if (!_initialized) {
219 return 0; 232 return 0;
220 } 233 }
221 // Stop the capture thread.
222 if (_captureWorkerThread) {
223 if (!_captureWorkerThread->Stop()) {
224 LOG_F(LS_ERROR) << "Failed to stop CaptureWorkerThread!";
225 return -1;
226 }
227 _captureWorkerThread.reset();
228 }
229 ShutdownPlayOrRecord(); 234 ShutdownPlayOrRecord();
230 _isShutDown = true;
231 _initialized = false; 235 _initialized = false;
232 return 0; 236 return 0;
233 } 237 }
234 238
235 int32_t AudioDeviceIOS::InitPlayout() { 239 int32_t AudioDeviceIOS::InitPlayout() {
236 LOGI() << "InitPlayout"; 240 LOGI() << "InitPlayout";
237 DCHECK(thread_checker_.CalledOnValidThread()); 241 DCHECK(_threadChecker.CalledOnValidThread());
238 DCHECK(_initialized); 242 DCHECK(_initialized);
239 DCHECK(!_playIsInitialized); 243 DCHECK(!_playIsInitialized);
240 DCHECK(!_playing); 244 DCHECK(!_playing);
241 if (!_recIsInitialized) { 245 if (!_recIsInitialized) {
242 if (InitPlayOrRecord() == -1) { 246 if (!InitPlayOrRecord()) {
243 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; 247 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
244 return -1; 248 return -1;
245 } 249 }
246 } 250 }
247 _playIsInitialized = true; 251 _playIsInitialized = true;
248 return 0; 252 return 0;
249 } 253 }
250 254
251 int32_t AudioDeviceIOS::InitRecording() { 255 int32_t AudioDeviceIOS::InitRecording() {
252 LOGI() << "InitRecording"; 256 LOGI() << "InitRecording";
253 DCHECK(thread_checker_.CalledOnValidThread()); 257 DCHECK(_threadChecker.CalledOnValidThread());
254 DCHECK(_initialized); 258 DCHECK(_initialized);
255 DCHECK(!_recIsInitialized); 259 DCHECK(!_recIsInitialized);
256 DCHECK(!_recording); 260 DCHECK(!_recording);
257 if (!_playIsInitialized) { 261 if (!_playIsInitialized) {
258 if (InitPlayOrRecord() == -1) { 262 if (!InitPlayOrRecord()) {
259 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; 263 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
260 return -1; 264 return -1;
261 } 265 }
262 } 266 }
263 _recIsInitialized = true; 267 _recIsInitialized = true;
264 return 0; 268 return 0;
265 } 269 }
266 270
267 int32_t AudioDeviceIOS::StartPlayout() { 271 int32_t AudioDeviceIOS::StartPlayout() {
268 LOGI() << "StartPlayout"; 272 LOGI() << "StartPlayout";
269 DCHECK(thread_checker_.CalledOnValidThread()); 273 DCHECK(_threadChecker.CalledOnValidThread());
270 DCHECK(_playIsInitialized); 274 DCHECK(_playIsInitialized);
271 DCHECK(!_playing); 275 DCHECK(!_playing);
272 276 _fineAudioBuffer->ResetPlayout();
273 CriticalSectionScoped lock(&_critSect);
274
275 memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
276 _playoutBufferUsed = 0;
277 _playoutDelay = 0;
278 // Make sure first call to update delay function will update delay
279 _playoutDelayMeasurementCounter = 9999;
280
281 if (!_recording) { 277 if (!_recording) {
282 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing); 278 OSStatus result = AudioOutputUnitStart(_vpioUnit);
283 if (result != noErr) { 279 if (result != noErr) {
284 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; 280 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
285 return -1; 281 return -1;
286 } 282 }
287 } 283 }
288 _playing = true; 284 rtc::AtomicOps::ReleaseStore(&_playing, 1);
289 return 0; 285 return 0;
290 } 286 }
291 287
292 int32_t AudioDeviceIOS::StopPlayout() { 288 int32_t AudioDeviceIOS::StopPlayout() {
293 LOGI() << "StopPlayout"; 289 LOGI() << "StopPlayout";
294 DCHECK(thread_checker_.CalledOnValidThread()); 290 DCHECK(_threadChecker.CalledOnValidThread());
295 if (!_playIsInitialized || !_playing) { 291 if (!_playIsInitialized || !_playing) {
296 return 0; 292 return 0;
297 } 293 }
298
299 CriticalSectionScoped lock(&_critSect);
300
301 if (!_recording) { 294 if (!_recording) {
302 // Both playout and recording have stopped; shut down the device.
303 ShutdownPlayOrRecord(); 295 ShutdownPlayOrRecord();
304 } 296 }
305 _playIsInitialized = false; 297 _playIsInitialized = false;
306 _playing = false; 298 rtc::AtomicOps::ReleaseStore(&_playing, 0);
307 return 0; 299 return 0;
308 } 300 }
309 301
310 int32_t AudioDeviceIOS::StartRecording() { 302 int32_t AudioDeviceIOS::StartRecording() {
311 LOGI() << "StartRecording"; 303 LOGI() << "StartRecording";
312 DCHECK(thread_checker_.CalledOnValidThread()); 304 DCHECK(_threadChecker.CalledOnValidThread());
313 DCHECK(_recIsInitialized); 305 DCHECK(_recIsInitialized);
314 DCHECK(!_recording); 306 DCHECK(!_recording);
315 307 _fineAudioBuffer->ResetRecord();
316 CriticalSectionScoped lock(&_critSect);
317
318 memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
319 memset(_recordingLength, 0, sizeof(_recordingLength));
320 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
321
322 _recordingCurrentSeq = 0;
323 _recordingBufferTotalSize = 0;
324 _recordingDelay = 0;
325 _recordingDelayHWAndOS = 0;
326 // Make sure first call to update delay function will update delay
327 _recordingDelayMeasurementCounter = 9999;
328
329 if (!_playing) { 308 if (!_playing) {
330 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing); 309 OSStatus result = AudioOutputUnitStart(_vpioUnit);
331 if (result != noErr) { 310 if (result != noErr) {
332 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; 311 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
333 return -1; 312 return -1;
334 } 313 }
335 } 314 }
336 _recording = true; 315 rtc::AtomicOps::ReleaseStore(&_recording, 1);
337 return 0; 316 return 0;
338 } 317 }
339 318
340 int32_t AudioDeviceIOS::StopRecording() { 319 int32_t AudioDeviceIOS::StopRecording() {
341 LOGI() << "StopRecording"; 320 LOGI() << "StopRecording";
342 DCHECK(thread_checker_.CalledOnValidThread()); 321 DCHECK(_threadChecker.CalledOnValidThread());
343 if (!_recIsInitialized || !_recording) { 322 if (!_recIsInitialized || !_recording) {
344 return 0; 323 return 0;
345 } 324 }
346
347 CriticalSectionScoped lock(&_critSect);
348
349 if (!_playing) { 325 if (!_playing) {
350 // Both playout and recording have stopped; shut down the device.
351 ShutdownPlayOrRecord(); 326 ShutdownPlayOrRecord();
352 } 327 }
353 _recIsInitialized = false; 328 _recIsInitialized = false;
354 _recording = false; 329 rtc::AtomicOps::ReleaseStore(&_recording, 0);
355 return 0; 330 return 0;
356 } 331 }
357 332
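The |_playing| and |_recording| flags are now plain ints: the control thread publishes state changes with release stores, and the real-time audio thread observes them with acquire loads. A minimal sketch of the pairing used by the start/stop methods above and the render callbacks below:

  // Control thread, e.g. StartRecording():
  rtc::AtomicOps::ReleaseStore(&_recording, 1);
  // Real-time I/O thread, e.g. OnRecordedDataIsAvailable():
  if (!rtc::AtomicOps::AcquireLoad(&_recording))
    return noErr;  // recording is disabled; skip this callback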
358 // Change the default receiver playout route to speaker. 333 // Change the default receiver playout route to speaker.
359 int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) { 334 int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
360 LOGI() << "SetLoudspeakerStatus(" << enable << ")"; 335 LOGI() << "SetLoudspeakerStatus(" << enable << ")";
361 336
362 AVAudioSession* session = [AVAudioSession sharedInstance]; 337 AVAudioSession* session = [AVAudioSession sharedInstance];
363 NSString* category = session.category; 338 NSString* category = session.category;
364 AVAudioSessionCategoryOptions options = session.categoryOptions; 339 AVAudioSessionCategoryOptions options = session.categoryOptions;
(...skipping 19 matching lines...)
384 359
385 int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const { 360 int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const {
386 LOGI() << "GetLoudspeakerStatus"; 361 LOGI() << "GetLoudspeakerStatus";
387 AVAudioSession* session = [AVAudioSession sharedInstance]; 362 AVAudioSession* session = [AVAudioSession sharedInstance];
388 AVAudioSessionCategoryOptions options = session.categoryOptions; 363 AVAudioSessionCategoryOptions options = session.categoryOptions;
389 enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker; 364 enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
390 return 0; 365 return 0;
391 } 366 }
392 367
393 int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const { 368 int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
394 delayMS = _playoutDelay; 369 delayMS = kFixedPlayoutDelayEstimate;
395 return 0; 370 return 0;
396 } 371 }
397 372
398 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const { 373 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
399 delayMS = _recordingDelay; 374 delayMS = kFixedRecordDelayEstimate;
400 return 0;
401 }
402
403 int32_t AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
404 uint16_t& sizeMS) const {
405 type = AudioDeviceModule::kAdaptiveBufferSize;
406 sizeMS = _playoutDelay;
407 return 0; 375 return 0;
408 } 376 }
409 377
410 int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const { 378 int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
411 CHECK(playout_parameters_.is_valid()); 379 LOGI() << "GetPlayoutAudioParameters";
412 DCHECK(thread_checker_.CalledOnValidThread()); 380 DCHECK(_playoutParameters.is_valid());
413 *params = playout_parameters_; 381 DCHECK(_threadChecker.CalledOnValidThread());
382 *params = _playoutParameters;
414 return 0; 383 return 0;
415 } 384 }
416 385
417 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const { 386 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
418 CHECK(record_parameters_.is_valid()); 387 LOGI() << "GetRecordAudioParameters";
419 DCHECK(thread_checker_.CalledOnValidThread()); 388 DCHECK(_recordParameters.is_valid());
420 *params = record_parameters_; 389 DCHECK(_threadChecker.CalledOnValidThread());
421 return 0; 390 *params = _recordParameters;
422 } 391 return 0;
423 392 }
424 // ============================================================================ 393
425 // Private Methods 394 void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
426 // ============================================================================ 395 LOGI() << "UpdateAudioDeviceBuffer";
427 396 // AttachAudioBuffer() is called at construction by the main class but check
428 int32_t AudioDeviceIOS::InitPlayOrRecord() { 397 // just in case.
429 LOGI() << "AudioDeviceIOS::InitPlayOrRecord"; 398 DCHECK(_audioDeviceBuffer) << "AttachAudioBuffer must be called first";
430 DCHECK(!_auVoiceProcessing); 399 // Inform the audio device buffer (ADB) about the new audio format.
431 400 _audioDeviceBuffer->SetPlayoutSampleRate(_playoutParameters.sample_rate());
432 OSStatus result = -1; 401 _audioDeviceBuffer->SetPlayoutChannels(_playoutParameters.channels());
433 402 _audioDeviceBuffer->SetRecordingSampleRate(_recordParameters.sample_rate());
434 // Create Voice Processing Audio Unit 403 _audioDeviceBuffer->SetRecordingChannels(_recordParameters.channels());
435 AudioComponentDescription desc; 404 }
436 AudioComponent comp; 405
437 406 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
438 desc.componentType = kAudioUnitType_Output; 407 LOGI() << "SetupAudioBuffersForActiveAudioSession";
439 desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO; 408 AVAudioSession* session = [AVAudioSession sharedInstance];
440 desc.componentManufacturer = kAudioUnitManufacturer_Apple; 409 // Verify the current values once the audio session has been activated.
441 desc.componentFlags = 0; 410 LOG(LS_INFO) << " sample rate: " << session.sampleRate;
442 desc.componentFlagsMask = 0; 411 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration;
443 412 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels;
444 comp = AudioComponentFindNext(nullptr, &desc); 413 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels;
445 if (nullptr == comp) { 414 LOG(LS_INFO) << " output latency: " << session.outputLatency;
446 LOG_F(LS_ERROR) << "Could not find audio component for Audio Unit"; 415 LOG(LS_INFO) << " input latency: " << session.inputLatency;
447 return -1; 416 // Log a warning message for the case when we are unable to set the preferred
417 // hardware sample rate but continue and use the non-ideal sample rate after
418 // reinitializing the audio parameters.
419 if (session.sampleRate != _playoutParameters.sample_rate()) {
420 LOG(LS_WARNING)
421 << "Failed to enable an audio session with the preferred sample rate!";
448 } 422 }
449 423
450 result = AudioComponentInstanceNew(comp, &_auVoiceProcessing); 424 // At this stage, we also know the exact IO buffer duration and can add
451 if (0 != result) { 425 // that info to the existing audio parameters where it is converted into
452 LOG_F(LS_ERROR) << "Failed to create Audio Unit instance: " << result; 426 // number of audio frames.
453 return -1; 427 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
428 // Hence, 128 is the size we expect to see in upcoming render callbacks.
429 _playoutParameters.reset(session.sampleRate, _playoutParameters.channels(),
430 session.IOBufferDuration);
431 DCHECK(_playoutParameters.is_complete());
432 _recordParameters.reset(session.sampleRate, _recordParameters.channels(),
433 session.IOBufferDuration);
434 DCHECK(_recordParameters.is_complete());
435 LOG(LS_INFO) << " frames per I/O buffer: "
436 << _playoutParameters.frames_per_buffer();
437 LOG(LS_INFO) << " bytes per I/O buffer: "
438 << _playoutParameters.GetBytesPerBuffer();
439 DCHECK_EQ(_playoutParameters.GetBytesPerBuffer(),
440 _recordParameters.GetBytesPerBuffer());
441
442 // Update the ADB parameters since the sample rate might have changed.
443 UpdateAudioDeviceBuffer();
444
445 // Create a modified audio buffer class which allows us to ask for,
446 // or deliver, any number of samples (and not only multiples of 10ms) to match
447 // the native audio unit buffer size.
448 DCHECK(_audioDeviceBuffer);
449 _fineAudioBuffer.reset(new FineAudioBuffer(
450 _audioDeviceBuffer, _playoutParameters.GetBytesPerBuffer(),
451 _playoutParameters.sample_rate()));
452
453 // The extra/temporary playout buffer must be of this size to avoid
454 // unnecessary memcpy while caching data between successive callbacks.
455 const int requiredPlayoutBufferSize =
456 _fineAudioBuffer->RequiredPlayoutBufferSizeBytes();
457 LOG(LS_INFO) << " required playout buffer size: "
458 << requiredPlayoutBufferSize;
459 _playoutAudioBuffer.reset(new SInt8[requiredPlayoutBufferSize]);
460
461 // Allocate AudioBuffers to be used as storage for the received audio.
462 // The AudioBufferList structure works as a placeholder for the
463 // AudioBuffer structure, which holds a pointer to the actual data buffer
464 // in |_recordAudioBuffer|. Recorded audio will be rendered into this memory
465 // at each input callback when calling AudioUnitRender().
466 const int dataByteSize = _recordParameters.GetBytesPerBuffer();
467 _recordAudioBuffer.reset(new SInt8[dataByteSize]);
468 _audioRecordBufferList.mNumberBuffers = 1;
469 AudioBuffer* audioBuffer = &_audioRecordBufferList.mBuffers[0];
470 audioBuffer->mNumberChannels = _recordParameters.channels();
471 audioBuffer->mDataByteSize = dataByteSize;
472 audioBuffer->mData = _recordAudioBuffer.get();
473 }
474
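To see why the FineAudioBuffer is needed here: at 48 kHz the audio unit asks for 512 frames per callback while each WebRTC pull yields 480 frames (10 ms), leaving a 32-frame deficit per callback, so roughly every fifteenth callback triggers two pulls. A toy sketch of the playout-side buffering idea, under those assumptions (this is not the actual FineAudioBuffer implementation):

  #include <algorithm>
  #include <deque>
  #include <vector>

  // Hypothetical helper: hand out exactly |nativeBytes| per render callback,
  // pulling 10 ms chunks of |tenMsBytes| into |cache| whenever it runs short.
  template <typename Pull10ms>
  void GetPlayoutAudio(std::deque<SInt8>* cache, SInt8* dest,
                       size_t nativeBytes, size_t tenMsBytes, Pull10ms pull) {
    while (cache->size() < nativeBytes) {
      std::vector<SInt8> chunk(tenMsBytes);
      pull(chunk.data());  // e.g. backed by the WebRTC audio device buffer
      cache->insert(cache->end(), chunk.begin(), chunk.end());
    }
    std::copy(cache->begin(), cache->begin() + nativeBytes, dest);
    cache->erase(cache->begin(), cache->begin() + nativeBytes);
  }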
475 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
476 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit";
477 DCHECK(!_vpioUnit);
478 // Create an audio component description to identify the Voice-Processing
479 // I/O audio unit.
480 AudioComponentDescription vpioUnitDescription;
481 vpioUnitDescription.componentType = kAudioUnitType_Output;
482 vpioUnitDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
483 vpioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
484 vpioUnitDescription.componentFlags = 0;
485 vpioUnitDescription.componentFlagsMask = 0;
486 // Obtain an audio unit instance given the description.
487 AudioComponent foundVpioUnitRef =
488 AudioComponentFindNext(nullptr, &vpioUnitDescription);
489
490 // Create a Voice-Processing IO audio unit.
491 LOG_AND_RETURN_IF_ERROR(
492 AudioComponentInstanceNew(foundVpioUnitRef, &_vpioUnit),
493 "Failed to create a VoiceProcessingIO audio unit");
494
495 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable
496 // input on the input scope of the input element.
497 AudioUnitElement inputBus = 1;
498 UInt32 enableInput = 1;
499 LOG_AND_RETURN_IF_ERROR(
500 AudioUnitSetProperty(_vpioUnit, kAudioOutputUnitProperty_EnableIO,
501 kAudioUnitScope_Input, inputBus, &enableInput,
502 sizeof(enableInput)),
503 "Failed to enable input on input scope of input element");
504
505 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable
506 // output on the output scope of the output element.
507 AudioUnitElement outputBus = 0;
508 UInt32 enableOutput = 1;
509 LOG_AND_RETURN_IF_ERROR(
510 AudioUnitSetProperty(_vpioUnit, kAudioOutputUnitProperty_EnableIO,
511 kAudioUnitScope_Output, outputBus, &enableOutput,
512 sizeof(enableOutput)),
513 "Failed to enable output on output scope of output element");
514
515 // Set the application formats for input and output:
516 // - use same format in both directions
517 // - avoid resampling in the I/O unit by using the hardware sample rate
518 // - linear PCM => noncompressed audio data format with one frame per packet
519 // - no need to specify interleaving since only mono is supported
520 AudioStreamBasicDescription applicationFormat = {0};
521 UInt32 size = sizeof(applicationFormat);
522 DCHECK_EQ(_playoutParameters.sample_rate(), _recordParameters.sample_rate());
523 DCHECK_EQ(1, kPreferredNumberOfChannels);
524 applicationFormat.mSampleRate = _playoutParameters.sample_rate();
525 applicationFormat.mFormatID = kAudioFormatLinearPCM;
526 applicationFormat.mFormatFlags =
527 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
528 applicationFormat.mBytesPerPacket = kBytesPerSample;
529 applicationFormat.mFramesPerPacket = 1; // uncompressed
530 applicationFormat.mBytesPerFrame = kBytesPerSample;
531 applicationFormat.mChannelsPerFrame = kPreferredNumberOfChannels;
532 applicationFormat.mBitsPerChannel = 8 * kBytesPerSample;
533 #if !defined(NDEBUG)
534 LogABSD(applicationFormat);
535 #endif
536
537 // Set the application format on the output scope of the input element/bus.
538 LOG_AND_RETURN_IF_ERROR(
539 AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_StreamFormat,
540 kAudioUnitScope_Output, inputBus, &applicationFormat,
541 size),
542 "Failed to set application format on output scope of input element");
543
544 // Set the application format on the input scope of the output element/bus.
545 LOG_AND_RETURN_IF_ERROR(
546 AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_StreamFormat,
547 kAudioUnitScope_Input, outputBus, &applicationFormat,
548 size),
549 "Failed to set application format on input scope of output element");
550
551 // Specify the callback function that provides audio samples to the audio
552 // unit.
553 AURenderCallbackStruct renderCallback;
554 renderCallback.inputProc = GetPlayoutData;
555 renderCallback.inputProcRefCon = this;
556 LOG_AND_RETURN_IF_ERROR(
557 AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_SetRenderCallback,
558 kAudioUnitScope_Input, outputBus, &renderCallback,
559 sizeof(renderCallback)),
560 "Failed to specify the render callback on the output element");
561
562 // Disable AU buffer allocation for the recorder; we allocate our own.
563 // TODO(henrika): not sure that it actually saves resource to make this call.
564 UInt32 flag = 0;
565 LOG_AND_RETURN_IF_ERROR(
566 AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_ShouldAllocateBuffer,
567 kAudioUnitScope_Output, inputBus, &flag,
568 sizeof(flag)),
569 "Failed to disable buffer allocation on the input element");
570
571 // Specify the callback to be called by the I/O thread when input audio
572 // is available. The recorded samples can then be obtained by calling the
573 // AudioUnitRender() method.
574 AURenderCallbackStruct inputCallback;
575 inputCallback.inputProc = RecordedDataIsAvailable;
576 inputCallback.inputProcRefCon = this;
577 LOG_AND_RETURN_IF_ERROR(
578 AudioUnitSetProperty(_vpioUnit, kAudioOutputUnitProperty_SetInputCallback,
579 kAudioUnitScope_Global, inputBus, &inputCallback,
580 sizeof(inputCallback)),
581 "Failed to specify the input callback on the input element");
582
583 // Initialize the Voice-Processing I/O unit instance.
584 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(_vpioUnit),
585 "Failed to initialize the Voice-Processing I/O unit");
586 return true;
587 }
588
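As a mnemonic for the element/scope combinations configured above (the VP I/O unit follows the standard RemoteIO layout):

  // bus 1 (input element):  microphone -> unit; the app pulls recorded audio
  //                         from the output scope via AudioUnitRender().
  // bus 0 (output element): unit -> speaker; the app supplies playout audio
  //                         on the input scope via the render callback.
  const AudioUnitElement kInputBus = 1;
  const AudioUnitElement kOutputBus = 0;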
589 bool AudioDeviceIOS::InitPlayOrRecord() {
590 LOGI() << "InitPlayOrRecord";
591 AVAudioSession* session = [AVAudioSession sharedInstance];
592 // Activate the audio session and ask for a set of preferred audio parameters.
593 ActivateAudioSession(session, true);
594
595 // Ensure that we got what we asked for in our active audio session.
596 SetupAudioBuffersForActiveAudioSession();
597
598 // Create, setup and initialize a new Voice-Processing I/O unit.
599 if (!SetupAndInitializeVoiceProcessingAudioUnit()) {
600 return false;
454 } 601 }
455 602
456 // TODO(henrika): I think we should set the preferred channel configuration
457 // in both directions as well to be safe.
458
459 // Set preferred hardware sample rate to 16 kHz.
460 // TODO(henrika): improve this selection of sample rate. Why do we currently
461 // use a hard coded value? How can we fail and still continue?
462 NSError* error = nil;
463 AVAudioSession* session = [AVAudioSession sharedInstance];
464 Float64 preferredSampleRate(playout_parameters_.sample_rate());
465 [session setPreferredSampleRate:preferredSampleRate error:&error];
466 if (error != nil) {
467 const char* errorString = [[error localizedDescription] UTF8String];
468 LOG_F(LS_ERROR) << "setPreferredSampleRate failed: " << errorString;
469 }
470
471 // TODO(henrika): we can reduce latency by setting the IOBufferDuration
472 // here. Default size for 16kHz is 0.016 sec or 16 msec on an iPhone 6.
473
474 // Activate the audio session.
475 ActivateAudioSession(session, true);
476
477 UInt32 enableIO = 1;
478 result = AudioUnitSetProperty(_auVoiceProcessing,
479 kAudioOutputUnitProperty_EnableIO,
480 kAudioUnitScope_Input,
481 1, // input bus
482 &enableIO, sizeof(enableIO));
483 if (0 != result) {
484 LOG_F(LS_ERROR) << "Failed to enable IO on input: " << result;
485 }
486
487 result = AudioUnitSetProperty(_auVoiceProcessing,
488 kAudioOutputUnitProperty_EnableIO,
489 kAudioUnitScope_Output,
490 0, // output bus
491 &enableIO, sizeof(enableIO));
492 if (0 != result) {
493 LOG_F(LS_ERROR) << "Failed to enable IO on output: " << result;
494 }
495
496 // Disable AU buffer allocation for the recorder; we allocate our own.
497 // TODO(henrika): understand this part better.
498 UInt32 flag = 0;
499 result = AudioUnitSetProperty(_auVoiceProcessing,
500 kAudioUnitProperty_ShouldAllocateBuffer,
501 kAudioUnitScope_Output, 1, &flag, sizeof(flag));
502 if (0 != result) {
503 LOG_F(LS_WARNING) << "Failed to disable AU buffer allocation: " << result;
504 // Should work anyway
505 }
506
507 // Set recording callback.
508 AURenderCallbackStruct auCbS;
509 memset(&auCbS, 0, sizeof(auCbS));
510 auCbS.inputProc = RecordProcess;
511 auCbS.inputProcRefCon = this;
512 result = AudioUnitSetProperty(
513 _auVoiceProcessing, kAudioOutputUnitProperty_SetInputCallback,
514 kAudioUnitScope_Global, 1, &auCbS, sizeof(auCbS));
515 if (0 != result) {
516 LOG_F(LS_ERROR) << "Failed to set AU record callback: " << result;
517 }
518
519 // Set playout callback.
520 memset(&auCbS, 0, sizeof(auCbS));
521 auCbS.inputProc = PlayoutProcess;
522 auCbS.inputProcRefCon = this;
523 result = AudioUnitSetProperty(
524 _auVoiceProcessing, kAudioUnitProperty_SetRenderCallback,
525 kAudioUnitScope_Global, 0, &auCbS, sizeof(auCbS));
526 if (0 != result) {
527 LOG_F(LS_ERROR) << "Failed to set AU output callback: " << result;
528 }
529
530 // Get stream format for out/0
531 AudioStreamBasicDescription playoutDesc;
532 UInt32 size = sizeof(playoutDesc);
533 result =
534 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
535 kAudioUnitScope_Output, 0, &playoutDesc, &size);
536 if (0 != result) {
537 LOG_F(LS_ERROR) << "Failed to get AU output stream format: " << result;
538 }
539
540 playoutDesc.mSampleRate = preferredSampleRate;
541 LOG(LS_INFO) << "Audio Unit playout opened in sampling rate: "
542 << playoutDesc.mSampleRate;
543
544 // Store the sampling frequency to use towards the Audio Device Buffer
545 // todo: Add 48 kHz (increase buffer sizes). Other fs?
546 // TODO(henrika): Figure out if we really need this complex handling.
547 if ((playoutDesc.mSampleRate > 44090.0) &&
548 (playoutDesc.mSampleRate < 44110.0)) {
549 _adbSampFreq = 44100;
550 } else if ((playoutDesc.mSampleRate > 15990.0) &&
551 (playoutDesc.mSampleRate < 16010.0)) {
552 _adbSampFreq = 16000;
553 } else if ((playoutDesc.mSampleRate > 7990.0) &&
554 (playoutDesc.mSampleRate < 8010.0)) {
555 _adbSampFreq = 8000;
556 } else {
557 _adbSampFreq = 0;
558 FATAL() << "Invalid sample rate";
559 }
560
561 // Set the audio device buffer sampling rates (use same for play and record).
562 // TODO(henrika): this is not a good place to set these things up.
563 DCHECK(audio_device_buffer_);
564 DCHECK_EQ(_adbSampFreq, playout_parameters_.sample_rate());
565 audio_device_buffer_->SetRecordingSampleRate(_adbSampFreq);
566 audio_device_buffer_->SetPlayoutSampleRate(_adbSampFreq);
567
568 // Set stream format for out/0.
569 playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
570 kLinearPCMFormatFlagIsPacked |
571 kLinearPCMFormatFlagIsNonInterleaved;
572 playoutDesc.mBytesPerPacket = 2;
573 playoutDesc.mFramesPerPacket = 1;
574 playoutDesc.mBytesPerFrame = 2;
575 playoutDesc.mChannelsPerFrame = 1;
576 playoutDesc.mBitsPerChannel = 16;
577 result =
578 AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
579 kAudioUnitScope_Input, 0, &playoutDesc, size);
580 if (0 != result) {
581 LOG_F(LS_ERROR) << "Failed to set AU stream format for out/0";
582 }
583
584 // Get stream format for in/1.
585 AudioStreamBasicDescription recordingDesc;
586 size = sizeof(recordingDesc);
587 result =
588 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
589 kAudioUnitScope_Input, 1, &recordingDesc, &size);
590 if (0 != result) {
591 LOG_F(LS_ERROR) << "Failed to get AU stream format for in/1";
592 }
593
594 recordingDesc.mSampleRate = preferredSampleRate;
595 LOG(LS_INFO) << "Audio Unit recording opened in sampling rate: "
596 << recordingDesc.mSampleRate;
597
598 // Set stream format for out/1 (use same sampling frequency as for in/1).
599 recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
600 kLinearPCMFormatFlagIsPacked |
601 kLinearPCMFormatFlagIsNonInterleaved;
602 recordingDesc.mBytesPerPacket = 2;
603 recordingDesc.mFramesPerPacket = 1;
604 recordingDesc.mBytesPerFrame = 2;
605 recordingDesc.mChannelsPerFrame = 1;
606 recordingDesc.mBitsPerChannel = 16;
607 result =
608 AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
609 kAudioUnitScope_Output, 1, &recordingDesc, size);
610 if (0 != result) {
611 LOG_F(LS_ERROR) << "Failed to set AU stream format for out/1";
612 }
613
614 // Initialize here already to be able to get/set stream properties.
615 result = AudioUnitInitialize(_auVoiceProcessing);
616 if (0 != result) {
617 LOG_F(LS_ERROR) << "AudioUnitInitialize failed: " << result;
618 }
619
620 // Get hardware sample rate for logging (see if we get what we asked for).
621 // TODO(henrika): what if we don't get what we ask for?
622 double sampleRate = session.sampleRate;
623 LOG(LS_INFO) << "Current HW sample rate is: " << sampleRate
624 << ", ADB sample rate is: " << _adbSampFreq;
625 LOG(LS_INFO) << "Current HW IO buffer size is: " <<
626 [session IOBufferDuration];
627
628 // Listen to audio interruptions. 603 // Listen to audio interruptions.
629 // TODO(henrika): learn this area better. 604 // TODO(henrika): learn this area better.
630 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; 605 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
631 id observer = [center 606 id observer = [center
632 addObserverForName:AVAudioSessionInterruptionNotification 607 addObserverForName:AVAudioSessionInterruptionNotification
633 object:nil 608 object:nil
634 queue:[NSOperationQueue mainQueue] 609 queue:[NSOperationQueue mainQueue]
635 usingBlock:^(NSNotification* notification) { 610 usingBlock:^(NSNotification* notification) {
636 NSNumber* typeNumber = 611 NSNumber* typeNumber =
637 [notification userInfo][AVAudioSessionInterruptionTypeKey]; 612 [notification userInfo][AVAudioSessionInterruptionTypeKey];
(...skipping 10 matching lines...)
648 case AVAudioSessionInterruptionTypeEnded: { 623 case AVAudioSessionInterruptionTypeEnded: {
649 NSError* error = nil; 624 NSError* error = nil;
650 AVAudioSession* session = [AVAudioSession sharedInstance]; 625 AVAudioSession* session = [AVAudioSession sharedInstance];
651 [session setActive:YES error:&error]; 626 [session setActive:YES error:&error];
652 if (error != nil) { 627 if (error != nil) {
653 LOG_F(LS_ERROR) << "Failed to activate audio session"; 628 LOG_F(LS_ERROR) << "Failed to activate audio session";
654 } 629 }
655 // Post interruption, the audio unit render callbacks don't 630 // Post interruption, the audio unit render callbacks don't
656 // automatically continue, so we restart the unit manually 631 // automatically continue, so we restart the unit manually
657 // here. 632 // here.
658 AudioOutputUnitStop(_auVoiceProcessing); 633 AudioOutputUnitStop(_vpioUnit);
659 AudioOutputUnitStart(_auVoiceProcessing); 634 AudioOutputUnitStart(_vpioUnit);
660 break; 635 break;
661 } 636 }
662 } 637 }
663 }]; 638 }];
664 // Increment refcount on observer using ARC bridge. Instance variable is a 639 // Increment refcount on observer using ARC bridge. Instance variable is a
665 // void* instead of an id because header is included in other pure C++ 640 // void* instead of an id because header is included in other pure C++
666 // files. 641 // files.
667 _audioInterruptionObserver = (__bridge_retained void*)observer; 642 _audioInterruptionObserver = (__bridge_retained void*)observer;
668 643 return true;
669 return 0;
670 } 644 }
671 645
672 int32_t AudioDeviceIOS::ShutdownPlayOrRecord() { 646 bool AudioDeviceIOS::ShutdownPlayOrRecord() {
673 LOGI() << "ShutdownPlayOrRecord"; 647 LOGI() << "ShutdownPlayOrRecord";
674
675 if (_audioInterruptionObserver != nullptr) { 648 if (_audioInterruptionObserver != nullptr) {
676 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; 649 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
677 // Transfer ownership of observer back to ARC, which will dealloc the 650 // Transfer ownership of observer back to ARC, which will dealloc the
678 // observer once it exits this scope. 651 // observer once it exits this scope.
679 id observer = (__bridge_transfer id)_audioInterruptionObserver; 652 id observer = (__bridge_transfer id)_audioInterruptionObserver;
680 [center removeObserver:observer]; 653 [center removeObserver:observer];
681 _audioInterruptionObserver = nullptr; 654 _audioInterruptionObserver = nullptr;
682 } 655 }
683 656 // Close and delete the voice-processing I/O unit.
684 // Close and delete AU.
685 OSStatus result = -1; 657 OSStatus result = -1;
686 if (nullptr != _auVoiceProcessing) { 658 if (nullptr != _vpioUnit) {
687 result = AudioOutputUnitStop(_auVoiceProcessing); 659 result = AudioOutputUnitStop(_vpioUnit);
688 if (0 != result) { 660 if (result != noErr) {
689 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; 661 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
690 } 662 }
691 result = AudioComponentInstanceDispose(_auVoiceProcessing); 663 result = AudioComponentInstanceDispose(_vpioUnit);
692 if (0 != result) { 664 if (result != noErr) {
693 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; 665 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
694 } 666 }
695 _auVoiceProcessing = nullptr; 667 _vpioUnit = nullptr;
696 } 668 }
697
698 // All I/O should be stopped or paused prior to deactivating the audio 669 // All I/O should be stopped or paused prior to deactivating the audio
699 // session, hence we deactivate as the last action. 670 // session, hence we deactivate as the last action.
700 AVAudioSession* session = [AVAudioSession sharedInstance]; 671 AVAudioSession* session = [AVAudioSession sharedInstance];
701 ActivateAudioSession(session, false); 672 ActivateAudioSession(session, false);
702 return 0; 673 return true;
703 } 674 }
704 675
705 // ============================================================================ 676 OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
706 // Thread Methods
707 // ============================================================================
708
709 OSStatus AudioDeviceIOS::RecordProcess(
710 void* inRefCon, 677 void* inRefCon,
711 AudioUnitRenderActionFlags* ioActionFlags, 678 AudioUnitRenderActionFlags* ioActionFlags,
712 const AudioTimeStamp* inTimeStamp, 679 const AudioTimeStamp* inTimeStamp,
713 UInt32 inBusNumber, 680 UInt32 inBusNumber,
714 UInt32 inNumberFrames, 681 UInt32 inNumberFrames,
715 AudioBufferList* ioData) { 682 AudioBufferList* ioData) {
716 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon); 683 DCHECK_EQ(1u, inBusNumber);
717 return ptrThis->RecordProcessImpl(ioActionFlags, inTimeStamp, inBusNumber, 684 DCHECK(!ioData); // no buffer should be allocated for input at this stage
718 inNumberFrames); 685 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(inRefCon);
686 return audio_device_ios->OnRecordedDataIsAvailable(
687 ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames);
719 } 688 }
720 689
721 OSStatus AudioDeviceIOS::RecordProcessImpl( 690 OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable(
722 AudioUnitRenderActionFlags* ioActionFlags, 691 AudioUnitRenderActionFlags* ioActionFlags,
723 const AudioTimeStamp* inTimeStamp, 692 const AudioTimeStamp* inTimeStamp,
724 uint32_t inBusNumber, 693 UInt32 inBusNumber,
725 uint32_t inNumberFrames) { 694 UInt32 inNumberFrames) {
726 // Setup some basic stuff 695 DCHECK_EQ(_recordParameters.frames_per_buffer(), inNumberFrames);
727 // Use temp buffer not to lock up recording buffer more than necessary 696 OSStatus result = noErr;
728 // todo: Make dataTmp a member variable with static size that holds 697 // Simply return if recording is not enabled.
729 // max possible frames? 698 if (!rtc::AtomicOps::AcquireLoad(&_recording))
730 int16_t* dataTmp = new int16_t[inNumberFrames]; 699 return result;
731 memset(dataTmp, 0, 2 * inNumberFrames); 700 // Obtain the recorded audio samples by initiating a rendering cycle.
732 701 // Since it happens on the input bus, the |ioData| parameter is a reference
733 AudioBufferList abList; 702 // to the preallocated audio buffer list that the audio unit renders into.
734 abList.mNumberBuffers = 1; 703 // TODO(henrika): should error handling be improved?
735 abList.mBuffers[0].mData = dataTmp; 704 AudioBufferList* ioData = &_audioRecordBufferList;
736 abList.mBuffers[0].mDataByteSize = 2 * inNumberFrames; // 2 bytes/sample 705 result = AudioUnitRender(_vpioUnit, ioActionFlags, inTimeStamp, inBusNumber,
737 abList.mBuffers[0].mNumberChannels = 1; 706 inNumberFrames, ioData);
738 707 if (result != noErr) {
739 // Get data from mic 708 LOG_F(LS_ERROR) << "AudioUnitRender failed: " << result;
740 OSStatus res = AudioUnitRender(_auVoiceProcessing, ioActionFlags, inTimeStamp, 709 return result;
741 inBusNumber, inNumberFrames, &abList);
742 if (res != 0) {
743 // TODO(henrika): improve error handling.
744 delete[] dataTmp;
745 return 0;
746 } 710 }
747 711 // Get a pointer to the recorded audio and send it to the WebRTC ADB.
748 if (_recording) { 712 // Use the FineAudioBuffer instance to convert between native buffer size
749 // Insert all data in temp buffer into recording buffers 713 // and the 10ms buffer size used by WebRTC.
750 // There is zero or one buffer partially full at any given time, 714 const UInt32 dataSizeInBytes = ioData->mBuffers[0].mDataByteSize;
751 // all others are full or empty 715 CHECK_EQ(dataSizeInBytes / kBytesPerSample, inNumberFrames);
752 // Full means filled with noSamp10ms samples. 716 SInt8* data = static_cast<SInt8*>(ioData->mBuffers[0].mData);
753 717 _fineAudioBuffer->DeliverRecordedData(data, dataSizeInBytes,
754 const unsigned int noSamp10ms = _adbSampFreq / 100; 718 kFixedPlayoutDelayEstimate,
755 unsigned int dataPos = 0; 719 kFixedRecordDelayEstimate);
756 uint16_t bufPos = 0; 720 return noErr;
757 int16_t insertPos = -1;
758 unsigned int nCopy = 0; // Number of samples to copy
759
760 while (dataPos < inNumberFrames) {
761 // Loop over all recording buffers or
762 // until we find the partially full buffer
763 // First choice is to insert into partially full buffer,
764 // second choice is to insert into empty buffer
765 bufPos = 0;
766 insertPos = -1;
767 nCopy = 0;
768 while (bufPos < N_REC_BUFFERS) {
769 if ((_recordingLength[bufPos] > 0) &&
770 (_recordingLength[bufPos] < noSamp10ms)) {
771 // Found the partially full buffer
772 insertPos = static_cast<int16_t>(bufPos);
773 // Don't need to search more, quit loop
774 bufPos = N_REC_BUFFERS;
775 } else if ((-1 == insertPos) && (0 == _recordingLength[bufPos])) {
776 // Found an empty buffer
777 insertPos = static_cast<int16_t>(bufPos);
778 }
779 ++bufPos;
780 }
781
782 // Insert data into buffer
783 if (insertPos > -1) {
784 // We found a non-full buffer, copy data to it
785 unsigned int dataToCopy = inNumberFrames - dataPos;
786 unsigned int currentRecLen = _recordingLength[insertPos];
787 unsigned int roomInBuffer = noSamp10ms - currentRecLen;
788 nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);
789
790 memcpy(&_recordingBuffer[insertPos][currentRecLen], &dataTmp[dataPos],
791 nCopy * sizeof(int16_t));
792 if (0 == currentRecLen) {
793 _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
794 ++_recordingCurrentSeq;
795 }
796 _recordingBufferTotalSize += nCopy;
797 // Has to be done last to avoid interrupt problems between threads.
798 _recordingLength[insertPos] += nCopy;
799 dataPos += nCopy;
800 } else {
801 // Didn't find a non-full buffer
802 // TODO(henrika): improve error handling
803 dataPos = inNumberFrames; // Don't try to insert more
804 }
805 }
806 }
807 delete[] dataTmp;
808 return 0;
809 } 721 }
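The new recording path is pull-based: the OS invokes the input callback when microphone data is ready, and the implementation then calls AudioUnitRender() on the input bus to render into a buffer list it already owns, avoiding the per-callback heap allocation of the deleted RecordProcessImpl(). A minimal sketch of that pattern; the names (InputContext, OnInputDataAvailable) are illustrative, not this patch's actual members:

    #include <AudioUnit/AudioUnit.h>

    struct InputContext {
      AudioUnit vpio_unit;           // VoiceProcessingIO unit.
      AudioBufferList* buffer_list;  // Preallocated 16-bit mono buffer.
    };

    // Registered with kAudioOutputUnitProperty_SetInputCallback. For input
    // callbacks the |io_data| argument is null, so we render into our own
    // preallocated buffer list instead.
    static OSStatus OnInputDataAvailable(void* ref_con,
                                         AudioUnitRenderActionFlags* flags,
                                         const AudioTimeStamp* time_stamp,
                                         UInt32 bus_number,
                                         UInt32 num_frames,
                                         AudioBufferList* /* io_data */) {
      InputContext* ctx = static_cast<InputContext*>(ref_con);
      OSStatus status = AudioUnitRender(ctx->vpio_unit, flags, time_stamp,
                                        bus_number, num_frames,
                                        ctx->buffer_list);
      // On success, ctx->buffer_list->mBuffers[0].mData now holds
      // |num_frames| recorded samples.
      return status;
    }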
 
-OSStatus AudioDeviceIOS::PlayoutProcess(
-    void* inRefCon,
-    AudioUnitRenderActionFlags* ioActionFlags,
-    const AudioTimeStamp* inTimeStamp,
-    UInt32 inBusNumber,
-    UInt32 inNumberFrames,
-    AudioBufferList* ioData) {
-  AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
-  return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
-}
+OSStatus AudioDeviceIOS::GetPlayoutData(
+    void* inRefCon,
+    AudioUnitRenderActionFlags* ioActionFlags,
+    const AudioTimeStamp* inTimeStamp,
+    UInt32 inBusNumber,
+    UInt32 inNumberFrames,
+    AudioBufferList* ioData) {
+  DCHECK_EQ(0u, inBusNumber);
+  DCHECK(ioData);
+  AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(inRefCon);
+  return audio_device_ios->OnGetPlayoutData(ioActionFlags, inNumberFrames,
+                                            ioData);
+}
 
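Core Audio render callbacks are C-style function pointers, so the static GetPlayoutData() trampoline recovers the C++ instance from |inRefCon| and forwards to a member function. The registration is not part of this hunk; the following is a generic sketch of how such a callback is typically wired up, with member names assumed rather than taken from the patch's literal setup code:

    AURenderCallbackStruct render_callback;
    render_callback.inputProc = AudioDeviceIOS::GetPlayoutData;
    render_callback.inputProcRefCon = this;  // Recovered via static_cast above.
    // The render callback for the output element (bus 0) is set on its
    // input scope.
    OSStatus result = AudioUnitSetProperty(
        _vpioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
        0, &render_callback, sizeof(render_callback));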
-OSStatus AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
-                                            AudioBufferList* ioData) {
-  int16_t* data = static_cast<int16_t*>(ioData->mBuffers[0].mData);
-  unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
-  unsigned int dataSize = dataSizeBytes / 2;  // Number of samples.
-  CHECK_EQ(dataSize, inNumberFrames);
-  memset(data, 0, dataSizeBytes);  // Start with an empty buffer.
-
-  // Get playout data from the Audio Device Buffer.
-  if (_playing) {
-    unsigned int noSamp10ms = _adbSampFreq / 100;
-    // todo: Member variable and allocate when samp freq is determined.
-    int16_t* dataTmp = new int16_t[noSamp10ms];
-    memset(dataTmp, 0, 2 * noSamp10ms);
-    unsigned int dataPos = 0;
-    int noSamplesOut = 0;
-    unsigned int nCopy = 0;
-
-    // First insert data from the playout buffer, if any.
-    if (_playoutBufferUsed > 0) {
-      nCopy = (dataSize < _playoutBufferUsed) ? dataSize : _playoutBufferUsed;
-      DCHECK_EQ(nCopy, _playoutBufferUsed);
-      memcpy(data, _playoutBuffer, 2 * nCopy);
-      dataPos = nCopy;
-      memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
-      _playoutBufferUsed = 0;
-    }
-
-    // Now get the rest from the Audio Device Buffer.
-    while (dataPos < dataSize) {
-      // Update playout delay.
-      UpdatePlayoutDelay();
-
-      // Ask for new PCM data to be played out using the AudioDeviceBuffer.
-      noSamplesOut = audio_device_buffer_->RequestPlayoutData(noSamp10ms);
-
-      // Get data from the Audio Device Buffer.
-      noSamplesOut = audio_device_buffer_->GetPlayoutData(
-          reinterpret_cast<int8_t*>(dataTmp));
-      CHECK_EQ(noSamp10ms, (unsigned int)noSamplesOut);
-
-      // Insert as much as fits in the data buffer.
-      nCopy =
-          (dataSize - dataPos) > noSamp10ms ? noSamp10ms : (dataSize - dataPos);
-      memcpy(&data[dataPos], dataTmp, 2 * nCopy);
-
-      // Save the rest in the playout buffer, if any.
-      if (nCopy < noSamp10ms) {
-        memcpy(_playoutBuffer, &dataTmp[nCopy], 2 * (noSamp10ms - nCopy));
-        _playoutBufferUsed = noSamp10ms - nCopy;
-      }
-
-      // Update the loop/index counter; if we copied less than noSamp10ms
-      // samples we shall quit the loop anyway.
-      dataPos += noSamp10ms;
-    }
-    delete[] dataTmp;
-  }
-  return 0;
-}
+OSStatus AudioDeviceIOS::OnGetPlayoutData(
+    AudioUnitRenderActionFlags* ioActionFlags,
+    UInt32 inNumberFrames,
+    AudioBufferList* ioData) {
+  // Verify 16-bit, noninterleaved mono PCM signal format.
+  DCHECK_EQ(1u, ioData->mNumberBuffers);
+  DCHECK_EQ(1u, ioData->mBuffers[0].mNumberChannels);
+  // Get pointer to internal audio buffer to which new audio data shall be
+  // written.
+  const UInt32 dataSizeInBytes = ioData->mBuffers[0].mDataByteSize;
+  CHECK_EQ(dataSizeInBytes / kBytesPerSample, inNumberFrames);
+  SInt8* destination = static_cast<SInt8*>(ioData->mBuffers[0].mData);
+  // Produce silence and give the audio unit a hint about it if playout is
+  // not activated.
+  if (!rtc::AtomicOps::AcquireLoad(&_playing)) {
+    *ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
+    memset(destination, 0, dataSizeInBytes);
+    return noErr;
+  }
+  // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
+  // the native I/O audio unit) to a preallocated intermediate buffer and
+  // copy the result to the audio buffer in the |ioData| destination.
+  SInt8* source = _playoutAudioBuffer.get();
+  _fineAudioBuffer->GetPlayoutData(source);
+  memcpy(destination, source, dataSizeInBytes);
+  return noErr;
+}
 
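Both new callbacks lean on FineAudioBuffer to bridge the native I/O buffer size (for example, 512 frames per callback at 48 kHz, roughly 10.67 ms) and the fixed 10 ms chunks WebRTC consumes. A simplified, self-contained illustration of the record-side idea only; the real class also serves playout and tracks delay:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Accumulates native-sized chunks and emits fixed 10 ms chunks.
    class TenMsChunker {
     public:
      explicit TenMsChunker(size_t samples_per_10ms)
          : chunk_size_(samples_per_10ms) {}

      // Appends |num_samples| samples, then delivers every complete 10 ms
      // chunk to |deliver| (e.g. a lambda wrapping the audio device buffer).
      template <typename Callback>
      void Append(const int16_t* samples, size_t num_samples,
                  Callback deliver) {
        fifo_.insert(fifo_.end(), samples, samples + num_samples);
        size_t pos = 0;
        while (fifo_.size() - pos >= chunk_size_) {
          deliver(fifo_.data() + pos, chunk_size_);
          pos += chunk_size_;
        }
        fifo_.erase(fifo_.begin(), fifo_.begin() + pos);
      }

     private:
      const size_t chunk_size_;
      std::vector<int16_t> fifo_;
    };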
-// TODO(henrika): can either be removed or simplified.
-void AudioDeviceIOS::UpdatePlayoutDelay() {
-  ++_playoutDelayMeasurementCounter;
-
-  if (_playoutDelayMeasurementCounter >= 100) {
-    // Update HW and OS delay every second; it is unlikely to change.
-
-    // Since this is eventually rounded to integral ms, add 0.5ms
-    // here to get round-to-nearest-int behavior instead of
-    // truncation.
-    double totalDelaySeconds = 0.0005;
-
-    // HW output latency.
-    AVAudioSession* session = [AVAudioSession sharedInstance];
-    double latency = session.outputLatency;
-    assert(latency >= 0);
-    totalDelaySeconds += latency;
-
-    // HW buffer duration.
-    double ioBufferDuration = session.IOBufferDuration;
-    assert(ioBufferDuration >= 0);
-    totalDelaySeconds += ioBufferDuration;
-
-    // AU latency.
-    Float64 f64(0);
-    UInt32 size = sizeof(f64);
-    OSStatus result =
-        AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency,
-                             kAudioUnitScope_Global, 0, &f64, &size);
-    if (0 != result) {
-      LOG_F(LS_ERROR) << "AU latency error: " << result;
-    }
-    assert(f64 >= 0);
-    totalDelaySeconds += f64;
-
-    // To ms.
-    _playoutDelay = static_cast<uint32_t>(totalDelaySeconds * 1000);
-
-    // Reset counter.
-    _playoutDelayMeasurementCounter = 0;
-  }
-
-  // todo: Add playout buffer?
-}
-
-void AudioDeviceIOS::UpdateRecordingDelay() {
-  ++_recordingDelayMeasurementCounter;
-
-  if (_recordingDelayMeasurementCounter >= 100) {
-    // Update HW and OS delay every second; it is unlikely to change.
-
-    // Since this is eventually rounded to integral ms, add 0.5ms
-    // here to get round-to-nearest-int behavior instead of
-    // truncation.
-    double totalDelaySeconds = 0.0005;
-
-    // HW input latency.
-    AVAudioSession* session = [AVAudioSession sharedInstance];
-    double latency = session.inputLatency;
-    assert(latency >= 0);
-    totalDelaySeconds += latency;
-
-    // HW buffer duration.
-    double ioBufferDuration = session.IOBufferDuration;
-    assert(ioBufferDuration >= 0);
-    totalDelaySeconds += ioBufferDuration;
-
-    // AU latency.
-    Float64 f64(0);
-    UInt32 size = sizeof(f64);
-    OSStatus result =
-        AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency,
-                             kAudioUnitScope_Global, 0, &f64, &size);
-    if (0 != result) {
-      LOG_F(LS_ERROR) << "AU latency error: " << result;
-    }
-    assert(f64 >= 0);
-    totalDelaySeconds += f64;
-
-    // To ms.
-    _recordingDelayHWAndOS = static_cast<uint32_t>(totalDelaySeconds / 1000);
-
-    // Reset counter.
-    _recordingDelayMeasurementCounter = 0;
-  }
-
-  _recordingDelay = _recordingDelayHWAndOS;
-
-  // ADB recording buffer size; updated every time.
-  // Don't count the next 10 ms to be sent, then convert samples => ms.
-  const uint32_t noSamp10ms = _adbSampFreq / 100;
-  if (_recordingBufferTotalSize > noSamp10ms) {
-    _recordingDelay +=
-        (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
-  }
-}
 
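The deleted helpers above computed the delay as the sum of three contributions reported by iOS: the hardware input/output latency, the I/O buffer duration, and the audio unit's own latency, rounded to whole milliseconds. The refactored code replaces this periodic measurement with the fixed estimates kFixedPlayoutDelayEstimate and kFixedRecordDelayEstimate. The same computation as a standalone sketch, assuming |unit| is the active VoiceProcessingIO unit:

    #import <AVFoundation/AVFoundation.h>
    #include <AudioUnit/AudioUnit.h>

    static uint32_t TotalPlayoutDelayMilliseconds(AudioUnit unit) {
      AVAudioSession* session = [AVAudioSession sharedInstance];
      double seconds = 0.0005;              // +0.5 ms so the final truncation
                                            // rounds to nearest.
      seconds += session.outputLatency;     // HW output latency.
      seconds += session.IOBufferDuration;  // HW buffer duration.
      Float64 au_latency = 0;
      UInt32 size = sizeof(au_latency);
      if (AudioUnitGetProperty(unit, kAudioUnitProperty_Latency,
                               kAudioUnitScope_Global, 0, &au_latency,
                               &size) == noErr) {
        seconds += au_latency;              // Audio unit latency.
      }
      return static_cast<uint32_t>(seconds * 1000);
    }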
-bool AudioDeviceIOS::RunCapture(void* ptrThis) {
-  return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
-}
-
-bool AudioDeviceIOS::CaptureWorkerThread() {
-  if (_recording) {
-    int bufPos = 0;
-    unsigned int lowestSeq = 0;
-    int lowestSeqBufPos = 0;
-    bool foundBuf = true;
-    const unsigned int noSamp10ms = _adbSampFreq / 100;
-
-    while (foundBuf) {
-      // Check if we have any buffer with data to insert into the
-      // Audio Device Buffer, and find the one with the lowest seq number.
-      foundBuf = false;
-      for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
-        if (noSamp10ms == _recordingLength[bufPos]) {
-          if (!foundBuf) {
-            lowestSeq = _recordingSeqNumber[bufPos];
-            lowestSeqBufPos = bufPos;
-            foundBuf = true;
-          } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
-            lowestSeq = _recordingSeqNumber[bufPos];
-            lowestSeqBufPos = bufPos;
-          }
-        }
-      }
-
-      // Insert data into the Audio Device Buffer if any was found.
-      if (foundBuf) {
-        // Update recording delay.
-        UpdateRecordingDelay();
-
-        // Set the recorded buffer.
-        audio_device_buffer_->SetRecordedBuffer(
-            reinterpret_cast<int8_t*>(_recordingBuffer[lowestSeqBufPos]),
-            _recordingLength[lowestSeqBufPos]);
-
-        // No need to set the current mic level in the ADB since we only
-        // support digital AGC, and besides, we cannot get or set the iOS
-        // mic level anyway.
-
-        // Set VQE info; use clockdrift == 0.
-        audio_device_buffer_->SetVQEData(_playoutDelay, _recordingDelay, 0);
-
-        // Deliver recorded samples at the specified sample rate, mic level,
-        // etc. to the observer using a callback.
-        audio_device_buffer_->DeliverRecordedData();
-
-        // Make the buffer available again.
-        _recordingSeqNumber[lowestSeqBufPos] = 0;
-        _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
-        // Must be done last to avoid interrupt problems between threads.
-        _recordingLength[lowestSeqBufPos] = 0;
-      }
-    }
-  }
-
-  {
-    // Normal case.
-    // Sleep the thread (5ms) to let other threads get to work.
-    // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio
-    //       Device Buffer?
-    timespec t;
-    t.tv_sec = 0;
-    t.tv_nsec = 5 * 1000 * 1000;
-    nanosleep(&t, nullptr);
-  }
-  return true;
-}
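The deleted worker above polled every 5 ms and drained completed 10 ms buffers in capture order by always picking the full slot with the lowest sequence number; the refactored code removes the thread entirely and delivers from the I/O callback instead. That ordering rule in isolation (types and names are illustrative):

    // Returns the index of the full 10 ms slot with the lowest sequence
    // number, or -1 if no slot is ready for delivery.
    static int OldestFullSlot(const unsigned int* lengths,
                              const unsigned int* seq_numbers,
                              int num_slots,
                              unsigned int full_length) {
      int oldest = -1;
      for (int i = 0; i < num_slots; ++i) {
        if (lengths[i] == full_length &&
            (oldest == -1 || seq_numbers[i] < seq_numbers[oldest])) {
          oldest = i;
        }
      }
      return oldest;
    }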
 
 }  // namespace webrtc