Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_device/ios/audio_device_ios.mm

Issue 1206783002: Cleanup of iOS AudioDevice implementation (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Rebased. Created 5 years, 5 months ago.
1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #if !defined(__has_feature) || !__has_feature(objc_arc)
12 #error "This file requires ARC support."
13 #endif
14
15 #import <AVFoundation/AVFoundation.h>
16 #import <Foundation/Foundation.h>
17
18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h"
19 #include "webrtc/modules/utility/interface/helpers_ios.h"
20
21 #include "webrtc/base/checks.h"
22 #include "webrtc/base/logging.h"
23 #include "webrtc/system_wrappers/interface/trace.h"
24
Old:
18 namespace webrtc {
19 AudioDeviceIOS::AudioDeviceIOS(const int32_t id)
20 :
21 _ptrAudioBuffer(NULL),
22 _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
23 _id(id),
24 _auVoiceProcessing(NULL),
25 _audioInterruptionObserver(NULL),
26 _initialized(false),
27 _isShutDown(false),
28 _recording(false),
29 _playing(false),
30 _recIsInitialized(false),
31 _playIsInitialized(false),
32 _recordingDeviceIsSpecified(false),
33 _playoutDeviceIsSpecified(false),
34 _micIsInitialized(false),
35 _speakerIsInitialized(false),
36 _AGC(false),
37 _adbSampFreq(0),
38 _recordingDelay(0),
39 _playoutDelay(0),
40 _playoutDelayMeasurementCounter(9999),
41 _recordingDelayHWAndOS(0),
42 _recordingDelayMeasurementCounter(9999),
43 _playWarning(0),
44 _playError(0),
45 _recWarning(0),
46 _recError(0),
47 _playoutBufferUsed(0),
48 _recordingCurrentSeq(0),
49 _recordingBufferTotalSize(0) {
50 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
51 "%s created", __FUNCTION__);
52
53 memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
54 memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
55 memset(_recordingLength, 0, sizeof(_recordingLength));
56 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
New:
25 namespace webrtc {
26
27 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
28
29 using ios::CheckAndLogError;
30
31 #if !defined(NDEBUG)
32 static void LogDeviceInfo() {
33 LOG(LS_INFO) << "LogDeviceInfo";
34 @autoreleasepool {
35 LOG(LS_INFO) << " system name: " << ios::GetSystemName();
36 LOG(LS_INFO) << " system version: " << ios::GetSystemVersion();
37 LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
38 LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
39 }
40 }
41 #endif
42
43 static void ActivateAudioSession(AVAudioSession* session, bool activate) {
44 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
45 @autoreleasepool {
46 NSError* error = nil;
47 BOOL success = NO;
48 if (!activate) {
49 // Deactivate the audio session.
50 success = [session setActive:NO error:&error];
51 DCHECK(CheckAndLogError(success, error));
52 return;
53 }
54 // Activate the audio session and set category and mode. Only make changes
55 // if needed since setting them to the value they already have will clear
56 // transient properties (such as PortOverride) that some other component
57 // has set up.
58 if (session.category != AVAudioSessionCategoryPlayAndRecord) {
59 error = nil;
60 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
61 error:&error];
62 DCHECK(CheckAndLogError(success, error));
63 }
64 if (session.mode != AVAudioSessionModeVoiceChat) {
65 error = nil;
66 success = [session setMode:AVAudioSessionModeVoiceChat error:&error];
67 DCHECK(CheckAndLogError(success, error));
68 }
69 error = nil;
70 success = [session setActive:YES error:&error];
71 DCHECK(CheckAndLogError(success, error));
72 // Ensure that category and mode are actually activated.
73 DCHECK(
74 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]);
75 DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]);
76 }
77 }
78
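A minimal usage sketch of ActivateAudioSession() (reviewer illustration, not part of this CL); GetHardwareAudioParameters() below drives the helper in exactly this activate/query/deactivate pattern:

    AVAudioSession* session = [AVAudioSession sharedInstance];
    ActivateAudioSession(session, true);   // sets category/mode, goes active
    // ... query hardware values or stream audio here ...
    ActivateAudioSession(session, false);  // deactivates the session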
79 // Query hardware characteristics, such as input and output latency, input and
80 // output channel count, hardware sample rate, hardware volume setting, and
81 // whether audio input is available. To obtain meaningful values for hardware
82 // characteristics, the audio session must be initialized and active before we
83 // query the values.
84 // TODO(henrika): Note that these characteristics can change at runtime. For
85 // instance, input sample rate may change when a user plugs in a headset.
86 static void GetHardwareAudioParameters(AudioParameters* playout_parameters,
87 AudioParameters* record_parameters) {
88 LOG(LS_INFO) << "GetHardwareAudioParameters";
89 @autoreleasepool {
90 // Implicit initialization happens when we obtain a reference to the
91 // AVAudioSession object.
92 AVAudioSession* session = [AVAudioSession sharedInstance];
93 // Always get values when the audio session is active.
94 ActivateAudioSession(session, true);
95 CHECK(session.isInputAvailable) << "No input path is available!";
96 // Get current hardware parameters.
97 double sample_rate = (double)session.sampleRate;
98 double io_buffer_duration = (double)session.IOBufferDuration;
99 int output_channels = (int)session.outputNumberOfChannels;
100 int input_channels = (int)session.inputNumberOfChannels;
101 int frames_per_buffer =
102 static_cast<int>(sample_rate * io_buffer_duration + 0.5);
103 // Copy hardware parameters to output parameters.
104 playout_parameters->reset(sample_rate, output_channels, frames_per_buffer);
105 record_parameters->reset(sample_rate, input_channels, frames_per_buffer);
106 // Add logging for debugging purposes.
107 LOG(LS_INFO) << " sample rate: " << sample_rate;
108 LOG(LS_INFO) << " IO buffer duration: " << io_buffer_duration;
109 LOG(LS_INFO) << " frames_per_buffer: " << frames_per_buffer;
110 LOG(LS_INFO) << " output channels: " << output_channels;
111 LOG(LS_INFO) << " input channels: " << input_channels;
112 LOG(LS_INFO) << " output latency: " << (double)session.outputLatency;
113 LOG(LS_INFO) << " input latency: " << (double)session.inputLatency;
114 // Don't keep the audio session active. Instead, deactivate when needed.
115 ActivateAudioSession(session, false);
116 // TODO(henrika): to be extra safe, we can do more here. E.g., set
117 // preferred values for sample rate, channels etc., re-activate an audio
118 // session and verify the actual values again. Then we know for sure that
119 // the current values will in fact be correct. Or, we can skip all this
120 // and check setting when audio is started. Probably better.
121 }
122 }
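For the runtime-change caveat in the TODO above, one possible observer looks roughly like this (reviewer sketch; AVAudioSessionRouteChangeNotification and AVAudioSessionRouteChangeReasonKey are system constants, the logging is an assumption):

    [[NSNotificationCenter defaultCenter]
        addObserverForName:AVAudioSessionRouteChangeNotification
                    object:nil
                     queue:[NSOperationQueue mainQueue]
                usingBlock:^(NSNotification* notification) {
                  // E.g. AVAudioSessionRouteChangeReasonNewDeviceAvailable is
                  // delivered when a headset is plugged in; hardware
                  // parameters should then be re-queried while active.
                  NSNumber* reason =
                      [notification userInfo][AVAudioSessionRouteChangeReasonKey];
                  LOG(LS_INFO) << "route change, reason: "
                               << [reason unsignedIntegerValue];
                }];
    // (The returned token must be retained to remove the observer later.)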
123
124 AudioDeviceIOS::AudioDeviceIOS()
125 : audio_device_buffer_(nullptr),
126 _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
127 _auVoiceProcessing(nullptr),
128 _audioInterruptionObserver(nullptr),
129 _initialized(false),
130 _isShutDown(false),
131 _recording(false),
132 _playing(false),
133 _recIsInitialized(false),
134 _playIsInitialized(false),
135 _adbSampFreq(0),
136 _recordingDelay(0),
137 _playoutDelay(0),
138 _playoutDelayMeasurementCounter(9999),
139 _recordingDelayHWAndOS(0),
140 _recordingDelayMeasurementCounter(9999),
141 _playoutBufferUsed(0),
142 _recordingCurrentSeq(0),
143 _recordingBufferTotalSize(0) {
144 LOGI() << "ctor" << ios::GetCurrentThreadDescription();
145 memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
146 memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
147 memset(_recordingLength, 0, sizeof(_recordingLength));
148 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
Old:
57 }
58
59 AudioDeviceIOS::~AudioDeviceIOS() {
60 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
61 "%s destroyed", __FUNCTION__);
62
63 Terminate();
64
65 delete &_critSect;
66 }
67
68
69 // ============================================================================
70 // API
71 // ============================================================================
72
73 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
74 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
75 "%s", __FUNCTION__);
76
77 CriticalSectionScoped lock(&_critSect);
78
79 _ptrAudioBuffer = audioBuffer;
80
81 // inform the AudioBuffer about default settings for this implementation
82 _ptrAudioBuffer->SetRecordingSampleRate(ENGINE_REC_BUF_SIZE_IN_SAMPLES);
83 _ptrAudioBuffer->SetPlayoutSampleRate(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
84 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
85 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
86 }
87
88 int32_t AudioDeviceIOS::ActiveAudioLayer(
89 AudioDeviceModule::AudioLayer& audioLayer) const {
90 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
91 "%s", __FUNCTION__);
92 audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
93 return 0;
94 }
95
96 int32_t AudioDeviceIOS::Init() {
97 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
98 "%s", __FUNCTION__);
99
100 CriticalSectionScoped lock(&_critSect);
101
102 if (_initialized) {
103 return 0;
104 }
105
106 _isShutDown = false;
107
108 // Create and start capture thread
109 if (!_captureWorkerThread) {
110 _captureWorkerThread = ThreadWrapper::CreateThread(
111 RunCapture, this, "CaptureWorkerThread");
112 bool res = _captureWorkerThread->Start();
113 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
114 _id, "CaptureWorkerThread started (res=%d)", res);
115 _captureWorkerThread->SetPriority(kRealtimePriority);
New:
149 }
150
151 AudioDeviceIOS::~AudioDeviceIOS() {
152 LOGI() << "~dtor";
153 DCHECK(thread_checker_.CalledOnValidThread());
154 Terminate();
155 delete &_critSect;
156 }
157
158 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
159 LOGI() << "AttachAudioBuffer";
160 DCHECK(audioBuffer);
161 DCHECK(thread_checker_.CalledOnValidThread());
162 audio_device_buffer_ = audioBuffer;
163 }
164
165 int32_t AudioDeviceIOS::Init() {
166 LOGI() << "Init";
167 DCHECK(thread_checker_.CalledOnValidThread());
168 if (_initialized) {
169 return 0;
170 }
171 #if !defined(NDEBUG)
172 LogDeviceInfo();
173 #endif
174 // Query hardware audio parameters and cache the results. These parameters
175 // will be used as preferred values later when streaming starts.
176 // Note that I override these "optimal" values below since I don't want to
177 // modify the existing behavior yet.
178 GetHardwareAudioParameters(&playout_parameters_, &record_parameters_);
179 // TODO(henrika): these parameters are currently hard coded to match the
180 // existing implementation where we always use 16kHz as preferred sample
181 // rate and mono only. Goal is to improve this scheme and make it more
182 // flexible. In addition, a better native buffer size shall be derived.
183 // Using 10ms as default here (only used by unit test so far).
184 // We should also implement observers for notification of any change in
185 // these parameters.
186 playout_parameters_.reset(16000, 1, 160);
187 record_parameters_.reset(16000, 1, 160);
188
189 // AttachAudioBuffer() is called at construction by the main class but check
190 // just in case.
191 DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
192 // Inform the audio device buffer (ADB) about the new audio format.
193 // TODO(henrika): try to improve this section.
194 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
195 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
196 audio_device_buffer_->SetRecordingSampleRate(
197 record_parameters_.sample_rate());
198 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
199
200 DCHECK(!_captureWorkerThread);
201 // Create and start the capture thread.
202 // TODO(henrika): do we need this thread?
203 _isShutDown = false;
204 _captureWorkerThread =
205 ThreadWrapper::CreateThread(RunCapture, this, "CaptureWorkerThread");
206 if (!_captureWorkerThread->Start()) {
207 LOG_F(LS_ERROR) << "Failed to start CaptureWorkerThread!";
208 return -1;
209 }
210 _captureWorkerThread->SetPriority(kRealtimePriority);
211 _initialized = true;
212 return 0;
213 }
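The "set preferred values and verify" idea from the TODO in GetHardwareAudioParameters() could look roughly like this (reviewer sketch, assuming 16 kHz stays the preferred rate):

    AVAudioSession* session = [AVAudioSession sharedInstance];
    NSError* error = nil;
    [session setPreferredSampleRate:16000.0 error:&error];
    ActivateAudioSession(session, true);
    // With the session active, sampleRate reflects what was actually granted.
    if (session.sampleRate != 16000.0) {
      LOG(LS_WARNING) << "Preferred sample rate not granted: "
                      << session.sampleRate;
    }
    ActivateAudioSession(session, false);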
214
215 int32_t AudioDeviceIOS::Terminate() {
216 LOGI() << "Terminate";
217 DCHECK(thread_checker_.CalledOnValidThread());
218 if (!_initialized) {
219 return 0;
220 }
221 // Stop the capture thread.
222 if (_captureWorkerThread) {
223 if (!_captureWorkerThread->Stop()) {
224 LOG_F(LS_ERROR) << "Failed to stop CaptureWorkerThread!";
225 return -1;
226 }
227 _captureWorkerThread.reset();
228 }
229 ShutdownPlayOrRecord();
230 _isShutDown = true;
231 _initialized = false;
232 return 0;
233 }
234
235 int32_t AudioDeviceIOS::InitPlayout() {
236 LOGI() << "InitPlayout";
237 DCHECK(thread_checker_.CalledOnValidThread());
238 DCHECK(_initialized);
239 DCHECK(!_playIsInitialized);
240 DCHECK(!_playing);
241 if (!_recIsInitialized) {
242 if (InitPlayOrRecord() == -1) {
243 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
244 return -1;
245 }
246 }
247 _playIsInitialized = true;
248 return 0;
249 }
250
251 int32_t AudioDeviceIOS::InitRecording() {
252 LOGI() << "InitPlayout";
253 DCHECK(thread_checker_.CalledOnValidThread());
254 DCHECK(_initialized);
255 DCHECK(!_recIsInitialized);
256 DCHECK(!_recording);
257 if (!_playIsInitialized) {
258 if (InitPlayOrRecord() == -1) {
259 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
260 return -1;
261 }
262 }
263 _recIsInitialized = true;
264 return 0;
265 }
266
267 int32_t AudioDeviceIOS::StartPlayout() {
268 LOGI() << "StartPlayout";
269 DCHECK(thread_checker_.CalledOnValidThread());
270 DCHECK(_playIsInitialized);
271 DCHECK(!_playing);
272
273 CriticalSectionScoped lock(&_critSect);
274
275 memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
276 _playoutBufferUsed = 0;
277 _playoutDelay = 0;
278 // Make sure the first call to the delay update function will update the delay.
279 _playoutDelayMeasurementCounter = 9999;
280
281 if (!_recording) {
282 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
283 if (result != noErr) {
284 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
285 return -1;
286 }
287 }
288 _playing = true;
289 return 0;
290 }
291
292 int32_t AudioDeviceIOS::StopPlayout() {
293 LOGI() << "StopPlayout";
294 DCHECK(thread_checker_.CalledOnValidThread());
295 if (!_playIsInitialized || !_playing) {
296 return 0;
297 }
298
299 CriticalSectionScoped lock(&_critSect);
300
301 if (!_recording) {
302 // Both playout and recording have stopped; shut down the device.
303 ShutdownPlayOrRecord();
304 }
305 _playIsInitialized = false;
306 _playing = false;
307 return 0;
308 }
309
310 int32_t AudioDeviceIOS::StartRecording() {
311 LOGI() << "StartRecording";
312 DCHECK(thread_checker_.CalledOnValidThread());
313 DCHECK(_recIsInitialized);
314 DCHECK(!_recording);
315
316 CriticalSectionScoped lock(&_critSect);
317
318 memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
319 memset(_recordingLength, 0, sizeof(_recordingLength));
320 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
321
322 _recordingCurrentSeq = 0;
323 _recordingBufferTotalSize = 0;
324 _recordingDelay = 0;
325 _recordingDelayHWAndOS = 0;
326 // Make sure the first call to the delay update function will update the delay.
327 _recordingDelayMeasurementCounter = 9999;
328
329 if (!_playing) {
330 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
331 if (result != noErr) {
332 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
333 return -1;
334 }
335 }
336 _recording = true;
337 return 0;
338 }
339
340 int32_t AudioDeviceIOS::StopRecording() {
341 LOGI() << "StopRecording";
342 DCHECK(thread_checker_.CalledOnValidThread());
343 if (!_recIsInitialized || !_recording) {
344 return 0;
345 }
346
347 CriticalSectionScoped lock(&_critSect);
348
349 if (!_playing) {
350 // Both playout and recording have stopped; shut down the device.
351 ShutdownPlayOrRecord();
352 }
353 _recIsInitialized = false;
354 _recording = false;
355 return 0;
356 }
357
358 // Change the default receiver playout route to speaker.
359 int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
360 LOGI() << "SetLoudspeakerStatus(" << enable << ")";
361
362 AVAudioSession* session = [AVAudioSession sharedInstance];
363 NSString* category = session.category;
364 AVAudioSessionCategoryOptions options = session.categoryOptions;
365 // Respect old category options if category is
366 // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
367 // might not be valid for this category.
368 if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
369 if (enable) {
370 options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
New:
371 } else {
372 options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
373 }
374 } else {
375 options = AVAudioSessionCategoryOptionDefaultToSpeaker;
376 }
377 NSError* error = nil;
378 BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
379 withOptions:options
380 error:&error];
381 ios::CheckAndLogError(success, error);
382 return (error == nil) ? 0 : -1;
383 }
384
385 int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const {
386 LOGI() << "GetLoudspeakerStatus";
387 AVAudioSession* session = [AVAudioSession sharedInstance];
388 AVAudioSessionCategoryOptions options = session.categoryOptions;
389 enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
390 return 0;
Old:
116 } else {
117 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
118 _id, "Thread already created");
119 }
120 _playWarning = 0;
121 _playError = 0;
122 _recWarning = 0;
123 _recError = 0;
124
125 _initialized = true;
126
127 return 0;
128 }
129
130 int32_t AudioDeviceIOS::Terminate() {
131 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
132 "%s", __FUNCTION__);
133
134 if (!_initialized) {
135 return 0;
136 }
137
138
139 // Stop capture thread
140 if (_captureWorkerThread) {
141 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
142 _id, "Stopping CaptureWorkerThread");
143 bool res = _captureWorkerThread->Stop();
144 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
145 _id, "CaptureWorkerThread stopped (res=%d)", res);
146 _captureWorkerThread.reset();
147 }
148
149 // Shut down Audio Unit
150 ShutdownPlayOrRecord();
151
152 _isShutDown = true;
153 _initialized = false;
154 _speakerIsInitialized = false;
155 _micIsInitialized = false;
156 _playoutDeviceIsSpecified = false;
157 _recordingDeviceIsSpecified = false;
158 return 0;
159 }
160
161 bool AudioDeviceIOS::Initialized() const {
162 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
163 "%s", __FUNCTION__);
164 return (_initialized);
165 }
166
167 int32_t AudioDeviceIOS::InitSpeaker() {
168 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
169 "%s", __FUNCTION__);
170
171 CriticalSectionScoped lock(&_critSect);
172
173 if (!_initialized) {
174 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
175 _id, " Not initialized");
176 return -1;
177 }
178
179 if (_playing) {
180 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
181 _id, " Cannot init speaker when playing");
182 return -1;
183 }
184
185 if (!_playoutDeviceIsSpecified) {
186 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
187 _id, " Playout device is not specified");
188 return -1;
189 }
190
191 // Do nothing
192 _speakerIsInitialized = true;
193
194 return 0;
195 }
196
197 int32_t AudioDeviceIOS::InitMicrophone() {
198 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
199 "%s", __FUNCTION__);
200
201 CriticalSectionScoped lock(&_critSect);
202
203 if (!_initialized) {
204 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
205 _id, " Not initialized");
206 return -1;
207 }
208
209 if (_recording) {
210 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
211 _id, " Cannot init mic when recording");
212 return -1;
213 }
214
215 if (!_recordingDeviceIsSpecified) {
216 WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
217 _id, " Recording device is not specified");
218 return -1;
219 }
220
221 // Do nothing
222
223 _micIsInitialized = true;
224
225 return 0;
226 }
227
228 bool AudioDeviceIOS::SpeakerIsInitialized() const {
229 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
230 "%s", __FUNCTION__);
231 return _speakerIsInitialized;
232 }
233
234 bool AudioDeviceIOS::MicrophoneIsInitialized() const {
235 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
236 "%s", __FUNCTION__);
237 return _micIsInitialized;
238 }
239
240 int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
241 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
242 "%s", __FUNCTION__);
243
244 available = false; // Speaker volume not supported on iOS
245
246 return 0;
247 }
248
249 int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
250 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
251 "AudioDeviceIOS::SetSpeakerVolume(volume=%u)", volume);
252
253 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
254 " API call not supported on this platform");
255 return -1;
256 }
257
258 int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
259 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
260 "%s", __FUNCTION__);
261
262 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
263 " API call not supported on this platform");
264 return -1;
265 }
266
267 int32_t
268 AudioDeviceIOS::SetWaveOutVolume(uint16_t volumeLeft,
269 uint16_t volumeRight) {
270 WEBRTC_TRACE(
271 kTraceModuleCall,
272 kTraceAudioDevice,
273 _id,
274 "AudioDeviceIOS::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
275 volumeLeft, volumeRight);
276
277 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
278 " API call not supported on this platform");
279
280 return -1;
281 }
282
283 int32_t
284 AudioDeviceIOS::WaveOutVolume(uint16_t& /*volumeLeft*/,
285 uint16_t& /*volumeRight*/) const {
286 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
287 "%s", __FUNCTION__);
288
289 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
290 " API call not supported on this platform");
291 return -1;
292 }
293
294 int32_t
295 AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
296 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
297 "%s", __FUNCTION__);
298
299 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
300 " API call not supported on this platform");
301 return -1;
302 }
303
304 int32_t AudioDeviceIOS::MinSpeakerVolume(
305 uint32_t& minVolume) const {
306 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
307 "%s", __FUNCTION__);
308
309 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
310 " API call not supported on this platform");
311 return -1;
312 }
313
314 int32_t
315 AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const {
316 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
317 "%s", __FUNCTION__);
318
319 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
320 " API call not supported on this platform");
321 return -1;
322 }
323
324 int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
325 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
326 "%s", __FUNCTION__);
327
328 available = false; // Speaker mute not supported on iOS
329
330 return 0;
331 }
332
333 int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
334 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
335 "%s", __FUNCTION__);
336
337 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
338 " API call not supported on this platform");
339 return -1;
340 }
341
342 int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
343 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
344 "%s", __FUNCTION__);
345
346 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
347 " API call not supported on this platform");
348 return -1;
349 }
350
351 int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
352 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
353 "%s", __FUNCTION__);
354
355 available = false; // Mic mute not supported on iOS
356
357 return 0;
358 }
359
360 int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
361 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
362 "%s", __FUNCTION__);
363
364 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
365 " API call not supported on this platform");
366 return -1;
367 }
368
369 int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
370 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
371 "%s", __FUNCTION__);
372
373 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
374 " API call not supported on this platform");
375 return -1;
376 }
377
378 int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) {
379 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
380 "%s", __FUNCTION__);
381
382 available = false; // Mic boost not supported on iOS
383
384 return 0;
385 }
386
387 int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) {
388 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
389 "AudioDeviceIOS::SetMicrophoneBoost(enable=%u)", enable);
390
391 if (!_micIsInitialized) {
392 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
393 " Microphone not initialized");
394 return -1;
395 }
396
397 if (enable) {
398 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
399 " SetMicrophoneBoost cannot be enabled on this platform");
400 return -1;
401 }
402
403 return 0;
404 }
405
406 int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const {
407 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
408 "%s", __FUNCTION__);
409 if (!_micIsInitialized) {
410 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
411 " Microphone not initialized");
412 return -1;
413 }
414
415 enabled = false;
416
417 return 0;
418 }
419
420 int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
421 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
422 "%s", __FUNCTION__);
423
424 available = false; // Stereo recording not supported on iOS
425
426 return 0;
427 }
428
429 int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
430 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
431 "AudioDeviceIOS::SetStereoRecording(enable=%u)", enable);
432
433 if (enable) {
434 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
435 " Stereo recording is not supported on this platform");
436 return -1;
437 }
438 return 0;
439 }
440
441 int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
442 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
443 "%s", __FUNCTION__);
444
445 enabled = false;
446 return 0;
447 }
448
449 int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
450 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
451 "%s", __FUNCTION__);
452
453 available = false; // Stereo playout not supported on iOS
454
455 return 0;
456 }
457
458 int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
459 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
460 "AudioDeviceIOS::SetStereoPlayout(enable=%u)", enable);
461
462 if (enable) {
463 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
464 " Stereo playout is not supported on this platform");
465 return -1;
466 }
467 return 0;
468 }
469
470 int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
471 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
472 "%s", __FUNCTION__);
473
474 enabled = false;
475 return 0;
476 }
477
478 int32_t AudioDeviceIOS::SetAGC(bool enable) {
479 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
480 "AudioDeviceIOS::SetAGC(enable=%d)", enable);
481
482 _AGC = enable;
483
484 return 0;
485 }
486
487 bool AudioDeviceIOS::AGC() const {
488 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
489 "%s", __FUNCTION__);
490
491 return _AGC;
492 }
493
494 int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
495 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
496 "%s", __FUNCTION__);
497
498 available = false; // Mic volume not supported on IOS
499
500 return 0;
501 }
502
503 int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
504 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
505 "AudioDeviceIOS::SetMicrophoneVolume(volume=%u)", volume);
506
507 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
508 " API call not supported on this platform");
509 return -1;
510 }
511
512 int32_t
513 AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
514 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
515 "%s", __FUNCTION__);
516
517 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
518 " API call not supported on this platform");
519 return -1;
520 }
521
522 int32_t
523 AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
524 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
525 "%s", __FUNCTION__);
526
527 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
528 " API call not supported on this platform");
529 return -1;
530 }
531
532 int32_t
533 AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
534 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
535 "%s", __FUNCTION__);
536
537 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
538 " API call not supported on this platform");
539 return -1;
540 }
541
542 int32_t
543 AudioDeviceIOS::MicrophoneVolumeStepSize(
544 uint16_t& stepSize) const {
545 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
546 "%s", __FUNCTION__);
547
548 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
549 " API call not supported on this platform");
550 return -1;
551 }
552
553 int16_t AudioDeviceIOS::PlayoutDevices() {
554 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
555 "%s", __FUNCTION__);
556
557 return (int16_t)1;
558 }
559
560 int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
561 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
562 "AudioDeviceIOS::SetPlayoutDevice(index=%u)", index);
563
564 if (_playIsInitialized) {
565 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
566 " Playout already initialized");
567 return -1;
568 }
569
570 if (index !=0) {
571 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
572 " SetPlayoutDevice invalid index");
573 return -1;
574 }
575 _playoutDeviceIsSpecified = true;
576
577 return 0;
578 }
579
580 int32_t
581 AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
582 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
583 "WindowsDeviceType not supported");
584 return -1;
585 }
586
587 int32_t
588 AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
589 char name[kAdmMaxDeviceNameSize],
590 char guid[kAdmMaxGuidSize]) {
591 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
592 "AudioDeviceIOS::PlayoutDeviceName(index=%u)", index);
593
594 if (index != 0) {
595 return -1;
596 }
597 // return empty strings
598 memset(name, 0, kAdmMaxDeviceNameSize);
599 if (guid != NULL) {
600 memset(guid, 0, kAdmMaxGuidSize);
601 }
602
603 return 0;
604 }
605
606 int32_t
607 AudioDeviceIOS::RecordingDeviceName(uint16_t index,
608 char name[kAdmMaxDeviceNameSize],
609 char guid[kAdmMaxGuidSize]) {
610 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
611 "AudioDeviceIOS::RecordingDeviceName(index=%u)", index);
612
613 if (index != 0) {
614 return -1;
615 }
616 // return empty strings
617 memset(name, 0, kAdmMaxDeviceNameSize);
618 if (guid != NULL) {
619 memset(guid, 0, kAdmMaxGuidSize);
620 }
621
622 return 0;
623 }
624
625 int16_t AudioDeviceIOS::RecordingDevices() {
626 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
627
628 return (int16_t)1;
629 }
630
631 int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
632 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
633 "AudioDeviceIOS::SetRecordingDevice(index=%u)", index);
634
635 if (_recIsInitialized) {
636 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
637 " Recording already initialized");
638 return -1;
639 }
640
641 if (index !=0) {
642 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
643 " SetRecordingDevice invalid index");
644 return -1;
645 }
646
647 _recordingDeviceIsSpecified = true;
648
649 return 0;
650 }
651
652 int32_t
653 AudioDeviceIOS::SetRecordingDevice(
654 AudioDeviceModule::WindowsDeviceType) {
655 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
656 "WindowsDeviceType not supported");
657 return -1;
658 }
659
660 // ----------------------------------------------------------------------------
661 // SetLoudspeakerStatus
662 //
663 // Change the default receiver playout route to speaker.
664 //
665 // ----------------------------------------------------------------------------
666
667 int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
668 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
669 "AudioDeviceIOS::SetLoudspeakerStatus(enable=%d)", enable);
670
671 AVAudioSession* session = [AVAudioSession sharedInstance];
672 NSString* category = session.category;
673 AVAudioSessionCategoryOptions options = session.categoryOptions;
674 // Respect old category options if category is
675 // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
676 // might not be valid for this category.
677 if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
678 if (enable) {
679 options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
680 } else {
681 options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
682 }
683 } else {
684 options = AVAudioSessionCategoryOptionDefaultToSpeaker;
685 }
686
687 NSError* error = nil;
688 [session setCategory:AVAudioSessionCategoryPlayAndRecord
689 withOptions:options
690 error:&error];
691 if (error != nil) {
692 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
693 "Error changing default output route ");
694 return -1;
695 }
696
697 return 0;
698 }
699
700 int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool &enabled) const {
701 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
702 "AudioDeviceIOS::SetLoudspeakerStatus(enabled=?)");
703
704 AVAudioSession* session = [AVAudioSession sharedInstance];
705 AVAudioSessionCategoryOptions options = session.categoryOptions;
706 enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
707
708 return 0;
709 }
710
711 int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
712 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
713
714 available = false;
715
716 // Try to initialize the playout side
717 int32_t res = InitPlayout();
718
719 // Cancel effect of initialization
720 StopPlayout();
721
722 if (res != -1) {
723 available = true;
724 }
725
726 return 0;
727 }
728
729 int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
730 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
731
732 available = false;
733
734 // Try to initialize the recording side
735 int32_t res = InitRecording();
736
737 // Cancel effect of initialization
738 StopRecording();
739
740 if (res != -1) {
741 available = true;
742 }
743
744 return 0;
745 }
746
747 int32_t AudioDeviceIOS::InitPlayout() {
748 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
749
750 CriticalSectionScoped lock(&_critSect);
751
752 if (!_initialized) {
753 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Not initialized");
754 return -1;
755 }
756
757 if (_playing) {
758 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
759 " Playout already started");
760 return -1;
761 }
762
763 if (_playIsInitialized) {
764 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
765 " Playout already initialized");
766 return 0;
767 }
768
769 if (!_playoutDeviceIsSpecified) {
770 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
771 " Playout device is not specified");
772 return -1;
773 }
774
775 // Initialize the speaker
776 if (InitSpeaker() == -1) {
777 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
778 " InitSpeaker() failed");
779 }
780
781 _playIsInitialized = true;
782
783 if (!_recIsInitialized) {
784 // Audio init
785 if (InitPlayOrRecord() == -1) {
786 // todo: Handle error
787 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
788 " InitPlayOrRecord() failed");
789 }
790 } else {
791 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
792 " Recording already initialized - InitPlayOrRecord() not called");
793 }
794
795 return 0;
796 }
797
798 bool AudioDeviceIOS::PlayoutIsInitialized() const {
799 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
800 return (_playIsInitialized);
801 }
802
803 int32_t AudioDeviceIOS::InitRecording() {
804 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
805
806 CriticalSectionScoped lock(&_critSect);
807
808 if (!_initialized) {
809 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
810 " Not initialized");
811 return -1;
812 }
813
814 if (_recording) {
815 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
816 " Recording already started");
817 return -1;
818 }
819
820 if (_recIsInitialized) {
821 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
822 " Recording already initialized");
823 return 0;
824 }
825
826 if (!_recordingDeviceIsSpecified) {
827 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
828 " Recording device is not specified");
829 return -1;
830 }
831
832 // Initialize the microphone
833 if (InitMicrophone() == -1) {
834 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
835 " InitMicrophone() failed");
836 }
837
838 _recIsInitialized = true;
839
840 if (!_playIsInitialized) {
841 // Audio init
842 if (InitPlayOrRecord() == -1) {
843 // todo: Handle error
844 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
845 " InitPlayOrRecord() failed");
846 }
847 } else {
848 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
849 " Playout already initialized - InitPlayOrRecord() " \
850 "not called");
851 }
852
853 return 0;
854 }
855
856 bool AudioDeviceIOS::RecordingIsInitialized() const {
857 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
858 return (_recIsInitialized);
859 }
860
861 int32_t AudioDeviceIOS::StartRecording() {
862 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
863
864 CriticalSectionScoped lock(&_critSect);
865
866 if (!_recIsInitialized) {
867 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
868 " Recording not initialized");
869 return -1;
870 }
871
872 if (_recording) {
873 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
874 " Recording already started");
875 return 0;
876 }
877
878 // Reset recording buffer
879 memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
880 memset(_recordingLength, 0, sizeof(_recordingLength));
881 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
882 _recordingCurrentSeq = 0;
883 _recordingBufferTotalSize = 0;
884 _recordingDelay = 0;
885 _recordingDelayHWAndOS = 0;
886 // Make sure first call to update delay function will update delay
887 _recordingDelayMeasurementCounter = 9999;
888 _recWarning = 0;
889 _recError = 0;
890
891 if (!_playing) {
892 // Start Audio Unit
893 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
894 " Starting Audio Unit");
895 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
896 if (0 != result) {
897 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
898 " Error starting Audio Unit (result=%d)", result);
899 return -1;
900 }
901 }
902
903 _recording = true;
904
905 return 0;
906 }
907
908 int32_t AudioDeviceIOS::StopRecording() {
909 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
910
911 CriticalSectionScoped lock(&_critSect);
912
913 if (!_recIsInitialized) {
914 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
915 " Recording is not initialized");
916 return 0;
917 }
918
919 _recording = false;
920
921 if (!_playing) {
922 // Both playout and recording has stopped, shutdown the device
923 ShutdownPlayOrRecord();
924 }
925
926 _recIsInitialized = false;
927 _micIsInitialized = false;
928
929 return 0;
930 }
931
932 bool AudioDeviceIOS::Recording() const {
933 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
934 return (_recording);
935 }
936
937 int32_t AudioDeviceIOS::StartPlayout() {
938 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
939
940 // This lock is (among other things) needed to avoid concurrency issues
941 // with capture thread
942 // shutting down Audio Unit
943 CriticalSectionScoped lock(&_critSect);
944
945 if (!_playIsInitialized) {
946 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
947 " Playout not initialized");
948 return -1;
949 }
950
951 if (_playing) {
952 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
953 " Playing already started");
954 return 0;
955 }
956
957 // Reset playout buffer
958 memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
959 _playoutBufferUsed = 0;
960 _playoutDelay = 0;
961 // Make sure first call to update delay function will update delay
962 _playoutDelayMeasurementCounter = 9999;
963 _playWarning = 0;
964 _playError = 0;
965
966 if (!_recording) {
967 // Start Audio Unit
968 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
969 " Starting Audio Unit");
970 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
971 if (0 != result) {
972 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
973 " Error starting Audio Unit (result=%d)", result);
974 return -1;
975 }
976 }
977
978 _playing = true;
979
980 return 0;
981 }
982
983 int32_t AudioDeviceIOS::StopPlayout() {
984 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
985
986 CriticalSectionScoped lock(&_critSect);
987
988 if (!_playIsInitialized) {
989 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
990 " Playout is not initialized");
991 return 0;
992 }
993
994 _playing = false;
995
996 if (!_recording) {
997 // Both playout and recording has stopped, signal shutdown the device
998 ShutdownPlayOrRecord();
999 }
1000
1001 _playIsInitialized = false;
1002 _speakerIsInitialized = false;
1003
1004 return 0;
1005 }
1006
1007 bool AudioDeviceIOS::Playing() const {
1008 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
1009 "%s", __FUNCTION__);
1010 return (_playing);
1011 }
1012
1013 // ----------------------------------------------------------------------------
1014 // ResetAudioDevice
1015 //
1016 // Disable playout and recording, signal to capture thread to shutdown,
1017 // and set enable states after shutdown to same as current.
1018 // In capture thread audio device will be shutdown, then started again.
1019 // ----------------------------------------------------------------------------
1020 int32_t AudioDeviceIOS::ResetAudioDevice() {
1021 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1022
1023 CriticalSectionScoped lock(&_critSect);
1024
1025 if (!_playIsInitialized && !_recIsInitialized) {
1026 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1027 " Playout or recording not initialized, doing nothing");
1028 return 0; // Nothing to reset
1029 }
1030
1031 // Store the states we have before stopping to restart below
1032 bool initPlay = _playIsInitialized;
1033 bool play = _playing;
1034 bool initRec = _recIsInitialized;
1035 bool rec = _recording;
1036
1037 int res(0);
1038
1039 // Stop playout and recording
1040 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1041 " Stopping playout and recording");
1042 res += StopPlayout();
1043 res += StopRecording();
1044
1045 // Restart
1046 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1047 " Restarting playout and recording (%d, %d, %d, %d)",
1048 initPlay, play, initRec, rec);
1049 if (initPlay) res += InitPlayout();
1050 if (initRec) res += InitRecording();
1051 if (play) res += StartPlayout();
1052 if (rec) res += StartRecording();
1053
1054 if (0 != res) {
1055 // Logging is done in init/start/stop calls above
1056 return -1;
1057 }
1058
1059 return 0;
New:
391 }
392
393 int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
394 delayMS = _playoutDelay;
395 return 0;
396 }
397
398 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
399 delayMS = _recordingDelay;
400 return 0;
401 }
402
403 int32_t AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
404 uint16_t& sizeMS) const {
405 type = AudioDeviceModule::kAdaptiveBufferSize;
406 sizeMS = _playoutDelay;
407 return 0;
408 }
409
410 int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
411 CHECK(playout_parameters_.is_valid());
412 DCHECK(thread_checker_.CalledOnValidThread());
413 *params = playout_parameters_;
414 return 0;
415 }
416
417 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
418 CHECK(record_parameters_.is_valid());
419 DCHECK(thread_checker_.CalledOnValidThread());
420 *params = record_parameters_;
421 return 0;
Old:
1060 }
1061
1062 int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
1063 delayMS = _playoutDelay;
1064 return 0;
1065 }
1066
1067 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
1068 delayMS = _recordingDelay;
1069 return 0;
1070 }
1071
1072 int32_t
1073 AudioDeviceIOS::SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
1074 uint16_t sizeMS) {
1075 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
1076 "AudioDeviceIOS::SetPlayoutBuffer(type=%u, sizeMS=%u)",
1077 type, sizeMS);
1078
1079 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1080 " API call not supported on this platform");
1081 return -1;
1082 }
1083
1084 int32_t
1085 AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
1086 uint16_t& sizeMS) const {
1087 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1088
1089 type = AudioDeviceModule::kAdaptiveBufferSize;
1090
1091 sizeMS = _playoutDelay;
1092
1093 return 0;
1094 }
1095
1096 int32_t AudioDeviceIOS::CPULoad(uint16_t& /*load*/) const {
1097 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1098
1099 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1100 " API call not supported on this platform");
1101 return -1;
1102 }
1103
1104 bool AudioDeviceIOS::PlayoutWarning() const {
1105 return (_playWarning > 0);
1106 }
1107
1108 bool AudioDeviceIOS::PlayoutError() const {
1109 return (_playError > 0);
1110 }
1111
1112 bool AudioDeviceIOS::RecordingWarning() const {
1113 return (_recWarning > 0);
1114 }
1115
1116 bool AudioDeviceIOS::RecordingError() const {
1117 return (_recError > 0);
1118 }
1119
1120 void AudioDeviceIOS::ClearPlayoutWarning() {
1121 _playWarning = 0;
1122 }
1123
1124 void AudioDeviceIOS::ClearPlayoutError() {
1125 _playError = 0;
1126 }
1127
1128 void AudioDeviceIOS::ClearRecordingWarning() {
1129 _recWarning = 0;
1130 }
1131
1132 void AudioDeviceIOS::ClearRecordingError() {
1133 _recError = 0;
Old:
1134 }
1135
1136 // ============================================================================
1137 // Private Methods
1138 // ============================================================================
1139
1140 int32_t AudioDeviceIOS::InitPlayOrRecord() {
1141 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1142
1143 OSStatus result = -1;
1144
1145 // Check if already initialized
1146 if (NULL != _auVoiceProcessing) {
1147 // We already have initialized before and created any of the audio unit,
1148 // check that all exist
1149 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1150 " Already initialized");
1151 // todo: Call AudioUnitReset() here and empty all buffers?
1152 return 0;
1153 }
1154
1155 // Create Voice Processing Audio Unit
1156 AudioComponentDescription desc;
1157 AudioComponent comp;
1158
1159 desc.componentType = kAudioUnitType_Output;
1160 desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
1161 desc.componentManufacturer = kAudioUnitManufacturer_Apple;
1162 desc.componentFlags = 0;
1163 desc.componentFlagsMask = 0;
1164
1165 comp = AudioComponentFindNext(NULL, &desc);
1166 if (NULL == comp) {
1167 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1168 " Could not find audio component for Audio Unit");
1169 return -1;
1170 }
1171
1172 result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
New:
422 }
423
424 // ============================================================================
425 // Private Methods
426 // ============================================================================
427
428 int32_t AudioDeviceIOS::InitPlayOrRecord() {
429 LOGI() << "AudioDeviceIOS::InitPlayOrRecord";
430 DCHECK(!_auVoiceProcessing);
431
432 OSStatus result = -1;
433
434 // Create Voice Processing Audio Unit
435 AudioComponentDescription desc;
436 AudioComponent comp;
437
438 desc.componentType = kAudioUnitType_Output;
439 desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
440 desc.componentManufacturer = kAudioUnitManufacturer_Apple;
441 desc.componentFlags = 0;
442 desc.componentFlagsMask = 0;
443
444 comp = AudioComponentFindNext(nullptr, &desc);
445 if (nullptr == comp) {
446 LOG_F(LS_ERROR) << "Could not find audio component for Audio Unit";
447 return -1;
448 }
449
450 result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
451 if (0 != result) {
452 LOG_F(LS_ERROR) << "Failed to create Audio Unit instance: " << result;
453 return -1;
454 }
455
456 // TODO(henrika): I think we should set the preferred channel configuration
457 // in both directions as well to be safe.
458
459 // Set preferred hardware sample rate to 16 kHz.
460 // TODO(henrika): improve this selection of sample rate. Why do we currently
461 // use a hard coded value? How can we fail and still continue?
462 NSError* error = nil;
463 AVAudioSession* session = [AVAudioSession sharedInstance];
464 Float64 preferredSampleRate(playout_parameters_.sample_rate());
465 [session setPreferredSampleRate:preferredSampleRate error:&error];
466 if (error != nil) {
467 const char* errorString = [[error localizedDescription] UTF8String];
468 LOG_F(LS_ERROR) << "setPreferredSampleRate failed: " << errorString;
469 }
470
471 // TODO(henrika): we can reduce latency by setting the IOBufferDuration
472 // here. Default size for 16kHz is 0.016 sec or 16 msec on an iPhone 6.
473
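  // [Reviewer sketch, not part of this CL] The TODO above could be realized
  // with AVAudioSession's preferred buffer duration, e.g. ~10 ms to match the
  // WebRTC frame size; the OS treats the value as a hint and may round it to
  // a size the hardware supports:
  //
  //   NSError* bufferError = nil;
  //   [session setPreferredIOBufferDuration:0.01 error:&bufferError];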
474 // Activate the audio session.
475 ActivateAudioSession(session, true);
476
477 UInt32 enableIO = 1;
478 result = AudioUnitSetProperty(_auVoiceProcessing,
479 kAudioOutputUnitProperty_EnableIO,
480 kAudioUnitScope_Input,
481 1, // input bus
482 &enableIO, sizeof(enableIO));
483 if (0 != result) {
484 LOG_F(LS_ERROR) << "Failed to enable IO on input: " << result;
485 }
486
487 result = AudioUnitSetProperty(_auVoiceProcessing,
488 kAudioOutputUnitProperty_EnableIO,
489 kAudioUnitScope_Output,
490 0, // output bus
491 &enableIO, sizeof(enableIO));
492 if (0 != result) {
493 LOG_F(LS_ERROR) << "Failed to enable IO on output: " << result;
494 }
495
496 // Disable AU buffer allocation for the recorder; we allocate our own.
497 // TODO(henrika): understand this part better.
498 UInt32 flag = 0;
499 result = AudioUnitSetProperty(_auVoiceProcessing,
500 kAudioUnitProperty_ShouldAllocateBuffer,
501 kAudioUnitScope_Output, 1, &flag, sizeof(flag));
502 if (0 != result) {
503 LOG_F(LS_WARNING) << "Failed to disable AU buffer allocation: " << result;
504 // Should work anyway
505 }
506
507 // Set recording callback.
508 AURenderCallbackStruct auCbS;
509 memset(&auCbS, 0, sizeof(auCbS));
510 auCbS.inputProc = RecordProcess;
511 auCbS.inputProcRefCon = this;
512 result = AudioUnitSetProperty(
513 _auVoiceProcessing, kAudioOutputUnitProperty_SetInputCallback,
514 kAudioUnitScope_Global, 1, &auCbS, sizeof(auCbS));
515 if (0 != result) {
516 LOG_F(LS_ERROR) << "Failed to set AU record callback: " << result;
517 }
518
519 // Set playout callback.
520 memset(&auCbS, 0, sizeof(auCbS));
521 auCbS.inputProc = PlayoutProcess;
522 auCbS.inputProcRefCon = this;
523 result = AudioUnitSetProperty(
524 _auVoiceProcessing, kAudioUnitProperty_SetRenderCallback,
525 kAudioUnitScope_Global, 0, &auCbS, sizeof(auCbS));
526 if (0 != result) {
527 LOG_F(LS_ERROR) << "Failed to set AU output callback: " << result;
528 }
529
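  // For reference (reviewer note): RecordProcess and PlayoutProcess, set as
  // inputProc above, must match CoreAudio's AURenderCallback signature,
  // presumably declared as static members in the class header:
  //
  //   static OSStatus RecordProcess(void* inRefCon,
  //                                 AudioUnitRenderActionFlags* ioActionFlags,
  //                                 const AudioTimeStamp* inTimeStamp,
  //                                 UInt32 inBusNumber,
  //                                 UInt32 inNumberFrames,
  //                                 AudioBufferList* ioData);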
530 // Get stream format for out/0
531 AudioStreamBasicDescription playoutDesc;
532 UInt32 size = sizeof(playoutDesc);
533 result =
534 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
535 kAudioUnitScope_Output, 0, &playoutDesc, &size);
536 if (0 != result) {
537 LOG_F(LS_ERROR) << "Failed to get AU output stream format: " << result;
538 }
539
540 playoutDesc.mSampleRate = preferredSampleRate;
541 LOG(LS_INFO) << "Audio Unit playout opened in sampling rate: "
542 << playoutDesc.mSampleRate;
543
544 // Store the sampling frequency to use for the Audio Device Buffer.
545 // TODO: Add 48 kHz (increase buffer sizes). Other sample rates?
546 // TODO(henrika): Figure out if we really need this complex handling.
547 if ((playoutDesc.mSampleRate > 44090.0) &&
548 (playoutDesc.mSampleRate < 44110.0)) {
549 _adbSampFreq = 44100;
550 } else if ((playoutDesc.mSampleRate > 15990.0) &&
551 (playoutDesc.mSampleRate < 16010.0)) {
552 _adbSampFreq = 16000;
553 } else if ((playoutDesc.mSampleRate > 7990.0) &&
554 (playoutDesc.mSampleRate < 8010.0)) {
555 _adbSampFreq = 8000;
556 } else {
557 _adbSampFreq = 0;
558 FATAL() << "Invalid sample rate";
559 }
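  // [Reviewer note] The +/-10 Hz windows above allow for the hardware
  // reporting a nominal rate with floating-point slack (e.g. 44099.98 Hz
  // still maps to 44100) instead of an exact value.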
560
561 // Set the audio device buffer sampling rates (use same for play and record).
562 // TODO(henrika): this is not a good place to set these things up.
563 DCHECK(audio_device_buffer_);
564 DCHECK_EQ(_adbSampFreq, playout_parameters_.sample_rate());
565 audio_device_buffer_->SetRecordingSampleRate(_adbSampFreq);
566 audio_device_buffer_->SetPlayoutSampleRate(_adbSampFreq);
567
568 // Set stream format for out/0.
569 playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
570 kLinearPCMFormatFlagIsPacked |
571 kLinearPCMFormatFlagIsNonInterleaved;
572 playoutDesc.mBytesPerPacket = 2;
573 playoutDesc.mFramesPerPacket = 1;
574 playoutDesc.mBytesPerFrame = 2;
575 playoutDesc.mChannelsPerFrame = 1;
576 playoutDesc.mBitsPerChannel = 16;
577 result =
578 AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
579 kAudioUnitScope_Input, 0, &playoutDesc, size);
580 if (0 != result) {
581 LOG_F(LS_ERROR) << "Failed to set AU stream format for out/0";
582 }
583
584 // Get stream format for in/1.
585 AudioStreamBasicDescription recordingDesc;
586 size = sizeof(recordingDesc);
587 result =
588 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
589 kAudioUnitScope_Input, 1, &recordingDesc, &size);
590 if (0 != result) {
591 LOG_F(LS_ERROR) << "Failed to get AU stream format for in/1";
592 }
593
594 recordingDesc.mSampleRate = preferredSampleRate;
595 LOG(LS_INFO) << "Audio Unit recording opened in sampling rate: "
596 << recordingDesc.mSampleRate;
597
598 // Set stream format for out/1 (use same sampling frequency as for in/1).
599 recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
600 kLinearPCMFormatFlagIsPacked |
601 kLinearPCMFormatFlagIsNonInterleaved;
602 recordingDesc.mBytesPerPacket = 2;
603 recordingDesc.mFramesPerPacket = 1;
604 recordingDesc.mBytesPerFrame = 2;
605 recordingDesc.mChannelsPerFrame = 1;
606 recordingDesc.mBitsPerChannel = 16;
607 result =
608 AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
609 kAudioUnitScope_Output, 1, &recordingDesc, size);
610 if (0 != result) {
611 LOG_F(LS_ERROR) << "Failed to set AU stream format for out/1";
612 }
613
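  // [Reviewer note] Both stream formats above describe 16-bit mono linear
  // PCM: 1 channel * 16 bits = 2 bytes, so mBytesPerPacket and mBytesPerFrame
  // are both 2 with one frame per packet; the non-interleaved flag keeps each
  // channel in its own AudioBuffer.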
614 // Initialize here already to be able to get/set stream properties.
615 result = AudioUnitInitialize(_auVoiceProcessing);
616 if (0 != result) {
617 LOG_F(LS_ERROR) << "AudioUnitInitialize failed: " << result;
618 }
619
620 // Get hardware sample rate for logging (see if we get what we asked for).
621 // TODO(henrika): what if we don't get what we ask for?
622 double sampleRate = session.sampleRate;
623 LOG(LS_INFO) << "Current HW sample rate is: " << sampleRate
624 << ", ADB sample rate is: " << _adbSampFreq;
625 LOG(LS_INFO) << "Current HW IO buffer size is: " <<
626 [session IOBufferDuration];
627
628 // Listen to audio interruptions.
629 // TODO(henrika): learn this area better.
630 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
631 id observer = [center
632 addObserverForName:AVAudioSessionInterruptionNotification
633 object:nil
634 queue:[NSOperationQueue mainQueue]
635 usingBlock:^(NSNotification* notification) {
636 NSNumber* typeNumber =
637 [notification userInfo][AVAudioSessionInterruptionTypeKey];
638 AVAudioSessionInterruptionType type =
639 (AVAudioSessionInterruptionType)[typeNumber
640 unsignedIntegerValue];
641 switch (type) {
642 case AVAudioSessionInterruptionTypeBegan:
643 // At this point our audio session has been deactivated and
644 // the audio unit render callbacks no longer occur.
645 // Nothing to do here; the render callbacks are restarted
646 // manually when the interruption ends (see the Ended case).
647 break;
648 case AVAudioSessionInterruptionTypeEnded: {
649 NSError* error = nil;
650 AVAudioSession* session = [AVAudioSession sharedInstance];
651 [session setActive:YES error:&error];
652 if (error != nil) {
653 LOG_F(LS_ERROR) << "Failed to activate audio session";
654 }
655 // After an interruption ends, the audio unit render callbacks
656 // don't automatically resume, so we restart the audio unit
657 // manually here.
658 AudioOutputUnitStop(_auVoiceProcessing);
659 AudioOutputUnitStart(_auVoiceProcessing);
660 break;
661 }
662 }
663 }];
664 // Increment refcount on observer using ARC bridge. Instance variable is a
665 // void* instead of an id because header is included in other pure C++
666 // files.
667 _audioInterruptionObserver = (__bridge_retained void*)observer;
668
669 // Deactivate the audio session.
670 ActivateAudioSession(session, false);
671
672 return 0;
673 }
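Because the observer returned by the block-based notification API is stored in a plain void* (the header is included from pure C++ files), ownership has to be moved across the ARC boundary by hand. A minimal sketch of the retain/release pairing used above, assuming a void* _observer member like the one in this class:

// Register: __bridge_retained yields a +1 reference that we own via void*.
id observer = [[NSNotificationCenter defaultCenter]
    addObserverForName:AVAudioSessionInterruptionNotification
                object:nil
                 queue:[NSOperationQueue mainQueue]
            usingBlock:^(NSNotification* notification) { /* handle it */ }];
_observer = (__bridge_retained void*)observer;

// Unregister: __bridge_transfer hands the +1 back to ARC, which releases
// the observer when 'stored' goes out of scope.
id stored = (__bridge_transfer id)_observer;
[[NSNotificationCenter defaultCenter] removeObserver:stored];
_observer = nullptr;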
674
675 int32_t AudioDeviceIOS::ShutdownPlayOrRecord() {
676 LOGI() << "ShutdownPlayOrRecord";
677
678 if (_audioInterruptionObserver != nullptr) {
679 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
680 // Transfer ownership of observer back to ARC, which will dealloc the
681 // observer once it exits this scope.
682 id observer = (__bridge_transfer id)_audioInterruptionObserver;
683 [center removeObserver:observer];
684 _audioInterruptionObserver = nullptr;
685 }
686
687 // Close and delete AU.
688 OSStatus result = -1;
689 if (nullptr != _auVoiceProcessing) {
690 result = AudioOutputUnitStop(_auVoiceProcessing);
691 if (0 != result) {
692 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
693 }
694 result = AudioComponentInstanceDispose(_auVoiceProcessing);
695 if (0 != result) {
696 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
697 }
698 _auVoiceProcessing = nullptr;
699 }
700
701 return 0;
702 }
703
704 // ============================================================================
705 // Thread Methods
706 // ============================================================================
707
708 OSStatus AudioDeviceIOS::RecordProcess(
709 void* inRefCon,
710 AudioUnitRenderActionFlags* ioActionFlags,
711 const AudioTimeStamp* inTimeStamp,
712 UInt32 inBusNumber,
713 UInt32 inNumberFrames,
714 AudioBufferList* ioData) {
715 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
716 return ptrThis->RecordProcessImpl(ioActionFlags, inTimeStamp, inBusNumber,
717 inNumberFrames);
718 }
719
720 OSStatus AudioDeviceIOS::RecordProcessImpl(
721 AudioUnitRenderActionFlags* ioActionFlags,
722 const AudioTimeStamp* inTimeStamp,
723 uint32_t inBusNumber,
724 uint32_t inNumberFrames) {
725 // Use a temporary buffer to avoid locking up the recording buffers
726 // longer than necessary.
727 // TODO: Make dataTmp a member variable with a static size that holds
728 // the max possible number of frames.
729 int16_t* dataTmp = new int16_t[inNumberFrames];
730 memset(dataTmp, 0, 2 * inNumberFrames);
731
732 AudioBufferList abList;
733 abList.mNumberBuffers = 1;
734 abList.mBuffers[0].mData = dataTmp;
735 abList.mBuffers[0].mDataByteSize = 2 * inNumberFrames; // 2 bytes/sample
736 abList.mBuffers[0].mNumberChannels = 1;
737
738 // Get data from the microphone.
739 OSStatus res = AudioUnitRender(_auVoiceProcessing, ioActionFlags, inTimeStamp,
740 inBusNumber, inNumberFrames, &abList);
741 if (res != 0) {
742 // TODO(henrika): improve error handling.
743 delete[] dataTmp;
744 return 0;
745 }
746
747 if (_recording) {
748 // Insert all data from the temp buffer into the recording buffers.
749 // There is zero or one buffer partially full at any given time;
750 // all others are full or empty.
751 // Full means filled with noSamp10ms samples.
752
753 const unsigned int noSamp10ms = _adbSampFreq / 100;
754 unsigned int dataPos = 0;
755 uint16_t bufPos = 0;
756 int16_t insertPos = -1;
757 unsigned int nCopy = 0; // Number of samples to copy.
758
759 while (dataPos < inNumberFrames) {
760 // Loop over all recording buffers, or until we find the
761 // partially full buffer.
762 // First choice is to insert into the partially full buffer;
763 // second choice is to insert into an empty buffer.
764 bufPos = 0;
765 insertPos = -1;
766 nCopy = 0;
767 while (bufPos < N_REC_BUFFERS) {
768 if ((_recordingLength[bufPos] > 0) &&
769 (_recordingLength[bufPos] < noSamp10ms)) {
770 // Found the partially full buffer.
771 insertPos = static_cast<int16_t>(bufPos);
772 // No need to search further; quit the loop.
773 bufPos = N_REC_BUFFERS;
774 } else if ((-1 == insertPos) && (0 == _recordingLength[bufPos])) {
775 // Found an empty buffer.
776 insertPos = static_cast<int16_t>(bufPos);
777 }
778 ++bufPos;
779 }
780
781 // Insert data into buffer.
782 if (insertPos > -1) {
783 // We found a non-full buffer; copy data to it.
784 unsigned int dataToCopy = inNumberFrames - dataPos;
785 unsigned int currentRecLen = _recordingLength[insertPos];
786 unsigned int roomInBuffer = noSamp10ms - currentRecLen;
787 nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);
788
789 memcpy(&_recordingBuffer[insertPos][currentRecLen], &dataTmp[dataPos],
790 nCopy * sizeof(int16_t));
791 if (0 == currentRecLen) {
792 _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
793 ++_recordingCurrentSeq;
794 }
795 _recordingBufferTotalSize += nCopy;
796 // Has to be done last to avoid interrupt problems between threads.
797 _recordingLength[insertPos] += nCopy;
798 dataPos += nCopy;
799 } else {
800 // Didn't find a non-full buffer.
801 // TODO(henrika): improve error handling.
802 dataPos = inNumberFrames; // Don't try to insert more.
803 }
804 }
805 }
806 delete[] dataTmp;
807 return 0;
808 }
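The buffer selection in the inner loop above (prefer the single partially full buffer, otherwise take the first empty one) can be read as a small pure function. A sketch under the same buffer layout; FindInsertPos and its parameters are illustrative only:

// Sketch: return the index to insert into, or -1 if every buffer is full.
// A partially filled buffer (0 < length < noSamp10ms) always wins; the
// first empty buffer is the fallback.
static int FindInsertPos(const unsigned int* lengths, int numBuffers,
                         unsigned int noSamp10ms) {
  int emptyPos = -1;
  for (int i = 0; i < numBuffers; ++i) {
    if (lengths[i] > 0 && lengths[i] < noSamp10ms)
      return i;  // The partially full buffer; fill it first.
    if (emptyPos == -1 && lengths[i] == 0)
      emptyPos = i;  // Remember the first empty buffer.
  }
  return emptyPos;
}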
809
810 OSStatus AudioDeviceIOS::PlayoutProcess(
811 void* inRefCon,
812 AudioUnitRenderActionFlags* ioActionFlags,
813 const AudioTimeStamp* inTimeStamp,
814 UInt32 inBusNumber,
815 UInt32 inNumberFrames,
816 AudioBufferList* ioData) {
817 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
818 return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
819 }
820
821 OSStatus AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
822 AudioBufferList* ioData) {
823 int16_t* data = static_cast<int16_t*>(ioData->mBuffers[0].mData);
824 unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
825 unsigned int dataSize = dataSizeBytes / 2; // Number of samples
826 CHECK_EQ(dataSize, inNumberFrames);
827 memset(data, 0, dataSizeBytes); // Start with empty buffer
828
829 // Get playout data from Audio Device Buffer
830
831 if (_playing) {
832 unsigned int noSamp10ms = _adbSampFreq / 100;
833 // TODO: Make this a member variable and allocate it once the sample rate is known.
834 int16_t* dataTmp = new int16_t[noSamp10ms];
835 memset(dataTmp, 0, 2 * noSamp10ms);
836 unsigned int dataPos = 0;
837 int noSamplesOut = 0;
838 unsigned int nCopy = 0;
839
840 // First insert data from playout buffer if any
841 if (_playoutBufferUsed > 0) {
842 nCopy = (dataSize < _playoutBufferUsed) ? dataSize : _playoutBufferUsed;
843 DCHECK_EQ(nCopy, _playoutBufferUsed);
844 memcpy(data, _playoutBuffer, 2 * nCopy);
845 dataPos = nCopy;
846 memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
847 _playoutBufferUsed = 0;
848 }
849
850 // Now get the rest from Audio Device Buffer.
851 while (dataPos < dataSize) {
852 // Update playout delay
853 UpdatePlayoutDelay();
854
855 // Ask for new PCM data to be played out using the AudioDeviceBuffer
856 noSamplesOut = audio_device_buffer_->RequestPlayoutData(noSamp10ms);
857
858 // Get data from Audio Device Buffer
859 noSamplesOut = audio_device_buffer_->GetPlayoutData(
860 reinterpret_cast<int8_t*>(dataTmp));
861 CHECK_EQ(noSamp10ms, (unsigned int)noSamplesOut);
862
863 // Insert as much as fits in data buffer
864 nCopy =
865 (dataSize - dataPos) > noSamp10ms ? noSamp10ms : (dataSize - dataPos);
866 memcpy(&data[dataPos], dataTmp, 2 * nCopy);
867
868 // Save rest in playout buffer if any
869 if (nCopy < noSamp10ms) {
870 memcpy(_playoutBuffer, &dataTmp[nCopy], 2 * (noSamp10ms - nCopy));
871 _playoutBufferUsed = noSamp10ms - nCopy;
872 }
873
874 // Update the loop counter; if we copied fewer than noSamp10ms
875 // samples we quit the loop anyway.
876 dataPos += noSamp10ms;
877 }
878 delete[] dataTmp;
879 }
880 return 0;
881 }
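PlayoutProcessImpl pulls decoded audio from the ADB in fixed 10 ms chunks even though the render callback may request any frame count, parking the surplus in _playoutBuffer for the next callback. The resulting invariant, as a sketch (ChunksToPull is an illustrative helper, not code from this CL):

// Sketch: number of 10 ms chunks one render callback must pull, given the
// samples already parked in the leftover buffer.
static unsigned int ChunksToPull(unsigned int framesNeeded,
                                 unsigned int leftover,
                                 unsigned int noSamp10ms) {
  if (leftover >= framesNeeded)
    return 0;  // The leftover alone satisfies the callback.
  unsigned int missing = framesNeeded - leftover;
  return (missing + noSamp10ms - 1) / noSamp10ms;  // Round up.
}

At 16 kHz (noSamp10ms == 160), a 128-frame callback with no leftover pulls one chunk, plays 128 samples, and parks the remaining 32.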
882
883 // TODO(henrika): can either be removed or simplified.
884 void AudioDeviceIOS::UpdatePlayoutDelay() {
885 ++_playoutDelayMeasurementCounter;
886
887 if (_playoutDelayMeasurementCounter >= 100) {
888 // Update HW and OS delay every second, unlikely to change
889
890 // Since this is eventually rounded to integral ms, add 0.5ms
891 // here to get round-to-nearest-int behavior instead of
892 // truncation.
893 double totalDelaySeconds = 0.0005;
894
895 // HW output latency
896 AVAudioSession* session = [AVAudioSession sharedInstance];
897 double latency = session.outputLatency;
898 assert(latency >= 0);
899 totalDelaySeconds += latency;
900
901 // HW buffer duration
902 double ioBufferDuration = session.IOBufferDuration;
903 assert(ioBufferDuration >= 0);
904 totalDelaySeconds += ioBufferDuration;
905
906 // AU latency
907 Float64 f64(0);
908 UInt32 size = sizeof(f64);
909 OSStatus result =
910 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency,
911 kAudioUnitScope_Global, 0, &f64, &size);
912 if (0 != result) {
913 LOG_F(LS_ERROR) << "AU latency error: " << result;
914 }
915 assert(f64 >= 0);
916 totalDelaySeconds += f64;
917
918 // Convert to ms (the 0.5 ms added above makes the cast round to nearest).
919 _playoutDelay = static_cast<uint32_t>(totalDelaySeconds * 1000);
920
921 // Reset counter
922 _playoutDelayMeasurementCounter = 0;
923 }
924
925 // TODO: Add the delay of the playout buffer as well?
926 }
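With the seconds-to-milliseconds conversion written as a multiplication, the 0.5 ms added up front turns the truncating cast into round-to-nearest. A worked sketch (DelayMs is an illustrative name):

// Sketch: delay in seconds to integral milliseconds, rounded to nearest.
// E.g. 0.0187 s -> (0.0005 + 0.0187) * 1000 = 19.2 -> 19 ms.
static uint32_t DelayMs(double delaySeconds) {
  return static_cast<uint32_t>((0.0005 + delaySeconds) * 1000);
}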
927
928 void AudioDeviceIOS::UpdateRecordingDelay() {
929 ++_recordingDelayMeasurementCounter;
930
931 if (_recordingDelayMeasurementCounter >= 100) {
932 // Update HW and OS delay every second, unlikely to change
933
934 // Since this is eventually rounded to integral ms, add 0.5ms
935 // here to get round-to-nearest-int behavior instead of
936 // truncation.
937 double totalDelaySeconds = 0.0005;
938
939 // HW input latency
940 AVAudioSession* session = [AVAudioSession sharedInstance];
941 double latency = session.inputLatency;
942 assert(latency >= 0);
943 totalDelaySeconds += latency;
944
945 // HW buffer duration
946 double ioBufferDuration = session.IOBufferDuration;
947 assert(ioBufferDuration >= 0);
948 totalDelaySeconds += ioBufferDuration;
949
950 // AU latency
951 Float64 f64(0);
952 UInt32 size = sizeof(f64);
953 OSStatus result =
954 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency,
955 kAudioUnitScope_Global, 0, &f64, &size);
956 if (0 != result) {
957 LOG_F(LS_ERROR) << "AU latency error: " << result;
958 }
959 assert(f64 >= 0);
960 totalDelaySeconds += f64;
961
962 // Convert to ms (the 0.5 ms added above makes the cast round to nearest).
963 _recordingDelayHWAndOS = static_cast<uint32_t>(totalDelaySeconds * 1000);
964
965 // Reset counter
966 _recordingDelayMeasurementCounter = 0;
967 }
968
969 _recordingDelay = _recordingDelayHWAndOS;
970
971 // ADB recording buffer size; updated on every call.
972 // Don't count the next 10 ms block to be sent; convert samples to ms.
973 const uint32_t noSamp10ms = _adbSampFreq / 100;
974 if (_recordingBufferTotalSize > noSamp10ms) {
975 _recordingDelay +=
976 (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
977 }
978 }
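The buffered-samples term converts queued samples to milliseconds at fs / 1000 samples per millisecond, excluding the 10 ms block that is about to be delivered. That term in isolation (a sketch; BufferedDelayMs is illustrative):

// Sketch: at fs = 16000 there are 16 samples per ms and noSamp10ms = 160,
// so 480 queued samples add (480 - 160) / 16 = 20 ms of delay.
static uint32_t BufferedDelayMs(uint32_t totalSamples, uint32_t fs) {
  const uint32_t noSamp10ms = fs / 100;
  if (totalSamples <= noSamp10ms)
    return 0;
  return (totalSamples - noSamp10ms) / (fs / 1000);
}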
979
980 bool AudioDeviceIOS::RunCapture(void* ptrThis) {
981 return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
982 }
983
984 bool AudioDeviceIOS::CaptureWorkerThread() {
985 if (_recording) {
986 int bufPos = 0;
987 unsigned int lowestSeq = 0;
988 int lowestSeqBufPos = 0;
989 bool foundBuf = true;
990 const unsigned int noSamp10ms = _adbSampFreq / 100;
991
992 while (foundBuf) {
993 // Check if we have any buffer with data to insert
994 // into the Audio Device Buffer,
995 // and find the one with the lowest seq number
996 foundBuf = false;
997 for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
998 if (noSamp10ms == _recordingLength[bufPos]) {
999 if (!foundBuf) {
1000 lowestSeq = _recordingSeqNumber[bufPos];
1001 lowestSeqBufPos = bufPos;
1002 foundBuf = true;
1003 } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
1004 lowestSeq = _recordingSeqNumber[bufPos];
1005 lowestSeqBufPos = bufPos;
1006 }
1007 }
1008 }
1009
1010 // Insert data into the Audio Device Buffer if a full one was found.
1011 if (foundBuf) {
1012 // Update recording delay.
1013 UpdateRecordingDelay();
1014
1015 // Set the recorded buffer.
1016 audio_device_buffer_->SetRecordedBuffer(
1017 reinterpret_cast<int8_t*>(_recordingBuffer[lowestSeqBufPos]),
1018 _recordingLength[lowestSeqBufPos]);
1019
1020 // No need to set the current mic level in the ADB since we only
1021 // support digital AGC, and besides we cannot get or set the iOS
1022 // mic level anyway.
1023
1024 // Set VQE info; use clockdrift == 0.
1025 audio_device_buffer_->SetVQEData(_playoutDelay, _recordingDelay, 0);
1026
1027 // Deliver recorded samples at the specified sample rate, mic level,
1028 // etc. to the observer using a callback.
1029 audio_device_buffer_->DeliverRecordedData();
1030
1031 // Make the buffer available again.
1032 _recordingSeqNumber[lowestSeqBufPos] = 0;
1033 _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
1034 // Must be done last to avoid interrupt problems between threads.
1035 _recordingLength[lowestSeqBufPos] = 0;
1036 }
1037 }
1038 }
1039
1040 {
1041 // Normal case.
1042 // Sleep the thread (5 ms) to let other threads get to work.
1043 // TODO: Is 5 ms optimal? Sleep for a shorter time if data was inserted
1044 // into the Audio Device Buffer?
1045 timespec t;
1046 t.tv_sec = 0;
1047 t.tv_nsec = 5 * 1000 * 1000;
1048 nanosleep(&t, nullptr);
1049 }
1050 return true;
1051 }
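CaptureWorkerThread drains full 10 ms buffers to the ADB in sequence-number order, so capture data stays ordered even when the render thread fills buffers faster than the worker empties them. The selection step as a sketch over the same buffer layout (OldestFullBuffer and its parameters are illustrative):

// Sketch: index of the full buffer with the lowest sequence number, or -1
// if no buffer holds a complete 10 ms block yet.
static int OldestFullBuffer(const unsigned int* lengths,
                            const unsigned int* seqNumbers, int numBuffers,
                            unsigned int noSamp10ms) {
  int best = -1;
  for (int i = 0; i < numBuffers; ++i) {
    if (lengths[i] == noSamp10ms &&
        (best == -1 || seqNumbers[i] < seqNumbers[best]))
      best = i;
  }
  return best;
}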
1052
1053 } // namespace webrtc