OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #if !defined(__has_feature) || !__has_feature(objc_arc) | |
12 #error "This file requires ARC support." | |
13 #endif | |
14 | |
11 #import <AVFoundation/AVFoundation.h> | 15 #import <AVFoundation/AVFoundation.h> |
12 #import <Foundation/Foundation.h> | 16 #import <Foundation/Foundation.h> |
13 | 17 |
14 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" | 18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" |
15 | 19 #include "webrtc/modules/utility/interface/helpers_ios.h" |
20 | |
21 #include "webrtc/base/checks.h" | |
22 #include "webrtc/base/logging.h" | |
16 #include "webrtc/system_wrappers/interface/trace.h" | 23 #include "webrtc/system_wrappers/interface/trace.h" |
17 | 24 |
18 namespace webrtc { | 25 namespace webrtc { |
19 AudioDeviceIOS::AudioDeviceIOS(const int32_t id) | 26 |
20 : | 27 #define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::" |
21 _ptrAudioBuffer(NULL), | 28 |
22 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), | 29 using ios::CheckAndLogError; |
23 _id(id), | 30 |
24 _auVoiceProcessing(NULL), | 31 static void ActivateAudioSession(AVAudioSession* session, bool activate) { |
25 _audioInterruptionObserver(NULL), | 32 LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")"; |
26 _initialized(false), | 33 @autoreleasepool { |
27 _isShutDown(false), | 34 NSError* errorObject = nil; |
tkchin_webrtc 2015/07/08 19:41:14: nit: usually just NSError* error = nil;
henrika_webrtc 2015/07/09 12:58:01: Done.
28 _recording(false), | 35 BOOL success = NO; |
29 _playing(false), | 36 if (!activate) { |
30 _recIsInitialized(false), | 37 // Deactivate the audio session. |
31 _playIsInitialized(false), | 38 success = [session setActive:NO error:&errorObject]; |
32 _recordingDeviceIsSpecified(false), | 39 DCHECK(CheckAndLogError(success, errorObject)); |
33 _playoutDeviceIsSpecified(false), | 40 return; |
34 _micIsInitialized(false), | 41 } |
35 _speakerIsInitialized(false), | 42 // Activate an audio session and set category and mode. Only make changes |
36 _AGC(false), | 43 // if needed since setting them to the value they already have will clear |
37 _adbSampFreq(0), | 44 // transient properties (such as PortOverride) that some other component |
38 _recordingDelay(0), | 45 // has set up. |
39 _playoutDelay(0), | 46 if (session.category != AVAudioSessionCategoryPlayAndRecord) { |
40 _playoutDelayMeasurementCounter(9999), | 47 success = [session setCategory:AVAudioSessionCategoryPlayAndRecord |
41 _recordingDelayHWAndOS(0), | 48 error:&errorObject]; |
tkchin_webrtc 2015/07/08 19:41:14: nit: I think it is good practice to explicitly set
henrika_webrtc 2015/07/09 12:58:02: In fact, I tried CheckAndLogError(BOOL success, NS
42 _recordingDelayMeasurementCounter(9999), | 49 DCHECK(CheckAndLogError(success, errorObject)); |
43 _playWarning(0), | 50 } |
44 _playError(0), | 51 if (session.mode != AVAudioSessionModeVoiceChat) { |
45 _recWarning(0), | 52 success = |
46 _recError(0), | 53 [session setMode:AVAudioSessionModeVoiceChat error:&errorObject]; |
47 _playoutBufferUsed(0), | 54 DCHECK(CheckAndLogError(success, errorObject)); |
48 _recordingCurrentSeq(0), | 55 } |
49 _recordingBufferTotalSize(0) { | 56 success = [session setActive:YES error:&errorObject]; |
50 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, | 57 DCHECK(CheckAndLogError(success, errorObject)); |
51 "%s created", __FUNCTION__); | 58 // Ensure that category and mode are actually activated. |
52 | 59 DCHECK( |
53 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); | 60 [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]); |
54 memset(_recordingBuffer, 0, sizeof(_recordingBuffer)); | 61 DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]); |
55 memset(_recordingLength, 0, sizeof(_recordingLength)); | 62 } |
56 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber)); | 63 } |
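Every AVAudioSession call in the block above follows the same pattern: capture the BOOL result and the NSError, then pass both to ios::CheckAndLogError inside a DCHECK. The helper itself is declared in webrtc/modules/utility/interface/helpers_ios.h and is not part of this hunk; a minimal sketch of what such a helper might look like (an illustrative assumption, not the actual implementation):

```objc
// Sketch only -- the real CheckAndLogError lives in helpers_ios.h.
// It returns the BOOL unchanged so it composes with DCHECK(), and it
// logs the NSError details when the call has failed.
static inline BOOL CheckAndLogError(BOOL success, NSError* error) {
  if (!success) {
    NSString* msg =
        [NSString stringWithFormat:@"Error(%ld): %@", (long)error.code,
                                   error.localizedDescription];
    LOG(LS_ERROR) << [msg UTF8String];
  }
  return success;
}
```

Returning the flag instead of asserting inside the helper keeps the DCHECK at the call site, so release builds still log failures without crashing.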
64 | |
65 // Query hardware characteristics, such as input and output latency, input and | |
66 // output channel count, hardware sample rate, hardware volume setting, and | |
67 // whether audio input is available. To obtain meaningful values for hardware | |
68 // characteristics, the audio session must be initialized and active before we | |
69 // query the values. | |
70 // TODO(henrika): Note that these characteristics can change at runtime. For | |
71 // instance, input sample rate may change when a user plugs in a headset. | |
72 static void GetHardwareAudioParameters(AudioParameters* playout_parameters, | |
73 AudioParameters* record_parameters) { | |
74 LOG(LS_INFO) << "GetHardwareAudioParameters"; | |
75 @autoreleasepool { | |
76 // Implicit initialization happens when we obtain a reference to the | |
77 // AVAudioSession object. | |
78 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
79 // Always get values when the audio session is active. | |
80 ActivateAudioSession(session, true); | |
81 CHECK(session.isInputAvailable) << "No input path is available!"; | |
82 // Get current hardware parameters. | |
83 double sample_rate = (double)session.sampleRate; | |
84 double io_buffer_duration = (double)session.IOBufferDuration; | |
85 int output_channels = (int)session.outputNumberOfChannels; | |
86 int input_channels = (int)session.inputNumberOfChannels; | |
87 int frames_per_buffer = | |
88 static_cast<int>(sample_rate * io_buffer_duration + 0.5); | |
89 // Copy hardware parameters to output parameters. | |
90 playout_parameters->reset(sample_rate, output_channels, frames_per_buffer); | |
91 record_parameters->reset(sample_rate, input_channels, frames_per_buffer); | |
92 // Add logging for debugging purposes. | |
93 LOG(LS_INFO) << " sample rate: " << sample_rate; | |
94 LOG(LS_INFO) << " IO buffer duration: " << io_buffer_duration; | |
95 LOG(LS_INFO) << " frames_per_buffer: " << frames_per_buffer; | |
96 LOG(LS_INFO) << " output channels: " << output_channels; | |
97 LOG(LS_INFO) << " input channels: " << input_channels; | |
98 LOG(LS_INFO) << " output latency: " << (double)[session outputLatency]; | |
99 LOG(LS_INFO) << " input latency: " << (double)[session inputLatency]; | |
100 // Don't keep the audio session active. Instead, deactivate when needed. | |
101 ActivateAudioSession(session, false); | |
102 // TODO(henrika): to be extra safe, we can do more here. E.g., set | |
103 // preferred values for sample rate, channels etc., re-activate an audio | |
104 // session and verify the actual values again. Then we know for sure that | |
105 // the current values will in fact be correct. Or, we can skip all this | |
106 // and check the settings when audio is started. Probably better. | |
107 } | |
108 } | |
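A quick worked example of the frames_per_buffer rounding above: with a 16 kHz rate and the ~16 ms default IO buffer duration quoted later in InitPlayOrRecord, frames_per_buffer = 16000 * 0.016 + 0.5 = 256.5, which the truncating cast turns into 256 frames; at 48 kHz with a 10 ms buffer it would be 480. Adding 0.5 before the cast converts truncation into round-to-nearest.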
109 | |
110 AudioDeviceIOS::AudioDeviceIOS() | |
111 : audio_device_buffer_(nullptr), | |
112 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), | |
113 _auVoiceProcessing(nullptr), | |
114 _audioInterruptionObserver(nullptr), | |
115 _initialized(false), | |
116 _isShutDown(false), | |
117 _recording(false), | |
118 _playing(false), | |
119 _recIsInitialized(false), | |
120 _playIsInitialized(false), | |
121 _adbSampFreq(0), | |
122 _recordingDelay(0), | |
123 _playoutDelay(0), | |
124 _playoutDelayMeasurementCounter(9999), | |
125 _recordingDelayHWAndOS(0), | |
126 _recordingDelayMeasurementCounter(9999), | |
127 _playoutBufferUsed(0), | |
128 _recordingCurrentSeq(0), | |
129 _recordingBufferTotalSize(0) { | |
130 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); | |
131 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); | |
132 memset(_recordingBuffer, 0, sizeof(_recordingBuffer)); | |
133 memset(_recordingLength, 0, sizeof(_recordingLength)); | |
134 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber)); | |
57 } | 135 } |
58 | 136 |
59 AudioDeviceIOS::~AudioDeviceIOS() { | 137 AudioDeviceIOS::~AudioDeviceIOS() { |
60 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, | 138 LOGI() << "~dtor"; |
61 "%s destroyed", __FUNCTION__); | 139 DCHECK(thread_checker_.CalledOnValidThread()); |
62 | 140 Terminate(); |
63 Terminate(); | 141 delete &_critSect; |
64 | 142 } |
65 delete &_critSect; | |
66 } | |
67 | |
68 | |
69 // ============================================================================ | |
70 // API | |
71 // ============================================================================ | |
72 | 143 |
73 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { | 144 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { |
74 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | 145 LOGI() << "AttachAudioBuffer"; |
75 "%s", __FUNCTION__); | 146 DCHECK(audioBuffer); |
76 | 147 DCHECK(thread_checker_.CalledOnValidThread()); |
77 CriticalSectionScoped lock(&_critSect); | 148 audio_device_buffer_ = audioBuffer; |
78 | 149 } |
79 _ptrAudioBuffer = audioBuffer; | 150 |
80 | 151 int32_t AudioDeviceIOS::Init() { |
81 // inform the AudioBuffer about default settings for this implementation | 152 LOGI() << "Init"; |
82 _ptrAudioBuffer->SetRecordingSampleRate(ENGINE_REC_BUF_SIZE_IN_SAMPLES); | 153 DCHECK(thread_checker_.CalledOnValidThread()); |
83 _ptrAudioBuffer->SetPlayoutSampleRate(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES); | 154 if (_initialized) { |
84 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS); | |
85 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS); | |
86 } | |
87 | |
88 int32_t AudioDeviceIOS::ActiveAudioLayer( | |
89 AudioDeviceModule::AudioLayer& audioLayer) const { | |
90 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
91 "%s", __FUNCTION__); | |
92 audioLayer = AudioDeviceModule::kPlatformDefaultAudio; | |
93 return 0; | 155 return 0; |
94 } | 156 } |
95 | 157 // Query hardware audio parameters and cache the results. These parameters |
96 int32_t AudioDeviceIOS::Init() { | 158 // will be used as preferred values later when streaming starts. |
97 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | 159 // Note that I override these "optimal" values below since I don't want to |
98 "%s", __FUNCTION__); | 160 // modify the existing behavior yet. |
99 | 161 GetHardwareAudioParameters(&playout_parameters_, &record_parameters_); |
100 CriticalSectionScoped lock(&_critSect); | 162 // TODO(henrika): these parameters are currently hard coded to match the |
101 | 163 // existing implementation where we always use 16kHz as preferred sample |
102 if (_initialized) { | 164 // rate and mono only. Goal is to improve this scheme and make it more |
103 return 0; | 165 // flexible. In addition, a better native buffer size shall be derived. |
104 } | 166 // Using 10ms as default here (only used by unit test so far). |
105 | 167 // We should also implement observers for notification of any change in |
106 _isShutDown = false; | 168 // these parameters. |
107 | 169 playout_parameters_.reset(16000, 1, 160); |
108 // Create and start capture thread | 170 record_parameters_.reset(16000, 1, 160); |
109 if (!_captureWorkerThread) { | 171 |
110 _captureWorkerThread = ThreadWrapper::CreateThread( | 172 // AttachAudioBuffer() is called at construction by the main class but check |
111 RunCapture, this, "CaptureWorkerThread"); | 173 // just in case. |
112 bool res = _captureWorkerThread->Start(); | 174 DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first"; |
113 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, | 175 // Inform the audio device buffer (ADB) about the new audio format. |
114 _id, "CaptureWorkerThread started (res=%d)", res); | 176 // TODO(henrika): try to improve this section. |
115 _captureWorkerThread->SetPriority(kRealtimePriority); | 177 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate()); |
178 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels()); | |
179 audio_device_buffer_->SetRecordingSampleRate( | |
180 record_parameters_.sample_rate()); | |
181 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels()); | |
182 | |
183 DCHECK(!_captureWorkerThread); | |
184 // Create and start the capture thread. | |
185 // TODO(henrika): do we need this thread? | |
186 _isShutDown = false; | |
187 _captureWorkerThread = | |
188 ThreadWrapper::CreateThread(RunCapture, this, "CaptureWorkerThread"); | |
189 if (!_captureWorkerThread->Start()) { | |
190 LOG_F(LS_ERROR) << "Failed to start CaptureWorkerThread!"; | |
191 return -1; | |
192 } | |
193 _captureWorkerThread->SetPriority(kRealtimePriority); | |
194 _initialized = true; | |
195 return 0; | |
196 } | |
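The hard-coded reset is self-consistent: 160 frames per buffer at 16 kHz is 160 / 16000 = 10 ms, which is the "10ms as default" the comment mentions and the block size WebRTC's audio processing pipeline operates on.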
197 | |
198 int32_t AudioDeviceIOS::Terminate() { | |
199 LOGI() << "Terminate"; | |
200 DCHECK(thread_checker_.CalledOnValidThread()); | |
201 if (!_initialized) { | |
202 return 0; | |
203 } | |
204 // Stop the capture thread. | |
205 if (_captureWorkerThread) { | |
206 if (!_captureWorkerThread->Stop()) { | |
207 LOG_F(LS_ERROR) << "Failed to stop CaptureWorkerThread!"; | |
208 return -1; | |
209 } | |
210 _captureWorkerThread.reset(); | |
211 } | |
212 ShutdownPlayOrRecord(); | |
213 _isShutDown = true; | |
214 _initialized = false; | |
215 return 0; | |
216 } | |
217 | |
218 int32_t AudioDeviceIOS::InitPlayout() { | |
219 LOGI() << "InitPlayout"; | |
220 DCHECK(thread_checker_.CalledOnValidThread()); | |
221 DCHECK(_initialized); | |
222 DCHECK(!_playIsInitialized); | |
223 DCHECK(!_playing); | |
224 if (!_recIsInitialized) { | |
225 if (InitPlayOrRecord() == -1) { | |
226 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; | |
227 return -1; | |
228 } | |
229 } | |
230 _playIsInitialized = true; | |
231 return 0; | |
232 } | |
233 | |
234 int32_t AudioDeviceIOS::InitRecording() { | |
235 LOGI() << "InitRecording"; | |
236 DCHECK(thread_checker_.CalledOnValidThread()); | |
237 DCHECK(_initialized); | |
238 DCHECK(!_recIsInitialized); | |
239 DCHECK(!_recording); | |
240 if (!_playIsInitialized) { | |
241 if (InitPlayOrRecord() == -1) { | |
242 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; | |
243 return -1; | |
244 } | |
245 } | |
246 _recIsInitialized = true; | |
247 return 0; | |
248 } | |
249 | |
250 int32_t AudioDeviceIOS::StartPlayout() { | |
251 LOGI() << "StartPlayout"; | |
252 DCHECK(thread_checker_.CalledOnValidThread()); | |
253 DCHECK(_playIsInitialized); | |
254 DCHECK(!_playing); | |
255 | |
256 CriticalSectionScoped lock(&_critSect); | |
257 | |
258 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); | |
259 _playoutBufferUsed = 0; | |
260 _playoutDelay = 0; | |
261 // Make sure the first call to the delay update function updates the delay. | |
262 _playoutDelayMeasurementCounter = 9999; | |
263 | |
264 if (!_recording) { | |
265 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing); | |
266 if (result != noErr) { | |
267 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; | |
268 return -1; | |
269 } | |
270 } | |
271 _playing = true; | |
272 return 0; | |
273 } | |
274 | |
275 int32_t AudioDeviceIOS::StopPlayout() { | |
276 LOGI() << "StopPlayout"; | |
277 DCHECK(thread_checker_.CalledOnValidThread()); | |
278 if (!_playIsInitialized || !_playing) { | |
279 return 0; | |
280 } | |
281 | |
282 CriticalSectionScoped lock(&_critSect); | |
283 | |
284 if (!_recording) { | |
285 // Both playout and recording have stopped; shut down the device. | |
286 ShutdownPlayOrRecord(); | |
287 } | |
288 _playIsInitialized = false; | |
289 _playing = false; | |
290 return 0; | |
291 } | |
292 | |
293 int32_t AudioDeviceIOS::StartRecording() { | |
294 LOGI() << "StartRecording"; | |
295 DCHECK(thread_checker_.CalledOnValidThread()); | |
296 DCHECK(_recIsInitialized); | |
297 DCHECK(!_recording); | |
298 | |
299 CriticalSectionScoped lock(&_critSect); | |
300 | |
301 memset(_recordingBuffer, 0, sizeof(_recordingBuffer)); | |
302 memset(_recordingLength, 0, sizeof(_recordingLength)); | |
303 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber)); | |
304 | |
305 _recordingCurrentSeq = 0; | |
306 _recordingBufferTotalSize = 0; | |
307 _recordingDelay = 0; | |
308 _recordingDelayHWAndOS = 0; | |
309 // Make sure the first call to the delay update function updates the delay. | |
310 _recordingDelayMeasurementCounter = 9999; | |
311 | |
312 if (!_playing) { | |
313 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing); | |
314 if (result != noErr) { | |
315 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; | |
316 return -1; | |
317 } | |
318 } | |
319 _recording = true; | |
320 return 0; | |
321 } | |
322 | |
323 int32_t AudioDeviceIOS::StopRecording() { | |
324 LOGI() << "StopRecording"; | |
325 DCHECK(thread_checker_.CalledOnValidThread()); | |
326 if (!_recIsInitialized || !_recording) { | |
327 return 0; | |
328 } | |
329 | |
330 CriticalSectionScoped lock(&_critSect); | |
331 | |
332 if (!_playing) { | |
333 // Both playout and recording have stopped; shut down the device. | |
334 ShutdownPlayOrRecord(); | |
335 } | |
336 _recIsInitialized = false; | |
337 _recording = false; | |
338 return 0; | |
339 } | |
340 | |
341 // Change the default receiver playout route to speaker. | |
342 int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) { | |
343 LOGI() << "SetLoudspeakerStatus(" << enable << ")"; | |
344 | |
345 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
346 NSString* category = session.category; | |
347 AVAudioSessionCategoryOptions options = session.categoryOptions; | |
348 // Respect old category options if category is | |
349 // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options | |
350 // might not be valid for this category. | |
351 if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) { | |
352 if (enable) { | |
353 options |= AVAudioSessionCategoryOptionDefaultToSpeaker; | |
116 } else { | 354 } else { |
117 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, | 355 options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker; |
118 _id, "Thread already created"); | 356 } |
119 } | 357 } else { |
120 _playWarning = 0; | 358 options = AVAudioSessionCategoryOptionDefaultToSpeaker; |
121 _playError = 0; | 359 } |
122 _recWarning = 0; | 360 NSError* error = nil; |
123 _recError = 0; | 361 BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord |
124 | 362 withOptions:options |
125 _initialized = true; | 363 error:&error]; |
126 | 364 ios::CheckAndLogError(success, error); |
127 return 0; | 365 return (error == nil) ? 0 : -1; |
128 } | 366 } |
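For context, application code would drive these two methods roughly as follows (the adm pointer name is illustrative only, not something defined in this CL):

```objc
// Illustrative usage; 'adm' stands in for however the application
// reaches its AudioDeviceIOS instance.
adm->SetLoudspeakerStatus(true);  // route playout to the built-in speaker
bool speaker_on = false;
adm->GetLoudspeakerStatus(speaker_on);
// speaker_on is now true, provided setCategory:withOptions: succeeded.
```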
129 | 367 |
130 int32_t AudioDeviceIOS::Terminate() { | 368 int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const { |
131 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | 369 LOGI() << "GetLoudspeakerStatus"; |
132 "%s", __FUNCTION__); | 370 AVAudioSession* session = [AVAudioSession sharedInstance]; |
133 | 371 AVAudioSessionCategoryOptions options = session.categoryOptions; |
134 if (!_initialized) { | 372 enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker; |
135 return 0; | 373 return 0; |
136 } | |
137 | |
138 | |
139 // Stop capture thread | |
140 if (_captureWorkerThread) { | |
141 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, | |
142 _id, "Stopping CaptureWorkerThread"); | |
143 bool res = _captureWorkerThread->Stop(); | |
144 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, | |
145 _id, "CaptureWorkerThread stopped (res=%d)", res); | |
146 _captureWorkerThread.reset(); | |
147 } | |
148 | |
149 // Shut down Audio Unit | |
150 ShutdownPlayOrRecord(); | |
151 | |
152 _isShutDown = true; | |
153 _initialized = false; | |
154 _speakerIsInitialized = false; | |
155 _micIsInitialized = false; | |
156 _playoutDeviceIsSpecified = false; | |
157 _recordingDeviceIsSpecified = false; | |
158 return 0; | |
159 } | |
160 | |
161 bool AudioDeviceIOS::Initialized() const { | |
162 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
163 "%s", __FUNCTION__); | |
164 return (_initialized); | |
165 } | |
166 | |
167 int32_t AudioDeviceIOS::InitSpeaker() { | |
168 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
169 "%s", __FUNCTION__); | |
170 | |
171 CriticalSectionScoped lock(&_critSect); | |
172 | |
173 if (!_initialized) { | |
174 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
175 _id, " Not initialized"); | |
176 return -1; | |
177 } | |
178 | |
179 if (_playing) { | |
180 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
181 _id, " Cannot init speaker when playing"); | |
182 return -1; | |
183 } | |
184 | |
185 if (!_playoutDeviceIsSpecified) { | |
186 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
187 _id, " Playout device is not specified"); | |
188 return -1; | |
189 } | |
190 | |
191 // Do nothing | |
192 _speakerIsInitialized = true; | |
193 | |
194 return 0; | |
195 } | |
196 | |
197 int32_t AudioDeviceIOS::InitMicrophone() { | |
198 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
199 "%s", __FUNCTION__); | |
200 | |
201 CriticalSectionScoped lock(&_critSect); | |
202 | |
203 if (!_initialized) { | |
204 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
205 _id, " Not initialized"); | |
206 return -1; | |
207 } | |
208 | |
209 if (_recording) { | |
210 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
211 _id, " Cannot init mic when recording"); | |
212 return -1; | |
213 } | |
214 | |
215 if (!_recordingDeviceIsSpecified) { | |
216 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
217 _id, " Recording device is not specified"); | |
218 return -1; | |
219 } | |
220 | |
221 // Do nothing | |
222 | |
223 _micIsInitialized = true; | |
224 | |
225 return 0; | |
226 } | |
227 | |
228 bool AudioDeviceIOS::SpeakerIsInitialized() const { | |
229 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
230 "%s", __FUNCTION__); | |
231 return _speakerIsInitialized; | |
232 } | |
233 | |
234 bool AudioDeviceIOS::MicrophoneIsInitialized() const { | |
235 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
236 "%s", __FUNCTION__); | |
237 return _micIsInitialized; | |
238 } | |
239 | |
240 int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) { | |
241 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
242 "%s", __FUNCTION__); | |
243 | |
244 available = false; // Speaker volume not supported on iOS | |
245 | |
246 return 0; | |
247 } | |
248 | |
249 int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) { | |
250 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
251 "AudioDeviceIOS::SetSpeakerVolume(volume=%u)", volume); | |
252 | |
253 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
254 " API call not supported on this platform"); | |
255 return -1; | |
256 } | |
257 | |
258 int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const { | |
259 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
260 "%s", __FUNCTION__); | |
261 | |
262 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
263 " API call not supported on this platform"); | |
264 return -1; | |
265 } | |
266 | |
267 int32_t | |
268 AudioDeviceIOS::SetWaveOutVolume(uint16_t volumeLeft, | |
269 uint16_t volumeRight) { | |
270 WEBRTC_TRACE( | |
271 kTraceModuleCall, | |
272 kTraceAudioDevice, | |
273 _id, | |
274 "AudioDeviceIOS::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)", | |
275 volumeLeft, volumeRight); | |
276 | |
277 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
278 " API call not supported on this platform"); | |
279 | |
280 return -1; | |
281 } | |
282 | |
283 int32_t | |
284 AudioDeviceIOS::WaveOutVolume(uint16_t& /*volumeLeft*/, | |
285 uint16_t& /*volumeRight*/) const { | |
286 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
287 "%s", __FUNCTION__); | |
288 | |
289 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
290 " API call not supported on this platform"); | |
291 return -1; | |
292 } | |
293 | |
294 int32_t | |
295 AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const { | |
296 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
297 "%s", __FUNCTION__); | |
298 | |
299 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
300 " API call not supported on this platform"); | |
301 return -1; | |
302 } | |
303 | |
304 int32_t AudioDeviceIOS::MinSpeakerVolume( | |
305 uint32_t& minVolume) const { | |
306 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
307 "%s", __FUNCTION__); | |
308 | |
309 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
310 " API call not supported on this platform"); | |
311 return -1; | |
312 } | |
313 | |
314 int32_t | |
315 AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const { | |
316 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
317 "%s", __FUNCTION__); | |
318 | |
319 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
320 " API call not supported on this platform"); | |
321 return -1; | |
322 } | |
323 | |
324 int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) { | |
325 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
326 "%s", __FUNCTION__); | |
327 | |
328 available = false; // Speaker mute not supported on iOS | |
329 | |
330 return 0; | |
331 } | |
332 | |
333 int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) { | |
334 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
335 "%s", __FUNCTION__); | |
336 | |
337 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
338 " API call not supported on this platform"); | |
339 return -1; | |
340 } | |
341 | |
342 int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const { | |
343 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
344 "%s", __FUNCTION__); | |
345 | |
346 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
347 " API call not supported on this platform"); | |
348 return -1; | |
349 } | |
350 | |
351 int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) { | |
352 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
353 "%s", __FUNCTION__); | |
354 | |
355 available = false; // Mic mute not supported on iOS | |
356 | |
357 return 0; | |
358 } | |
359 | |
360 int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) { | |
361 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
362 "%s", __FUNCTION__); | |
363 | |
364 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
365 " API call not supported on this platform"); | |
366 return -1; | |
367 } | |
368 | |
369 int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const { | |
370 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
371 "%s", __FUNCTION__); | |
372 | |
373 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
374 " API call not supported on this platform"); | |
375 return -1; | |
376 } | |
377 | |
378 int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) { | |
379 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
380 "%s", __FUNCTION__); | |
381 | |
382 available = false; // Mic boost not supported on iOS | |
383 | |
384 return 0; | |
385 } | |
386 | |
387 int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) { | |
388 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
389 "AudioDeviceIOS::SetMicrophoneBoost(enable=%u)", enable); | |
390 | |
391 if (!_micIsInitialized) { | |
392 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
393 " Microphone not initialized"); | |
394 return -1; | |
395 } | |
396 | |
397 if (enable) { | |
398 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
399 " SetMicrophoneBoost cannot be enabled on this platform"); | |
400 return -1; | |
401 } | |
402 | |
403 return 0; | |
404 } | |
405 | |
406 int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const { | |
407 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
408 "%s", __FUNCTION__); | |
409 if (!_micIsInitialized) { | |
410 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
411 " Microphone not initialized"); | |
412 return -1; | |
413 } | |
414 | |
415 enabled = false; | |
416 | |
417 return 0; | |
418 } | |
419 | |
420 int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) { | |
421 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
422 "%s", __FUNCTION__); | |
423 | |
424 available = false; // Stereo recording not supported on iOS | |
425 | |
426 return 0; | |
427 } | |
428 | |
429 int32_t AudioDeviceIOS::SetStereoRecording(bool enable) { | |
430 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
431 "AudioDeviceIOS::SetStereoRecording(enable=%u)", enable); | |
432 | |
433 if (enable) { | |
434 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
435 " Stereo recording is not supported on this platform"); | |
436 return -1; | |
437 } | |
438 return 0; | |
439 } | |
440 | |
441 int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const { | |
442 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
443 "%s", __FUNCTION__); | |
444 | |
445 enabled = false; | |
446 return 0; | |
447 } | |
448 | |
449 int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) { | |
450 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
451 "%s", __FUNCTION__); | |
452 | |
453 available = false; // Stereo playout not supported on iOS | |
454 | |
455 return 0; | |
456 } | |
457 | |
458 int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) { | |
459 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
460 "AudioDeviceIOS::SetStereoPlayout(enable=%u)", enable); | |
461 | |
462 if (enable) { | |
463 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
464 " Stereo playout is not supported on this platform"); | |
465 return -1; | |
466 } | |
467 return 0; | |
468 } | |
469 | |
470 int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const { | |
471 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
472 "%s", __FUNCTION__); | |
473 | |
474 enabled = false; | |
475 return 0; | |
476 } | |
477 | |
478 int32_t AudioDeviceIOS::SetAGC(bool enable) { | |
479 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
480 "AudioDeviceIOS::SetAGC(enable=%d)", enable); | |
481 | |
482 _AGC = enable; | |
483 | |
484 return 0; | |
485 } | |
486 | |
487 bool AudioDeviceIOS::AGC() const { | |
488 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
489 "%s", __FUNCTION__); | |
490 | |
491 return _AGC; | |
492 } | |
493 | |
494 int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) { | |
495 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
496 "%s", __FUNCTION__); | |
497 | |
498 available = false; // Mic volume not supported on IOS | |
499 | |
500 return 0; | |
501 } | |
502 | |
503 int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) { | |
504 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
505 "AudioDeviceIOS::SetMicrophoneVolume(volume=%u)", volume); | |
506 | |
507 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
508 " API call not supported on this platform"); | |
509 return -1; | |
510 } | |
511 | |
512 int32_t | |
513 AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const { | |
514 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
515 "%s", __FUNCTION__); | |
516 | |
517 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
518 " API call not supported on this platform"); | |
519 return -1; | |
520 } | |
521 | |
522 int32_t | |
523 AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const { | |
524 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
525 "%s", __FUNCTION__); | |
526 | |
527 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
528 " API call not supported on this platform"); | |
529 return -1; | |
530 } | |
531 | |
532 int32_t | |
533 AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const { | |
534 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
535 "%s", __FUNCTION__); | |
536 | |
537 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
538 " API call not supported on this platform"); | |
539 return -1; | |
540 } | |
541 | |
542 int32_t | |
543 AudioDeviceIOS::MicrophoneVolumeStepSize( | |
544 uint16_t& stepSize) const { | |
545 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
546 "%s", __FUNCTION__); | |
547 | |
548 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
549 " API call not supported on this platform"); | |
550 return -1; | |
551 } | |
552 | |
553 int16_t AudioDeviceIOS::PlayoutDevices() { | |
554 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
555 "%s", __FUNCTION__); | |
556 | |
557 return (int16_t)1; | |
558 } | |
559 | |
560 int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) { | |
561 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
562 "AudioDeviceIOS::SetPlayoutDevice(index=%u)", index); | |
563 | |
564 if (_playIsInitialized) { | |
565 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
566 " Playout already initialized"); | |
567 return -1; | |
568 } | |
569 | |
570 if (index !=0) { | |
571 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
572 " SetPlayoutDevice invalid index"); | |
573 return -1; | |
574 } | |
575 _playoutDeviceIsSpecified = true; | |
576 | |
577 return 0; | |
578 } | |
579 | |
580 int32_t | |
581 AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) { | |
582 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
583 "WindowsDeviceType not supported"); | |
584 return -1; | |
585 } | |
586 | |
587 int32_t | |
588 AudioDeviceIOS::PlayoutDeviceName(uint16_t index, | |
589 char name[kAdmMaxDeviceNameSize], | |
590 char guid[kAdmMaxGuidSize]) { | |
591 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
592 "AudioDeviceIOS::PlayoutDeviceName(index=%u)", index); | |
593 | |
594 if (index != 0) { | |
595 return -1; | |
596 } | |
597 // return empty strings | |
598 memset(name, 0, kAdmMaxDeviceNameSize); | |
599 if (guid != NULL) { | |
600 memset(guid, 0, kAdmMaxGuidSize); | |
601 } | |
602 | |
603 return 0; | |
604 } | |
605 | |
606 int32_t | |
607 AudioDeviceIOS::RecordingDeviceName(uint16_t index, | |
608 char name[kAdmMaxDeviceNameSize], | |
609 char guid[kAdmMaxGuidSize]) { | |
610 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
611 "AudioDeviceIOS::RecordingDeviceName(index=%u)", index); | |
612 | |
613 if (index != 0) { | |
614 return -1; | |
615 } | |
616 // return empty strings | |
617 memset(name, 0, kAdmMaxDeviceNameSize); | |
618 if (guid != NULL) { | |
619 memset(guid, 0, kAdmMaxGuidSize); | |
620 } | |
621 | |
622 return 0; | |
623 } | |
624 | |
625 int16_t AudioDeviceIOS::RecordingDevices() { | |
626 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
627 | |
628 return (int16_t)1; | |
629 } | |
630 | |
631 int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) { | |
632 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
633 "AudioDeviceIOS::SetRecordingDevice(index=%u)", index); | |
634 | |
635 if (_recIsInitialized) { | |
636 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
637 " Recording already initialized"); | |
638 return -1; | |
639 } | |
640 | |
641 if (index !=0) { | |
642 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
643 " SetRecordingDevice invalid index"); | |
644 return -1; | |
645 } | |
646 | |
647 _recordingDeviceIsSpecified = true; | |
648 | |
649 return 0; | |
650 } | |
651 | |
652 int32_t | |
653 AudioDeviceIOS::SetRecordingDevice( | |
654 AudioDeviceModule::WindowsDeviceType) { | |
655 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
656 "WindowsDeviceType not supported"); | |
657 return -1; | |
658 } | |
659 | |
660 // ---------------------------------------------------------------------------- | |
661 // SetLoudspeakerStatus | |
662 // | |
663 // Change the default receiver playout route to speaker. | |
664 // | |
665 // ---------------------------------------------------------------------------- | |
666 | |
667 int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) { | |
668 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
669 "AudioDeviceIOS::SetLoudspeakerStatus(enable=%d)", enable); | |
670 | |
671 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
672 NSString* category = session.category; | |
673 AVAudioSessionCategoryOptions options = session.categoryOptions; | |
674 // Respect old category options if category is | |
675 // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options | |
676 // might not be valid for this category. | |
677 if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) { | |
678 if (enable) { | |
679 options |= AVAudioSessionCategoryOptionDefaultToSpeaker; | |
680 } else { | |
681 options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker; | |
682 } | |
683 } else { | |
684 options = AVAudioSessionCategoryOptionDefaultToSpeaker; | |
685 } | |
686 | |
687 NSError* error = nil; | |
688 [session setCategory:AVAudioSessionCategoryPlayAndRecord | |
689 withOptions:options | |
690 error:&error]; | |
691 if (error != nil) { | |
692 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
693 "Error changing default output route "); | |
694 return -1; | |
695 } | |
696 | |
697 return 0; | |
698 } | |
699 | |
700 int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool &enabled) const { | |
701 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
702 "AudioDeviceIOS::SetLoudspeakerStatus(enabled=?)"); | |
703 | |
704 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
705 AVAudioSessionCategoryOptions options = session.categoryOptions; | |
706 enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker; | |
707 | |
708 return 0; | |
709 } | |
710 | |
711 int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) { | |
712 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
713 | |
714 available = false; | |
715 | |
716 // Try to initialize the playout side | |
717 int32_t res = InitPlayout(); | |
718 | |
719 // Cancel effect of initialization | |
720 StopPlayout(); | |
721 | |
722 if (res != -1) { | |
723 available = true; | |
724 } | |
725 | |
726 return 0; | |
727 } | |
728 | |
729 int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) { | |
730 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
731 | |
732 available = false; | |
733 | |
734 // Try to initialize the recording side | |
735 int32_t res = InitRecording(); | |
736 | |
737 // Cancel effect of initialization | |
738 StopRecording(); | |
739 | |
740 if (res != -1) { | |
741 available = true; | |
742 } | |
743 | |
744 return 0; | |
745 } | |
746 | |
747 int32_t AudioDeviceIOS::InitPlayout() { | |
748 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
749 | |
750 CriticalSectionScoped lock(&_critSect); | |
751 | |
752 if (!_initialized) { | |
753 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Not initialized"); | |
754 return -1; | |
755 } | |
756 | |
757 if (_playing) { | |
758 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
759 " Playout already started"); | |
760 return -1; | |
761 } | |
762 | |
763 if (_playIsInitialized) { | |
764 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
765 " Playout already initialized"); | |
766 return 0; | |
767 } | |
768 | |
769 if (!_playoutDeviceIsSpecified) { | |
770 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
771 " Playout device is not specified"); | |
772 return -1; | |
773 } | |
774 | |
775 // Initialize the speaker | |
776 if (InitSpeaker() == -1) { | |
777 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
778 " InitSpeaker() failed"); | |
779 } | |
780 | |
781 _playIsInitialized = true; | |
782 | |
783 if (!_recIsInitialized) { | |
784 // Audio init | |
785 if (InitPlayOrRecord() == -1) { | |
786 // todo: Handle error | |
787 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
788 " InitPlayOrRecord() failed"); | |
789 } | |
790 } else { | |
791 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
792 " Recording already initialized - InitPlayOrRecord() not called"); | |
793 } | |
794 | |
795 return 0; | |
796 } | |
797 | |
798 bool AudioDeviceIOS::PlayoutIsInitialized() const { | |
799 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
800 return (_playIsInitialized); | |
801 } | |
802 | |
803 int32_t AudioDeviceIOS::InitRecording() { | |
804 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
805 | |
806 CriticalSectionScoped lock(&_critSect); | |
807 | |
808 if (!_initialized) { | |
809 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
810 " Not initialized"); | |
811 return -1; | |
812 } | |
813 | |
814 if (_recording) { | |
815 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
816 " Recording already started"); | |
817 return -1; | |
818 } | |
819 | |
820 if (_recIsInitialized) { | |
821 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
822 " Recording already initialized"); | |
823 return 0; | |
824 } | |
825 | |
826 if (!_recordingDeviceIsSpecified) { | |
827 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
828 " Recording device is not specified"); | |
829 return -1; | |
830 } | |
831 | |
832 // Initialize the microphone | |
833 if (InitMicrophone() == -1) { | |
834 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
835 " InitMicrophone() failed"); | |
836 } | |
837 | |
838 _recIsInitialized = true; | |
839 | |
840 if (!_playIsInitialized) { | |
841 // Audio init | |
842 if (InitPlayOrRecord() == -1) { | |
843 // todo: Handle error | |
844 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
845 " InitPlayOrRecord() failed"); | |
846 } | |
847 } else { | |
848 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
849 " Playout already initialized - InitPlayOrRecord() " \ | |
850 "not called"); | |
851 } | |
852 | |
853 return 0; | |
854 } | |
855 | |
856 bool AudioDeviceIOS::RecordingIsInitialized() const { | |
857 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
858 return (_recIsInitialized); | |
859 } | |
860 | |
861 int32_t AudioDeviceIOS::StartRecording() { | |
862 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
863 | |
864 CriticalSectionScoped lock(&_critSect); | |
865 | |
866 if (!_recIsInitialized) { | |
867 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
868 " Recording not initialized"); | |
869 return -1; | |
870 } | |
871 | |
872 if (_recording) { | |
873 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
874 " Recording already started"); | |
875 return 0; | |
876 } | |
877 | |
878 // Reset recording buffer | |
879 memset(_recordingBuffer, 0, sizeof(_recordingBuffer)); | |
880 memset(_recordingLength, 0, sizeof(_recordingLength)); | |
881 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber)); | |
882 _recordingCurrentSeq = 0; | |
883 _recordingBufferTotalSize = 0; | |
884 _recordingDelay = 0; | |
885 _recordingDelayHWAndOS = 0; | |
886 // Make sure first call to update delay function will update delay | |
887 _recordingDelayMeasurementCounter = 9999; | |
888 _recWarning = 0; | |
889 _recError = 0; | |
890 | |
891 if (!_playing) { | |
892 // Start Audio Unit | |
893 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
894 " Starting Audio Unit"); | |
895 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing); | |
896 if (0 != result) { | |
897 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, | |
898 " Error starting Audio Unit (result=%d)", result); | |
899 return -1; | |
900 } | |
901 } | |
902 | |
903 _recording = true; | |
904 | |
905 return 0; | |
906 } | |
907 | |
908 int32_t AudioDeviceIOS::StopRecording() { | |
909 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
910 | |
911 CriticalSectionScoped lock(&_critSect); | |
912 | |
913 if (!_recIsInitialized) { | |
914 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
915 " Recording is not initialized"); | |
916 return 0; | |
917 } | |
918 | |
919 _recording = false; | |
920 | |
921 if (!_playing) { | |
922 // Both playout and recording has stopped, shutdown the device | |
923 ShutdownPlayOrRecord(); | |
924 } | |
925 | |
926 _recIsInitialized = false; | |
927 _micIsInitialized = false; | |
928 | |
929 return 0; | |
930 } | |
931 | |
932 bool AudioDeviceIOS::Recording() const { | |
933 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
934 return (_recording); | |
935 } | |
936 | |
937 int32_t AudioDeviceIOS::StartPlayout() { | |
938 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
939 | |
940 // This lock is (among other things) needed to avoid concurrency issues | |
941 // with capture thread | |
942 // shutting down Audio Unit | |
943 CriticalSectionScoped lock(&_critSect); | |
944 | |
945 if (!_playIsInitialized) { | |
946 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
947 " Playout not initialized"); | |
948 return -1; | |
949 } | |
950 | |
951 if (_playing) { | |
952 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
953 " Playing already started"); | |
954 return 0; | |
955 } | |
956 | |
957 // Reset playout buffer | |
958 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); | |
959 _playoutBufferUsed = 0; | |
960 _playoutDelay = 0; | |
961 // Make sure first call to update delay function will update delay | |
962 _playoutDelayMeasurementCounter = 9999; | |
963 _playWarning = 0; | |
964 _playError = 0; | |
965 | |
966 if (!_recording) { | |
967 // Start Audio Unit | |
968 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
969 " Starting Audio Unit"); | |
970 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing); | |
971 if (0 != result) { | |
972 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, | |
973 " Error starting Audio Unit (result=%d)", result); | |
974 return -1; | |
975 } | |
976 } | |
977 | |
978 _playing = true; | |
979 | |
980 return 0; | |
981 } | |
982 | |
983 int32_t AudioDeviceIOS::StopPlayout() { | |
984 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
985 | |
986 CriticalSectionScoped lock(&_critSect); | |
987 | |
988 if (!_playIsInitialized) { | |
989 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
990 " Playout is not initialized"); | |
991 return 0; | |
992 } | |
993 | |
994 _playing = false; | |
995 | |
996 if (!_recording) { | |
997 // Both playout and recording has stopped, signal shutdown the device | |
998 ShutdownPlayOrRecord(); | |
999 } | |
1000 | |
1001 _playIsInitialized = false; | |
1002 _speakerIsInitialized = false; | |
1003 | |
1004 return 0; | |
1005 } | |
1006 | |
1007 bool AudioDeviceIOS::Playing() const { | |
1008 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
1009 "%s", __FUNCTION__); | |
1010 return (_playing); | |
1011 } | |
1012 | |
1013 // ---------------------------------------------------------------------------- | |
1014 // ResetAudioDevice | |
1015 // | |
1016 // Disable playout and recording, signal to capture thread to shutdown, | |
1017 // and set enable states after shutdown to same as current. | |
1018 // In capture thread audio device will be shutdown, then started again. | |
1019 // ---------------------------------------------------------------------------- | |
1020 int32_t AudioDeviceIOS::ResetAudioDevice() { | |
1021 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
1022 | |
1023 CriticalSectionScoped lock(&_critSect); | |
1024 | |
1025 if (!_playIsInitialized && !_recIsInitialized) { | |
1026 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1027 " Playout or recording not initialized, doing nothing"); | |
1028 return 0; // Nothing to reset | |
1029 } | |
1030 | |
1031 // Store the states we have before stopping to restart below | |
1032 bool initPlay = _playIsInitialized; | |
1033 bool play = _playing; | |
1034 bool initRec = _recIsInitialized; | |
1035 bool rec = _recording; | |
1036 | |
1037 int res(0); | |
1038 | |
1039 // Stop playout and recording | |
1040 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
1041 " Stopping playout and recording"); | |
1042 res += StopPlayout(); | |
1043 res += StopRecording(); | |
1044 | |
1045 // Restart | |
1046 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
1047 " Restarting playout and recording (%d, %d, %d, %d)", | |
1048 initPlay, play, initRec, rec); | |
1049 if (initPlay) res += InitPlayout(); | |
1050 if (initRec) res += InitRecording(); | |
1051 if (play) res += StartPlayout(); | |
1052 if (rec) res += StartRecording(); | |
1053 | |
1054 if (0 != res) { | |
1055 // Logging is done in init/start/stop calls above | |
1056 return -1; | |
1057 } | |
1058 | |
1059 return 0; | |
1060 } | 374 } |
1061 | 375 |
1062 int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const { | 376 int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const { |
1063 delayMS = _playoutDelay; | 377 delayMS = _playoutDelay; |
1064 return 0; | 378 return 0; |
1065 } | 379 } |
1066 | 380 |
1067 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const { | 381 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const { |
1068 delayMS = _recordingDelay; | 382 delayMS = _recordingDelay; |
1069 return 0; | 383 return 0; |
1070 } | 384 } |
1071 | 385 |
1072 int32_t | 386 int32_t AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type, |
1073 AudioDeviceIOS::SetPlayoutBuffer(const AudioDeviceModule::BufferType type, | 387 uint16_t& sizeMS) const { |
1074 uint16_t sizeMS) { | 388 type = AudioDeviceModule::kAdaptiveBufferSize; |
1075 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | 389 sizeMS = _playoutDelay; |
1076 "AudioDeviceIOS::SetPlayoutBuffer(type=%u, sizeMS=%u)", | 390 return 0; |
1077 type, sizeMS); | 391 } |
1078 | 392 |
1079 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 393 int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const { |
1080 " API call not supported on this platform"); | 394 CHECK(playout_parameters_.is_valid()); |
1081 return -1; | 395 DCHECK(thread_checker_.CalledOnValidThread()); |
1082 } | 396 *params = playout_parameters_; |
1083 | 397 return 0; |
1084 int32_t | 398 } |
1085 AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type, | 399 |
1086 uint16_t& sizeMS) const { | 400 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const { |
1087 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | 401 CHECK(record_parameters_.is_valid()); |
1088 | 402 DCHECK(thread_checker_.CalledOnValidThread()); |
1089 type = AudioDeviceModule::kAdaptiveBufferSize; | 403 *params = record_parameters_; |
1090 | 404 return 0; |
1091 sizeMS = _playoutDelay; | |
1092 | |
1093 return 0; | |
1094 } | |
1095 | |
1096 int32_t AudioDeviceIOS::CPULoad(uint16_t& /*load*/) const { | |
1097 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
1098 | |
1099 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1100 " API call not supported on this platform"); | |
1101 return -1; | |
1102 } | |
1103 | |
1104 bool AudioDeviceIOS::PlayoutWarning() const { | |
1105 return (_playWarning > 0); | |
1106 } | |
1107 | |
1108 bool AudioDeviceIOS::PlayoutError() const { | |
1109 return (_playError > 0); | |
1110 } | |
1111 | |
1112 bool AudioDeviceIOS::RecordingWarning() const { | |
1113 return (_recWarning > 0); | |
1114 } | |
1115 | |
1116 bool AudioDeviceIOS::RecordingError() const { | |
1117 return (_recError > 0); | |
1118 } | |
1119 | |
1120 void AudioDeviceIOS::ClearPlayoutWarning() { | |
1121 _playWarning = 0; | |
1122 } | |
1123 | |
1124 void AudioDeviceIOS::ClearPlayoutError() { | |
1125 _playError = 0; | |
1126 } | |
1127 | |
1128 void AudioDeviceIOS::ClearRecordingWarning() { | |
1129 _recWarning = 0; | |
1130 } | |
1131 | |
1132 void AudioDeviceIOS::ClearRecordingError() { | |
1133 _recError = 0; | |
1134 } | 405 } |
1135 | 406 |
1136 // ============================================================================ | 407 // ============================================================================ |
1137 // Private Methods | 408 // Private Methods |
1138 // ============================================================================ | 409 // ============================================================================ |
1139 | 410 |
1140 int32_t AudioDeviceIOS::InitPlayOrRecord() { | 411 int32_t AudioDeviceIOS::InitPlayOrRecord() { |
1141 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | 412 LOGI() << "InitPlayOrRecord"; |
1142 | 413 DCHECK(!_auVoiceProcessing); |
1143 OSStatus result = -1; | 414 |
1144 | 415 OSStatus result = -1; |
1145 // Check if already initialized | 416 |
1146 if (NULL != _auVoiceProcessing) { | 417 // Create Voice Processing Audio Unit |
1147 // We already have initialized before and created any of the audio unit, | 418 AudioComponentDescription desc; |
1148 // check that all exist | 419 AudioComponent comp; |
1149 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 420 |
1150 " Already initialized"); | 421 desc.componentType = kAudioUnitType_Output; |
1151 // todo: Call AudioUnitReset() here and empty all buffers? | 422 desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO; |
1152 return 0; | 423 desc.componentManufacturer = kAudioUnitManufacturer_Apple; |
1153 } | 424 desc.componentFlags = 0; |
1154 | 425 desc.componentFlagsMask = 0; |
1155 // Create Voice Processing Audio Unit | 426 |
1156 AudioComponentDescription desc; | 427 comp = AudioComponentFindNext(nullptr, &desc); |
1157 AudioComponent comp; | 428 if (nullptr == comp) { |
1158 | 429 LOG_F(LS_ERROR) << "Could not find audio component for Audio Unit"; |
1159 desc.componentType = kAudioUnitType_Output; | 430 return -1; |
1160 desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO; | 431 } |
1161 desc.componentManufacturer = kAudioUnitManufacturer_Apple; | 432 |
1162 desc.componentFlags = 0; | 433 result = AudioComponentInstanceNew(comp, &_auVoiceProcessing); |
1163 desc.componentFlagsMask = 0; | 434 if (0 != result) { |
1164 | 435 LOG_F(LS_ERROR) << "Failed to create Audio Unit instance: " << result; |
1165 comp = AudioComponentFindNext(NULL, &desc); | 436 return -1; |
1166 if (NULL == comp) { | 437 } |
1167 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 438 |
1168 " Could not find audio component for Audio Unit"); | 439 // TODO(henrika): I think we should set the preferred channel configuration |
1169 return -1; | 440 // in both directions as well to be safe. |
1170 } | 441 |
1171 | 442 // Set preferred hardware sample rate to 16 kHz. |
1172 result = AudioComponentInstanceNew(comp, &_auVoiceProcessing); | 443 // TODO(henrika): improve this selection of sample rate. Why do we currently |
444 // use a hard coded value? How can we fail and still continue? | |
445 NSError* error = nil; | |
446 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
447 Float64 preferredSampleRate(playout_parameters_.sample_rate()); | |
448 [session setPreferredSampleRate:preferredSampleRate error:&error]; | |
449 if (error != nil) { | |
450 const char* errorString = [[error localizedDescription] UTF8String]; | |
451 LOG_F(LS_ERROR) << "setPreferredSampleRate failed: " << errorString; | |
452 } | |
453 | |
454 // TODO(henrika): we can reduce latency by setting the IOBufferDuration | |
455 // here. Default size for 16kHz is 0.016 sec or 16 msec on an iPhone 6. | |
456 | |
457 // Activate the audio session. | |
458 ActivateAudioSession(session, true); | |
459 | |
460 UInt32 enableIO = 1; | |
461 result = AudioUnitSetProperty(_auVoiceProcessing, | |
462 kAudioOutputUnitProperty_EnableIO, | |
463 kAudioUnitScope_Input, | |
464 1, // input bus | |
465 &enableIO, sizeof(enableIO)); | |
466 if (0 != result) { | |
467 LOG_F(LS_ERROR) << "Failed to enable IO on input: " << result; | |
468 } | |
469 | |
470 result = AudioUnitSetProperty(_auVoiceProcessing, | |
471 kAudioOutputUnitProperty_EnableIO, | |
472 kAudioUnitScope_Output, | |
473 0, // output bus | |
474 &enableIO, sizeof(enableIO)); | |
475 if (0 != result) { | |
476 LOG_F(LS_ERROR) << "Failed to enable IO on output: " << result; | |
477 } | |
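A note on the bus numbers used above: on the VoiceProcessingIO unit, element (bus) 1 is the input side connected to the microphone and element 0 is the output side connected to the speaker, which is why recording is enabled on the input scope of bus 1 and playback on the output scope of bus 0.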
478 | |
479 // Disable AU buffer allocation for the recorder; we allocate our own. |
480 // TODO(henrika): understand this part better. | |
481 UInt32 flag = 0; | |
482 result = AudioUnitSetProperty(_auVoiceProcessing, | |
483 kAudioUnitProperty_ShouldAllocateBuffer, | |
484 kAudioUnitScope_Output, 1, &flag, sizeof(flag)); | |
485 if (0 != result) { | |
486 LOG_F(LS_WARNING) << "Failed to disable AU buffer allocation: " << result; | |
487 // Should work anyway | |
488 } | |
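For context on that TODO (our reading of the Core Audio contract, not something verified in this CL): with ShouldAllocateBuffer disabled on the output scope of the input element, the AU no longer hands out its own buffers, so the AudioBufferList passed to AudioUnitRender() must point at caller-owned memory; that is exactly what RecordProcessImpl() does with its dataTmp buffer.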
489 | |
490 // Set recording callback. | |
491 AURenderCallbackStruct auCbS; | |
492 memset(&auCbS, 0, sizeof(auCbS)); | |
493 auCbS.inputProc = RecordProcess; | |
494 auCbS.inputProcRefCon = this; | |
495 result = AudioUnitSetProperty( | |
496 _auVoiceProcessing, kAudioOutputUnitProperty_SetInputCallback, | |
497 kAudioUnitScope_Global, 1, &auCbS, sizeof(auCbS)); | |
498 if (0 != result) { | |
499 LOG_F(LS_ERROR) << "Failed to set AU record callback: " << result; | |
500 } | |
501 | |
502 // Set playout callback. | |
503 memset(&auCbS, 0, sizeof(auCbS)); | |
504 auCbS.inputProc = PlayoutProcess; | |
505 auCbS.inputProcRefCon = this; | |
506 result = AudioUnitSetProperty( | |
507 _auVoiceProcessing, kAudioUnitProperty_SetRenderCallback, | |
508 kAudioUnitScope_Global, 0, &auCbS, sizeof(auCbS)); | |
509 if (0 != result) { | |
510 LOG_F(LS_ERROR) << "Failed to set AU output callback: " << result; | |
511 } | |
512 | |
513 // Get stream format for out/0 | |
514 AudioStreamBasicDescription playoutDesc; | |
515 UInt32 size = sizeof(playoutDesc); | |
516 result = | |
517 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat, | |
518 kAudioUnitScope_Output, 0, &playoutDesc, &size); | |
519 if (0 != result) { | |
520 LOG_F(LS_ERROR) << "Failed to get AU output stream format: " << result; | |
521 } | |
522 | |
523 playoutDesc.mSampleRate = preferredSampleRate; | |
524 LOG(LS_INFO) << "Audio Unit playout opened in sampling rate: " | |
525 << playoutDesc.mSampleRate; | |
526 | |
527 // Store the sampling frequency to be used by the Audio Device Buffer. | |
528 // todo: Add 48 kHz (increase buffer sizes). Other fs? | |
529 // TODO(henrika): Figure out if we really need this complex handling. | |
530 if ((playoutDesc.mSampleRate > 44090.0) && | |
531 (playoutDesc.mSampleRate < 44110.0)) { | |
532 _adbSampFreq = 44100; | |
533 } else if ((playoutDesc.mSampleRate > 15990.0) && | |
534 (playoutDesc.mSampleRate < 16010.0)) { | |
535 _adbSampFreq = 16000; | |
536 } else if ((playoutDesc.mSampleRate > 7990.0) && | |
537 (playoutDesc.mSampleRate < 8010.0)) { | |
538 _adbSampFreq = 8000; | |
539 } else { | |
540 _adbSampFreq = 0; | |
541 FATAL() << "Invalid sample rate"; | |
542 } | |
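The chained range checks above amount to snapping the reported rate to the nearest supported nominal rate with a +/-10 Hz tolerance. A minimal equivalent sketch (the helper name is hypothetical and not part of this CL; assumes <cmath> for fabs):

  // Hypothetical helper: map a reported HW sample rate to a supported
  // nominal rate, or return 0 if it is not within 10 Hz of any of them.
  static int NearestSupportedRate(Float64 fs) {
    const int kNominalRates[] = {8000, 16000, 44100};
    for (int rate : kNominalRates) {
      if (fabs(fs - rate) < 10.0)
        return rate;
    }
    return 0;  // callers treat this as fatal, as FATAL() does above
  }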
543 | |
544 // Set the audio device buffer sampling rates (use same for play and record). | |
545 // TODO(henrika): this is not a good place to set these things up. | |
546 DCHECK(audio_device_buffer_); | |
547 DCHECK_EQ(_adbSampFreq, playout_parameters_.sample_rate()); | |
548 audio_device_buffer_->SetRecordingSampleRate(_adbSampFreq); | |
549 audio_device_buffer_->SetPlayoutSampleRate(_adbSampFreq); | |
550 | |
551 // Set stream format for out/0. | |
552 playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | | |
553 kLinearPCMFormatFlagIsPacked | | |
554 kLinearPCMFormatFlagIsNonInterleaved; | |
555 playoutDesc.mBytesPerPacket = 2; | |
556 playoutDesc.mFramesPerPacket = 1; | |
557 playoutDesc.mBytesPerFrame = 2; | |
558 playoutDesc.mChannelsPerFrame = 1; | |
559 playoutDesc.mBitsPerChannel = 16; | |
560 result = | |
561 AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat, | |
562 kAudioUnitScope_Input, 0, &playoutDesc, size); | |
563 if (0 != result) { | |
564 LOG_F(LS_ERROR) << "Failed to set AU stream format for out/0"; | |
565 } | |
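For reference, the ASBD fields above are internally consistent for 16-bit mono linear PCM: mBytesPerFrame = mChannelsPerFrame * (mBitsPerChannel / 8) = 2, and since linear PCM always uses one frame per packet, mBytesPerPacket = mBytesPerFrame = 2. A self-contained sketch that derives the byte counts instead of hard-coding them (hypothetical helper, not part of this CL):

  // Hypothetical helper: build the same 16-bit mono PCM description with
  // the packet/frame byte counts computed from the channel/bit settings.
  static AudioStreamBasicDescription MakeInt16MonoFormat(Float64 rate) {
    AudioStreamBasicDescription d = {0};
    d.mSampleRate = rate;
    d.mFormatID = kAudioFormatLinearPCM;
    d.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
                     kLinearPCMFormatFlagIsPacked |
                     kLinearPCMFormatFlagIsNonInterleaved;
    d.mChannelsPerFrame = 1;
    d.mBitsPerChannel = 16;
    d.mBytesPerFrame = d.mChannelsPerFrame * (d.mBitsPerChannel / 8);  // 2
    d.mFramesPerPacket = 1;  // always 1 for linear PCM
    d.mBytesPerPacket = d.mBytesPerFrame * d.mFramesPerPacket;         // 2
    return d;
  }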
566 | |
567 // Get stream format for in/1. | |
568 AudioStreamBasicDescription recordingDesc; | |
569 size = sizeof(recordingDesc); | |
570 result = | |
571 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat, | |
572 kAudioUnitScope_Input, 1, &recordingDesc, &size); | |
573 if (0 != result) { | |
574 LOG_F(LS_ERROR) << "Failed to get AU stream format for in/1"; | |
575 } | |
576 | |
577 recordingDesc.mSampleRate = preferredSampleRate; | |
578 LOG(LS_INFO) << "Audio Unit recording opened in sampling rate: " | |
579 << recordingDesc.mSampleRate; | |
580 | |
581 // Set stream format for out/1 (use same sampling frequency as for in/1). | |
582 recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | | |
583 kLinearPCMFormatFlagIsPacked | | |
584 kLinearPCMFormatFlagIsNonInterleaved; | |
585 recordingDesc.mBytesPerPacket = 2; | |
586 recordingDesc.mFramesPerPacket = 1; | |
587 recordingDesc.mBytesPerFrame = 2; | |
588 recordingDesc.mChannelsPerFrame = 1; | |
589 recordingDesc.mBitsPerChannel = 16; | |
590 result = | |
591 AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat, | |
592 kAudioUnitScope_Output, 1, &recordingDesc, size); | |
593 if (0 != result) { | |
594 LOG_F(LS_ERROR) << "Failed to set AU stream format for out/1"; | |
595 } | |
596 | |
597 // Initialize here already to be able to get/set stream properties. | |
598 result = AudioUnitInitialize(_auVoiceProcessing); | |
599 if (0 != result) { | |
600 LOG_F(LS_ERROR) << "AudioUnitInitialize failed: " << result; | |
601 } | |
602 | |
603 // Get hardware sample rate for logging (see if we get what we asked for). | |
604 // TODO(henrika): what if we don't get what we ask for? | |
605 double sampleRate = session.sampleRate; | |
606 LOG(LS_INFO) << "Current HW sample rate is: " << sampleRate | |
607 << ", ADB sample rate is: " << _adbSampFreq; | |
608 LOG(LS_INFO) << "Current HW IO buffer size is: " << | |
609 [session IOBufferDuration]; | |
610 | |
611 // Listen to audio interruptions. | |
612 // TODO(henrika): learn this area better. | |
613 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; | |
614 id observer = [center | |
615 addObserverForName:AVAudioSessionInterruptionNotification | |
616 object:nil | |
617 queue:[NSOperationQueue mainQueue] | |
618 usingBlock:^(NSNotification* notification) { | |
619 NSNumber* typeNumber = | |
620 [notification userInfo][AVAudioSessionInterruptionTypeKey]; | |
621 AVAudioSessionInterruptionType type = | |
622 (AVAudioSessionInterruptionType)[typeNumber | |
623 unsignedIntegerValue]; | |
624 switch (type) { | |
625 case AVAudioSessionInterruptionTypeBegan: | |
626 // At this point our audio session has been deactivated and the audio | |
627 // unit render callbacks no longer occur. Nothing to do. | |
630 break; | |
631 case AVAudioSessionInterruptionTypeEnded: { | |
632 NSError* error = nil; | |
633 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
634 [session setActive:YES error:&error]; | |
635 if (error != nil) { | |
636 LOG_F(LS_ERROR) << "Failed to activate audio session"; | |
637 } | |
638 // Post interruption the audio unit render callbacks don't | |
639 // automatically continue, so we restart the unit manually | |
640 // here. | |
641 AudioOutputUnitStop(_auVoiceProcessing); | |
642 AudioOutputUnitStart(_auVoiceProcessing); | |
643 break; | |
644 } | |
645 } | |
646 }]; | |
647 // Increment refcount on observer using ARC bridge. Instance variable is a | |
648 // void* instead of an id because header is included in other pure C++ | |
649 // files. | |
650 _audioInterruptionObserver = (__bridge_retained void*)observer; | |
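The ownership dance here, reversed in ShutdownPlayOrRecord() below, is the standard ARC bridging idiom for stashing an Objective-C object in a C++ member; a minimal sketch with hypothetical names:

  // Store: ARC gives up ownership; the raw pointer now holds a +1 retain.
  id observerObject = [[NSObject alloc] init];  // stand-in for the observer
  void* raw = (__bridge_retained void*)observerObject;
  // Release: ownership moves back to ARC; when 'restored' goes out of
  // scope the retain taken above is balanced and the object can dealloc.
  id restored = (__bridge_transfer id)raw;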
651 | |
652 // Deactivate the audio session. | |
653 ActivateAudioSession(session, false); | |
654 | |
655 return 0; | |
656 } | |
657 | |
658 int32_t AudioDeviceIOS::ShutdownPlayOrRecord() { | |
659 LOGI() << "ShutdownPlayOrRecord"; | |
660 | |
661 if (_audioInterruptionObserver != nullptr) { | |
662 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; | |
663 // Transfer ownership of observer back to ARC, which will dealloc the | |
664 // observer once it exits this scope. | |
665 id observer = (__bridge_transfer id)_audioInterruptionObserver; | |
666 [center removeObserver:observer]; | |
667 _audioInterruptionObserver = nullptr; | |
668 } | |
669 | |
670 // Close and delete AU. | |
671 OSStatus result = -1; | |
672 if (nullptr != _auVoiceProcessing) { | |
673 result = AudioOutputUnitStop(_auVoiceProcessing); | |
1173 if (0 != result) { | 674 if (0 != result) { |
1174 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 675 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; |
1175 " Could not create Audio Unit instance (result=%d)", | 676 } |
1176 result); | 677 result = AudioComponentInstanceDispose(_auVoiceProcessing); |
1177 return -1; | |
1178 } | |
1179 | |
1180 // Set preferred hardware sample rate to 16 kHz | |
1181 NSError* error = nil; | |
1182 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
1183 Float64 preferredSampleRate(16000.0); | |
1184 [session setPreferredSampleRate:preferredSampleRate | |
1185 error:&error]; | |
1186 if (error != nil) { | |
1187 const char* errorString = [[error localizedDescription] UTF8String]; | |
1188 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1189 "Could not set preferred sample rate: %s", errorString); | |
1190 } | |
1191 error = nil; | |
1192 // Make the setMode:error: and setCategory:error: calls only if necessary. | |
1193 // Non-obviously, setting them to the value they already have will clear | |
1194 // transient properties (such as PortOverride) that some other component may | |
1195 // have set up. | |
1196 if (session.mode != AVAudioSessionModeVoiceChat) { | |
1197 [session setMode:AVAudioSessionModeVoiceChat error:&error]; | |
1198 if (error != nil) { | |
1199 const char* errorString = [[error localizedDescription] UTF8String]; | |
1200 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1201 "Could not set mode: %s", errorString); | |
1202 } | |
1203 } | |
1204 error = nil; | |
1205 if (session.category != AVAudioSessionCategoryPlayAndRecord) { | |
1206 [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error]; | |
1207 if (error != nil) { | |
1208 const char* errorString = [[error localizedDescription] UTF8String]; | |
1209 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1210 "Could not set category: %s", errorString); | |
1211 } | |
1212 } | |
1213 | |
1214 ////////////////////// | |
1215 // Setup Voice Processing Audio Unit | |
1216 | |
1217 // Note: For Signal Processing AU element 0 is output bus, element 1 is | |
1218 // input bus for global scope element is irrelevant (always use | |
1219 // element 0) | |
1220 | |
1221 // Enable IO on both elements | |
1222 | |
1223 // todo: Below we just log and continue upon error. We might want | |
1224 // to close AU and return error for some cases. | |
1225 // todo: Log info about setup. | |
1226 | |
1227 UInt32 enableIO = 1; | |
1228 result = AudioUnitSetProperty(_auVoiceProcessing, | |
1229 kAudioOutputUnitProperty_EnableIO, | |
1230 kAudioUnitScope_Input, | |
1231 1, // input bus | |
1232 &enableIO, | |
1233 sizeof(enableIO)); | |
1234 if (0 != result) { | 678 if (0 != result) { |
1235 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 679 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; |
1236 " Could not enable IO on input (result=%d)", result); | 680 } |
1237 } | 681 _auVoiceProcessing = nullptr; |
1238 | 682 } |
1239 result = AudioUnitSetProperty(_auVoiceProcessing, | 683 |
1240 kAudioOutputUnitProperty_EnableIO, | 684 return 0; |
1241 kAudioUnitScope_Output, | |
1242 0, // output bus | |
1243 &enableIO, | |
1244 sizeof(enableIO)); | |
1245 if (0 != result) { | |
1246 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1247 " Could not enable IO on output (result=%d)", result); | |
1248 } | |
1249 | |
1250 // Disable AU buffer allocation for the recorder, we allocate our own | |
1251 UInt32 flag = 0; | |
1252 result = AudioUnitSetProperty( | |
1253 _auVoiceProcessing, kAudioUnitProperty_ShouldAllocateBuffer, | |
1254 kAudioUnitScope_Output, 1, &flag, sizeof(flag)); | |
1255 if (0 != result) { | |
1256 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1257 " Could not disable AU buffer allocation (result=%d)", | |
1258 result); | |
1259 // Should work anyway | |
1260 } | |
1261 | |
1262 // Set recording callback | |
1263 AURenderCallbackStruct auCbS; | |
1264 memset(&auCbS, 0, sizeof(auCbS)); | |
1265 auCbS.inputProc = RecordProcess; | |
1266 auCbS.inputProcRefCon = this; | |
1267 result = AudioUnitSetProperty(_auVoiceProcessing, | |
1268 kAudioOutputUnitProperty_SetInputCallback, | |
1269 kAudioUnitScope_Global, 1, | |
1270 &auCbS, sizeof(auCbS)); | |
1271 if (0 != result) { | |
1272 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1273 " Could not set record callback for Audio Unit (result=%d)", | |
1274 result); | |
1275 } | |
1276 | |
1277 // Set playout callback | |
1278 memset(&auCbS, 0, sizeof(auCbS)); | |
1279 auCbS.inputProc = PlayoutProcess; | |
1280 auCbS.inputProcRefCon = this; | |
1281 result = AudioUnitSetProperty(_auVoiceProcessing, | |
1282 kAudioUnitProperty_SetRenderCallback, | |
1283 kAudioUnitScope_Global, 0, | |
1284 &auCbS, sizeof(auCbS)); | |
1285 if (0 != result) { | |
1286 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1287 " Could not set play callback for Audio Unit (result=%d)", | |
1288 result); | |
1289 } | |
1290 | |
1291 // Get stream format for out/0 | |
1292 AudioStreamBasicDescription playoutDesc; | |
1293 UInt32 size = sizeof(playoutDesc); | |
1294 result = AudioUnitGetProperty(_auVoiceProcessing, | |
1295 kAudioUnitProperty_StreamFormat, | |
1296 kAudioUnitScope_Output, 0, &playoutDesc, | |
1297 &size); | |
1298 if (0 != result) { | |
1299 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1300 " Could not get stream format Audio Unit out/0 (result=%d)", | |
1301 result); | |
1302 } | |
1303 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1304 " Audio Unit playout opened in sampling rate %f", | |
1305 playoutDesc.mSampleRate); | |
1306 | |
1307 playoutDesc.mSampleRate = preferredSampleRate; | |
1308 | |
1309 // Store the sampling frequency to use towards the Audio Device Buffer | |
1310 // todo: Add 48 kHz (increase buffer sizes). Other fs? | |
1311 if ((playoutDesc.mSampleRate > 44090.0) | |
1312 && (playoutDesc.mSampleRate < 44110.0)) { | |
1313 _adbSampFreq = 44100; | |
1314 } else if ((playoutDesc.mSampleRate > 15990.0) | |
1315 && (playoutDesc.mSampleRate < 16010.0)) { | |
1316 _adbSampFreq = 16000; | |
1317 } else if ((playoutDesc.mSampleRate > 7990.0) | |
1318 && (playoutDesc.mSampleRate < 8010.0)) { | |
1319 _adbSampFreq = 8000; | |
1320 } else { | |
1321 _adbSampFreq = 0; | |
1322 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1323 " Audio Unit out/0 opened in unknown sampling rate (%f)", | |
1324 playoutDesc.mSampleRate); | |
1325 // todo: We should bail out here. | |
1326 } | |
1327 | |
1328 // Set the audio device buffer sampling rate, | |
1329 // we assume we get the same for play and record | |
1330 if (_ptrAudioBuffer->SetRecordingSampleRate(_adbSampFreq) < 0) { | |
1331 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1332 " Could not set audio device buffer recording sampling rate (%d)", | |
1333 _adbSampFreq); | |
1334 } | |
1335 | |
1336 if (_ptrAudioBuffer->SetPlayoutSampleRate(_adbSampFreq) < 0) { | |
1337 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1338 " Could not set audio device buffer playout sampling rate (%d)", | |
1339 _adbSampFreq); | |
1340 } | |
1341 | |
1342 // Set stream format for in/0 (use same sampling frequency as for out/0) | |
1343 playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | |
1344 | kLinearPCMFormatFlagIsPacked | |
1345 | kLinearPCMFormatFlagIsNonInterleaved; | |
1346 playoutDesc.mBytesPerPacket = 2; | |
1347 playoutDesc.mFramesPerPacket = 1; | |
1348 playoutDesc.mBytesPerFrame = 2; | |
1349 playoutDesc.mChannelsPerFrame = 1; | |
1350 playoutDesc.mBitsPerChannel = 16; | |
1351 result = AudioUnitSetProperty(_auVoiceProcessing, | |
1352 kAudioUnitProperty_StreamFormat, | |
1353 kAudioUnitScope_Input, 0, &playoutDesc, size); | |
1354 if (0 != result) { | |
1355 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1356 " Could not set stream format Audio Unit in/0 (result=%d)", | |
1357 result); | |
1358 } | |
1359 | |
1360 // Get stream format for in/1 | |
1361 AudioStreamBasicDescription recordingDesc; | |
1362 size = sizeof(recordingDesc); | |
1363 result = AudioUnitGetProperty(_auVoiceProcessing, | |
1364 kAudioUnitProperty_StreamFormat, | |
1365 kAudioUnitScope_Input, 1, &recordingDesc, | |
1366 &size); | |
1367 if (0 != result) { | |
1368 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1369 " Could not get stream format Audio Unit in/1 (result=%d)", | |
1370 result); | |
1371 } | |
1372 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1373 " Audio Unit recording opened in sampling rate %f", | |
1374 recordingDesc.mSampleRate); | |
1375 | |
1376 recordingDesc.mSampleRate = preferredSampleRate; | |
1377 | |
1378 // Set stream format for out/1 (use same sampling frequency as for in/1) | |
1379 recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | |
1380 | kLinearPCMFormatFlagIsPacked | |
1381 | kLinearPCMFormatFlagIsNonInterleaved; | |
1382 | |
1383 recordingDesc.mBytesPerPacket = 2; | |
1384 recordingDesc.mFramesPerPacket = 1; | |
1385 recordingDesc.mBytesPerFrame = 2; | |
1386 recordingDesc.mChannelsPerFrame = 1; | |
1387 recordingDesc.mBitsPerChannel = 16; | |
1388 result = AudioUnitSetProperty(_auVoiceProcessing, | |
1389 kAudioUnitProperty_StreamFormat, | |
1390 kAudioUnitScope_Output, 1, &recordingDesc, | |
1391 size); | |
1392 if (0 != result) { | |
1393 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1394 " Could not set stream format Audio Unit out/1 (result=%d)", | |
1395 result); | |
1396 } | |
1397 | |
1398 // Initialize here already to be able to get/set stream properties. | |
1399 result = AudioUnitInitialize(_auVoiceProcessing); | |
1400 if (0 != result) { | |
1401 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1402 " Could not init Audio Unit (result=%d)", result); | |
1403 } | |
1404 | |
1405 // Get hardware sample rate for logging (see if we get what we asked for) | |
1406 double sampleRate = session.sampleRate; | |
1407 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
1408 " Current HW sample rate is %f, ADB sample rate is %d", | |
1409 sampleRate, _adbSampFreq); | |
1410 | |
1411 // Listen to audio interruptions. | |
1412 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; | |
1413 id observer = | |
1414 [center addObserverForName:AVAudioSessionInterruptionNotification | |
1415 object:nil | |
1416 queue:[NSOperationQueue mainQueue] | |
1417 usingBlock:^(NSNotification* notification) { | |
1418 NSNumber* typeNumber = | |
1419 [notification userInfo][AVAudioSessionInterruptionTypeKey]; | |
1420 AVAudioSessionInterruptionType type = | |
1421 (AVAudioSessionInterruptionType)[typeNumber unsignedIntegerValue]; | |
1422 switch (type) { | |
1423 case AVAudioSessionInterruptionTypeBegan: | |
1424 // At this point our audio session has been deactivated and the | |
1425 // audio unit render callbacks no longer occur. Nothing to do. | |
1426 break; | |
1427 case AVAudioSessionInterruptionTypeEnded: { | |
1428 NSError* error = nil; | |
1429 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
1430 [session setActive:YES | |
1431 error:&error]; | |
1432 if (error != nil) { | |
1433 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1434 "Error activating audio session"); | |
1435 } | |
1436 // Post interruption the audio unit render callbacks don't | |
1437 // automatically continue, so we restart the unit manually here. | |
1438 AudioOutputUnitStop(_auVoiceProcessing); | |
1439 AudioOutputUnitStart(_auVoiceProcessing); | |
1440 break; | |
1441 } | |
1442 } | |
1443 }]; | |
1444 // Increment refcount on observer using ARC bridge. Instance variable is a | |
1445 // void* instead of an id because header is included in other pure C++ | |
1446 // files. | |
1447 _audioInterruptionObserver = (__bridge_retained void*)observer; | |
1448 | |
1449 // Activate audio session. | |
1450 error = nil; | |
1451 [session setActive:YES | |
1452 error:&error]; | |
1453 if (error != nil) { | |
1454 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1455 "Error activating audio session"); | |
1456 } | |
1457 | |
1458 return 0; | |
1459 } | |
1460 | |
1461 int32_t AudioDeviceIOS::ShutdownPlayOrRecord() { | |
1462 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
1463 | |
1464 if (_audioInterruptionObserver != NULL) { | |
1465 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; | |
1466 // Transfer ownership of observer back to ARC, which will dealloc the | |
1467 // observer once it exits this scope. | |
1468 id observer = (__bridge_transfer id)_audioInterruptionObserver; | |
1469 [center removeObserver:observer]; | |
1470 _audioInterruptionObserver = NULL; | |
1471 } | |
1472 | |
1473 // Close and delete AU | |
1474 OSStatus result = -1; | |
1475 if (NULL != _auVoiceProcessing) { | |
1476 result = AudioOutputUnitStop(_auVoiceProcessing); | |
1477 if (0 != result) { | |
1478 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1479 " Error stopping Audio Unit (result=%d)", result); | |
1480 } | |
1481 result = AudioComponentInstanceDispose(_auVoiceProcessing); | |
1482 if (0 != result) { | |
1483 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1484 " Error disposing Audio Unit (result=%d)", result); | |
1485 } | |
1486 _auVoiceProcessing = NULL; | |
1487 } | |
1488 | |
1489 return 0; | |
1490 } | 685 } |
1491 | 686 |
1492 // ============================================================================ | 687 // ============================================================================ |
1493 // Thread Methods | 688 // Thread Methods |
1494 // ============================================================================ | 689 // ============================================================================ |
1495 | 690 |
1496 OSStatus | 691 OSStatus AudioDeviceIOS::RecordProcess( |
1497 AudioDeviceIOS::RecordProcess(void *inRefCon, | 692 void* inRefCon, |
1498 AudioUnitRenderActionFlags *ioActionFlags, | 693 AudioUnitRenderActionFlags* ioActionFlags, |
1499 const AudioTimeStamp *inTimeStamp, | 694 const AudioTimeStamp* inTimeStamp, |
1500 UInt32 inBusNumber, | 695 UInt32 inBusNumber, |
1501 UInt32 inNumberFrames, | 696 UInt32 inNumberFrames, |
1502 AudioBufferList *ioData) { | 697 AudioBufferList* ioData) { |
1503 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon); | 698 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon); |
1504 | 699 return ptrThis->RecordProcessImpl(ioActionFlags, inTimeStamp, inBusNumber, |
1505 return ptrThis->RecordProcessImpl(ioActionFlags, | 700 inNumberFrames); |
1506 inTimeStamp, | 701 } |
1507 inBusNumber, | 702 |
1508 inNumberFrames); | 703 OSStatus AudioDeviceIOS::RecordProcessImpl( |
1509 } | 704 AudioUnitRenderActionFlags* ioActionFlags, |
1510 | 705 const AudioTimeStamp* inTimeStamp, |
1511 | 706 uint32_t inBusNumber, |
1512 OSStatus | 707 uint32_t inNumberFrames) { |
1513 AudioDeviceIOS::RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags, | 708 // Set up a temporary capture buffer. |
1514 const AudioTimeStamp *inTimeStamp, | 709 // Use a temp buffer to avoid locking the recording buffer longer than necessary. |
1515 uint32_t inBusNumber, | 710 // todo: Make dataTmp a member variable with static size that holds |
1516 uint32_t inNumberFrames) { | 711 // max possible frames? |
1517 // Setup some basic stuff | 712 int16_t* dataTmp = new int16_t[inNumberFrames]; |
1518 // Use temp buffer not to lock up recording buffer more than necessary | 713 memset(dataTmp, 0, 2 * inNumberFrames); |
1519 // todo: Make dataTmp a member variable with static size that holds | 714 |
1520 // max possible frames? | 715 AudioBufferList abList; |
1521 int16_t* dataTmp = new int16_t[inNumberFrames]; | 716 abList.mNumberBuffers = 1; |
1522 memset(dataTmp, 0, 2*inNumberFrames); | 717 abList.mBuffers[0].mData = dataTmp; |
1523 | 718 abList.mBuffers[0].mDataByteSize = 2 * inNumberFrames; // 2 bytes/sample |
1524 AudioBufferList abList; | 719 abList.mBuffers[0].mNumberChannels = 1; |
1525 abList.mNumberBuffers = 1; | 720 |
1526 abList.mBuffers[0].mData = dataTmp; | 721 // Get data from mic |
1527 abList.mBuffers[0].mDataByteSize = 2*inNumberFrames; // 2 bytes/sample | 722 OSStatus res = AudioUnitRender(_auVoiceProcessing, ioActionFlags, inTimeStamp, |
1528 abList.mBuffers[0].mNumberChannels = 1; | 723 inBusNumber, inNumberFrames, &abList); |
1529 | 724 if (res != 0) { |
1530 // Get data from mic | 725 // TODO(henrika): improve error handling. |
1531 OSStatus res = AudioUnitRender(_auVoiceProcessing, | 726 delete[] dataTmp; |
1532 ioActionFlags, inTimeStamp, | 727 return 0; |
1533 inBusNumber, inNumberFrames, &abList); | 728 } |
1534 if (res != 0) { | 729 |
1535 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 730 if (_recording) { |
1536 " Error getting rec data, error = %d", res); | 731 // Insert all data in temp buffer into recording buffers |
1537 | 732 // There is zero or one buffer partially full at any given time, |
1538 if (_recWarning > 0) { | 733 // all others are full or empty |
1539 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 734 // Full means filled with noSamp10ms samples. |
1540 " Pending rec warning exists"); | 735 |
736 const unsigned int noSamp10ms = _adbSampFreq / 100; | |
737 unsigned int dataPos = 0; | |
738 uint16_t bufPos = 0; | |
739 int16_t insertPos = -1; | |
740 unsigned int nCopy = 0; // Number of samples to copy | |
741 | |
742 while (dataPos < inNumberFrames) { | |
743 // Loop over all recording buffers, stopping as soon as we find | |
744 // the partially full buffer. | |
745 // First choice is to insert into a partially full buffer; | |
746 // second choice is an empty one. | |
747 bufPos = 0; | |
748 insertPos = -1; | |
749 nCopy = 0; | |
750 while (bufPos < N_REC_BUFFERS) { | |
751 if ((_recordingLength[bufPos] > 0) && | |
752 (_recordingLength[bufPos] < noSamp10ms)) { | |
753 // Found the partially full buffer | |
754 insertPos = static_cast<int16_t>(bufPos); | |
755 // Don't need to search more, quit loop | |
756 bufPos = N_REC_BUFFERS; | |
757 } else if ((-1 == insertPos) && (0 == _recordingLength[bufPos])) { | |
758 // Found an empty buffer | |
759 insertPos = static_cast<int16_t>(bufPos); | |
1541 } | 760 } |
1542 _recWarning = 1; | 761 ++bufPos; |
1543 | 762 } |
1544 delete [] dataTmp; | 763 |
1545 return 0; | 764 // Insert data into buffer |
1546 } | 765 if (insertPos > -1) { |
1547 | 766 // We found a non-full buffer, copy data to it |
1548 if (_recording) { | 767 unsigned int dataToCopy = inNumberFrames - dataPos; |
1549 // Insert all data in temp buffer into recording buffers | 768 unsigned int currentRecLen = _recordingLength[insertPos]; |
1550 // There is zero or one buffer partially full at any given time, | 769 unsigned int roomInBuffer = noSamp10ms - currentRecLen; |
1551 // all others are full or empty | 770 nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer); |
1552 // Full means filled with noSamp10ms samples. | 771 |
1553 | 772 memcpy(&_recordingBuffer[insertPos][currentRecLen], &dataTmp[dataPos], |
1554 const unsigned int noSamp10ms = _adbSampFreq / 100; | 773 nCopy * sizeof(int16_t)); |
1555 unsigned int dataPos = 0; | 774 if (0 == currentRecLen) { |
1556 uint16_t bufPos = 0; | 775 _recordingSeqNumber[insertPos] = _recordingCurrentSeq; |
1557 int16_t insertPos = -1; | 776 ++_recordingCurrentSeq; |
1558 unsigned int nCopy = 0; // Number of samples to copy | |
1559 | |
1560 while (dataPos < inNumberFrames) { | |
1561 // Loop over all recording buffers or | |
1562 // until we find the partially full buffer | |
1563 // First choice is to insert into partially full buffer, | |
1564 // second choice is to insert into empty buffer | |
1565 bufPos = 0; | |
1566 insertPos = -1; | |
1567 nCopy = 0; | |
1568 while (bufPos < N_REC_BUFFERS) { | |
1569 if ((_recordingLength[bufPos] > 0) | |
1570 && (_recordingLength[bufPos] < noSamp10ms)) { | |
1571 // Found the partially full buffer | |
1572 insertPos = static_cast<int16_t>(bufPos); | |
1573 // Don't need to search more, quit loop | |
1574 bufPos = N_REC_BUFFERS; | |
1575 } else if ((-1 == insertPos) | |
1576 && (0 == _recordingLength[bufPos])) { | |
1577 // Found an empty buffer | |
1578 insertPos = static_cast<int16_t>(bufPos); | |
1579 } | |
1580 ++bufPos; | |
1581 } | |
1582 | |
1583 // Insert data into buffer | |
1584 if (insertPos > -1) { | |
1585 // We found a non-full buffer, copy data to it | |
1586 unsigned int dataToCopy = inNumberFrames - dataPos; | |
1587 unsigned int currentRecLen = _recordingLength[insertPos]; | |
1588 unsigned int roomInBuffer = noSamp10ms - currentRecLen; | |
1589 nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer); | |
1590 | |
1591 memcpy(&_recordingBuffer[insertPos][currentRecLen], | |
1592 &dataTmp[dataPos], nCopy*sizeof(int16_t)); | |
1593 if (0 == currentRecLen) { | |
1594 _recordingSeqNumber[insertPos] = _recordingCurrentSeq; | |
1595 ++_recordingCurrentSeq; | |
1596 } | |
1597 _recordingBufferTotalSize += nCopy; | |
1598 // Has to be done last to avoid interrupt problems | |
1599 // between threads | |
1600 _recordingLength[insertPos] += nCopy; | |
1601 dataPos += nCopy; | |
1602 } else { | |
1603 // Didn't find a non-full buffer | |
1604 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1605 " Could not insert into recording buffer"); | |
1606 if (_recWarning > 0) { | |
1607 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1608 " Pending rec warning exists"); | |
1609 } | |
1610 _recWarning = 1; | |
1611 dataPos = inNumberFrames; // Don't try to insert more | |
1612 } | |
1613 } | 777 } |
1614 } | 778 _recordingBufferTotalSize += nCopy; |
1615 | 779 // Has to be done last to avoid interrupt problems between threads. |
1616 delete [] dataTmp; | 780 _recordingLength[insertPos] += nCopy; |
1617 | 781 dataPos += nCopy; |
1618 return 0; | 782 } else { |
1619 } | 783 // Didn't find a non-full buffer |
1620 | 784 // TODO(henrika): improve error handling |
1621 OSStatus | 785 dataPos = inNumberFrames; // Don't try to insert more |
1622 AudioDeviceIOS::PlayoutProcess(void *inRefCon, | 786 } |
1623 AudioUnitRenderActionFlags *ioActionFlags, | 787 } |
1624 const AudioTimeStamp *inTimeStamp, | 788 } |
1625 UInt32 inBusNumber, | 789 delete[] dataTmp; |
1626 UInt32 inNumberFrames, | 790 return 0; |
1627 AudioBufferList *ioData) { | 791 } |
1628 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon); | 792 |
1629 | 793 OSStatus AudioDeviceIOS::PlayoutProcess( |
1630 return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData); | 794 void* inRefCon, |
1631 } | 795 AudioUnitRenderActionFlags* ioActionFlags, |
1632 | 796 const AudioTimeStamp* inTimeStamp, |
1633 OSStatus | 797 UInt32 inBusNumber, |
1634 AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames, | 798 UInt32 inNumberFrames, |
1635 AudioBufferList *ioData) { | 799 AudioBufferList* ioData) { |
1636 // Setup some basic stuff | 800 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon); |
1637 // assert(sizeof(short) == 2); // Assumption for implementation | 801 return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData); |
1638 | 802 } |
1639 int16_t* data = | 803 |
1640 static_cast<int16_t*>(ioData->mBuffers[0].mData); | 804 OSStatus AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames, |
1641 unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize; | 805 AudioBufferList* ioData) { |
1642 unsigned int dataSize = dataSizeBytes/2; // Number of samples | 806 int16_t* data = static_cast<int16_t*>(ioData->mBuffers[0].mData); |
1643 if (dataSize != inNumberFrames) { // Should always be the same | 807 unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize; |
1644 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 808 unsigned int dataSize = dataSizeBytes / 2; // Number of samples |
1645 "dataSize (%u) != inNumberFrames (%u)", | 809 CHECK_EQ(dataSize, inNumberFrames); |
1646 dataSize, (unsigned int)inNumberFrames); | 810 memset(data, 0, dataSizeBytes); // Start with empty buffer |
1647 if (_playWarning > 0) { | 811 |
1648 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 812 // Get playout data from Audio Device Buffer |
1649 " Pending play warning exists"); | 813 |
814 if (_playing) { | |
815 unsigned int noSamp10ms = _adbSampFreq / 100; | |
816 // todo: Make this a member variable, allocated once the sample frequency is known. | |
817 int16_t* dataTmp = new int16_t[noSamp10ms]; | |
818 memset(dataTmp, 0, 2 * noSamp10ms); | |
819 unsigned int dataPos = 0; | |
820 int noSamplesOut = 0; | |
821 unsigned int nCopy = 0; | |
822 | |
823 // First insert data from playout buffer if any | |
824 if (_playoutBufferUsed > 0) { | |
825 nCopy = (dataSize < _playoutBufferUsed) ? dataSize : _playoutBufferUsed; | |
826 DCHECK_EQ(nCopy, _playoutBufferUsed); | |
827 memcpy(data, _playoutBuffer, 2 * nCopy); | |
828 dataPos = nCopy; | |
829 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); | |
830 _playoutBufferUsed = 0; | |
831 } | |
832 | |
833 // Now get the rest from Audio Device Buffer. | |
834 while (dataPos < dataSize) { | |
835 // Update playout delay | |
836 UpdatePlayoutDelay(); | |
837 | |
838 // Ask for new PCM data to be played out using the AudioDeviceBuffer | |
839 noSamplesOut = audio_device_buffer_->RequestPlayoutData(noSamp10ms); | |
840 | |
841 // Get data from Audio Device Buffer | |
842 noSamplesOut = audio_device_buffer_->GetPlayoutData( | |
843 reinterpret_cast<int8_t*>(dataTmp)); | |
844 CHECK_EQ(noSamp10ms, (unsigned int)noSamplesOut); | |
845 | |
846 // Insert as much as fits in data buffer | |
847 nCopy = | |
848 (dataSize - dataPos) > noSamp10ms ? noSamp10ms : (dataSize - dataPos); | |
849 memcpy(&data[dataPos], dataTmp, 2 * nCopy); | |
850 | |
851 // Save rest in playout buffer if any | |
852 if (nCopy < noSamp10ms) { | |
853 memcpy(_playoutBuffer, &dataTmp[nCopy], 2 * (noSamp10ms - nCopy)); | |
854 _playoutBufferUsed = noSamp10ms - nCopy; | |
855 } | |
856 | |
857 // Update the loop/index counter; if we copied fewer than noSamp10ms | |
858 // samples, we quit the loop anyway. | |
859 dataPos += noSamp10ms; | |
860 } | |
861 delete[] dataTmp; | |
862 } | |
863 return 0; | |
864 } | |
865 | |
866 // TODO(henrika): can either be removed or simplified. | |
867 void AudioDeviceIOS::UpdatePlayoutDelay() { | |
868 ++_playoutDelayMeasurementCounter; | |
869 | |
870 if (_playoutDelayMeasurementCounter >= 100) { | |
871 // Update HW and OS delay every second, unlikely to change | |
872 | |
873 // Since this is eventually rounded to integral ms, add 0.5ms | |
874 // here to get round-to-nearest-int behavior instead of | |
875 // truncation. | |
876 double totalDelaySeconds = 0.0005; | |
877 | |
878 // HW output latency | |
879 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
880 double latency = session.outputLatency; | |
881 assert(latency >= 0); | |
882 totalDelaySeconds += latency; | |
883 | |
884 // HW buffer duration | |
885 double ioBufferDuration = session.IOBufferDuration; | |
886 assert(ioBufferDuration >= 0); | |
887 totalDelaySeconds += ioBufferDuration; | |
888 | |
889 // AU latency | |
890 Float64 f64(0); | |
891 UInt32 size = sizeof(f64); | |
892 OSStatus result = | |
893 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency, | |
894 kAudioUnitScope_Global, 0, &f64, &size); | |
895 if (0 != result) { | |
896 LOG_F(LS_ERROR) << "AU latency error: " << result; | |
897 } | |
898 assert(f64 >= 0); | |
899 totalDelaySeconds += f64; | |
900 | |
901 // Convert to milliseconds. | |
902 _playoutDelay = static_cast<uint32_t>(totalDelaySeconds * 1000); | |
903 | |
904 // Reset counter | |
905 _playoutDelayMeasurementCounter = 0; | |
906 } | |
907 | |
908 // todo: Add playout buffer? | |
909 } | |
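A worked example of the sum above, using assumed (but plausible) hardware figures rather than values measured in this CL:

  //   outputLatency     = 0.012 s
  //   IOBufferDuration  = 0.016 s
  //   AU latency        = 0.001 s
  //   totalDelaySeconds = 0.0005 + 0.012 + 0.016 + 0.001 = 0.0295 s
  //   _playoutDelay     = static_cast<uint32_t>(0.0295 * 1000) = 29 ms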
910 | |
911 void AudioDeviceIOS::UpdateRecordingDelay() { | |
912 ++_recordingDelayMeasurementCounter; | |
913 | |
914 if (_recordingDelayMeasurementCounter >= 100) { | |
915 // Update HW and OS delay every second, unlikely to change | |
916 | |
917 // Since this is eventually rounded to integral ms, add 0.5ms | |
918 // here to get round-to-nearest-int behavior instead of | |
919 // truncation. | |
920 double totalDelaySeconds = 0.0005; | |
921 | |
922 // HW input latency | |
923 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
924 double latency = session.inputLatency; | |
925 assert(latency >= 0); | |
926 totalDelaySeconds += latency; | |
927 | |
928 // HW buffer duration | |
929 double ioBufferDuration = session.IOBufferDuration; | |
930 assert(ioBufferDuration >= 0); | |
931 totalDelaySeconds += ioBufferDuration; | |
932 | |
933 // AU latency | |
934 Float64 f64(0); | |
935 UInt32 size = sizeof(f64); | |
936 OSStatus result = | |
937 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency, | |
938 kAudioUnitScope_Global, 0, &f64, &size); | |
939 if (0 != result) { | |
940 LOG_F(LS_ERROR) << "AU latency error: " << result; | |
941 } | |
942 assert(f64 >= 0); | |
943 totalDelaySeconds += f64; | |
944 | |
945 // Convert to milliseconds. | |
946 _recordingDelayHWAndOS = static_cast<uint32_t>(totalDelaySeconds * 1000); | |
947 | |
948 // Reset counter | |
949 _recordingDelayMeasurementCounter = 0; | |
950 } | |
951 | |
952 _recordingDelay = _recordingDelayHWAndOS; | |
953 | |
954 // ADB recording buffer size, update every time | |
955 // Don't count the next 10 ms block about to be sent; then convert samples => ms. | |
956 const uint32_t noSamp10ms = _adbSampFreq / 100; | |
957 if (_recordingBufferTotalSize > noSamp10ms) { | |
958 _recordingDelay += | |
959 (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000); | |
960 } | |
961 } | |
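A worked example of the buffer term above, under assumed figures:

  //   _adbSampFreq              = 16000
  //   noSamp10ms                = 16000 / 100  = 160 samples
  //   samples per millisecond   = 16000 / 1000 = 16
  //   _recordingBufferTotalSize = 400 (assumed backlog)
  //   extra delay               = (400 - 160) / 16 = 15 ms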
962 | |
963 bool AudioDeviceIOS::RunCapture(void* ptrThis) { | |
964 return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread(); | |
965 } | |
966 | |
967 bool AudioDeviceIOS::CaptureWorkerThread() { | |
968 if (_recording) { | |
969 int bufPos = 0; | |
970 unsigned int lowestSeq = 0; | |
971 int lowestSeqBufPos = 0; | |
972 bool foundBuf = true; | |
973 const unsigned int noSamp10ms = _adbSampFreq / 100; | |
974 | |
975 while (foundBuf) { | |
976 // Check if we have any buffer with data to insert | |
977 // into the Audio Device Buffer, | |
978 // and find the one with the lowest seq number | |
979 foundBuf = false; | |
980 for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) { | |
981 if (noSamp10ms == _recordingLength[bufPos]) { | |
982 if (!foundBuf) { | |
983 lowestSeq = _recordingSeqNumber[bufPos]; | |
984 lowestSeqBufPos = bufPos; | |
985 foundBuf = true; | |
986 } else if (_recordingSeqNumber[bufPos] < lowestSeq) { | |
987 lowestSeq = _recordingSeqNumber[bufPos]; | |
988 lowestSeqBufPos = bufPos; | |
989 } | |
1650 } | 990 } |
1651 _playWarning = 1; | 991 } |
1652 } | 992 |
1653 memset(data, 0, dataSizeBytes); // Start with empty buffer | 993 // Insert data into the Audio Device Buffer if found any |
1654 | 994 if (foundBuf) { |
1655 | 995 // Update recording delay |
1656 // Get playout data from Audio Device Buffer | 996 UpdateRecordingDelay(); |
1657 | 997 |
1658 if (_playing) { | 998 // Set the recorded buffer |
1659 unsigned int noSamp10ms = _adbSampFreq / 100; | 999 audio_device_buffer_->SetRecordedBuffer( |
1660 // todo: Member variable and allocate when samp freq is determined | 1000 reinterpret_cast<int8_t*>(_recordingBuffer[lowestSeqBufPos]), |
1661 int16_t* dataTmp = new int16_t[noSamp10ms]; | 1001 _recordingLength[lowestSeqBufPos]); |
1662 memset(dataTmp, 0, 2*noSamp10ms); | 1002 |
1663 unsigned int dataPos = 0; | 1003 // Don't need to set the current mic level in ADB since we only |
1664 int noSamplesOut = 0; | 1004 // support digital AGC, |
1665 unsigned int nCopy = 0; | 1005 // and besides we cannot get or set the IOS mic level anyway. |
1666 | 1006 |
1667 // First insert data from playout buffer if any | 1007 // Set VQE info, use clockdrift == 0 |
1668 if (_playoutBufferUsed > 0) { | 1008 audio_device_buffer_->SetVQEData(_playoutDelay, _recordingDelay, 0); |
1669 nCopy = (dataSize < _playoutBufferUsed) ? | 1009 |
1670 dataSize : _playoutBufferUsed; | 1010 // Deliver recorded samples at specified sample rate, mic level |
1671 if (nCopy != _playoutBufferUsed) { | 1011 // etc. to the observer using callback |
1672 // todo: If dataSize < _playoutBufferUsed | 1012 audio_device_buffer_->DeliverRecordedData(); |
1673 // (should normally never be) | 1013 |
1674 // we must move the remaining data | 1014 // Make buffer available |
1675 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1015 _recordingSeqNumber[lowestSeqBufPos] = 0; |
1676 "nCopy (%u) != _playoutBufferUsed (%u)", | 1016 _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos]; |
1677 nCopy, _playoutBufferUsed); | 1017 // Must be done last to avoid interrupt problems between threads |
1678 if (_playWarning > 0) { | 1018 _recordingLength[lowestSeqBufPos] = 0; |
1679 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1019 } |
1680 " Pending play warning exists"); | 1020 } |
1681 } | 1021 } |
1682 _playWarning = 1; | 1022 |
1683 } | 1023 { |
1684 memcpy(data, _playoutBuffer, 2*nCopy); | 1024 // Normal case |
1685 dataPos = nCopy; | 1025 // Sleep thread (5ms) to let other threads get to work |
1686 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); | 1026 // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio |
1687 _playoutBufferUsed = 0; | 1027 // Device Buffer? |
1688 } | 1028 timespec t; |
1689 | 1029 t.tv_sec = 0; |
1690 // Now get the rest from Audio Device Buffer | 1030 t.tv_nsec = 5 * 1000 * 1000; |
1691 while (dataPos < dataSize) { | 1031 nanosleep(&t, nullptr); |
1692 // Update playout delay | 1032 } |
1693 UpdatePlayoutDelay(); | 1033 return true; |
1694 | |
1695 // Ask for new PCM data to be played out using the AudioDeviceBuffer | |
1696 noSamplesOut = _ptrAudioBuffer->RequestPlayoutData(noSamp10ms); | |
1697 | |
1698 // Get data from Audio Device Buffer | |
1699 noSamplesOut = | |
1700 _ptrAudioBuffer->GetPlayoutData( | |
1701 reinterpret_cast<int8_t*>(dataTmp)); | |
1702 // Cast OK since only equality comparison | |
1703 if (noSamp10ms != (unsigned int)noSamplesOut) { | |
1704 // Should never happen | |
1705 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1706 "noSamp10ms (%u) != noSamplesOut (%d)", | |
1707 noSamp10ms, noSamplesOut); | |
1708 | |
1709 if (_playWarning > 0) { | |
1710 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1711 " Pending play warning exists"); | |
1712 } | |
1713 _playWarning = 1; | |
1714 } | |
1715 | |
1716 // Insert as much as fits in data buffer | |
1717 nCopy = (dataSize-dataPos) > noSamp10ms ? | |
1718 noSamp10ms : (dataSize-dataPos); | |
1719 memcpy(&data[dataPos], dataTmp, 2*nCopy); | |
1720 | |
1721 // Save rest in playout buffer if any | |
1722 if (nCopy < noSamp10ms) { | |
1723 memcpy(_playoutBuffer, &dataTmp[nCopy], 2*(noSamp10ms-nCopy)); | |
1724 _playoutBufferUsed = noSamp10ms - nCopy; | |
1725 } | |
1726 | |
1727 // Update loop/index counter, if we copied less than noSamp10ms | |
1728 // samples we shall quit loop anyway | |
1729 dataPos += noSamp10ms; | |
1730 } | |
1731 | |
1732 delete [] dataTmp; | |
1733 } | |
1734 | |
1735 return 0; | |
1736 } | |
1737 | |
1738 void AudioDeviceIOS::UpdatePlayoutDelay() { | |
1739 ++_playoutDelayMeasurementCounter; | |
1740 | |
1741 if (_playoutDelayMeasurementCounter >= 100) { | |
1742 // Update HW and OS delay every second, unlikely to change | |
1743 | |
1744 // Since this is eventually rounded to integral ms, add 0.5ms | |
1745 // here to get round-to-nearest-int behavior instead of | |
1746 // truncation. | |
1747 double totalDelaySeconds = 0.0005; | |
1748 | |
1749 // HW output latency | |
1750 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
1751 double latency = session.outputLatency; | |
1752 assert(latency >= 0); | |
1753 totalDelaySeconds += latency; | |
1754 | |
1755 // HW buffer duration | |
1756 double ioBufferDuration = session.IOBufferDuration; | |
1757 assert(ioBufferDuration >= 0); | |
1758 totalDelaySeconds += ioBufferDuration; | |
1759 | |
1760 // AU latency | |
1761 Float64 f64(0); | |
1762 UInt32 size = sizeof(f64); | |
1763 OSStatus result = AudioUnitGetProperty( | |
1764 _auVoiceProcessing, kAudioUnitProperty_Latency, | |
1765 kAudioUnitScope_Global, 0, &f64, &size); | |
1766 if (0 != result) { | |
1767 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1768 "error AU latency (result=%d)", result); | |
1769 } | |
1770 assert(f64 >= 0); | |
1771 totalDelaySeconds += f64; | |
1772 | |
1773 // To ms | |
1774 _playoutDelay = static_cast<uint32_t>(totalDelaySeconds / 1000); | |
1775 | |
1776 // Reset counter | |
1777 _playoutDelayMeasurementCounter = 0; | |
1778 } | |
1779 | |
1780 // todo: Add playout buffer? | |
1781 } | |
1782 | |
1783 void AudioDeviceIOS::UpdateRecordingDelay() { | |
1784 ++_recordingDelayMeasurementCounter; | |
1785 | |
1786 if (_recordingDelayMeasurementCounter >= 100) { | |
1787 // Update HW and OS delay every second, unlikely to change | |
1788 | |
1789 // Since this is eventually rounded to integral ms, add 0.5ms | |
1790 // here to get round-to-nearest-int behavior instead of | |
1791 // truncation. | |
1792 double totalDelaySeconds = 0.0005; | |
1793 | |
1794 // HW input latency | |
1795 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
1796 double latency = session.inputLatency; | |
1797 assert(latency >= 0); | |
1798 totalDelaySeconds += latency; | |
1799 | |
1800 // HW buffer duration | |
1801 double ioBufferDuration = session.IOBufferDuration; | |
1802 assert(ioBufferDuration >= 0); | |
1803 totalDelaySeconds += ioBufferDuration; | |
1804 | |
1805 // AU latency | |
1806 Float64 f64(0); | |
1807 UInt32 size = sizeof(f64); | |
1808 OSStatus result = AudioUnitGetProperty( | |
1809 _auVoiceProcessing, kAudioUnitProperty_Latency, | |
1810 kAudioUnitScope_Global, 0, &f64, &size); | |
1811 if (0 != result) { | |
1812 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1813 "error AU latency (result=%d)", result); | |
1814 } | |
1815 assert(f64 >= 0); | |
1816 totalDelaySeconds += f64; | |
1817 | |
1818 // To ms | |
1819 _recordingDelayHWAndOS = | |
1820 static_cast<uint32_t>(totalDelaySeconds / 1000); | |
1821 | |
1822 // Reset counter | |
1823 _recordingDelayMeasurementCounter = 0; | |
1824 } | |
1825 | |
1826 _recordingDelay = _recordingDelayHWAndOS; | |
1827 | |
1828 // ADB recording buffer size, update every time | |
1829 // Don't count the one next 10 ms to be sent, then convert samples => ms | |
1830 const uint32_t noSamp10ms = _adbSampFreq / 100; | |
1831 if (_recordingBufferTotalSize > noSamp10ms) { | |
1832 _recordingDelay += | |
1833 (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000); | |
1834 } | |
1835 } | |
1836 | |
1837 bool AudioDeviceIOS::RunCapture(void* ptrThis) { | |
1838 return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread(); | |
1839 } | |
1840 | |
1841 bool AudioDeviceIOS::CaptureWorkerThread() { | |
1842 if (_recording) { | |
1843 int bufPos = 0; | |
1844 unsigned int lowestSeq = 0; | |
1845 int lowestSeqBufPos = 0; | |
1846 bool foundBuf = true; | |
1847 const unsigned int noSamp10ms = _adbSampFreq / 100; | |
1848 | |
1849 while (foundBuf) { | |
1850 // Check if we have any buffer with data to insert | |
1851 // into the Audio Device Buffer, | |
1852 // and find the one with the lowest seq number | |
1853 foundBuf = false; | |
1854 for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) { | |
1855 if (noSamp10ms == _recordingLength[bufPos]) { | |
1856 if (!foundBuf) { | |
1857 lowestSeq = _recordingSeqNumber[bufPos]; | |
1858 lowestSeqBufPos = bufPos; | |
1859 foundBuf = true; | |
1860 } else if (_recordingSeqNumber[bufPos] < lowestSeq) { | |
1861 lowestSeq = _recordingSeqNumber[bufPos]; | |
1862 lowestSeqBufPos = bufPos; | |
1863 } | |
1864 } | |
1865 } // for | |
1866 | |
1867 // Insert data into the Audio Device Buffer if found any | |
1868 if (foundBuf) { | |
1869 // Update recording delay | |
1870 UpdateRecordingDelay(); | |
1871 | |
1872 // Set the recorded buffer | |
1873 _ptrAudioBuffer->SetRecordedBuffer( | |
1874 reinterpret_cast<int8_t*>( | |
1875 _recordingBuffer[lowestSeqBufPos]), | |
1876 _recordingLength[lowestSeqBufPos]); | |
1877 | |
1878 // Don't need to set the current mic level in ADB since we only | |
1879 // support digital AGC, | |
1880 // and besides we cannot get or set the IOS mic level anyway. | |
1881 | |
1882 // Set VQE info, use clockdrift == 0 | |
1883 _ptrAudioBuffer->SetVQEData(_playoutDelay, _recordingDelay, 0); | |
1884 | |
1885 // Deliver recorded samples at specified sample rate, mic level | |
1886 // etc. to the observer using callback | |
1887 _ptrAudioBuffer->DeliverRecordedData(); | |
1888 | |
1889 // Make buffer available | |
1890 _recordingSeqNumber[lowestSeqBufPos] = 0; | |
1891 _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos]; | |
1892 // Must be done last to avoid interrupt problems between threads | |
1893 _recordingLength[lowestSeqBufPos] = 0; | |
1894 } | |
1895 } // while (foundBuf) | |
1896 } // if (_recording) | |
1897 | |
1898 { | |
1899 // Normal case | |
1900 // Sleep thread (5ms) to let other threads get to work | |
1901 // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio | |
1902 // Device Buffer? | |
1903 timespec t; | |
1904 t.tv_sec = 0; | |
1905 t.tv_nsec = 5*1000*1000; | |
1906 nanosleep(&t, NULL); | |
1907 } | |
1908 | |
1909 return true; | |
1910 } | 1034 } |
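One way to act on the "Is 5 ms optimal?" TODO above would be to poll faster while buffers are actually being delivered; a minimal sketch under that assumption (the 'delivered' flag and the 2 ms figure are hypothetical, not part of this CL):

  // Hypothetical variant: sleep less when the last pass delivered data,
  // fall back to the 5 ms poll when idle.
  bool delivered = false;  // would be set where DeliverRecordedData() runs
  timespec t;
  t.tv_sec = 0;
  t.tv_nsec = (delivered ? 2 : 5) * 1000 * 1000;
  nanosleep(&t, nullptr);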
1911 | 1035 |
1912 } // namespace webrtc | 1036 } // namespace webrtc |