OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
| 11 #if !defined(__has_feature) || !__has_feature(objc_arc) |
| 12 #error "This file requires ARC support." |
| 13 #endif |
| 14 |
11 #import <AVFoundation/AVFoundation.h> | 15 #import <AVFoundation/AVFoundation.h> |
12 #import <Foundation/Foundation.h> | 16 #import <Foundation/Foundation.h> |
13 | 17 |
14 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" | 18 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" |
15 | 19 #include "webrtc/modules/utility/interface/helpers_ios.h" |
| 20 |
| 21 #include "webrtc/base/checks.h" |
| 22 #include "webrtc/base/logging.h" |
16 #include "webrtc/system_wrappers/interface/trace.h" | 23 #include "webrtc/system_wrappers/interface/trace.h" |
17 | 24 |
| 25 #define TAG "AudioDeviceIOS::" |
| 26 |
18 namespace webrtc { | 27 namespace webrtc { |
19 AudioDeviceIOS::AudioDeviceIOS(const int32_t id) | 28 |
 20 : | 29 // TODO(henrika): use this method as a basis for querying the hardware sample |
21 _ptrAudioBuffer(NULL), | 30 // rate, channel configuration and buffer size. Currently only added for |
22 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), | 31 // debugging purposes. |
23 _id(id), | 32 static void LogAudioSessionParameters() { |
24 _auVoiceProcessing(NULL), | 33 NSLog(@"LogAudioSessionParameters"); |
25 _audioInterruptionObserver(NULL), | 34 @autoreleasepool { |
 26 _initialized(false), | 35 // Get the shared audio session instance. |
27 _isShutDown(false), | 36 AVAudioSession* session = [AVAudioSession sharedInstance]; |
28 _recording(false), | 37 NSError* errorCode = nil; |
29 _playing(false), | 38 // Set category to AVAudioSessionCategoryPlayAndRecord. |
30 _recIsInitialized(false), | 39 BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord |
31 _playIsInitialized(false), | 40 error:&errorCode]; |
32 _recordingDeviceIsSpecified(false), | 41 ios::CheckAndLogError(success, errorCode); |
33 _playoutDeviceIsSpecified(false), | 42 // Set mode to AVAudioSessionModeVoiceChat. |
34 _micIsInitialized(false), | 43 success = [session setMode:AVAudioSessionModeVoiceChat error:&errorCode]; |
35 _speakerIsInitialized(false), | 44 ios::CheckAndLogError(success, errorCode); |
36 _AGC(false), | 45 // Activate the audio session. |
37 _adbSampFreq(0), | 46 success = [session setActive:YES error:&errorCode]; |
38 _recordingDelay(0), | 47 ios::CheckAndLogError(success, errorCode); |
39 _playoutDelay(0), | 48 |
40 _playoutDelayMeasurementCounter(9999), | 49 // Log current properties. |
41 _recordingDelayHWAndOS(0), | 50 NSLog(@"category: %@", [session category]); |
42 _recordingDelayMeasurementCounter(9999), | 51 AVAudioSessionCategoryOptions options = [session categoryOptions]; |
43 _playWarning(0), | 52 NSLog(@"category options: %lu", (unsigned long)options); |
44 _playError(0), | 53 NSLog(@"mode: %@", [session mode]); |
45 _recWarning(0), | 54 NSLog(@"sample rate: %0.0f", [session sampleRate]); |
46 _recError(0), | 55 NSLog(@"output latency: %f", (double)[session outputLatency]); |
47 _playoutBufferUsed(0), | 56 NSLog(@"input latency: %f", (double)[session inputLatency]); |
48 _recordingCurrentSeq(0), | 57 NSLog(@"buffer duration: %f", (double)[session IOBufferDuration]); |
49 _recordingBufferTotalSize(0) { | 58 NSLog(@"#output channels: %ld", (long)[session outputNumberOfChannels]); |
50 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, | 59 NSLog(@"#input channels: %ld", (long)[session inputNumberOfChannels]); |
51 "%s created", __FUNCTION__); | 60 |
52 | 61 // Deactivate the audio session. |
53 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); | 62 success = [session setActive:NO error:&errorCode]; |
54 memset(_recordingBuffer, 0, sizeof(_recordingBuffer)); | 63 ios::CheckAndLogError(success, errorCode); |
55 memset(_recordingLength, 0, sizeof(_recordingLength)); | 64 } |
56 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber)); | 65 } |
| 66 |
| 67 AudioDeviceIOS::AudioDeviceIOS() |
| 68 : audio_device_buffer_(nullptr), |
| 69 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), |
| 70 _auVoiceProcessing(nullptr), |
| 71 _audioInterruptionObserver(nullptr), |
| 72 _initialized(false), |
| 73 _isShutDown(false), |
| 74 _recording(false), |
| 75 _playing(false), |
| 76 _recIsInitialized(false), |
| 77 _playIsInitialized(false), |
| 78 _adbSampFreq(0), |
| 79 _recordingDelay(0), |
| 80 _playoutDelay(0), |
| 81 _playoutDelayMeasurementCounter(9999), |
| 82 _recordingDelayHWAndOS(0), |
| 83 _recordingDelayMeasurementCounter(9999), |
| 84 _playoutBufferUsed(0), |
| 85 _recordingCurrentSeq(0), |
| 86 _recordingBufferTotalSize(0) { |
| 87 LOG(LS_INFO) << TAG << "ctor" << ios::GetThreadInfo() |
| 88 << ios::GetCurrentThreadDescription(); |
| 89 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); |
| 90 memset(_recordingBuffer, 0, sizeof(_recordingBuffer)); |
| 91 memset(_recordingLength, 0, sizeof(_recordingLength)); |
| 92 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber)); |
| 93 #if !defined(NDEBUG) |
| 94 LogAudioSessionParameters(); |
| 95 #endif |
| 96 // TODO(henrika): these parameters are currently hard coded to match the |
| 97 // existing implementation where we always use 16kHz as preferred sample |
 | 98 // rate. The goal is to improve this scheme and make it more flexible. |
 | 99 // In addition, a better native buffer size should be derived; 10 ms is |
 | 100 // used as the default here. |
 | 101 // We should also implement observers for notification of any change in |
| 102 // these parameters. |
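 | // 160 frames per channel corresponds to 10 ms at 16 kHz. |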
| 103 playout_parameters_.reset(16000, 1, 160); |
| 104 record_parameters_.reset(16000, 1, 160); |
57 } | 105 } |
58 | 106 |
59 AudioDeviceIOS::~AudioDeviceIOS() { | 107 AudioDeviceIOS::~AudioDeviceIOS() { |
60 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, | 108 LOG(LS_INFO) << TAG << "~dtor" << ios::GetThreadInfo(); |
61 "%s destroyed", __FUNCTION__); | 109 DCHECK(thread_checker_.CalledOnValidThread()); |
62 | 110 Terminate(); |
63 Terminate(); | 111 delete &_critSect; |
64 | 112 } |
65 delete &_critSect; | |
66 } | |
67 | |
68 | |
69 // ============================================================================ | |
70 // API | |
71 // ============================================================================ | |
72 | 113 |
73 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { | 114 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { |
74 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | 115 LOG(LS_INFO) << TAG << "AttachAudioBuffer"; |
75 "%s", __FUNCTION__); | 116 DCHECK(audioBuffer); |
76 | 117 DCHECK(thread_checker_.CalledOnValidThread()); |
77 CriticalSectionScoped lock(&_critSect); | 118 audio_device_buffer_ = audioBuffer; |
78 | 119 // TODO(henrika): try to improve this section. |
79 _ptrAudioBuffer = audioBuffer; | 120 audioBuffer->SetPlayoutSampleRate(playout_parameters_.sample_rate()); |
80 | 121 audioBuffer->SetPlayoutChannels(playout_parameters_.channels()); |
81 // inform the AudioBuffer about default settings for this implementation | 122 audioBuffer->SetRecordingSampleRate(record_parameters_.sample_rate()); |
82 _ptrAudioBuffer->SetRecordingSampleRate(ENGINE_REC_BUF_SIZE_IN_SAMPLES); | 123 audioBuffer->SetRecordingChannels(record_parameters_.channels()); |
83 _ptrAudioBuffer->SetPlayoutSampleRate(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES); | 124 } |
84 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS); | 125 |
85 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS); | 126 int32_t AudioDeviceIOS::Init() { |
86 } | 127 LOG(LS_INFO) << TAG << "Init"; |
87 | 128 DCHECK(thread_checker_.CalledOnValidThread()); |
88 int32_t AudioDeviceIOS::ActiveAudioLayer( | 129 if (_initialized) { |
89 AudioDeviceModule::AudioLayer& audioLayer) const { | |
90 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
91 "%s", __FUNCTION__); | |
92 audioLayer = AudioDeviceModule::kPlatformDefaultAudio; | |
93 return 0; | 130 return 0; |
94 } | 131 } |
95 | 132 DCHECK(!_captureWorkerThread); |
96 int32_t AudioDeviceIOS::Init() { | 133 // Create and start the capture thread. |
97 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | 134 // TODO(henrika): do we need this thread? |
98 "%s", __FUNCTION__); | 135 _isShutDown = false; |
99 | 136 _captureWorkerThread = |
100 CriticalSectionScoped lock(&_critSect); | 137 ThreadWrapper::CreateThread(RunCapture, this, "CaptureWorkerThread"); |
101 | 138 if (!_captureWorkerThread->Start()) { |
102 if (_initialized) { | 139 LOG_F(LS_ERROR) << "Failed to start CaptureWorkerThread!"; |
103 return 0; | 140 return -1; |
104 } | 141 } |
105 | 142 _captureWorkerThread->SetPriority(kRealtimePriority); |
106 _isShutDown = false; | 143 _initialized = true; |
107 | 144 return 0; |
108 // Create and start capture thread | 145 } |
109 if (!_captureWorkerThread) { | 146 |
110 _captureWorkerThread = ThreadWrapper::CreateThread( | 147 int32_t AudioDeviceIOS::Terminate() { |
111 RunCapture, this, "CaptureWorkerThread"); | 148 LOG(LS_INFO) << TAG << "Terminate"; |
112 bool res = _captureWorkerThread->Start(); | 149 DCHECK(thread_checker_.CalledOnValidThread()); |
113 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, | 150 if (!_initialized) { |
114 _id, "CaptureWorkerThread started (res=%d)", res); | 151 return 0; |
115 _captureWorkerThread->SetPriority(kRealtimePriority); | 152 } |
| 153 // Stop the capture thread. |
| 154 if (_captureWorkerThread) { |
| 155 if (!_captureWorkerThread->Stop()) { |
| 156 LOG_F(LS_ERROR) << "Failed to stop CaptureWorkerThread!"; |
| 157 return -1; |
| 158 } |
| 159 _captureWorkerThread.reset(); |
| 160 } |
| 161 ShutdownPlayOrRecord(); |
| 162 _isShutDown = true; |
| 163 _initialized = false; |
| 164 return 0; |
| 165 } |
| 166 |
| 167 int32_t AudioDeviceIOS::InitPlayout() { |
| 168 LOG(LS_INFO) << TAG << "InitPlayout"; |
| 169 DCHECK(thread_checker_.CalledOnValidThread()); |
| 170 DCHECK(_initialized); |
| 171 DCHECK(!_playIsInitialized); |
| 172 DCHECK(!_playing); |
| 173 if (!_recIsInitialized) { |
| 174 if (InitPlayOrRecord() == -1) { |
| 175 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; |
| 176 return -1; |
| 177 } |
| 178 } |
| 179 _playIsInitialized = true; |
| 180 return 0; |
| 181 } |
| 182 |
| 183 int32_t AudioDeviceIOS::InitRecording() { |
 | 184 LOG(LS_INFO) << TAG << "InitRecording"; |
| 185 DCHECK(thread_checker_.CalledOnValidThread()); |
| 186 DCHECK(_initialized); |
| 187 DCHECK(!_recIsInitialized); |
| 188 DCHECK(!_recording); |
| 189 if (!_playIsInitialized) { |
| 190 if (InitPlayOrRecord() == -1) { |
| 191 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; |
| 192 return -1; |
| 193 } |
| 194 } |
| 195 _recIsInitialized = true; |
| 196 return 0; |
| 197 } |
| 198 |
| 199 int32_t AudioDeviceIOS::StartPlayout() { |
| 200 LOG(LS_INFO) << TAG << "StartPlayout"; |
| 201 DCHECK(thread_checker_.CalledOnValidThread()); |
| 202 DCHECK(_playIsInitialized); |
| 203 DCHECK(!_playing); |
| 204 |
| 205 CriticalSectionScoped lock(&_critSect); |
| 206 |
| 207 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); |
| 208 _playoutBufferUsed = 0; |
| 209 _playoutDelay = 0; |
 | 210 // Make sure the first call to the delay update function updates the delay. |
| 211 _playoutDelayMeasurementCounter = 9999; |
| 212 |
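 | // The shared Audio Unit is already running if recording is active. |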
| 213 if (!_recording) { |
| 214 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing); |
| 215 if (result != noErr) { |
| 216 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; |
| 217 return -1; |
| 218 } |
| 219 } |
| 220 _playing = true; |
| 221 return 0; |
| 222 } |
| 223 |
| 224 int32_t AudioDeviceIOS::StopPlayout() { |
| 225 LOG(LS_INFO) << TAG << "StopPlayout"; |
| 226 DCHECK(thread_checker_.CalledOnValidThread()); |
| 227 if (!_playIsInitialized || !_playing) { |
| 228 return 0; |
| 229 } |
| 230 |
| 231 CriticalSectionScoped lock(&_critSect); |
| 232 |
| 233 if (!_recording) { |
 | 234 // Both playout and recording have stopped; shut down the device. |
| 235 ShutdownPlayOrRecord(); |
| 236 } |
| 237 _playIsInitialized = false; |
| 238 _playing = false; |
| 239 return 0; |
| 240 } |
| 241 |
| 242 int32_t AudioDeviceIOS::StartRecording() { |
| 243 LOG(LS_INFO) << TAG << "StartRecording"; |
| 244 DCHECK(thread_checker_.CalledOnValidThread()); |
| 245 DCHECK(_recIsInitialized); |
| 246 DCHECK(!_recording); |
| 247 |
| 248 CriticalSectionScoped lock(&_critSect); |
| 249 |
| 250 memset(_recordingBuffer, 0, sizeof(_recordingBuffer)); |
| 251 memset(_recordingLength, 0, sizeof(_recordingLength)); |
| 252 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber)); |
| 253 |
| 254 _recordingCurrentSeq = 0; |
| 255 _recordingBufferTotalSize = 0; |
| 256 _recordingDelay = 0; |
| 257 _recordingDelayHWAndOS = 0; |
 | 258 // Make sure the first call to the delay update function updates the delay. |
| 259 _recordingDelayMeasurementCounter = 9999; |
| 260 |
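 | // The shared Audio Unit is already running if playout is active. |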
| 261 if (!_playing) { |
| 262 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing); |
| 263 if (result != noErr) { |
| 264 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; |
| 265 return -1; |
| 266 } |
| 267 } |
| 268 _recording = true; |
| 269 return 0; |
| 270 } |
| 271 |
| 272 int32_t AudioDeviceIOS::StopRecording() { |
| 273 LOG(LS_INFO) << TAG << "StopRecording"; |
| 274 DCHECK(thread_checker_.CalledOnValidThread()); |
| 275 if (!_recIsInitialized || !_recording) { |
| 276 return 0; |
| 277 } |
| 278 |
| 279 CriticalSectionScoped lock(&_critSect); |
| 280 |
| 281 if (!_playing) { |
 | 282 // Both playout and recording have stopped; shut down the device. |
| 283 ShutdownPlayOrRecord(); |
| 284 } |
| 285 _recIsInitialized = false; |
| 286 _recording = false; |
| 287 return 0; |
| 288 } |
| 289 |
| 290 // Change the default receiver playout route to speaker. |
| 291 int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) { |
| 292 LOG(LS_INFO) << TAG << "SetLoudspeakerStatus(" << enable << ")"; |
| 293 |
| 294 AVAudioSession* session = [AVAudioSession sharedInstance]; |
| 295 NSString* category = session.category; |
| 296 AVAudioSessionCategoryOptions options = session.categoryOptions; |
| 297 // Respect old category options if category is |
| 298 // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options |
| 299 // might not be valid for this category. |
| 300 if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) { |
| 301 if (enable) { |
| 302 options |= AVAudioSessionCategoryOptionDefaultToSpeaker; |
116 } else { | 303 } else { |
117 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, | 304 options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker; |
118 _id, "Thread already created"); | 305 } |
119 } | 306 } else { |
120 _playWarning = 0; | 307 options = AVAudioSessionCategoryOptionDefaultToSpeaker; |
121 _playError = 0; | 308 } |
122 _recWarning = 0; | 309 NSError* error = nil; |
123 _recError = 0; | 310 BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord |
124 | 311 withOptions:options |
125 _initialized = true; | 312 error:&error]; |
126 | 313 ios::CheckAndLogError(success, error); |
127 return 0; | 314 return (error == nil) ? 0 : -1; |
128 } | 315 } |
129 | 316 |
130 int32_t AudioDeviceIOS::Terminate() { | 317 int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const { |
131 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | 318 LOG(LS_INFO) << TAG << "GetLoudspeakerStatus"; |
132 "%s", __FUNCTION__); | 319 AVAudioSession* session = [AVAudioSession sharedInstance]; |
133 | 320 AVAudioSessionCategoryOptions options = session.categoryOptions; |
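 | // The bitwise AND is non-zero exactly when the DefaultToSpeaker bit is set. |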
134 if (!_initialized) { | 321 enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker; |
135 return 0; | 322 return 0; |
136 } | |
137 | |
138 | |
139 // Stop capture thread | |
140 if (_captureWorkerThread) { | |
141 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, | |
142 _id, "Stopping CaptureWorkerThread"); | |
143 bool res = _captureWorkerThread->Stop(); | |
144 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, | |
145 _id, "CaptureWorkerThread stopped (res=%d)", res); | |
146 _captureWorkerThread.reset(); | |
147 } | |
148 | |
149 // Shut down Audio Unit | |
150 ShutdownPlayOrRecord(); | |
151 | |
152 _isShutDown = true; | |
153 _initialized = false; | |
154 _speakerIsInitialized = false; | |
155 _micIsInitialized = false; | |
156 _playoutDeviceIsSpecified = false; | |
157 _recordingDeviceIsSpecified = false; | |
158 return 0; | |
159 } | |
160 | |
161 bool AudioDeviceIOS::Initialized() const { | |
162 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
163 "%s", __FUNCTION__); | |
164 return (_initialized); | |
165 } | |
166 | |
167 int32_t AudioDeviceIOS::InitSpeaker() { | |
168 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
169 "%s", __FUNCTION__); | |
170 | |
171 CriticalSectionScoped lock(&_critSect); | |
172 | |
173 if (!_initialized) { | |
174 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
175 _id, " Not initialized"); | |
176 return -1; | |
177 } | |
178 | |
179 if (_playing) { | |
180 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
181 _id, " Cannot init speaker when playing"); | |
182 return -1; | |
183 } | |
184 | |
185 if (!_playoutDeviceIsSpecified) { | |
186 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
187 _id, " Playout device is not specified"); | |
188 return -1; | |
189 } | |
190 | |
191 // Do nothing | |
192 _speakerIsInitialized = true; | |
193 | |
194 return 0; | |
195 } | |
196 | |
197 int32_t AudioDeviceIOS::InitMicrophone() { | |
198 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
199 "%s", __FUNCTION__); | |
200 | |
201 CriticalSectionScoped lock(&_critSect); | |
202 | |
203 if (!_initialized) { | |
204 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
205 _id, " Not initialized"); | |
206 return -1; | |
207 } | |
208 | |
209 if (_recording) { | |
210 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
211 _id, " Cannot init mic when recording"); | |
212 return -1; | |
213 } | |
214 | |
215 if (!_recordingDeviceIsSpecified) { | |
216 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, | |
217 _id, " Recording device is not specified"); | |
218 return -1; | |
219 } | |
220 | |
221 // Do nothing | |
222 | |
223 _micIsInitialized = true; | |
224 | |
225 return 0; | |
226 } | |
227 | |
228 bool AudioDeviceIOS::SpeakerIsInitialized() const { | |
229 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
230 "%s", __FUNCTION__); | |
231 return _speakerIsInitialized; | |
232 } | |
233 | |
234 bool AudioDeviceIOS::MicrophoneIsInitialized() const { | |
235 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
236 "%s", __FUNCTION__); | |
237 return _micIsInitialized; | |
238 } | |
239 | |
240 int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) { | |
241 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
242 "%s", __FUNCTION__); | |
243 | |
244 available = false; // Speaker volume not supported on iOS | |
245 | |
246 return 0; | |
247 } | |
248 | |
249 int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) { | |
250 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
251 "AudioDeviceIOS::SetSpeakerVolume(volume=%u)", volume); | |
252 | |
253 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
254 " API call not supported on this platform"); | |
255 return -1; | |
256 } | |
257 | |
258 int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const { | |
259 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
260 "%s", __FUNCTION__); | |
261 | |
262 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
263 " API call not supported on this platform"); | |
264 return -1; | |
265 } | |
266 | |
267 int32_t | |
268 AudioDeviceIOS::SetWaveOutVolume(uint16_t volumeLeft, | |
269 uint16_t volumeRight) { | |
270 WEBRTC_TRACE( | |
271 kTraceModuleCall, | |
272 kTraceAudioDevice, | |
273 _id, | |
274 "AudioDeviceIOS::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)", | |
275 volumeLeft, volumeRight); | |
276 | |
277 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
278 " API call not supported on this platform"); | |
279 | |
280 return -1; | |
281 } | |
282 | |
283 int32_t | |
284 AudioDeviceIOS::WaveOutVolume(uint16_t& /*volumeLeft*/, | |
285 uint16_t& /*volumeRight*/) const { | |
286 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
287 "%s", __FUNCTION__); | |
288 | |
289 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
290 " API call not supported on this platform"); | |
291 return -1; | |
292 } | |
293 | |
294 int32_t | |
295 AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const { | |
296 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
297 "%s", __FUNCTION__); | |
298 | |
299 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
300 " API call not supported on this platform"); | |
301 return -1; | |
302 } | |
303 | |
304 int32_t AudioDeviceIOS::MinSpeakerVolume( | |
305 uint32_t& minVolume) const { | |
306 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
307 "%s", __FUNCTION__); | |
308 | |
309 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
310 " API call not supported on this platform"); | |
311 return -1; | |
312 } | |
313 | |
314 int32_t | |
315 AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const { | |
316 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
317 "%s", __FUNCTION__); | |
318 | |
319 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
320 " API call not supported on this platform"); | |
321 return -1; | |
322 } | |
323 | |
324 int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) { | |
325 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
326 "%s", __FUNCTION__); | |
327 | |
328 available = false; // Speaker mute not supported on iOS | |
329 | |
330 return 0; | |
331 } | |
332 | |
333 int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) { | |
334 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
335 "%s", __FUNCTION__); | |
336 | |
337 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
338 " API call not supported on this platform"); | |
339 return -1; | |
340 } | |
341 | |
342 int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const { | |
343 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
344 "%s", __FUNCTION__); | |
345 | |
346 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
347 " API call not supported on this platform"); | |
348 return -1; | |
349 } | |
350 | |
351 int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) { | |
352 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
353 "%s", __FUNCTION__); | |
354 | |
355 available = false; // Mic mute not supported on iOS | |
356 | |
357 return 0; | |
358 } | |
359 | |
360 int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) { | |
361 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
362 "%s", __FUNCTION__); | |
363 | |
364 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
365 " API call not supported on this platform"); | |
366 return -1; | |
367 } | |
368 | |
369 int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const { | |
370 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
371 "%s", __FUNCTION__); | |
372 | |
373 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
374 " API call not supported on this platform"); | |
375 return -1; | |
376 } | |
377 | |
378 int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) { | |
379 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
380 "%s", __FUNCTION__); | |
381 | |
382 available = false; // Mic boost not supported on iOS | |
383 | |
384 return 0; | |
385 } | |
386 | |
387 int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) { | |
388 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
389 "AudioDeviceIOS::SetMicrophoneBoost(enable=%u)", enable); | |
390 | |
391 if (!_micIsInitialized) { | |
392 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
393 " Microphone not initialized"); | |
394 return -1; | |
395 } | |
396 | |
397 if (enable) { | |
398 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
399 " SetMicrophoneBoost cannot be enabled on this platform"); | |
400 return -1; | |
401 } | |
402 | |
403 return 0; | |
404 } | |
405 | |
406 int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const { | |
407 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
408 "%s", __FUNCTION__); | |
409 if (!_micIsInitialized) { | |
410 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
411 " Microphone not initialized"); | |
412 return -1; | |
413 } | |
414 | |
415 enabled = false; | |
416 | |
417 return 0; | |
418 } | |
419 | |
420 int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) { | |
421 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
422 "%s", __FUNCTION__); | |
423 | |
424 available = false; // Stereo recording not supported on iOS | |
425 | |
426 return 0; | |
427 } | |
428 | |
429 int32_t AudioDeviceIOS::SetStereoRecording(bool enable) { | |
430 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
431 "AudioDeviceIOS::SetStereoRecording(enable=%u)", enable); | |
432 | |
433 if (enable) { | |
434 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
435 " Stereo recording is not supported on this platform"); | |
436 return -1; | |
437 } | |
438 return 0; | |
439 } | |
440 | |
441 int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const { | |
442 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
443 "%s", __FUNCTION__); | |
444 | |
445 enabled = false; | |
446 return 0; | |
447 } | |
448 | |
449 int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) { | |
450 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
451 "%s", __FUNCTION__); | |
452 | |
453 available = false; // Stereo playout not supported on iOS | |
454 | |
455 return 0; | |
456 } | |
457 | |
458 int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) { | |
459 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
460 "AudioDeviceIOS::SetStereoPlayout(enable=%u)", enable); | |
461 | |
462 if (enable) { | |
463 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
464 " Stereo playout is not supported on this platform"); | |
465 return -1; | |
466 } | |
467 return 0; | |
468 } | |
469 | |
470 int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const { | |
471 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
472 "%s", __FUNCTION__); | |
473 | |
474 enabled = false; | |
475 return 0; | |
476 } | |
477 | |
478 int32_t AudioDeviceIOS::SetAGC(bool enable) { | |
479 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
480 "AudioDeviceIOS::SetAGC(enable=%d)", enable); | |
481 | |
482 _AGC = enable; | |
483 | |
484 return 0; | |
485 } | |
486 | |
487 bool AudioDeviceIOS::AGC() const { | |
488 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
489 "%s", __FUNCTION__); | |
490 | |
491 return _AGC; | |
492 } | |
493 | |
494 int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) { | |
495 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
496 "%s", __FUNCTION__); | |
497 | |
498 available = false; // Mic volume not supported on IOS | |
499 | |
500 return 0; | |
501 } | |
502 | |
503 int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) { | |
504 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
505 "AudioDeviceIOS::SetMicrophoneVolume(volume=%u)", volume); | |
506 | |
507 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
508 " API call not supported on this platform"); | |
509 return -1; | |
510 } | |
511 | |
512 int32_t | |
513 AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const { | |
514 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
515 "%s", __FUNCTION__); | |
516 | |
517 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
518 " API call not supported on this platform"); | |
519 return -1; | |
520 } | |
521 | |
522 int32_t | |
523 AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const { | |
524 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
525 "%s", __FUNCTION__); | |
526 | |
527 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
528 " API call not supported on this platform"); | |
529 return -1; | |
530 } | |
531 | |
532 int32_t | |
533 AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const { | |
534 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
535 "%s", __FUNCTION__); | |
536 | |
537 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
538 " API call not supported on this platform"); | |
539 return -1; | |
540 } | |
541 | |
542 int32_t | |
543 AudioDeviceIOS::MicrophoneVolumeStepSize( | |
544 uint16_t& stepSize) const { | |
545 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
546 "%s", __FUNCTION__); | |
547 | |
548 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
549 " API call not supported on this platform"); | |
550 return -1; | |
551 } | |
552 | |
553 int16_t AudioDeviceIOS::PlayoutDevices() { | |
554 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
555 "%s", __FUNCTION__); | |
556 | |
557 return (int16_t)1; | |
558 } | |
559 | |
560 int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) { | |
561 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
562 "AudioDeviceIOS::SetPlayoutDevice(index=%u)", index); | |
563 | |
564 if (_playIsInitialized) { | |
565 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
566 " Playout already initialized"); | |
567 return -1; | |
568 } | |
569 | |
570 if (index !=0) { | |
571 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
572 " SetPlayoutDevice invalid index"); | |
573 return -1; | |
574 } | |
575 _playoutDeviceIsSpecified = true; | |
576 | |
577 return 0; | |
578 } | |
579 | |
580 int32_t | |
581 AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) { | |
582 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
583 "WindowsDeviceType not supported"); | |
584 return -1; | |
585 } | |
586 | |
587 int32_t | |
588 AudioDeviceIOS::PlayoutDeviceName(uint16_t index, | |
589 char name[kAdmMaxDeviceNameSize], | |
590 char guid[kAdmMaxGuidSize]) { | |
591 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
592 "AudioDeviceIOS::PlayoutDeviceName(index=%u)", index); | |
593 | |
594 if (index != 0) { | |
595 return -1; | |
596 } | |
597 // return empty strings | |
598 memset(name, 0, kAdmMaxDeviceNameSize); | |
599 if (guid != NULL) { | |
600 memset(guid, 0, kAdmMaxGuidSize); | |
601 } | |
602 | |
603 return 0; | |
604 } | |
605 | |
606 int32_t | |
607 AudioDeviceIOS::RecordingDeviceName(uint16_t index, | |
608 char name[kAdmMaxDeviceNameSize], | |
609 char guid[kAdmMaxGuidSize]) { | |
610 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
611 "AudioDeviceIOS::RecordingDeviceName(index=%u)", index); | |
612 | |
613 if (index != 0) { | |
614 return -1; | |
615 } | |
616 // return empty strings | |
617 memset(name, 0, kAdmMaxDeviceNameSize); | |
618 if (guid != NULL) { | |
619 memset(guid, 0, kAdmMaxGuidSize); | |
620 } | |
621 | |
622 return 0; | |
623 } | |
624 | |
625 int16_t AudioDeviceIOS::RecordingDevices() { | |
626 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
627 | |
628 return (int16_t)1; | |
629 } | |
630 | |
631 int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) { | |
632 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
633 "AudioDeviceIOS::SetRecordingDevice(index=%u)", index); | |
634 | |
635 if (_recIsInitialized) { | |
636 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
637 " Recording already initialized"); | |
638 return -1; | |
639 } | |
640 | |
641 if (index !=0) { | |
642 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
643 " SetRecordingDevice invalid index"); | |
644 return -1; | |
645 } | |
646 | |
647 _recordingDeviceIsSpecified = true; | |
648 | |
649 return 0; | |
650 } | |
651 | |
652 int32_t | |
653 AudioDeviceIOS::SetRecordingDevice( | |
654 AudioDeviceModule::WindowsDeviceType) { | |
655 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
656 "WindowsDeviceType not supported"); | |
657 return -1; | |
658 } | |
659 | |
660 // ---------------------------------------------------------------------------- | |
661 // SetLoudspeakerStatus | |
662 // | |
663 // Change the default receiver playout route to speaker. | |
664 // | |
665 // ---------------------------------------------------------------------------- | |
666 | |
667 int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) { | |
668 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
669 "AudioDeviceIOS::SetLoudspeakerStatus(enable=%d)", enable); | |
670 | |
671 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
672 NSString* category = session.category; | |
673 AVAudioSessionCategoryOptions options = session.categoryOptions; | |
674 // Respect old category options if category is | |
675 // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options | |
676 // might not be valid for this category. | |
677 if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) { | |
678 if (enable) { | |
679 options |= AVAudioSessionCategoryOptionDefaultToSpeaker; | |
680 } else { | |
681 options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker; | |
682 } | |
683 } else { | |
684 options = AVAudioSessionCategoryOptionDefaultToSpeaker; | |
685 } | |
686 | |
687 NSError* error = nil; | |
688 [session setCategory:AVAudioSessionCategoryPlayAndRecord | |
689 withOptions:options | |
690 error:&error]; | |
691 if (error != nil) { | |
692 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
693 "Error changing default output route "); | |
694 return -1; | |
695 } | |
696 | |
697 return 0; | |
698 } | |
699 | |
700 int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool &enabled) const { | |
701 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
702 "AudioDeviceIOS::SetLoudspeakerStatus(enabled=?)"); | |
703 | |
704 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
705 AVAudioSessionCategoryOptions options = session.categoryOptions; | |
706 enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker; | |
707 | |
708 return 0; | |
709 } | |
710 | |
711 int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) { | |
712 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
713 | |
714 available = false; | |
715 | |
716 // Try to initialize the playout side | |
717 int32_t res = InitPlayout(); | |
718 | |
719 // Cancel effect of initialization | |
720 StopPlayout(); | |
721 | |
722 if (res != -1) { | |
723 available = true; | |
724 } | |
725 | |
726 return 0; | |
727 } | |
728 | |
729 int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) { | |
730 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
731 | |
732 available = false; | |
733 | |
734 // Try to initialize the recording side | |
735 int32_t res = InitRecording(); | |
736 | |
737 // Cancel effect of initialization | |
738 StopRecording(); | |
739 | |
740 if (res != -1) { | |
741 available = true; | |
742 } | |
743 | |
744 return 0; | |
745 } | |
746 | |
747 int32_t AudioDeviceIOS::InitPlayout() { | |
748 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
749 | |
750 CriticalSectionScoped lock(&_critSect); | |
751 | |
752 if (!_initialized) { | |
753 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Not initialized"); | |
754 return -1; | |
755 } | |
756 | |
757 if (_playing) { | |
758 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
759 " Playout already started"); | |
760 return -1; | |
761 } | |
762 | |
763 if (_playIsInitialized) { | |
764 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
765 " Playout already initialized"); | |
766 return 0; | |
767 } | |
768 | |
769 if (!_playoutDeviceIsSpecified) { | |
770 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
771 " Playout device is not specified"); | |
772 return -1; | |
773 } | |
774 | |
775 // Initialize the speaker | |
776 if (InitSpeaker() == -1) { | |
777 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
778 " InitSpeaker() failed"); | |
779 } | |
780 | |
781 _playIsInitialized = true; | |
782 | |
783 if (!_recIsInitialized) { | |
784 // Audio init | |
785 if (InitPlayOrRecord() == -1) { | |
786 // todo: Handle error | |
787 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
788 " InitPlayOrRecord() failed"); | |
789 } | |
790 } else { | |
791 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
792 " Recording already initialized - InitPlayOrRecord() not called"); | |
793 } | |
794 | |
795 return 0; | |
796 } | |
797 | |
798 bool AudioDeviceIOS::PlayoutIsInitialized() const { | |
799 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
800 return (_playIsInitialized); | |
801 } | |
802 | |
803 int32_t AudioDeviceIOS::InitRecording() { | |
804 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
805 | |
806 CriticalSectionScoped lock(&_critSect); | |
807 | |
808 if (!_initialized) { | |
809 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
810 " Not initialized"); | |
811 return -1; | |
812 } | |
813 | |
814 if (_recording) { | |
815 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
816 " Recording already started"); | |
817 return -1; | |
818 } | |
819 | |
820 if (_recIsInitialized) { | |
821 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
822 " Recording already initialized"); | |
823 return 0; | |
824 } | |
825 | |
826 if (!_recordingDeviceIsSpecified) { | |
827 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
828 " Recording device is not specified"); | |
829 return -1; | |
830 } | |
831 | |
832 // Initialize the microphone | |
833 if (InitMicrophone() == -1) { | |
834 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
835 " InitMicrophone() failed"); | |
836 } | |
837 | |
838 _recIsInitialized = true; | |
839 | |
840 if (!_playIsInitialized) { | |
841 // Audio init | |
842 if (InitPlayOrRecord() == -1) { | |
843 // todo: Handle error | |
844 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
845 " InitPlayOrRecord() failed"); | |
846 } | |
847 } else { | |
848 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
849 " Playout already initialized - InitPlayOrRecord() " \ | |
850 "not called"); | |
851 } | |
852 | |
853 return 0; | |
854 } | |
855 | |
856 bool AudioDeviceIOS::RecordingIsInitialized() const { | |
857 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
858 return (_recIsInitialized); | |
859 } | |
860 | |
861 int32_t AudioDeviceIOS::StartRecording() { | |
862 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
863 | |
864 CriticalSectionScoped lock(&_critSect); | |
865 | |
866 if (!_recIsInitialized) { | |
867 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
868 " Recording not initialized"); | |
869 return -1; | |
870 } | |
871 | |
872 if (_recording) { | |
873 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
874 " Recording already started"); | |
875 return 0; | |
876 } | |
877 | |
878 // Reset recording buffer | |
879 memset(_recordingBuffer, 0, sizeof(_recordingBuffer)); | |
880 memset(_recordingLength, 0, sizeof(_recordingLength)); | |
881 memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber)); | |
882 _recordingCurrentSeq = 0; | |
883 _recordingBufferTotalSize = 0; | |
884 _recordingDelay = 0; | |
885 _recordingDelayHWAndOS = 0; | |
886 // Make sure first call to update delay function will update delay | |
887 _recordingDelayMeasurementCounter = 9999; | |
888 _recWarning = 0; | |
889 _recError = 0; | |
890 | |
891 if (!_playing) { | |
892 // Start Audio Unit | |
893 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
894 " Starting Audio Unit"); | |
895 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing); | |
896 if (0 != result) { | |
897 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, | |
898 " Error starting Audio Unit (result=%d)", result); | |
899 return -1; | |
900 } | |
901 } | |
902 | |
903 _recording = true; | |
904 | |
905 return 0; | |
906 } | |
907 | |
908 int32_t AudioDeviceIOS::StopRecording() { | |
909 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
910 | |
911 CriticalSectionScoped lock(&_critSect); | |
912 | |
913 if (!_recIsInitialized) { | |
914 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
915 " Recording is not initialized"); | |
916 return 0; | |
917 } | |
918 | |
919 _recording = false; | |
920 | |
921 if (!_playing) { | |
922 // Both playout and recording has stopped, shutdown the device | |
923 ShutdownPlayOrRecord(); | |
924 } | |
925 | |
926 _recIsInitialized = false; | |
927 _micIsInitialized = false; | |
928 | |
929 return 0; | |
930 } | |
931 | |
932 bool AudioDeviceIOS::Recording() const { | |
933 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
934 return (_recording); | |
935 } | |
936 | |
937 int32_t AudioDeviceIOS::StartPlayout() { | |
938 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
939 | |
940 // This lock is (among other things) needed to avoid concurrency issues | |
941 // with capture thread | |
942 // shutting down Audio Unit | |
943 CriticalSectionScoped lock(&_critSect); | |
944 | |
945 if (!_playIsInitialized) { | |
946 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
947 " Playout not initialized"); | |
948 return -1; | |
949 } | |
950 | |
951 if (_playing) { | |
952 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
953 " Playing already started"); | |
954 return 0; | |
955 } | |
956 | |
957 // Reset playout buffer | |
958 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); | |
959 _playoutBufferUsed = 0; | |
960 _playoutDelay = 0; | |
961 // Make sure first call to update delay function will update delay | |
962 _playoutDelayMeasurementCounter = 9999; | |
963 _playWarning = 0; | |
964 _playError = 0; | |
965 | |
966 if (!_recording) { | |
967 // Start Audio Unit | |
968 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
969 " Starting Audio Unit"); | |
970 OSStatus result = AudioOutputUnitStart(_auVoiceProcessing); | |
971 if (0 != result) { | |
972 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, | |
973 " Error starting Audio Unit (result=%d)", result); | |
974 return -1; | |
975 } | |
976 } | |
977 | |
978 _playing = true; | |
979 | |
980 return 0; | |
981 } | |
982 | |
983 int32_t AudioDeviceIOS::StopPlayout() { | |
984 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
985 | |
986 CriticalSectionScoped lock(&_critSect); | |
987 | |
988 if (!_playIsInitialized) { | |
989 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
990 " Playout is not initialized"); | |
991 return 0; | |
992 } | |
993 | |
994 _playing = false; | |
995 | |
996 if (!_recording) { | |
997 // Both playout and recording has stopped, signal shutdown the device | |
998 ShutdownPlayOrRecord(); | |
999 } | |
1000 | |
1001 _playIsInitialized = false; | |
1002 _speakerIsInitialized = false; | |
1003 | |
1004 return 0; | |
1005 } | |
1006 | |
1007 bool AudioDeviceIOS::Playing() const { | |
1008 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | |
1009 "%s", __FUNCTION__); | |
1010 return (_playing); | |
1011 } | |
1012 | |
1013 // ---------------------------------------------------------------------------- | |
1014 // ResetAudioDevice | |
1015 // | |
1016 // Disable playout and recording, signal to capture thread to shutdown, | |
1017 // and set enable states after shutdown to same as current. | |
1018 // In capture thread audio device will be shutdown, then started again. | |
1019 // ---------------------------------------------------------------------------- | |
1020 int32_t AudioDeviceIOS::ResetAudioDevice() { | |
1021 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
1022 | |
1023 CriticalSectionScoped lock(&_critSect); | |
1024 | |
1025 if (!_playIsInitialized && !_recIsInitialized) { | |
1026 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1027 " Playout or recording not initialized, doing nothing"); | |
1028 return 0; // Nothing to reset | |
1029 } | |
1030 | |
1031 // Store the states we have before stopping to restart below | |
1032 bool initPlay = _playIsInitialized; | |
1033 bool play = _playing; | |
1034 bool initRec = _recIsInitialized; | |
1035 bool rec = _recording; | |
1036 | |
1037 int res(0); | |
1038 | |
1039 // Stop playout and recording | |
1040 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
1041 " Stopping playout and recording"); | |
1042 res += StopPlayout(); | |
1043 res += StopRecording(); | |
1044 | |
1045 // Restart | |
1046 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
1047 " Restarting playout and recording (%d, %d, %d, %d)", | |
1048 initPlay, play, initRec, rec); | |
1049 if (initPlay) res += InitPlayout(); | |
1050 if (initRec) res += InitRecording(); | |
1051 if (play) res += StartPlayout(); | |
1052 if (rec) res += StartRecording(); | |
1053 | |
1054 if (0 != res) { | |
1055 // Logging is done in init/start/stop calls above | |
1056 return -1; | |
1057 } | |
1058 | |
1059 return 0; | |
1060 } | 323 } |
1061 | 324 |
1062 int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const { | 325 int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const { |
1063 delayMS = _playoutDelay; | 326 delayMS = _playoutDelay; |
1064 return 0; | 327 return 0; |
1065 } | 328 } |
1066 | 329 |
1067 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const { | 330 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const { |
1068 delayMS = _recordingDelay; | 331 delayMS = _recordingDelay; |
1069 return 0; | 332 return 0; |
1070 } | 333 } |
1071 | 334 |
1072 int32_t | 335 int32_t AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type, |
1073 AudioDeviceIOS::SetPlayoutBuffer(const AudioDeviceModule::BufferType type, | 336 uint16_t& sizeMS) const { |
1074 uint16_t sizeMS) { | 337 type = AudioDeviceModule::kAdaptiveBufferSize; |
1075 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, | 338 sizeMS = _playoutDelay; |
1076 "AudioDeviceIOS::SetPlayoutBuffer(type=%u, sizeMS=%u)", | 339 return 0; |
1077 type, sizeMS); | 340 } |
1078 | 341 |
1079 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 342 int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const { |
1080 " API call not supported on this platform"); | 343 CHECK(playout_parameters_.is_valid()); |
1081 return -1; | 344 DCHECK(thread_checker_.CalledOnValidThread()); |
1082 } | 345 *params = playout_parameters_; |
1083 | 346 return 0; |
1084 int32_t | 347 } |
1085 AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type, | 348 |
1086 uint16_t& sizeMS) const { | 349 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const { |
1087 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | 350 CHECK(record_parameters_.is_valid()); |
1088 | 351 DCHECK(thread_checker_.CalledOnValidThread()); |
1089 type = AudioDeviceModule::kAdaptiveBufferSize; | 352 *params = record_parameters_; |
1090 | 353 return 0; |
1091 sizeMS = _playoutDelay; | |
1092 | |
1093 return 0; | |
1094 } | |
1095 | |
1096 int32_t AudioDeviceIOS::CPULoad(uint16_t& /*load*/) const { | |
1097 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
1098 | |
1099 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1100 " API call not supported on this platform"); | |
1101 return -1; | |
1102 } | |
1103 | |
1104 bool AudioDeviceIOS::PlayoutWarning() const { | |
1105 return (_playWarning > 0); | |
1106 } | |
1107 | |
1108 bool AudioDeviceIOS::PlayoutError() const { | |
1109 return (_playError > 0); | |
1110 } | |
1111 | |
1112 bool AudioDeviceIOS::RecordingWarning() const { | |
1113 return (_recWarning > 0); | |
1114 } | |
1115 | |
1116 bool AudioDeviceIOS::RecordingError() const { | |
1117 return (_recError > 0); | |
1118 } | |
1119 | |
1120 void AudioDeviceIOS::ClearPlayoutWarning() { | |
1121 _playWarning = 0; | |
1122 } | |
1123 | |
1124 void AudioDeviceIOS::ClearPlayoutError() { | |
1125 _playError = 0; | |
1126 } | |
1127 | |
1128 void AudioDeviceIOS::ClearRecordingWarning() { | |
1129 _recWarning = 0; | |
1130 } | |
1131 | |
1132 void AudioDeviceIOS::ClearRecordingError() { | |
1133 _recError = 0; | |
1134 } | 354 } |
1135 | 355 |
1136 // ============================================================================ | 356 // ============================================================================ |
1137 // Private Methods | 357 // Private Methods |
1138 // ============================================================================ | 358 // ============================================================================ |
1139 | 359 |
1140 int32_t AudioDeviceIOS::InitPlayOrRecord() { | 360 int32_t AudioDeviceIOS::InitPlayOrRecord() { |
 1141 WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__); | 361 LOG(LS_INFO) << TAG << "InitPlayOrRecord"; |
1142 | 362 DCHECK(!_auVoiceProcessing); |
1143 OSStatus result = -1; | 363 |
1144 | 364 OSStatus result = -1; |
1145 // Check if already initialized | 365 |
1146 if (NULL != _auVoiceProcessing) { | 366 // Create Voice Processing Audio Unit |
1147 // We already have initialized before and created any of the audio unit, | 367 AudioComponentDescription desc; |
1148 // check that all exist | 368 AudioComponent comp; |
1149 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 369 |
1150 " Already initialized"); | 370 desc.componentType = kAudioUnitType_Output; |
1151 // todo: Call AudioUnitReset() here and empty all buffers? | 371 desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO; |
1152 return 0; | 372 desc.componentManufacturer = kAudioUnitManufacturer_Apple; |
1153 } | 373 desc.componentFlags = 0; |
1154 | 374 desc.componentFlagsMask = 0; |
1155 // Create Voice Processing Audio Unit | 375 |
1156 AudioComponentDescription desc; | 376 comp = AudioComponentFindNext(nullptr, &desc); |
1157 AudioComponent comp; | 377 if (nullptr == comp) { |
1158 | 378 LOG_F(LS_ERROR) << "Could not find audio component for Audio Unit"; |
1159 desc.componentType = kAudioUnitType_Output; | 379 return -1; |
1160 desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO; | 380 } |
1161 desc.componentManufacturer = kAudioUnitManufacturer_Apple; | 381 |
1162 desc.componentFlags = 0; | 382 result = AudioComponentInstanceNew(comp, &_auVoiceProcessing); |
1163 desc.componentFlagsMask = 0; | 383 if (0 != result) { |
1164 | 384 LOG_F(LS_ERROR) << "Failed to create Audio Unit instance: " << result; |
1165 comp = AudioComponentFindNext(NULL, &desc); | 385 return -1; |
1166 if (NULL == comp) { | 386 } |
1167 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 387 |
1168 " Could not find audio component for Audio Unit"); | 388 // TODO(henrika): I think we should set the preferred channel configuration |
1169 return -1; | 389 // in both directions as well to be safe. |
1170 } | 390 |
1171 | 391 // Set preferred hardware sample rate to 16 kHz. |
1172 result = AudioComponentInstanceNew(comp, &_auVoiceProcessing); | 392 // TODO(henrika): improve this selection of sample rate. Why do we currently |
| 393 // use a hard coded value? How can we fail and still continue? |
| 394 NSError* error = nil; |
| 395 AVAudioSession* session = [AVAudioSession sharedInstance]; |
| 396 Float64 preferredSampleRate(playout_parameters_.sample_rate()); |
| 397 [session setPreferredSampleRate:preferredSampleRate error:&error]; |
| 398 if (error != nil) { |
| 399 const char* errorString = [[error localizedDescription] UTF8String]; |
| 400 LOG_F(LS_ERROR) << "setPreferredSampleRate failed: " << errorString; |
| 401 } |
| 402 |
| 403 error = nil; |
| 404 // Make the setMode:error: and setCategory:error: calls only if necessary. |
| 405 // Non-obviously, setting them to the value they already have will clear |
| 406 // transient properties (such as PortOverride) that some other component may |
| 407 // have set up. |
| 408 if (session.mode != AVAudioSessionModeVoiceChat) { |
| 409 [session setMode:AVAudioSessionModeVoiceChat error:&error]; |
| 410 if (error != nil) { |
| 411 const char* errorString = [[error localizedDescription] UTF8String]; |
| 412 LOG_F(LS_ERROR) << "setMode failed: " << errorString; |
| 413 } |
| 414 } |
| 415 |
| 416 error = nil; |
| 417 if (session.category != AVAudioSessionCategoryPlayAndRecord) { |
| 418 [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error]; |
| 419 if (error != nil) { |
| 420 const char* errorString = [[error localizedDescription] UTF8String]; |
| 421 LOG_F(LS_ERROR) << "setCategory failed: " << errorString; |
| 422 } |
| 423 } |
| 424 |
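 | // Enable input on bus 1 and output on bus 0 of the voice-processing unit. |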
| 425 UInt32 enableIO = 1; |
| 426 result = AudioUnitSetProperty(_auVoiceProcessing, |
| 427 kAudioOutputUnitProperty_EnableIO, |
| 428 kAudioUnitScope_Input, |
| 429 1, // input bus |
| 430 &enableIO, sizeof(enableIO)); |
| 431 if (0 != result) { |
| 432 LOG_F(LS_ERROR) << "Failed to enable IO on input: " << result; |
| 433 } |
| 434 |
| 435 result = AudioUnitSetProperty(_auVoiceProcessing, |
| 436 kAudioOutputUnitProperty_EnableIO, |
| 437 kAudioUnitScope_Output, |
| 438 0, // output bus |
| 439 &enableIO, sizeof(enableIO)); |
| 440 if (0 != result) { |
| 441 LOG_F(LS_ERROR) << "Failed to enable IO on output: " << result; |
| 442 } |
| 443 |
 | 444 // Disable AU buffer allocation for the recorder; we allocate our own. |
| 445 // TODO(henrika): understand this part better. |
| 446 UInt32 flag = 0; |
| 447 result = AudioUnitSetProperty(_auVoiceProcessing, |
| 448 kAudioUnitProperty_ShouldAllocateBuffer, |
| 449 kAudioUnitScope_Output, 1, &flag, sizeof(flag)); |
| 450 if (0 != result) { |
| 451 LOG_F(LS_WARNING) << "Failed to disable AU buffer allocation: " << result; |
| 452 // Should work anyway |
| 453 } |
| 454 |
| 455 // Set recording callback. |
| 456 AURenderCallbackStruct auCbS; |
| 457 memset(&auCbS, 0, sizeof(auCbS)); |
| 458 auCbS.inputProc = RecordProcess; |
| 459 auCbS.inputProcRefCon = this; |
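 | // |this| is passed back to the static callback via the refCon argument. |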
| 460 result = AudioUnitSetProperty( |
| 461 _auVoiceProcessing, kAudioOutputUnitProperty_SetInputCallback, |
| 462 kAudioUnitScope_Global, 1, &auCbS, sizeof(auCbS)); |
| 463 if (0 != result) { |
| 464 LOG_F(LS_ERROR) << "Failed to set AU record callback: " << result; |
| 465 } |
| 466 |
| 467 // Set playout callback. |
| 468 memset(&auCbS, 0, sizeof(auCbS)); |
| 469 auCbS.inputProc = PlayoutProcess; |
| 470 auCbS.inputProcRefCon = this; |
| 471 result = AudioUnitSetProperty( |
| 472 _auVoiceProcessing, kAudioUnitProperty_SetRenderCallback, |
| 473 kAudioUnitScope_Global, 0, &auCbS, sizeof(auCbS)); |
| 474 if (0 != result) { |
| 475 LOG_F(LS_ERROR) << "Failed to set AU output callback: " << result; |
| 476 } |
| 477 |
 | 478 // Get stream format for out/0. |
| 479 AudioStreamBasicDescription playoutDesc; |
| 480 UInt32 size = sizeof(playoutDesc); |
| 481 result = |
| 482 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat, |
| 483 kAudioUnitScope_Output, 0, &playoutDesc, &size); |
| 484 if (0 != result) { |
| 485 LOG_F(LS_ERROR) << "Failed to get AU output stream format: " << result; |
| 486 } |
| 487 |
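 | // Use the preferred rate; the modified format is applied further below. |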
| 488 playoutDesc.mSampleRate = preferredSampleRate; |
| 489 LOG_F(LS_INFO) << "Audio Unit playout opened in sampling rate: " |
| 490 << playoutDesc.mSampleRate; |
| 491 |
 | 492 // Store the sampling frequency to use for the Audio Device Buffer. |
 | 493 // TODO: Add 48 kHz (increase buffer sizes). Other sample rates? |
| 494 // TODO(henrika): Figure out if we really need this complex handling. |
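 | // The reported rate may deviate slightly from the nominal value, hence |
 | // the +/- 10 Hz match windows. |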
| 495 if ((playoutDesc.mSampleRate > 44090.0) && |
| 496 (playoutDesc.mSampleRate < 44110.0)) { |
| 497 _adbSampFreq = 44100; |
| 498 } else if ((playoutDesc.mSampleRate > 15990.0) && |
| 499 (playoutDesc.mSampleRate < 16010.0)) { |
| 500 _adbSampFreq = 16000; |
| 501 } else if ((playoutDesc.mSampleRate > 7990.0) && |
| 502 (playoutDesc.mSampleRate < 8010.0)) { |
| 503 _adbSampFreq = 8000; |
| 504 } else { |
| 505 _adbSampFreq = 0; |
| 506 FATAL() << "Invalid sample rate"; |
| 507 } |
| 508 |
| 509 // Set the audio device buffer sampling rates (use same for play and record). |
| 510 // TODO(henrika): this is not a good place to set these things up. |
| 511 DCHECK(audio_device_buffer_); |
| 512 DCHECK_EQ(_adbSampFreq, playout_parameters_.sample_rate()); |
| 513 audio_device_buffer_->SetRecordingSampleRate(_adbSampFreq); |
| 514 audio_device_buffer_->SetPlayoutSampleRate(_adbSampFreq); |
| 515 |
| 516 // Set stream format for in/0 (use same sampling frequency as for out/0).
| 517 playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | |
| 518 kLinearPCMFormatFlagIsPacked | |
| 519 kLinearPCMFormatFlagIsNonInterleaved; |
| 520 playoutDesc.mBytesPerPacket = 2; |
| 521 playoutDesc.mFramesPerPacket = 1; |
| 522 playoutDesc.mBytesPerFrame = 2; |
| 523 playoutDesc.mChannelsPerFrame = 1; |
| 524 playoutDesc.mBitsPerChannel = 16; |
| 525 result = |
| 526 AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat, |
| 527 kAudioUnitScope_Input, 0, &playoutDesc, size); |
| 528 if (0 != result) { |
| 529 LOG_F(LS_ERROR) << "Failed to set AU stream format for in/0: " << result;
| 530 } |
| 531 |
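The ASBD fields above are not independent; for packed, non-interleaved 16-bit mono PCM they all derive from the bit depth. A hedged helper sketch (hypothetical, not part of this CL):

    static void FillMono16LinearPCM(AudioStreamBasicDescription* d,
                                    Float64 fs) {
      d->mSampleRate = fs;
      d->mFormatID = kAudioFormatLinearPCM;
      d->mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
                        kLinearPCMFormatFlagIsPacked |
                        kLinearPCMFormatFlagIsNonInterleaved;
      d->mBitsPerChannel = 16;
      d->mChannelsPerFrame = 1;
      d->mBytesPerFrame = 2;    // 16 bits / 8; one channel per buffer
      d->mFramesPerPacket = 1;  // always 1 for uncompressed PCM
      d->mBytesPerPacket = 2;   // mBytesPerFrame * mFramesPerPacket
      d->mReserved = 0;
    }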
| 532 // Get stream format for in/1. |
| 533 AudioStreamBasicDescription recordingDesc; |
| 534 size = sizeof(recordingDesc); |
| 535 result = |
| 536 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat, |
| 537 kAudioUnitScope_Input, 1, &recordingDesc, &size); |
| 538 if (0 != result) { |
| 539 LOG_F(LS_ERROR) << "Failed to get AU stream format for in/1: " << result;
| 540 } |
| 541 |
| 542 LOG_F(LS_INFO) << "Audio Unit recording opened in sampling rate: "
| 543 << recordingDesc.mSampleRate;
| 544 recordingDesc.mSampleRate = preferredSampleRate;
| 545 |
| 546 // Set stream format for out/1 (use same sampling frequency as for in/1). |
| 547 recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | |
| 548 kLinearPCMFormatFlagIsPacked | |
| 549 kLinearPCMFormatFlagIsNonInterleaved; |
| 550 recordingDesc.mBytesPerPacket = 2; |
| 551 recordingDesc.mFramesPerPacket = 1; |
| 552 recordingDesc.mBytesPerFrame = 2; |
| 553 recordingDesc.mChannelsPerFrame = 1; |
| 554 recordingDesc.mBitsPerChannel = 16; |
| 555 result = |
| 556 AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat, |
| 557 kAudioUnitScope_Output, 1, &recordingDesc, size); |
| 558 if (0 != result) { |
| 559 LOG_F(LS_ERROR) << "Failed to set AU stream format for out/1: " << result;
| 560 } |
| 561 |
| 562 // Initialize here already to be able to get/set stream properties. |
| 563 result = AudioUnitInitialize(_auVoiceProcessing); |
| 564 if (0 != result) { |
| 565 LOG_F(LS_ERROR) << "AudioUnitInitialize failed: " << result; |
| 566 } |
| 567 |
| 568 // Get hardware sample rate for logging (see if we get what we asked for). |
| 569 // TODO(henrika): what if we don't get what we ask for? |
| 570 double sampleRate = session.sampleRate; |
| 571 LOG_F(LS_INFO) << "Current HW sample rate is: " << sampleRate |
| 572 << ", ADB sample rate is: " << _adbSampFreq; |
| 573 |
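One possible answer to the TODO above, sketched (threshold hypothetical): compare what the session reports against what was requested and at least warn on a mismatch; proper resampling support would be the full fix.

    if (fabs(session.sampleRate - preferredSampleRate) > 1.0) {
      LOG_F(LS_WARNING) << "HW sample rate " << session.sampleRate
                        << " differs from preferred " << preferredSampleRate;
    }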
| 574 // Listen to audio interruptions. |
| 575 // TODO(henrika): learn this area better. |
| 576 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; |
| 577 id observer = [center |
| 578 addObserverForName:AVAudioSessionInterruptionNotification |
| 579 object:nil |
| 580 queue:[NSOperationQueue mainQueue] |
| 581 usingBlock:^(NSNotification* notification) { |
| 582 NSNumber* typeNumber = |
| 583 [notification userInfo][AVAudioSessionInterruptionTypeKey]; |
| 584 AVAudioSessionInterruptionType type = |
| 585 (AVAudioSessionInterruptionType)[typeNumber |
| 586 unsignedIntegerValue]; |
| 587 switch (type) { |
| 588 case AVAudioSessionInterruptionTypeBegan: |
| 589 // At this point our audio session has been deactivated and the
| 590 // audio unit render callbacks no longer occur. Nothing to do;
| 591 // the AVAudioSessionInterruptionTypeEnded case below restarts
| 592 // the unit once the interruption is over.
| 593 break; |
| 594 case AVAudioSessionInterruptionTypeEnded: { |
| 595 NSError* error = nil; |
| 596 AVAudioSession* session = [AVAudioSession sharedInstance]; |
| 597 [session setActive:YES error:&error]; |
| 598 if (error != nil) { |
| 599 LOG_F(LS_ERROR) << "Failed to activate audio session";
| 600 } |
| 601 // Post interruption the audio unit render callbacks don't |
| 602 // automatically continue, so we restart the unit manually |
| 603 // here. |
| 604 AudioOutputUnitStop(_auVoiceProcessing); |
| 605 AudioOutputUnitStart(_auVoiceProcessing); |
| 606 break; |
| 607 } |
| 608 } |
| 609 }]; |
| 610 // Increment refcount on observer using ARC bridge. Instance variable is a |
| 611 // void* instead of an id because header is included in other pure C++ |
| 612 // files. |
| 613 _audioInterruptionObserver = (__bridge_retained void*)observer; |
| 614 |
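The ownership round-trip, condensed into one sketch: __bridge_retained moves the observer out of ARC with a +1 retain so it can sit in a void* member, and the matching __bridge_transfer in ShutdownPlayOrRecord() moves it back, letting ARC release it at end of scope.

    void* stored = (__bridge_retained void*)observer;  // ARC -> manual (+1)
    // ... lifetime of the audio unit ...
    id back = (__bridge_transfer id)stored;            // manual -> ARC
    // [center removeObserver:back]; ARC releases |back| when it leaves
    // scope, balancing the retain above.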
| 615 // Activate audio session. |
| 616 error = nil; |
| 617 [session setActive:YES error:&error]; |
| 618 if (error != nil) { |
| 619 LOG_F(LS_ERROR) << "Failed to activate audio session";
| 620 } |
| 621 |
| 622 return 0; |
| 623 } |
| 624 |
| 625 int32_t AudioDeviceIOS::ShutdownPlayOrRecord() { |
| 626 LOG_F(LS_INFO) << TAG << "ShutdownPlayOrRecord"; |
| 627 |
| 628 if (_audioInterruptionObserver != nullptr) { |
| 629 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; |
| 630 // Transfer ownership of observer back to ARC, which will dealloc the |
| 631 // observer once it exits this scope. |
| 632 id observer = (__bridge_transfer id)_audioInterruptionObserver; |
| 633 [center removeObserver:observer]; |
| 634 _audioInterruptionObserver = nullptr; |
| 635 } |
| 636 |
| 637 // Close and delete AU. |
| 638 OSStatus result = -1; |
| 639 if (nullptr != _auVoiceProcessing) { |
| 640 result = AudioOutputUnitStop(_auVoiceProcessing); |
1173 if (0 != result) { | 641 if (0 != result) { |
1174 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 642 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; |
1175 " Could not create Audio Unit instance (result=%d)", | 643 } |
1176 result); | 644 result = AudioComponentInstanceDispose(_auVoiceProcessing); |
1177 return -1; | |
1178 } | |
1179 | |
1180 // Set preferred hardware sample rate to 16 kHz | |
1181 NSError* error = nil; | |
1182 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
1183 Float64 preferredSampleRate(16000.0); | |
1184 [session setPreferredSampleRate:preferredSampleRate | |
1185 error:&error]; | |
1186 if (error != nil) { | |
1187 const char* errorString = [[error localizedDescription] UTF8String]; | |
1188 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1189 "Could not set preferred sample rate: %s", errorString); | |
1190 } | |
1191 error = nil; | |
1192 // Make the setMode:error: and setCategory:error: calls only if necessary. | |
1193 // Non-obviously, setting them to the value they already have will clear | |
1194 // transient properties (such as PortOverride) that some other component may | |
1195 // have set up. | |
1196 if (session.mode != AVAudioSessionModeVoiceChat) { | |
1197 [session setMode:AVAudioSessionModeVoiceChat error:&error]; | |
1198 if (error != nil) { | |
1199 const char* errorString = [[error localizedDescription] UTF8String]; | |
1200 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1201 "Could not set mode: %s", errorString); | |
1202 } | |
1203 } | |
1204 error = nil; | |
1205 if (session.category != AVAudioSessionCategoryPlayAndRecord) { | |
1206 [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error]; | |
1207 if (error != nil) { | |
1208 const char* errorString = [[error localizedDescription] UTF8String]; | |
1209 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1210 "Could not set category: %s", errorString); | |
1211 } | |
1212 } | |
1213 | |
1214 ////////////////////// | |
1215 // Setup Voice Processing Audio Unit | |
1216 | |
1217 // Note: For Signal Processing AU element 0 is output bus, element 1 is | |
1218 // input bus for global scope element is irrelevant (always use | |
1219 // element 0) | |
1220 | |
1221 // Enable IO on both elements | |
1222 | |
1223 // todo: Below we just log and continue upon error. We might want | |
1224 // to close AU and return error for some cases. | |
1225 // todo: Log info about setup. | |
1226 | |
1227 UInt32 enableIO = 1; | |
1228 result = AudioUnitSetProperty(_auVoiceProcessing, | |
1229 kAudioOutputUnitProperty_EnableIO, | |
1230 kAudioUnitScope_Input, | |
1231 1, // input bus | |
1232 &enableIO, | |
1233 sizeof(enableIO)); | |
1234 if (0 != result) { | 645 if (0 != result) { |
1235 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 646 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; |
1236 " Could not enable IO on input (result=%d)", result); | 647 } |
1237 } | 648 _auVoiceProcessing = nullptr; |
1238 | 649 } |
1239 result = AudioUnitSetProperty(_auVoiceProcessing, | 650 |
1240 kAudioOutputUnitProperty_EnableIO, | 651 return 0; |
1241 kAudioUnitScope_Output, | |
1242 0, // output bus | |
1243 &enableIO, | |
1244 sizeof(enableIO)); | |
1245 if (0 != result) { | |
1246 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1247 " Could not enable IO on output (result=%d)", result); | |
1248 } | |
1249 | |
1250 // Disable AU buffer allocation for the recorder, we allocate our own | |
1251 UInt32 flag = 0; | |
1252 result = AudioUnitSetProperty( | |
1253 _auVoiceProcessing, kAudioUnitProperty_ShouldAllocateBuffer, | |
1254 kAudioUnitScope_Output, 1, &flag, sizeof(flag)); | |
1255 if (0 != result) { | |
1256 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1257 " Could not disable AU buffer allocation (result=%d)", | |
1258 result); | |
1259 // Should work anyway | |
1260 } | |
1261 | |
1262 // Set recording callback | |
1263 AURenderCallbackStruct auCbS; | |
1264 memset(&auCbS, 0, sizeof(auCbS)); | |
1265 auCbS.inputProc = RecordProcess; | |
1266 auCbS.inputProcRefCon = this; | |
1267 result = AudioUnitSetProperty(_auVoiceProcessing, | |
1268 kAudioOutputUnitProperty_SetInputCallback, | |
1269 kAudioUnitScope_Global, 1, | |
1270 &auCbS, sizeof(auCbS)); | |
1271 if (0 != result) { | |
1272 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1273 " Could not set record callback for Audio Unit (result=%d)", | |
1274 result); | |
1275 } | |
1276 | |
1277 // Set playout callback | |
1278 memset(&auCbS, 0, sizeof(auCbS)); | |
1279 auCbS.inputProc = PlayoutProcess; | |
1280 auCbS.inputProcRefCon = this; | |
1281 result = AudioUnitSetProperty(_auVoiceProcessing, | |
1282 kAudioUnitProperty_SetRenderCallback, | |
1283 kAudioUnitScope_Global, 0, | |
1284 &auCbS, sizeof(auCbS)); | |
1285 if (0 != result) { | |
1286 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1287 " Could not set play callback for Audio Unit (result=%d)", | |
1288 result); | |
1289 } | |
1290 | |
1291 // Get stream format for out/0 | |
1292 AudioStreamBasicDescription playoutDesc; | |
1293 UInt32 size = sizeof(playoutDesc); | |
1294 result = AudioUnitGetProperty(_auVoiceProcessing, | |
1295 kAudioUnitProperty_StreamFormat, | |
1296 kAudioUnitScope_Output, 0, &playoutDesc, | |
1297 &size); | |
1298 if (0 != result) { | |
1299 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1300 " Could not get stream format Audio Unit out/0 (result=%d)", | |
1301 result); | |
1302 } | |
1303 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1304 " Audio Unit playout opened in sampling rate %f", | |
1305 playoutDesc.mSampleRate); | |
1306 | |
1307 playoutDesc.mSampleRate = preferredSampleRate; | |
1308 | |
1309 // Store the sampling frequency to use towards the Audio Device Buffer | |
1310 // todo: Add 48 kHz (increase buffer sizes). Other fs? | |
1311 if ((playoutDesc.mSampleRate > 44090.0) | |
1312 && (playoutDesc.mSampleRate < 44110.0)) { | |
1313 _adbSampFreq = 44100; | |
1314 } else if ((playoutDesc.mSampleRate > 15990.0) | |
1315 && (playoutDesc.mSampleRate < 16010.0)) { | |
1316 _adbSampFreq = 16000; | |
1317 } else if ((playoutDesc.mSampleRate > 7990.0) | |
1318 && (playoutDesc.mSampleRate < 8010.0)) { | |
1319 _adbSampFreq = 8000; | |
1320 } else { | |
1321 _adbSampFreq = 0; | |
1322 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1323 " Audio Unit out/0 opened in unknown sampling rate (%f)", | |
1324 playoutDesc.mSampleRate); | |
1325 // todo: We should bail out here. | |
1326 } | |
1327 | |
1328 // Set the audio device buffer sampling rate, | |
1329 // we assume we get the same for play and record | |
1330 if (_ptrAudioBuffer->SetRecordingSampleRate(_adbSampFreq) < 0) { | |
1331 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1332 " Could not set audio device buffer recording sampling rate (%d)", | |
1333 _adbSampFreq); | |
1334 } | |
1335 | |
1336 if (_ptrAudioBuffer->SetPlayoutSampleRate(_adbSampFreq) < 0) { | |
1337 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1338 " Could not set audio device buffer playout sampling rate (%d)", | |
1339 _adbSampFreq); | |
1340 } | |
1341 | |
1342 // Set stream format for in/0 (use same sampling frequency as for out/0) | |
1343 playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | |
1344 | kLinearPCMFormatFlagIsPacked | |
1345 | kLinearPCMFormatFlagIsNonInterleaved; | |
1346 playoutDesc.mBytesPerPacket = 2; | |
1347 playoutDesc.mFramesPerPacket = 1; | |
1348 playoutDesc.mBytesPerFrame = 2; | |
1349 playoutDesc.mChannelsPerFrame = 1; | |
1350 playoutDesc.mBitsPerChannel = 16; | |
1351 result = AudioUnitSetProperty(_auVoiceProcessing, | |
1352 kAudioUnitProperty_StreamFormat, | |
1353 kAudioUnitScope_Input, 0, &playoutDesc, size); | |
1354 if (0 != result) { | |
1355 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1356 " Could not set stream format Audio Unit in/0 (result=%d)", | |
1357 result); | |
1358 } | |
1359 | |
1360 // Get stream format for in/1 | |
1361 AudioStreamBasicDescription recordingDesc; | |
1362 size = sizeof(recordingDesc); | |
1363 result = AudioUnitGetProperty(_auVoiceProcessing, | |
1364 kAudioUnitProperty_StreamFormat, | |
1365 kAudioUnitScope_Input, 1, &recordingDesc, | |
1366 &size); | |
1367 if (0 != result) { | |
1368 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1369 " Could not get stream format Audio Unit in/1 (result=%d)", | |
1370 result); | |
1371 } | |
1372 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1373 " Audio Unit recording opened in sampling rate %f", | |
1374 recordingDesc.mSampleRate); | |
1375 | |
1376 recordingDesc.mSampleRate = preferredSampleRate; | |
1377 | |
1378 // Set stream format for out/1 (use same sampling frequency as for in/1) | |
1379 recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | |
1380 | kLinearPCMFormatFlagIsPacked | |
1381 | kLinearPCMFormatFlagIsNonInterleaved; | |
1382 | |
1383 recordingDesc.mBytesPerPacket = 2; | |
1384 recordingDesc.mFramesPerPacket = 1; | |
1385 recordingDesc.mBytesPerFrame = 2; | |
1386 recordingDesc.mChannelsPerFrame = 1; | |
1387 recordingDesc.mBitsPerChannel = 16; | |
1388 result = AudioUnitSetProperty(_auVoiceProcessing, | |
1389 kAudioUnitProperty_StreamFormat, | |
1390 kAudioUnitScope_Output, 1, &recordingDesc, | |
1391 size); | |
1392 if (0 != result) { | |
1393 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1394 " Could not set stream format Audio Unit out/1 (result=%d)", | |
1395 result); | |
1396 } | |
1397 | |
1398 // Initialize here already to be able to get/set stream properties. | |
1399 result = AudioUnitInitialize(_auVoiceProcessing); | |
1400 if (0 != result) { | |
1401 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1402 " Could not init Audio Unit (result=%d)", result); | |
1403 } | |
1404 | |
1405 // Get hardware sample rate for logging (see if we get what we asked for) | |
1406 double sampleRate = session.sampleRate; | |
1407 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
1408 " Current HW sample rate is %f, ADB sample rate is %d", | |
1409 sampleRate, _adbSampFreq); | |
1410 | |
1411 // Listen to audio interruptions. | |
1412 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; | |
1413 id observer = | |
1414 [center addObserverForName:AVAudioSessionInterruptionNotification | |
1415 object:nil | |
1416 queue:[NSOperationQueue mainQueue] | |
1417 usingBlock:^(NSNotification* notification) { | |
1418 NSNumber* typeNumber = | |
1419 [notification userInfo][AVAudioSessionInterruptionTypeKey]; | |
1420 AVAudioSessionInterruptionType type = | |
1421 (AVAudioSessionInterruptionType)[typeNumber unsignedIntegerValue]; | |
1422 switch (type) { | |
1423 case AVAudioSessionInterruptionTypeBegan: | |
1424 // At this point our audio session has been deactivated and the | |
1425 // audio unit render callbacks no longer occur. Nothing to do. | |
1426 break; | |
1427 case AVAudioSessionInterruptionTypeEnded: { | |
1428 NSError* error = nil; | |
1429 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
1430 [session setActive:YES | |
1431 error:&error]; | |
1432 if (error != nil) { | |
1433 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1434 "Error activating audio session"); | |
1435 } | |
1436 // Post interruption the audio unit render callbacks don't | |
1437 // automatically continue, so we restart the unit manually here. | |
1438 AudioOutputUnitStop(_auVoiceProcessing); | |
1439 AudioOutputUnitStart(_auVoiceProcessing); | |
1440 break; | |
1441 } | |
1442 } | |
1443 }]; | |
1444 // Increment refcount on observer using ARC bridge. Instance variable is a | |
1445 // void* instead of an id because header is included in other pure C++ | |
1446 // files. | |
1447 _audioInterruptionObserver = (__bridge_retained void*)observer; | |
1448 | |
1449 // Activate audio session. | |
1450 error = nil; | |
1451 [session setActive:YES | |
1452 error:&error]; | |
1453 if (error != nil) { | |
1454 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1455 "Error activating audio session"); | |
1456 } | |
1457 | |
1458 return 0; | |
1459 } | |
1460 | |
1461 int32_t AudioDeviceIOS::ShutdownPlayOrRecord() { | |
1462 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__); | |
1463 | |
1464 if (_audioInterruptionObserver != NULL) { | |
1465 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; | |
1466 // Transfer ownership of observer back to ARC, which will dealloc the | |
1467 // observer once it exits this scope. | |
1468 id observer = (__bridge_transfer id)_audioInterruptionObserver; | |
1469 [center removeObserver:observer]; | |
1470 _audioInterruptionObserver = NULL; | |
1471 } | |
1472 | |
1473 // Close and delete AU | |
1474 OSStatus result = -1; | |
1475 if (NULL != _auVoiceProcessing) { | |
1476 result = AudioOutputUnitStop(_auVoiceProcessing); | |
1477 if (0 != result) { | |
1478 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1479 " Error stopping Audio Unit (result=%d)", result); | |
1480 } | |
1481 result = AudioComponentInstanceDispose(_auVoiceProcessing); | |
1482 if (0 != result) { | |
1483 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1484 " Error disposing Audio Unit (result=%d)", result); | |
1485 } | |
1486 _auVoiceProcessing = NULL; | |
1487 } | |
1488 | |
1489 return 0; | |
1490 } | 652 } |
1491 | 653 |
1492 // ============================================================================ | 654 // ============================================================================ |
1493 // Thread Methods | 655 // Thread Methods |
1494 // ============================================================================ | 656 // ============================================================================ |
1495 | 657 |
1496 OSStatus | 658 OSStatus AudioDeviceIOS::RecordProcess( |
1497 AudioDeviceIOS::RecordProcess(void *inRefCon, | 659 void* inRefCon, |
1498 AudioUnitRenderActionFlags *ioActionFlags, | 660 AudioUnitRenderActionFlags* ioActionFlags, |
1499 const AudioTimeStamp *inTimeStamp, | 661 const AudioTimeStamp* inTimeStamp, |
1500 UInt32 inBusNumber, | 662 UInt32 inBusNumber, |
1501 UInt32 inNumberFrames, | 663 UInt32 inNumberFrames, |
1502 AudioBufferList *ioData) { | 664 AudioBufferList* ioData) { |
1503 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon); | 665 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon); |
1504 | 666 return ptrThis->RecordProcessImpl(ioActionFlags, inTimeStamp, inBusNumber, |
1505 return ptrThis->RecordProcessImpl(ioActionFlags, | 667 inNumberFrames); |
1506 inTimeStamp, | 668 } |
1507 inBusNumber, | 669 |
1508 inNumberFrames); | 670 OSStatus AudioDeviceIOS::RecordProcessImpl( |
1509 } | 671 AudioUnitRenderActionFlags* ioActionFlags, |
1510 | 672 const AudioTimeStamp* inTimeStamp, |
1511 | 673 uint32_t inBusNumber, |
1512 OSStatus | 674 uint32_t inNumberFrames) { |
1513 AudioDeviceIOS::RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags, | 675 // Setup some basic stuff |
1514 const AudioTimeStamp *inTimeStamp, | 676 // Use temp buffer not to lock up recording buffer more than necessary |
1515 uint32_t inBusNumber, | 677 // todo: Make dataTmp a member variable with static size that holds |
1516 uint32_t inNumberFrames) { | 678 // max possible frames? |
1517 // Setup some basic stuff | 679 int16_t* dataTmp = new int16_t[inNumberFrames]; |
1518 // Use temp buffer not to lock up recording buffer more than necessary | 680 memset(dataTmp, 0, 2 * inNumberFrames); |
1519 // todo: Make dataTmp a member variable with static size that holds | 681 |
1520 // max possible frames? | 682 AudioBufferList abList; |
1521 int16_t* dataTmp = new int16_t[inNumberFrames]; | 683 abList.mNumberBuffers = 1; |
1522 memset(dataTmp, 0, 2*inNumberFrames); | 684 abList.mBuffers[0].mData = dataTmp; |
1523 | 685 abList.mBuffers[0].mDataByteSize = 2 * inNumberFrames; // 2 bytes/sample |
1524 AudioBufferList abList; | 686 abList.mBuffers[0].mNumberChannels = 1; |
1525 abList.mNumberBuffers = 1; | 687 |
1526 abList.mBuffers[0].mData = dataTmp; | 688 // Get data from mic |
1527 abList.mBuffers[0].mDataByteSize = 2*inNumberFrames; // 2 bytes/sample | 689 OSStatus res = AudioUnitRender(_auVoiceProcessing, ioActionFlags, inTimeStamp, |
1528 abList.mBuffers[0].mNumberChannels = 1; | 690 inBusNumber, inNumberFrames, &abList); |
1529 | 691 if (res != 0) { |
1530 // Get data from mic | 692 // TODO(henrika): improve error handling. |
1531 OSStatus res = AudioUnitRender(_auVoiceProcessing, | 693 delete[] dataTmp; |
1532 ioActionFlags, inTimeStamp, | 694 return 0; |
1533 inBusNumber, inNumberFrames, &abList); | 695 } |
1534 if (res != 0) { | 696 |
1535 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 697 if (_recording) { |
1536 " Error getting rec data, error = %d", res); | 698 // Insert all data in temp buffer into recording buffers |
1537 | 699 // There is zero or one buffer partially full at any given time, |
1538 if (_recWarning > 0) { | 700 // all others are full or empty |
1539 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 701 // Full means filled with noSamp10ms samples. |
1540 " Pending rec warning exists"); | 702 |
| 703 const unsigned int noSamp10ms = _adbSampFreq / 100; |
| 704 unsigned int dataPos = 0; |
| 705 uint16_t bufPos = 0; |
| 706 int16_t insertPos = -1; |
| 707 unsigned int nCopy = 0; // Number of samples to copy |
| 708 |
| 709 while (dataPos < inNumberFrames) { |
| 710 // Loop over all recording buffers or |
| 711 // until we find the partially full buffer |
| 712 // First choice is to insert into partially full buffer, |
| 713 // second choice is to insert into empty buffer |
| 714 bufPos = 0; |
| 715 insertPos = -1; |
| 716 nCopy = 0; |
| 717 while (bufPos < N_REC_BUFFERS) { |
| 718 if ((_recordingLength[bufPos] > 0) && |
| 719 (_recordingLength[bufPos] < noSamp10ms)) { |
| 720 // Found the partially full buffer |
| 721 insertPos = static_cast<int16_t>(bufPos); |
| 722 // Don't need to search more, quit loop |
| 723 bufPos = N_REC_BUFFERS; |
| 724 } else if ((-1 == insertPos) && (0 == _recordingLength[bufPos])) { |
| 725 // Found an empty buffer |
| 726 insertPos = static_cast<int16_t>(bufPos); |
1541 } | 727 } |
1542 _recWarning = 1; | 728 ++bufPos; |
1543 | 729 } |
1544 delete [] dataTmp; | 730 |
1545 return 0; | 731 // Insert data into buffer |
1546 } | 732 if (insertPos > -1) { |
1547 | 733 // We found a non-full buffer, copy data to it |
1548 if (_recording) { | 734 unsigned int dataToCopy = inNumberFrames - dataPos; |
1549 // Insert all data in temp buffer into recording buffers | 735 unsigned int currentRecLen = _recordingLength[insertPos]; |
1550 // There is zero or one buffer partially full at any given time, | 736 unsigned int roomInBuffer = noSamp10ms - currentRecLen; |
1551 // all others are full or empty | 737 nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer); |
1552 // Full means filled with noSamp10ms samples. | 738 |
1553 | 739 memcpy(&_recordingBuffer[insertPos][currentRecLen], &dataTmp[dataPos], |
1554 const unsigned int noSamp10ms = _adbSampFreq / 100; | 740 nCopy * sizeof(int16_t)); |
1555 unsigned int dataPos = 0; | 741 if (0 == currentRecLen) { |
1556 uint16_t bufPos = 0; | 742 _recordingSeqNumber[insertPos] = _recordingCurrentSeq; |
1557 int16_t insertPos = -1; | 743 ++_recordingCurrentSeq; |
1558 unsigned int nCopy = 0; // Number of samples to copy | |
1559 | |
1560 while (dataPos < inNumberFrames) { | |
1561 // Loop over all recording buffers or | |
1562 // until we find the partially full buffer | |
1563 // First choice is to insert into partially full buffer, | |
1564 // second choice is to insert into empty buffer | |
1565 bufPos = 0; | |
1566 insertPos = -1; | |
1567 nCopy = 0; | |
1568 while (bufPos < N_REC_BUFFERS) { | |
1569 if ((_recordingLength[bufPos] > 0) | |
1570 && (_recordingLength[bufPos] < noSamp10ms)) { | |
1571 // Found the partially full buffer | |
1572 insertPos = static_cast<int16_t>(bufPos); | |
1573 // Don't need to search more, quit loop | |
1574 bufPos = N_REC_BUFFERS; | |
1575 } else if ((-1 == insertPos) | |
1576 && (0 == _recordingLength[bufPos])) { | |
1577 // Found an empty buffer | |
1578 insertPos = static_cast<int16_t>(bufPos); | |
1579 } | |
1580 ++bufPos; | |
1581 } | |
1582 | |
1583 // Insert data into buffer | |
1584 if (insertPos > -1) { | |
1585 // We found a non-full buffer, copy data to it | |
1586 unsigned int dataToCopy = inNumberFrames - dataPos; | |
1587 unsigned int currentRecLen = _recordingLength[insertPos]; | |
1588 unsigned int roomInBuffer = noSamp10ms - currentRecLen; | |
1589 nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer); | |
1590 | |
1591 memcpy(&_recordingBuffer[insertPos][currentRecLen], | |
1592 &dataTmp[dataPos], nCopy*sizeof(int16_t)); | |
1593 if (0 == currentRecLen) { | |
1594 _recordingSeqNumber[insertPos] = _recordingCurrentSeq; | |
1595 ++_recordingCurrentSeq; | |
1596 } | |
1597 _recordingBufferTotalSize += nCopy; | |
1598 // Has to be done last to avoid interrupt problems | |
1599 // between threads | |
1600 _recordingLength[insertPos] += nCopy; | |
1601 dataPos += nCopy; | |
1602 } else { | |
1603 // Didn't find a non-full buffer | |
1604 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1605 " Could not insert into recording buffer"); | |
1606 if (_recWarning > 0) { | |
1607 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1608 " Pending rec warning exists"); | |
1609 } | |
1610 _recWarning = 1; | |
1611 dataPos = inNumberFrames; // Don't try to insert more | |
1612 } | |
1613 } | 744 } |
1614 } | 745 _recordingBufferTotalSize += nCopy; |
1615 | 746 // Has to be done last to avoid interrupt problems between threads. |
1616 delete [] dataTmp; | 747 _recordingLength[insertPos] += nCopy; |
1617 | 748 dataPos += nCopy; |
1618 return 0; | 749 } else { |
1619 } | 750 // Didn't find a non-full buffer |
1620 | 751 // TODO(henrika): improve error handling |
1621 OSStatus | 752 dataPos = inNumberFrames; // Don't try to insert more |
1622 AudioDeviceIOS::PlayoutProcess(void *inRefCon, | 753 } |
1623 AudioUnitRenderActionFlags *ioActionFlags, | 754 } |
1624 const AudioTimeStamp *inTimeStamp, | 755 } |
1625 UInt32 inBusNumber, | 756 delete[] dataTmp; |
1626 UInt32 inNumberFrames, | 757 return 0; |
1627 AudioBufferList *ioData) { | 758 } |
1628 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon); | 759 |
1629 | 760 OSStatus AudioDeviceIOS::PlayoutProcess( |
1630 return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData); | 761 void* inRefCon, |
1631 } | 762 AudioUnitRenderActionFlags* ioActionFlags, |
1632 | 763 const AudioTimeStamp* inTimeStamp, |
1633 OSStatus | 764 UInt32 inBusNumber, |
1634 AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames, | 765 UInt32 inNumberFrames, |
1635 AudioBufferList *ioData) { | 766 AudioBufferList* ioData) { |
1636 // Setup some basic stuff | 767 AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon); |
1637 // assert(sizeof(short) == 2); // Assumption for implementation | 768 return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData); |
1638 | 769 } |
1639 int16_t* data = | 770 |
1640 static_cast<int16_t*>(ioData->mBuffers[0].mData); | 771 OSStatus AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames, |
1641 unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize; | 772 AudioBufferList* ioData) { |
1642 unsigned int dataSize = dataSizeBytes/2; // Number of samples | 773 int16_t* data = static_cast<int16_t*>(ioData->mBuffers[0].mData); |
1643 if (dataSize != inNumberFrames) { // Should always be the same | 774 unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize; |
1644 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 775 unsigned int dataSize = dataSizeBytes / 2; // Number of samples |
1645 "dataSize (%u) != inNumberFrames (%u)", | 776 CHECK_EQ(dataSize, inNumberFrames); |
1646 dataSize, (unsigned int)inNumberFrames); | 777 memset(data, 0, dataSizeBytes); // Start with empty buffer |
1647 if (_playWarning > 0) { | 778 |
1648 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 779 // Get playout data from Audio Device Buffer |
1649 " Pending play warning exists"); | 780 |
| 781 if (_playing) { |
| 782 unsigned int noSamp10ms = _adbSampFreq / 100; |
| 783 // todo: Member variable and allocate when samp freq is determined |
| 784 int16_t* dataTmp = new int16_t[noSamp10ms]; |
| 785 memset(dataTmp, 0, 2 * noSamp10ms); |
| 786 unsigned int dataPos = 0; |
| 787 int noSamplesOut = 0; |
| 788 unsigned int nCopy = 0; |
| 789 |
| 790 // First insert data from playout buffer if any |
| 791 if (_playoutBufferUsed > 0) { |
| 792 nCopy = (dataSize < _playoutBufferUsed) ? dataSize : _playoutBufferUsed; |
| 793 DCHECK_EQ(nCopy, _playoutBufferUsed); |
| 794 memcpy(data, _playoutBuffer, 2 * nCopy); |
| 795 dataPos = nCopy; |
| 796 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); |
| 797 _playoutBufferUsed = 0; |
| 798 } |
| 799 |
| 800 // Now get the rest from Audio Device Buffer. |
| 801 while (dataPos < dataSize) { |
| 802 // Update playout delay |
| 803 UpdatePlayoutDelay(); |
| 804 |
| 805 // Ask for new PCM data to be played out using the AudioDeviceBuffer |
| 806 noSamplesOut = audio_device_buffer_->RequestPlayoutData(noSamp10ms); |
| 807 |
| 808 // Get data from Audio Device Buffer |
| 809 noSamplesOut = audio_device_buffer_->GetPlayoutData( |
| 810 reinterpret_cast<int8_t*>(dataTmp)); |
| 811 CHECK_EQ(noSamp10ms, (unsigned int)noSamplesOut); |
| 812 |
| 813 // Insert as much as fits in data buffer |
| 814 nCopy = |
| 815 (dataSize - dataPos) > noSamp10ms ? noSamp10ms : (dataSize - dataPos); |
| 816 memcpy(&data[dataPos], dataTmp, 2 * nCopy); |
| 817 |
| 818 // Save rest in playout buffer if any |
| 819 if (nCopy < noSamp10ms) { |
| 820 memcpy(_playoutBuffer, &dataTmp[nCopy], 2 * (noSamp10ms - nCopy)); |
| 821 _playoutBufferUsed = noSamp10ms - nCopy; |
| 822 } |
| 823 |
| 824 // Update loop/index counter, if we copied less than noSamp10ms |
| 825 // samples we shall quit loop anyway |
| 826 dataPos += noSamp10ms; |
| 827 } |
| 828 delete[] dataTmp; |
| 829 } |
| 830 return 0; |
| 831 } |
| 832 |
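A worked pass through the chunking loop above, assuming _adbSampFreq == 16000 (so noSamp10ms == 160) and a 256-frame render request:

    // pass 1: no leftover -> pull one 10 ms block (160 samples), copy all
    //         of it, dataPos = 160
    // pass 2: pull 160 more, copy the remaining 96 (256 - 160) into |data|,
    //         stash the unused 64 samples in _playoutBuffer
    // next callback: the stashed 64 samples are copied out first, so no
    //         audio is dropped even though 256 is not a multiple of 160.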
| 833 // TODO(henrika): can either be removed or simplified. |
| 834 void AudioDeviceIOS::UpdatePlayoutDelay() { |
| 835 ++_playoutDelayMeasurementCounter; |
| 836 |
| 837 if (_playoutDelayMeasurementCounter >= 100) { |
| 838 // Update HW and OS delay every second, unlikely to change |
| 839 |
| 840 // Since this is eventually rounded to integral ms, add 0.5ms |
| 841 // here to get round-to-nearest-int behavior instead of |
| 842 // truncation. |
| 843 double totalDelaySeconds = 0.0005; |
| 844 |
| 845 // HW output latency |
| 846 AVAudioSession* session = [AVAudioSession sharedInstance]; |
| 847 double latency = session.outputLatency; |
| 848 assert(latency >= 0); |
| 849 totalDelaySeconds += latency; |
| 850 |
| 851 // HW buffer duration |
| 852 double ioBufferDuration = session.IOBufferDuration; |
| 853 assert(ioBufferDuration >= 0); |
| 854 totalDelaySeconds += ioBufferDuration; |
| 855 |
| 856 // AU latency |
| 857 Float64 f64(0); |
| 858 UInt32 size = sizeof(f64); |
| 859 OSStatus result = |
| 860 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency, |
| 861 kAudioUnitScope_Global, 0, &f64, &size); |
| 862 if (0 != result) { |
| 863 LOG_F(LS_ERROR) << "AU latency error: " << result; |
| 864 } |
| 865 assert(f64 >= 0); |
| 866 totalDelaySeconds += f64; |
| 867 |
| 868 // Convert seconds to milliseconds.
| 869 _playoutDelay = static_cast<uint32_t>(totalDelaySeconds * 1000);
| 870 |
| 871 // Reset counter |
| 872 _playoutDelayMeasurementCounter = 0; |
| 873 } |
| 874 |
| 875 // todo: Add playout buffer? |
| 876 } |
| 877 |
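The 0.5 ms bias plus truncation in numbers: with, say, outputLatency 0.012 s, IOBufferDuration 0.016 s and AU latency 0.0088 s, totalDelaySeconds = 0.0368 + 0.0005 = 0.0373 s; scaling to milliseconds gives 37.3, which truncates to 37, i.e. the true 36.8 ms rounded to the nearest integer.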
| 878 void AudioDeviceIOS::UpdateRecordingDelay() { |
| 879 ++_recordingDelayMeasurementCounter; |
| 880 |
| 881 if (_recordingDelayMeasurementCounter >= 100) { |
| 882 // Update HW and OS delay every second, unlikely to change |
| 883 |
| 884 // Since this is eventually rounded to integral ms, add 0.5ms |
| 885 // here to get round-to-nearest-int behavior instead of |
| 886 // truncation. |
| 887 double totalDelaySeconds = 0.0005; |
| 888 |
| 889 // HW input latency |
| 890 AVAudioSession* session = [AVAudioSession sharedInstance]; |
| 891 double latency = session.inputLatency; |
| 892 assert(latency >= 0); |
| 893 totalDelaySeconds += latency; |
| 894 |
| 895 // HW buffer duration |
| 896 double ioBufferDuration = session.IOBufferDuration; |
| 897 assert(ioBufferDuration >= 0); |
| 898 totalDelaySeconds += ioBufferDuration; |
| 899 |
| 900 // AU latency |
| 901 Float64 f64(0); |
| 902 UInt32 size = sizeof(f64); |
| 903 OSStatus result = |
| 904 AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency, |
| 905 kAudioUnitScope_Global, 0, &f64, &size); |
| 906 if (0 != result) { |
| 907 LOG_F(LS_ERROR) << "AU latency error: " << result; |
| 908 } |
| 909 assert(f64 >= 0); |
| 910 totalDelaySeconds += f64; |
| 911 |
| 912 // Convert seconds to milliseconds.
| 913 _recordingDelayHWAndOS = static_cast<uint32_t>(totalDelaySeconds * 1000);
| 914 |
| 915 // Reset counter |
| 916 _recordingDelayMeasurementCounter = 0; |
| 917 } |
| 918 |
| 919 _recordingDelay = _recordingDelayHWAndOS; |
| 920 |
| 921 // ADB recording buffer size, update every time |
| 922 // Don't count the one next 10 ms to be sent, then convert samples => ms |
| 923 const uint32_t noSamp10ms = _adbSampFreq / 100; |
| 924 if (_recordingBufferTotalSize > noSamp10ms) { |
| 925 _recordingDelay += |
| 926 (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000); |
| 927 } |
| 928 } |
| 929 |
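The backlog term in numbers, assuming _adbSampFreq == 16000: noSamp10ms == 160 samples and _adbSampFreq / 1000 == 16 samples per ms, so a backlog of 480 buffered samples adds (480 - 160) / 16 = 20 ms on top of the HW/OS figure.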
| 930 bool AudioDeviceIOS::RunCapture(void* ptrThis) { |
| 931 return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread(); |
| 932 } |
| 933 |
| 934 bool AudioDeviceIOS::CaptureWorkerThread() { |
| 935 if (_recording) { |
| 936 int bufPos = 0; |
| 937 unsigned int lowestSeq = 0; |
| 938 int lowestSeqBufPos = 0; |
| 939 bool foundBuf = true; |
| 940 const unsigned int noSamp10ms = _adbSampFreq / 100; |
| 941 |
| 942 while (foundBuf) { |
| 943 // Check if we have any buffer with data to insert |
| 944 // into the Audio Device Buffer, |
| 945 // and find the one with the lowest seq number |
| 946 foundBuf = false; |
| 947 for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) { |
| 948 if (noSamp10ms == _recordingLength[bufPos]) { |
| 949 if (!foundBuf) { |
| 950 lowestSeq = _recordingSeqNumber[bufPos]; |
| 951 lowestSeqBufPos = bufPos; |
| 952 foundBuf = true; |
| 953 } else if (_recordingSeqNumber[bufPos] < lowestSeq) { |
| 954 lowestSeq = _recordingSeqNumber[bufPos]; |
| 955 lowestSeqBufPos = bufPos; |
| 956 } |
1650 } | 957 } |
1651 _playWarning = 1; | 958 } |
1652 } | 959 |
1653 memset(data, 0, dataSizeBytes); // Start with empty buffer | 960 // Insert data into the Audio Device Buffer if found any |
1654 | 961 if (foundBuf) { |
1655 | 962 // Update recording delay |
1656 // Get playout data from Audio Device Buffer | 963 UpdateRecordingDelay(); |
1657 | 964 |
1658 if (_playing) { | 965 // Set the recorded buffer |
1659 unsigned int noSamp10ms = _adbSampFreq / 100; | 966 audio_device_buffer_->SetRecordedBuffer( |
1660 // todo: Member variable and allocate when samp freq is determined | 967 reinterpret_cast<int8_t*>(_recordingBuffer[lowestSeqBufPos]), |
1661 int16_t* dataTmp = new int16_t[noSamp10ms]; | 968 _recordingLength[lowestSeqBufPos]); |
1662 memset(dataTmp, 0, 2*noSamp10ms); | 969 |
1663 unsigned int dataPos = 0; | 970 // Don't need to set the current mic level in ADB since we only |
1664 int noSamplesOut = 0; | 971 // support digital AGC, |
1665 unsigned int nCopy = 0; | 972 // and besides we cannot get or set the IOS mic level anyway. |
1666 | 973 |
1667 // First insert data from playout buffer if any | 974 // Set VQE info, use clockdrift == 0 |
1668 if (_playoutBufferUsed > 0) { | 975 audio_device_buffer_->SetVQEData(_playoutDelay, _recordingDelay, 0); |
1669 nCopy = (dataSize < _playoutBufferUsed) ? | 976 |
1670 dataSize : _playoutBufferUsed; | 977 // Deliver recorded samples at specified sample rate, mic level |
1671 if (nCopy != _playoutBufferUsed) { | 978 // etc. to the observer using callback |
1672 // todo: If dataSize < _playoutBufferUsed | 979 audio_device_buffer_->DeliverRecordedData(); |
1673 // (should normally never be) | 980 |
1674 // we must move the remaining data | 981 // Make buffer available |
1675 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 982 _recordingSeqNumber[lowestSeqBufPos] = 0; |
1676 "nCopy (%u) != _playoutBufferUsed (%u)", | 983 _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos]; |
1677 nCopy, _playoutBufferUsed); | 984 // Must be done last to avoid interrupt problems between threads |
1678 if (_playWarning > 0) { | 985 _recordingLength[lowestSeqBufPos] = 0; |
1679 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 986 } |
1680 " Pending play warning exists"); | 987 } |
1681 } | 988 } |
1682 _playWarning = 1; | 989 |
1683 } | 990 { |
1684 memcpy(data, _playoutBuffer, 2*nCopy); | 991 // Normal case |
1685 dataPos = nCopy; | 992 // Sleep thread (5ms) to let other threads get to work |
1686 memset(_playoutBuffer, 0, sizeof(_playoutBuffer)); | 993 // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio |
1687 _playoutBufferUsed = 0; | 994 // Device Buffer? |
1688 } | 995 timespec t; |
1689 | 996 t.tv_sec = 0; |
1690 // Now get the rest from Audio Device Buffer | 997 t.tv_nsec = 5 * 1000 * 1000; |
1691 while (dataPos < dataSize) { | 998 nanosleep(&t, nullptr); |
1692 // Update playout delay | 999 } |
1693 UpdatePlayoutDelay(); | 1000 return true; |
1694 | |
1695 // Ask for new PCM data to be played out using the AudioDeviceBuffer | |
1696 noSamplesOut = _ptrAudioBuffer->RequestPlayoutData(noSamp10ms); | |
1697 | |
1698 // Get data from Audio Device Buffer | |
1699 noSamplesOut = | |
1700 _ptrAudioBuffer->GetPlayoutData( | |
1701 reinterpret_cast<int8_t*>(dataTmp)); | |
1702 // Cast OK since only equality comparison | |
1703 if (noSamp10ms != (unsigned int)noSamplesOut) { | |
1704 // Should never happen | |
1705 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1706 "noSamp10ms (%u) != noSamplesOut (%d)", | |
1707 noSamp10ms, noSamplesOut); | |
1708 | |
1709 if (_playWarning > 0) { | |
1710 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1711 " Pending play warning exists"); | |
1712 } | |
1713 _playWarning = 1; | |
1714 } | |
1715 | |
1716 // Insert as much as fits in data buffer | |
1717 nCopy = (dataSize-dataPos) > noSamp10ms ? | |
1718 noSamp10ms : (dataSize-dataPos); | |
1719 memcpy(&data[dataPos], dataTmp, 2*nCopy); | |
1720 | |
1721 // Save rest in playout buffer if any | |
1722 if (nCopy < noSamp10ms) { | |
1723 memcpy(_playoutBuffer, &dataTmp[nCopy], 2*(noSamp10ms-nCopy)); | |
1724 _playoutBufferUsed = noSamp10ms - nCopy; | |
1725 } | |
1726 | |
1727 // Update loop/index counter, if we copied less than noSamp10ms | |
1728 // samples we shall quit loop anyway | |
1729 dataPos += noSamp10ms; | |
1730 } | |
1731 | |
1732 delete [] dataTmp; | |
1733 } | |
1734 | |
1735 return 0; | |
1736 } | |
1737 | |
1738 void AudioDeviceIOS::UpdatePlayoutDelay() { | |
1739 ++_playoutDelayMeasurementCounter; | |
1740 | |
1741 if (_playoutDelayMeasurementCounter >= 100) { | |
1742 // Update HW and OS delay every second, unlikely to change | |
1743 | |
1744 // Since this is eventually rounded to integral ms, add 0.5ms | |
1745 // here to get round-to-nearest-int behavior instead of | |
1746 // truncation. | |
1747 double totalDelaySeconds = 0.0005; | |
1748 | |
1749 // HW output latency | |
1750 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
1751 double latency = session.outputLatency; | |
1752 assert(latency >= 0); | |
1753 totalDelaySeconds += latency; | |
1754 | |
1755 // HW buffer duration | |
1756 double ioBufferDuration = session.IOBufferDuration; | |
1757 assert(ioBufferDuration >= 0); | |
1758 totalDelaySeconds += ioBufferDuration; | |
1759 | |
1760 // AU latency | |
1761 Float64 f64(0); | |
1762 UInt32 size = sizeof(f64); | |
1763 OSStatus result = AudioUnitGetProperty( | |
1764 _auVoiceProcessing, kAudioUnitProperty_Latency, | |
1765 kAudioUnitScope_Global, 0, &f64, &size); | |
1766 if (0 != result) { | |
1767 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1768 "error AU latency (result=%d)", result); | |
1769 } | |
1770 assert(f64 >= 0); | |
1771 totalDelaySeconds += f64; | |
1772 | |
1773 // To ms | |
1774 _playoutDelay = static_cast<uint32_t>(totalDelaySeconds / 1000); | |
1775 | |
1776 // Reset counter | |
1777 _playoutDelayMeasurementCounter = 0; | |
1778 } | |
1779 | |
1780 // todo: Add playout buffer? | |
1781 } | |
1782 | |
1783 void AudioDeviceIOS::UpdateRecordingDelay() { | |
1784 ++_recordingDelayMeasurementCounter; | |
1785 | |
1786 if (_recordingDelayMeasurementCounter >= 100) { | |
1787 // Update HW and OS delay every second, unlikely to change | |
1788 | |
1789 // Since this is eventually rounded to integral ms, add 0.5ms | |
1790 // here to get round-to-nearest-int behavior instead of | |
1791 // truncation. | |
1792 double totalDelaySeconds = 0.0005; | |
1793 | |
1794 // HW input latency | |
1795 AVAudioSession* session = [AVAudioSession sharedInstance]; | |
1796 double latency = session.inputLatency; | |
1797 assert(latency >= 0); | |
1798 totalDelaySeconds += latency; | |
1799 | |
1800 // HW buffer duration | |
1801 double ioBufferDuration = session.IOBufferDuration; | |
1802 assert(ioBufferDuration >= 0); | |
1803 totalDelaySeconds += ioBufferDuration; | |
1804 | |
1805 // AU latency | |
1806 Float64 f64(0); | |
1807 UInt32 size = sizeof(f64); | |
1808 OSStatus result = AudioUnitGetProperty( | |
1809 _auVoiceProcessing, kAudioUnitProperty_Latency, | |
1810 kAudioUnitScope_Global, 0, &f64, &size); | |
1811 if (0 != result) { | |
1812 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1813 "error AU latency (result=%d)", result); | |
1814 } | |
1815 assert(f64 >= 0); | |
1816 totalDelaySeconds += f64; | |
1817 | |
1818 // To ms | |
1819 _recordingDelayHWAndOS = | |
1820 static_cast<uint32_t>(totalDelaySeconds / 1000); | |
1821 | |
1822 // Reset counter | |
1823 _recordingDelayMeasurementCounter = 0; | |
1824 } | |
1825 | |
1826 _recordingDelay = _recordingDelayHWAndOS; | |
1827 | |
1828 // ADB recording buffer size, update every time | |
1829 // Don't count the one next 10 ms to be sent, then convert samples => ms | |
1830 const uint32_t noSamp10ms = _adbSampFreq / 100; | |
1831 if (_recordingBufferTotalSize > noSamp10ms) { | |
1832 _recordingDelay += | |
1833 (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000); | |
1834 } | |
1835 } | |
1836 | |
1837 bool AudioDeviceIOS::RunCapture(void* ptrThis) { | |
1838 return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread(); | |
1839 } | |
1840 | |
1841 bool AudioDeviceIOS::CaptureWorkerThread() { | |
1842 if (_recording) { | |
1843 int bufPos = 0; | |
1844 unsigned int lowestSeq = 0; | |
1845 int lowestSeqBufPos = 0; | |
1846 bool foundBuf = true; | |
1847 const unsigned int noSamp10ms = _adbSampFreq / 100; | |
1848 | |
1849 while (foundBuf) { | |
1850 // Check if we have any buffer with data to insert | |
1851 // into the Audio Device Buffer, | |
1852 // and find the one with the lowest seq number | |
1853 foundBuf = false; | |
1854 for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) { | |
1855 if (noSamp10ms == _recordingLength[bufPos]) { | |
1856 if (!foundBuf) { | |
1857 lowestSeq = _recordingSeqNumber[bufPos]; | |
1858 lowestSeqBufPos = bufPos; | |
1859 foundBuf = true; | |
1860 } else if (_recordingSeqNumber[bufPos] < lowestSeq) { | |
1861 lowestSeq = _recordingSeqNumber[bufPos]; | |
1862 lowestSeqBufPos = bufPos; | |
1863 } | |
1864 } | |
1865 } // for | |
1866 | |
1867 // Insert data into the Audio Device Buffer if found any | |
1868 if (foundBuf) { | |
1869 // Update recording delay | |
1870 UpdateRecordingDelay(); | |
1871 | |
1872 // Set the recorded buffer | |
1873 _ptrAudioBuffer->SetRecordedBuffer( | |
1874 reinterpret_cast<int8_t*>( | |
1875 _recordingBuffer[lowestSeqBufPos]), | |
1876 _recordingLength[lowestSeqBufPos]); | |
1877 | |
1878 // Don't need to set the current mic level in ADB since we only | |
1879 // support digital AGC, | |
1880 // and besides we cannot get or set the IOS mic level anyway. | |
1881 | |
1882 // Set VQE info, use clockdrift == 0 | |
1883 _ptrAudioBuffer->SetVQEData(_playoutDelay, _recordingDelay, 0); | |
1884 | |
1885 // Deliver recorded samples at specified sample rate, mic level | |
1886 // etc. to the observer using callback | |
1887 _ptrAudioBuffer->DeliverRecordedData(); | |
1888 | |
1889 // Make buffer available | |
1890 _recordingSeqNumber[lowestSeqBufPos] = 0; | |
1891 _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos]; | |
1892 // Must be done last to avoid interrupt problems between threads | |
1893 _recordingLength[lowestSeqBufPos] = 0; | |
1894 } | |
1895 } // while (foundBuf) | |
1896 } // if (_recording) | |
1897 | |
1898 { | |
1899 // Normal case | |
1900 // Sleep thread (5ms) to let other threads get to work | |
1901 // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio | |
1902 // Device Buffer? | |
1903 timespec t; | |
1904 t.tv_sec = 0; | |
1905 t.tv_nsec = 5*1000*1000; | |
1906 nanosleep(&t, NULL); | |
1907 } | |
1908 | |
1909 return true; | |
1910 } | 1001 } |
1911 | 1002 |
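The buffer-selection scan in CaptureWorkerThread() can be read as a single min-by-sequence pass over the full buffers; a sketch with hypothetical parameter names (the member arrays' element types are assumed unsigned):

    static int OldestFullBuffer(const unsigned int* length,
                                const unsigned int* seq,
                                unsigned int noSamp10ms) {
      int best = -1;
      for (int i = 0; i < N_REC_BUFFERS; ++i) {
        if (length[i] == noSamp10ms && (best < 0 || seq[i] < seq[best]))
          best = i;  // full buffer with the lowest sequence number so far
      }
      return best;  // -1 when no buffer holds a complete 10 ms block
    }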
1912 } // namespace webrtc | 1003 } // namespace webrtc |