OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/base/arraysize.h" | 11 #include "webrtc/base/arraysize.h" |
12 #include "webrtc/base/checks.h" | 12 #include "webrtc/base/checks.h" |
13 #include "webrtc/base/platform_thread.h" | 13 #include "webrtc/base/platform_thread.h" |
14 #include "webrtc/modules/audio_device/audio_device_config.h" | 14 #include "webrtc/modules/audio_device/audio_device_config.h" |
15 #include "webrtc/modules/audio_device/mac/audio_device_mac.h" | 15 #include "webrtc/modules/audio_device/mac/audio_device_mac.h" |
16 #include "webrtc/modules/audio_device/mac/portaudio/pa_ringbuffer.h" | 16 #include "webrtc/modules/audio_device/mac/portaudio/pa_ringbuffer.h" |
17 #include "webrtc/system_wrappers/include/event_wrapper.h" | 17 #include "webrtc/system_wrappers/include/event_wrapper.h" |
18 #include "webrtc/system_wrappers/include/trace.h" | 18 #include "webrtc/system_wrappers/include/trace.h" |
19 | 19 |
20 #include <ApplicationServices/ApplicationServices.h> | 20 #include <ApplicationServices/ApplicationServices.h> |
21 #include <libkern/OSAtomic.h> // OSAtomicCompareAndSwap() | 21 #include <libkern/OSAtomic.h> // OSAtomicCompareAndSwap() |
22 #include <mach/mach.h> // mach_task_self() | 22 #include <mach/mach.h> // mach_task_self() |
23 #include <sys/sysctl.h> // sysctlbyname() | 23 #include <sys/sysctl.h> // sysctlbyname() |
24 | 24 |
25 | 25 namespace webrtc { |
26 | 26 |
27 namespace webrtc | 27 #define WEBRTC_CA_RETURN_ON_ERR(expr) \ |
28 { | 28 do { \ |
29 | 29 err = expr; \ |
30 #define WEBRTC_CA_RETURN_ON_ERR(expr) \ | 30 if (err != noErr) { \ |
31 do { \ | 31 logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \ |
32 err = expr; \ | 32 (const char*) & err); \ |
33 if (err != noErr) { \ | 33 return -1; \ |
34 logCAMsg(kTraceError, kTraceAudioDevice, _id, \ | 34 } \ |
35 "Error in " #expr, (const char *)&err); \ | 35 } while (0) |
36 return -1; \ | 36 |
37 } \ | 37 #define WEBRTC_CA_LOG_ERR(expr) \ |
38 } while(0) | 38 do { \ |
39 | 39 err = expr; \ |
40 #define WEBRTC_CA_LOG_ERR(expr) \ | 40 if (err != noErr) { \ |
41 do { \ | 41 logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \ |
42 err = expr; \ | 42 (const char*) & err); \ |
43 if (err != noErr) { \ | 43 } \ |
44 logCAMsg(kTraceError, kTraceAudioDevice, _id, \ | 44 } while (0) |
45 "Error in " #expr, (const char *)&err); \ | 45 |
46 } \ | 46 #define WEBRTC_CA_LOG_WARN(expr) \ |
47 } while(0) | 47 do { \ |
48 | 48 err = expr; \ |
49 #define WEBRTC_CA_LOG_WARN(expr) \ | 49 if (err != noErr) { \ |
50 do { \ | 50 logCAMsg(kTraceWarning, kTraceAudioDevice, _id, "Error in " #expr, \ |
51 err = expr; \ | 51 (const char*) & err); \ |
52 if (err != noErr) { \ | 52 } \ |
53 logCAMsg(kTraceWarning, kTraceAudioDevice, _id, \ | 53 } while (0) |
54 "Error in " #expr, (const char *)&err); \ | 54 |
55 } \ | 55 enum { MaxNumberDevices = 64 }; |
56 } while(0) | 56 |
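The three WEBRTC_CA_* macros wrap every CoreAudio call in this file. The do { ... } while (0) wrapper makes each macro expand as a single statement, so it composes safely with unbraced if/else, and all three assume an OSStatus err plus the _id member in scope, so they only work inside AudioDeviceMac methods. A minimal sketch of the intended call pattern (hypothetical method, not part of this diff):

    // Hypothetical member illustrating the macro contract: 'err' must be
    // declared locally, and a failing CoreAudio call logs and returns -1.
    int32_t AudioDeviceMac::ExampleQueryLatency(UInt32* latency) {
      OSStatus err = noErr;
      AudioObjectPropertyAddress addr = {kAudioDevicePropertyLatency,
                                         kAudioDevicePropertyScopeInput, 0};
      UInt32 size = sizeof(*latency);
      WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
          _inputDeviceID, &addr, 0, NULL, &size, latency));
      return 0;
    }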
57 | 57 void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) { |
58 enum | 58 while (1) { |
59 { | 59 int32_t oldValue = *theValue; |
60 MaxNumberDevices = 64 | 60 if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue) == true) { |
61 }; | 61 return; |
62 | 62 } |
63 void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) | 63 } |
64 { | 64 } |
65 while (1) | 65 |
66 { | 66 int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) { |
67 int32_t oldValue = *theValue; | 67 while (1) { |
68 if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue) | 68 int32_t value = *theValue; |
69 == true) | 69 if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true) { |
70 { | 70 return value; |
71 return; | 71 } |
72 } | 72 } |
73 } | |
74 } | |
75 | |
76 int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) | |
77 { | |
78 while (1) | |
79 { | |
80 int32_t value = *theValue; | |
81 if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true) | |
82 { | |
83 return value; | |
84 } | |
85 } | |
86 } | 73 } |
87 | 74 |
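AtomicSet32() and AtomicGet32() spin on OSAtomicCompareAndSwap32Barrier() until the compare-and-swap succeeds; the Barrier variant doubles as a full memory fence, which is why even the plain read goes through a CAS. For comparison only, the same contract with C++11 atomics would be a one-liner each (a sketch, not what this file uses; OSAtomic predates std::atomic in this codebase and is deprecated on later macOS):

    #include <atomic>

    // Sequentially consistent store/load give the same fencing that the
    // CAS loops above provide.
    void AtomicSet32(std::atomic<int32_t>* value, int32_t newValue) {
      value->store(newValue, std::memory_order_seq_cst);
    }

    int32_t AtomicGet32(const std::atomic<int32_t>* value) {
      return value->load(std::memory_order_seq_cst);
    }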
88 // CoreAudio errors are best interpreted as four character strings. | 75 // CoreAudio errors are best interpreted as four character strings. |
89 void AudioDeviceMac::logCAMsg(const TraceLevel level, | 76 void AudioDeviceMac::logCAMsg(const TraceLevel level, |
90 const TraceModule module, | 77 const TraceModule module, |
91 const int32_t id, const char *msg, | 78 const int32_t id, |
92 const char *err) | 79 const char* msg, |
93 { | 80 const char* err) { |
94 RTC_DCHECK(msg != NULL); | 81 RTC_DCHECK(msg != NULL); |
95 RTC_DCHECK(err != NULL); | 82 RTC_DCHECK(err != NULL); |
96 | 83 |
97 #ifdef WEBRTC_ARCH_BIG_ENDIAN | 84 #ifdef WEBRTC_ARCH_BIG_ENDIAN |
98 WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err); | 85 WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err); |
99 #else | 86 #else |
100 // We need to flip the characters in this case. | 87 // We need to flip the characters in this case. |
101 WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err | 88 WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err + 2, |
102 + 2, err + 1, err); | 89 err + 1, err); |
103 #endif | 90 #endif |
104 } | 91 } |
105 | 92 |
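CoreAudio packs most OSStatus codes as four ASCII bytes in a 32-bit integer (kAudioHardwareUnknownPropertyError, for instance, is 'who?'), which is why logCAMsg() prints the bytes in reverse on little-endian hosts. The same decoding as a self-contained, hypothetical helper:

    #include <string>

    // Hypothetical helper: render an OSStatus as its four-char code.
    std::string FourCCToString(OSStatus err) {
      const char* p = reinterpret_cast<const char*>(&err);
    #ifdef WEBRTC_ARCH_BIG_ENDIAN
      return std::string(p, 4);
    #else
      return std::string{p[3], p[2], p[1], p[0]};  // flip the byte order
    #endif
    }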
106 AudioDeviceMac::AudioDeviceMac(const int32_t id) : | 93 AudioDeviceMac::AudioDeviceMac(const int32_t id) |
107 _ptrAudioBuffer(NULL), | 94 : _ptrAudioBuffer(NULL), |
108 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), | 95 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), |
109 _stopEventRec(*EventWrapper::Create()), | 96 _stopEventRec(*EventWrapper::Create()), |
110 _stopEvent(*EventWrapper::Create()), | 97 _stopEvent(*EventWrapper::Create()), |
111 _id(id), | 98 _id(id), |
112 _mixerManager(id), | 99 _mixerManager(id), |
113 _inputDeviceIndex(0), | 100 _inputDeviceIndex(0), |
114 _outputDeviceIndex(0), | 101 _outputDeviceIndex(0), |
115 _inputDeviceID(kAudioObjectUnknown), | 102 _inputDeviceID(kAudioObjectUnknown), |
116 _outputDeviceID(kAudioObjectUnknown), | 103 _outputDeviceID(kAudioObjectUnknown), |
117 _inputDeviceIsSpecified(false), | 104 _inputDeviceIsSpecified(false), |
118 _outputDeviceIsSpecified(false), | 105 _outputDeviceIsSpecified(false), |
119 _recChannels(N_REC_CHANNELS), | 106 _recChannels(N_REC_CHANNELS), |
120 _playChannels(N_PLAY_CHANNELS), | 107 _playChannels(N_PLAY_CHANNELS), |
121 _captureBufData(NULL), | 108 _captureBufData(NULL), |
122 _renderBufData(NULL), | 109 _renderBufData(NULL), |
123 _playBufType(AudioDeviceModule::kFixedBufferSize), | 110 _playBufType(AudioDeviceModule::kFixedBufferSize), |
124 _initialized(false), | 111 _initialized(false), |
125 _isShutDown(false), | 112 _isShutDown(false), |
126 _recording(false), | 113 _recording(false), |
127 _playing(false), | 114 _playing(false), |
128 _recIsInitialized(false), | 115 _recIsInitialized(false), |
129 _playIsInitialized(false), | 116 _playIsInitialized(false), |
130 _AGC(false), | 117 _AGC(false), |
131 _renderDeviceIsAlive(1), | 118 _renderDeviceIsAlive(1), |
132 _captureDeviceIsAlive(1), | 119 _captureDeviceIsAlive(1), |
133 _twoDevices(true), | 120 _twoDevices(true), |
134 _doStop(false), | 121 _doStop(false), |
135 _doStopRec(false), | 122 _doStopRec(false), |
136 _macBookPro(false), | 123 _macBookPro(false), |
137 _macBookProPanRight(false), | 124 _macBookProPanRight(false), |
138 _captureLatencyUs(0), | 125 _captureLatencyUs(0), |
139 _renderLatencyUs(0), | 126 _renderLatencyUs(0), |
140 _captureDelayUs(0), | 127 _captureDelayUs(0), |
141 _renderDelayUs(0), | 128 _renderDelayUs(0), |
142 _renderDelayOffsetSamples(0), | 129 _renderDelayOffsetSamples(0), |
143 _playBufDelayFixed(20), | 130 _playBufDelayFixed(20), |
144 _playWarning(0), | 131 _playWarning(0), |
145 _playError(0), | 132 _playError(0), |
146 _recWarning(0), | 133 _recWarning(0), |
147 _recError(0), | 134 _recError(0), |
148 _paCaptureBuffer(NULL), | 135 _paCaptureBuffer(NULL), |
149 _paRenderBuffer(NULL), | 136 _paRenderBuffer(NULL), |
150 _captureBufSizeSamples(0), | 137 _captureBufSizeSamples(0), |
151 _renderBufSizeSamples(0), | 138 _renderBufSizeSamples(0), |
152 prev_key_state_(), | 139 prev_key_state_(), |
153 get_mic_volume_counter_ms_(0) | 140 get_mic_volume_counter_ms_(0) { |
154 { | 141 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__); |
155 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, | 142 |
156 "%s created", __FUNCTION__); | 143 RTC_DCHECK(&_stopEvent != NULL); |
157 | 144 RTC_DCHECK(&_stopEventRec != NULL); |
158 RTC_DCHECK(&_stopEvent != NULL); | 145 |
159 RTC_DCHECK(&_stopEventRec != NULL); | 146 memset(_renderConvertData, 0, sizeof(_renderConvertData)); |
160 | 147 memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription)); |
161 memset(_renderConvertData, 0, sizeof(_renderConvertData)); | 148 memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); |
162 memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription)); | 149 memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription)); |
163 memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); | 150 memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); |
164 memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription)); | 151 } |
165 memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); | 152 |
166 } | 153 AudioDeviceMac::~AudioDeviceMac() { |
167 | 154 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", |
168 | 155 __FUNCTION__); |
169 AudioDeviceMac::~AudioDeviceMac() | 156 |
170 { | 157 if (!_isShutDown) { |
171 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, | 158 Terminate(); |
172 "%s destroyed", __FUNCTION__); | 159 } |
173 | 160 |
174 if (!_isShutDown) | 161 RTC_DCHECK(!capture_worker_thread_.get()); |
175 { | 162 RTC_DCHECK(!render_worker_thread_.get()); |
176 Terminate(); | 163 |
177 } | 164 if (_paRenderBuffer) { |
178 | 165 delete _paRenderBuffer; |
179 RTC_DCHECK(!capture_worker_thread_.get()); | 166 _paRenderBuffer = NULL; |
180 RTC_DCHECK(!render_worker_thread_.get()); | 167 } |
181 | 168 |
182 if (_paRenderBuffer) | 169 if (_paCaptureBuffer) { |
183 { | 170 delete _paCaptureBuffer; |
184 delete _paRenderBuffer; | 171 _paCaptureBuffer = NULL; |
185 _paRenderBuffer = NULL; | 172 } |
186 } | 173 |
187 | 174 if (_renderBufData) { |
188 if (_paCaptureBuffer) | 175 delete[] _renderBufData; |
189 { | 176 _renderBufData = NULL; |
190 delete _paCaptureBuffer; | 177 } |
191 _paCaptureBuffer = NULL; | 178 |
192 } | 179 if (_captureBufData) { |
193 | 180 delete[] _captureBufData; |
194 if (_renderBufData) | 181 _captureBufData = NULL; |
195 { | 182 } |
196 delete[] _renderBufData; | 183 |
197 _renderBufData = NULL; | 184 kern_return_t kernErr = KERN_SUCCESS; |
198 } | 185 kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore); |
199 | 186 if (kernErr != KERN_SUCCESS) { |
200 if (_captureBufData) | 187 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
201 { | 188 " semaphore_destroy() error: %d", kernErr); |
202 delete[] _captureBufData; | 189 } |
203 _captureBufData = NULL; | 190 |
204 } | 191 kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore); |
205 | 192 if (kernErr != KERN_SUCCESS) { |
206 kern_return_t kernErr = KERN_SUCCESS; | 193 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
207 kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore); | 194 " semaphore_destroy() error: %d", kernErr); |
208 if (kernErr != KERN_SUCCESS) | 195 } |
209 { | 196 |
210 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 197 delete &_stopEvent; |
211 " semaphore_destroy() error: %d", kernErr); | 198 delete &_stopEventRec; |
212 } | 199 delete &_critSect; |
213 | |
214 kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore); | |
215 if (kernErr != KERN_SUCCESS) | |
216 { | |
217 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
218 " semaphore_destroy() error: %d", kernErr); | |
219 } | |
220 | |
221 delete &_stopEvent; | |
222 delete &_stopEventRec; | |
223 delete &_critSect; | |
224 } | 200 } |
225 | 201 |
226 // ============================================================================ | 202 // ============================================================================ |
227 // API | 203 // API |
228 // ============================================================================ | 204 // ============================================================================ |
229 | 205 |
230 void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) | 206 void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { |
231 { | 207 CriticalSectionScoped lock(&_critSect); |
232 | 208 |
233 CriticalSectionScoped lock(&_critSect); | 209 _ptrAudioBuffer = audioBuffer; |
234 | 210 |
235 _ptrAudioBuffer = audioBuffer; | 211 // inform the AudioBuffer about default settings for this implementation |
236 | 212 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); |
237 // inform the AudioBuffer about default settings for this implementation | 213 _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); |
| 214 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS); |
| 215 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS); |
| 216 } |
| 217 |
| 218 int32_t AudioDeviceMac::ActiveAudioLayer( |
| 219 AudioDeviceModule::AudioLayer& audioLayer) const { |
| 220 audioLayer = AudioDeviceModule::kPlatformDefaultAudio; |
| 221 return 0; |
| 222 } |
| 223 |
| 224 int32_t AudioDeviceMac::Init() { |
| 225 CriticalSectionScoped lock(&_critSect); |
| 226 |
| 227 if (_initialized) { |
| 228 return 0; |
| 229 } |
| 230 |
| 231 OSStatus err = noErr; |
| 232 |
| 233 _isShutDown = false; |
| 234 |
| 235 // PortAudio ring buffers require an elementCount which is a power of two. |
| 236 if (_renderBufData == NULL) { |
| 237 UInt32 powerOfTwo = 1; |
| 238 while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) { |
| 239 powerOfTwo <<= 1; |
| 240 } |
| 241 _renderBufSizeSamples = powerOfTwo; |
| 242 _renderBufData = new SInt16[_renderBufSizeSamples]; |
| 243 } |
| 244 |
| 245 if (_paRenderBuffer == NULL) { |
| 246 _paRenderBuffer = new PaUtilRingBuffer; |
| 247 PaRingBufferSize bufSize = -1; |
| 248 bufSize = PaUtil_InitializeRingBuffer( |
| 249 _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData); |
| 250 if (bufSize == -1) { |
| 251 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, |
| 252 " PaUtil_InitializeRingBuffer() error"); |
| 253 return -1; |
| 254 } |
| 255 } |
| 256 |
| 257 if (_captureBufData == NULL) { |
| 258 UInt32 powerOfTwo = 1; |
| 259 while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) { |
| 260 powerOfTwo <<= 1; |
| 261 } |
| 262 _captureBufSizeSamples = powerOfTwo; |
| 263 _captureBufData = new Float32[_captureBufSizeSamples]; |
| 264 } |
| 265 |
| 266 if (_paCaptureBuffer == NULL) { |
| 267 _paCaptureBuffer = new PaUtilRingBuffer; |
| 268 PaRingBufferSize bufSize = -1; |
| 269 bufSize = |
| 270 PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32), |
| 271 _captureBufSizeSamples, _captureBufData); |
| 272 if (bufSize == -1) { |
| 273 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, |
| 274 " PaUtil_InitializeRingBuffer() error"); |
| 275 return -1; |
| 276 } |
| 277 } |
| 278 |
| 279 kern_return_t kernErr = KERN_SUCCESS; |
| 280 kernErr = semaphore_create(mach_task_self(), &_renderSemaphore, |
| 281 SYNC_POLICY_FIFO, 0); |
| 282 if (kernErr != KERN_SUCCESS) { |
| 283 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, |
| 284 " semaphore_create() error: %d", kernErr); |
| 285 return -1; |
| 286 } |
| 287 |
| 288 kernErr = semaphore_create(mach_task_self(), &_captureSemaphore, |
| 289 SYNC_POLICY_FIFO, 0); |
| 290 if (kernErr != KERN_SUCCESS) { |
| 291 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, |
| 292 " semaphore_create() error: %d", kernErr); |
| 293 return -1; |
| 294 } |
| 295 |
| 296 // Setting RunLoop to NULL here instructs HAL to manage its own thread for |
| 297 // notifications. This was the default behaviour on OS X 10.5 and earlier, |
| 298 // but now must be explicitly specified. HAL would otherwise try to use the |
| 299 // main thread to issue notifications. |
| 300 AudioObjectPropertyAddress propertyAddress = { |
| 301 kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal, |
| 302 kAudioObjectPropertyElementMaster}; |
| 303 CFRunLoopRef runLoop = NULL; |
| 304 UInt32 size = sizeof(CFRunLoopRef); |
| 305 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
| 306 kAudioObjectSystemObject, &propertyAddress, 0, NULL, size, &runLoop)); |
| 307 |
| 308 // Listen for any device changes. |
| 309 propertyAddress.mSelector = kAudioHardwarePropertyDevices; |
| 310 WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener( |
| 311 kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this)); |
| 312 |
| 313 // Determine if this is a MacBook Pro |
| 314 _macBookPro = false; |
| 315 _macBookProPanRight = false; |
| 316 char buf[128]; |
| 317 size_t length = sizeof(buf); |
| 318 memset(buf, 0, length); |
| 319 |
| 320 int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0); |
| 321 if (intErr != 0) { |
| 322 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 323 " Error in sysctlbyname(): %d", err); |
| 324 } else { |
| 325 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Hardware model: %s", |
| 326 buf); |
| 327 if (strncmp(buf, "MacBookPro", 10) == 0) { |
| 328 _macBookPro = true; |
| 329 } |
| 330 } |
| 331 |
| 332 _playWarning = 0; |
| 333 _playError = 0; |
| 334 _recWarning = 0; |
| 335 _recError = 0; |
| 336 |
| 337 get_mic_volume_counter_ms_ = 0; |
| 338 |
| 339 _initialized = true; |
| 340 |
| 341 return 0; |
| 342 } |
| 343 |
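Init() rounds both ring buffers up because PaUtil_InitializeRingBuffer() accepts only a power-of-two elementCount, which lets the PortAudio ring buffer wrap indices with a bit mask instead of a modulo. The rounding loop used twice above is equivalent to this sketch (sample value illustrative only):

    // Next power of two >= n, as computed for both ring buffers in Init().
    UInt32 NextPowerOfTwo(UInt32 n) {
      UInt32 p = 1;
      while (p < n) {
        p <<= 1;
      }
      return p;  // e.g. n = 480 gives 512, so (p - 1) works as an index mask
    }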
| 344 int32_t AudioDeviceMac::Terminate() { |
| 345 if (!_initialized) { |
| 346 return 0; |
| 347 } |
| 348 |
| 349 if (_recording) { |
| 350 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 351 " Recording must be stopped"); |
| 352 return -1; |
| 353 } |
| 354 |
| 355 if (_playing) { |
| 356 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 357 " Playback must be stopped"); |
| 358 return -1; |
| 359 } |
| 360 |
| 361 _critSect.Enter(); |
| 362 |
| 363 _mixerManager.Close(); |
| 364 |
| 365 OSStatus err = noErr; |
| 366 int retVal = 0; |
| 367 |
| 368 AudioObjectPropertyAddress propertyAddress = { |
| 369 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, |
| 370 kAudioObjectPropertyElementMaster}; |
| 371 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( |
| 372 kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this)); |
| 373 |
| 374 err = AudioHardwareUnload(); |
| 375 if (err != noErr) { |
| 376 logCAMsg(kTraceError, kTraceAudioDevice, _id, |
| 377 "Error in AudioHardwareUnload()", (const char*)&err); |
| 378 retVal = -1; |
| 379 } |
| 380 |
| 381 _isShutDown = true; |
| 382 _initialized = false; |
| 383 _outputDeviceIsSpecified = false; |
| 384 _inputDeviceIsSpecified = false; |
| 385 |
| 386 _critSect.Leave(); |
| 387 |
| 388 return retVal; |
| 389 } |
| 390 |
| 391 bool AudioDeviceMac::Initialized() const { |
| 392 return (_initialized); |
| 393 } |
| 394 |
| 395 int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available) { |
| 396 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
| 397 |
| 398 // Make an attempt to open up the |
| 399 // output mixer corresponding to the currently selected output device. |
| 400 // |
| 401 if (!wasInitialized && InitSpeaker() == -1) { |
| 402 available = false; |
| 403 return 0; |
| 404 } |
| 405 |
| 406 // Given that InitSpeaker was successful, we know that a valid speaker |
| 407 // exists. |
| 408 available = true; |
| 409 |
| 410 // Close the initialized output mixer |
| 411 // |
| 412 if (!wasInitialized) { |
| 413 _mixerManager.CloseSpeaker(); |
| 414 } |
| 415 |
| 416 return 0; |
| 417 } |
| 418 |
| 419 int32_t AudioDeviceMac::InitSpeaker() { |
| 420 CriticalSectionScoped lock(&_critSect); |
| 421 |
| 422 if (_playing) { |
| 423 return -1; |
| 424 } |
| 425 |
| 426 if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) { |
| 427 return -1; |
| 428 } |
| 429 |
| 430 if (_inputDeviceID == _outputDeviceID) { |
| 431 _twoDevices = false; |
| 432 } else { |
| 433 _twoDevices = true; |
| 434 } |
| 435 |
| 436 if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) { |
| 437 return -1; |
| 438 } |
| 439 |
| 440 return 0; |
| 441 } |
| 442 |
| 443 int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available) { |
| 444 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
| 445 |
| 446 // Make an attempt to open up the |
| 447 // input mixer corresponding to the currently selected input device. |
| 448 // |
| 449 if (!wasInitialized && InitMicrophone() == -1) { |
| 450 available = false; |
| 451 return 0; |
| 452 } |
| 453 |
| 454 // Given that InitMicrophone was successful, we know that a valid microphone |
| 455 // exists. |
| 456 available = true; |
| 457 |
| 458 // Close the initialized input mixer |
| 459 // |
| 460 if (!wasInitialized) { |
| 461 _mixerManager.CloseMicrophone(); |
| 462 } |
| 463 |
| 464 return 0; |
| 465 } |
| 466 |
| 467 int32_t AudioDeviceMac::InitMicrophone() { |
| 468 CriticalSectionScoped lock(&_critSect); |
| 469 |
| 470 if (_recording) { |
| 471 return -1; |
| 472 } |
| 473 |
| 474 if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) { |
| 475 return -1; |
| 476 } |
| 477 |
| 478 if (_inputDeviceID == _outputDeviceID) { |
| 479 _twoDevices = false; |
| 480 } else { |
| 481 _twoDevices = true; |
| 482 } |
| 483 |
| 484 if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) { |
| 485 return -1; |
| 486 } |
| 487 |
| 488 return 0; |
| 489 } |
| 490 |
| 491 bool AudioDeviceMac::SpeakerIsInitialized() const { |
| 492 return (_mixerManager.SpeakerIsInitialized()); |
| 493 } |
| 494 |
| 495 bool AudioDeviceMac::MicrophoneIsInitialized() const { |
| 496 return (_mixerManager.MicrophoneIsInitialized()); |
| 497 } |
| 498 |
| 499 int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) { |
| 500 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
| 501 |
| 502 // Make an attempt to open up the |
| 503 // output mixer corresponding to the currently selected output device. |
| 504 // |
| 505 if (!wasInitialized && InitSpeaker() == -1) { |
| 506 // If we end up here it means that the selected speaker has no volume |
| 507 // control. |
| 508 available = false; |
| 509 return 0; |
| 510 } |
| 511 |
| 512 // Given that InitSpeaker was successful, we know that a volume control exists |
| 513 // |
| 514 available = true; |
| 515 |
| 516 // Close the initialized output mixer |
| 517 // |
| 518 if (!wasInitialized) { |
| 519 _mixerManager.CloseSpeaker(); |
| 520 } |
| 521 |
| 522 return 0; |
| 523 } |
| 524 |
| 525 int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume) { |
| 526 return (_mixerManager.SetSpeakerVolume(volume)); |
| 527 } |
| 528 |
| 529 int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const { |
| 530 uint32_t level(0); |
| 531 |
| 532 if (_mixerManager.SpeakerVolume(level) == -1) { |
| 533 return -1; |
| 534 } |
| 535 |
| 536 volume = level; |
| 537 return 0; |
| 538 } |
| 539 |
| 540 int32_t AudioDeviceMac::SetWaveOutVolume(uint16_t volumeLeft, |
| 541 uint16_t volumeRight) { |
| 542 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 543 " API call not supported on this platform"); |
| 544 return -1; |
| 545 } |
| 546 |
| 547 int32_t AudioDeviceMac::WaveOutVolume(uint16_t& /*volumeLeft*/, |
| 548 uint16_t& /*volumeRight*/) const { |
| 549 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 550 " API call not supported on this platform"); |
| 551 return -1; |
| 552 } |
| 553 |
| 554 int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const { |
| 555 uint32_t maxVol(0); |
| 556 |
| 557 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { |
| 558 return -1; |
| 559 } |
| 560 |
| 561 maxVolume = maxVol; |
| 562 return 0; |
| 563 } |
| 564 |
| 565 int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const { |
| 566 uint32_t minVol(0); |
| 567 |
| 568 if (_mixerManager.MinSpeakerVolume(minVol) == -1) { |
| 569 return -1; |
| 570 } |
| 571 |
| 572 minVolume = minVol; |
| 573 return 0; |
| 574 } |
| 575 |
| 576 int32_t AudioDeviceMac::SpeakerVolumeStepSize(uint16_t& stepSize) const { |
| 577 uint16_t delta(0); |
| 578 |
| 579 if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) { |
| 580 return -1; |
| 581 } |
| 582 |
| 583 stepSize = delta; |
| 584 return 0; |
| 585 } |
| 586 |
| 587 int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) { |
| 588 bool isAvailable(false); |
| 589 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
| 590 |
| 591 // Make an attempt to open up the |
| 592 // output mixer corresponding to the currently selected output device. |
| 593 // |
| 594 if (!wasInitialized && InitSpeaker() == -1) { |
| 595 // If we end up here it means that the selected speaker has no volume |
| 596 // control, hence it is safe to state that there is no mute control |
| 597 // already at this stage. |
| 598 available = false; |
| 599 return 0; |
| 600 } |
| 601 |
| 602 // Check if the selected speaker has a mute control |
| 603 // |
| 604 _mixerManager.SpeakerMuteIsAvailable(isAvailable); |
| 605 |
| 606 available = isAvailable; |
| 607 |
| 608 // Close the initialized output mixer |
| 609 // |
| 610 if (!wasInitialized) { |
| 611 _mixerManager.CloseSpeaker(); |
| 612 } |
| 613 |
| 614 return 0; |
| 615 } |
| 616 |
| 617 int32_t AudioDeviceMac::SetSpeakerMute(bool enable) { |
| 618 return (_mixerManager.SetSpeakerMute(enable)); |
| 619 } |
| 620 |
| 621 int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const { |
| 622 bool muted(0); |
| 623 |
| 624 if (_mixerManager.SpeakerMute(muted) == -1) { |
| 625 return -1; |
| 626 } |
| 627 |
| 628 enabled = muted; |
| 629 return 0; |
| 630 } |
| 631 |
| 632 int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) { |
| 633 bool isAvailable(false); |
| 634 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
| 635 |
| 636 // Make an attempt to open up the |
| 637 // input mixer corresponding to the currently selected input device. |
| 638 // |
| 639 if (!wasInitialized && InitMicrophone() == -1) { |
| 640 // If we end up here it means that the selected microphone has no volume |
| 641 // control, hence it is safe to state that there is no mute control |
| 642 // already at this stage. |
| 643 available = false; |
| 644 return 0; |
| 645 } |
| 646 |
| 647 // Check if the selected microphone has a mute control |
| 648 // |
| 649 _mixerManager.MicrophoneMuteIsAvailable(isAvailable); |
| 650 available = isAvailable; |
| 651 |
| 652 // Close the initialized input mixer |
| 653 // |
| 654 if (!wasInitialized) { |
| 655 _mixerManager.CloseMicrophone(); |
| 656 } |
| 657 |
| 658 return 0; |
| 659 } |
| 660 |
| 661 int32_t AudioDeviceMac::SetMicrophoneMute(bool enable) { |
| 662 return (_mixerManager.SetMicrophoneMute(enable)); |
| 663 } |
| 664 |
| 665 int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const { |
| 666 bool muted(0); |
| 667 |
| 668 if (_mixerManager.MicrophoneMute(muted) == -1) { |
| 669 return -1; |
| 670 } |
| 671 |
| 672 enabled = muted; |
| 673 return 0; |
| 674 } |
| 675 |
| 676 int32_t AudioDeviceMac::MicrophoneBoostIsAvailable(bool& available) { |
| 677 bool isAvailable(false); |
| 678 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
| 679 |
| 680 // Enumerate all available microphones and make an attempt to open up the |
| 681 // input mixer corresponding to the currently selected input device. |
| 682 // |
| 683 if (!wasInitialized && InitMicrophone() == -1) { |
| 684 // If we end up here it means that the selected microphone has no volume |
| 685 // control, hence it is safe to state that there is no boost control |
| 686 // already at this stage. |
| 687 available = false; |
| 688 return 0; |
| 689 } |
| 690 |
| 691 // Check if the selected microphone has a boost control |
| 692 // |
| 693 _mixerManager.MicrophoneBoostIsAvailable(isAvailable); |
| 694 available = isAvailable; |
| 695 |
| 696 // Close the initialized input mixer |
| 697 // |
| 698 if (!wasInitialized) { |
| 699 _mixerManager.CloseMicrophone(); |
| 700 } |
| 701 |
| 702 return 0; |
| 703 } |
| 704 |
| 705 int32_t AudioDeviceMac::SetMicrophoneBoost(bool enable) { |
| 706 return (_mixerManager.SetMicrophoneBoost(enable)); |
| 707 } |
| 708 |
| 709 int32_t AudioDeviceMac::MicrophoneBoost(bool& enabled) const { |
| 710 bool onOff(0); |
| 711 |
| 712 if (_mixerManager.MicrophoneBoost(onOff) == -1) { |
| 713 return -1; |
| 714 } |
| 715 |
| 716 enabled = onOff; |
| 717 return 0; |
| 718 } |
| 719 |
| 720 int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available) { |
| 721 bool isAvailable(false); |
| 722 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
| 723 |
| 724 if (!wasInitialized && InitMicrophone() == -1) { |
| 725 // Cannot open the specified device |
| 726 available = false; |
| 727 return 0; |
| 728 } |
| 729 |
| 730 // Check if the selected microphone can record stereo |
| 731 // |
| 732 _mixerManager.StereoRecordingIsAvailable(isAvailable); |
| 733 available = isAvailable; |
| 734 |
| 735 // Close the initialized input mixer |
| 736 // |
| 737 if (!wasInitialized) { |
| 738 _mixerManager.CloseMicrophone(); |
| 739 } |
| 740 |
| 741 return 0; |
| 742 } |
| 743 |
| 744 int32_t AudioDeviceMac::SetStereoRecording(bool enable) { |
| 745 if (enable) |
| 746 _recChannels = 2; |
| 747 else |
| 748 _recChannels = 1; |
| 749 |
| 750 return 0; |
| 751 } |
| 752 |
| 753 int32_t AudioDeviceMac::StereoRecording(bool& enabled) const { |
| 754 if (_recChannels == 2) |
| 755 enabled = true; |
| 756 else |
| 757 enabled = false; |
| 758 |
| 759 return 0; |
| 760 } |
| 761 |
| 762 int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) { |
| 763 bool isAvailable(false); |
| 764 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
| 765 |
| 766 if (!wasInitialized && InitSpeaker() == -1) { |
| 767 // Cannot open the specified device |
| 768 available = false; |
| 769 return 0; |
| 770 } |
| 771 |
| 772 // Check if the selected speaker supports stereo playout |
| 773 // |
| 774 _mixerManager.StereoPlayoutIsAvailable(isAvailable); |
| 775 available = isAvailable; |
| 776 |
| 777 // Close the initialized output mixer |
| 778 // |
| 779 if (!wasInitialized) { |
| 780 _mixerManager.CloseSpeaker(); |
| 781 } |
| 782 |
| 783 return 0; |
| 784 } |
| 785 |
| 786 int32_t AudioDeviceMac::SetStereoPlayout(bool enable) { |
| 787 if (enable) |
| 788 _playChannels = 2; |
| 789 else |
| 790 _playChannels = 1; |
| 791 |
| 792 return 0; |
| 793 } |
| 794 |
| 795 int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const { |
| 796 if (_playChannels == 2) |
| 797 enabled = true; |
| 798 else |
| 799 enabled = false; |
| 800 |
| 801 return 0; |
| 802 } |
| 803 |
| 804 int32_t AudioDeviceMac::SetAGC(bool enable) { |
| 805 _AGC = enable; |
| 806 |
| 807 return 0; |
| 808 } |
| 809 |
| 810 bool AudioDeviceMac::AGC() const { |
| 811 return _AGC; |
| 812 } |
| 813 |
| 814 int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) { |
| 815 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
| 816 |
| 817 // Make an attempt to open up the |
| 818 // input mixer corresponding to the currently selected input device. |
| 819 // |
| 820 if (!wasInitialized && InitMicrophone() == -1) { |
| 821 // If we end up here it means that the selected microphone has no volume |
| 822 // control. |
| 823 available = false; |
| 824 return 0; |
| 825 } |
| 826 |
| 827 // Given that InitMicrophone was successful, we know that a volume control |
| 828 // exists |
| 829 // |
| 830 available = true; |
| 831 |
| 832 // Close the initialized input mixer |
| 833 // |
| 834 if (!wasInitialized) { |
| 835 _mixerManager.CloseMicrophone(); |
| 836 } |
| 837 |
| 838 return 0; |
| 839 } |
| 840 |
| 841 int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) { |
| 842 return (_mixerManager.SetMicrophoneVolume(volume)); |
| 843 } |
| 844 |
| 845 int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const { |
| 846 uint32_t level(0); |
| 847 |
| 848 if (_mixerManager.MicrophoneVolume(level) == -1) { |
| 849 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 850 " failed to retrive current microphone level"); |
| 851 return -1; |
| 852 } |
| 853 |
| 854 volume = level; |
| 855 return 0; |
| 856 } |
| 857 |
| 858 int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const { |
| 859 uint32_t maxVol(0); |
| 860 |
| 861 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) { |
| 862 return -1; |
| 863 } |
| 864 |
| 865 maxVolume = maxVol; |
| 866 return 0; |
| 867 } |
| 868 |
| 869 int32_t AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const { |
| 870 uint32_t minVol(0); |
| 871 |
| 872 if (_mixerManager.MinMicrophoneVolume(minVol) == -1) { |
| 873 return -1; |
| 874 } |
| 875 |
| 876 minVolume = minVol; |
| 877 return 0; |
| 878 } |
| 879 |
| 880 int32_t AudioDeviceMac::MicrophoneVolumeStepSize(uint16_t& stepSize) const { |
| 881 uint16_t delta(0); |
| 882 |
| 883 if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) { |
| 884 return -1; |
| 885 } |
| 886 |
| 887 stepSize = delta; |
| 888 return 0; |
| 889 } |
| 890 |
| 891 int16_t AudioDeviceMac::PlayoutDevices() { |
| 892 AudioDeviceID playDevices[MaxNumberDevices]; |
| 893 return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices, |
| 894 MaxNumberDevices); |
| 895 } |
| 896 |
| 897 int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) { |
| 898 CriticalSectionScoped lock(&_critSect); |
| 899 |
| 900 if (_playIsInitialized) { |
| 901 return -1; |
| 902 } |
| 903 |
| 904 AudioDeviceID playDevices[MaxNumberDevices]; |
| 905 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput, |
| 906 playDevices, MaxNumberDevices); |
| 907 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 908 " number of availiable waveform-audio output devices is %u", |
| 909 nDevices); |
| 910 |
| 911 if (index > (nDevices - 1)) { |
| 912 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 913 " device index is out of range [0,%u]", (nDevices - 1)); |
| 914 return -1; |
| 915 } |
| 916 |
| 917 _outputDeviceIndex = index; |
| 918 _outputDeviceIsSpecified = true; |
| 919 |
| 920 return 0; |
| 921 } |
| 922 |
| 923 int32_t AudioDeviceMac::SetPlayoutDevice( |
| 924 AudioDeviceModule::WindowsDeviceType /*device*/) { |
| 925 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 926 "WindowsDeviceType not supported"); |
| 927 return -1; |
| 928 } |
| 929 |
| 930 int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index, |
| 931 char name[kAdmMaxDeviceNameSize], |
| 932 char guid[kAdmMaxGuidSize]) { |
| 933 const uint16_t nDevices(PlayoutDevices()); |
| 934 |
| 935 if ((index > (nDevices - 1)) || (name == NULL)) { |
| 936 return -1; |
| 937 } |
| 938 |
| 939 memset(name, 0, kAdmMaxDeviceNameSize); |
| 940 |
| 941 if (guid != NULL) { |
| 942 memset(guid, 0, kAdmMaxGuidSize); |
| 943 } |
| 944 |
| 945 return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name); |
| 946 } |
| 947 |
| 948 int32_t AudioDeviceMac::RecordingDeviceName(uint16_t index, |
| 949 char name[kAdmMaxDeviceNameSize], |
| 950 char guid[kAdmMaxGuidSize]) { |
| 951 const uint16_t nDevices(RecordingDevices()); |
| 952 |
| 953 if ((index > (nDevices - 1)) || (name == NULL)) { |
| 954 return -1; |
| 955 } |
| 956 |
| 957 memset(name, 0, kAdmMaxDeviceNameSize); |
| 958 |
| 959 if (guid != NULL) { |
| 960 memset(guid, 0, kAdmMaxGuidSize); |
| 961 } |
| 962 |
| 963 return GetDeviceName(kAudioDevicePropertyScopeInput, index, name); |
| 964 } |
| 965 |
| 966 int16_t AudioDeviceMac::RecordingDevices() { |
| 967 AudioDeviceID recDevices[MaxNumberDevices]; |
| 968 return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices, |
| 969 MaxNumberDevices); |
| 970 } |
| 971 |
| 972 int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) { |
| 973 if (_recIsInitialized) { |
| 974 return -1; |
| 975 } |
| 976 |
| 977 AudioDeviceID recDevices[MaxNumberDevices]; |
| 978 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput, |
| 979 recDevices, MaxNumberDevices); |
| 980 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 981 " number of availiable waveform-audio input devices is %u", |
| 982 nDevices); |
| 983 |
| 984 if (index > (nDevices - 1)) { |
| 985 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 986 " device index is out of range [0,%u]", (nDevices - 1)); |
| 987 return -1; |
| 988 } |
| 989 |
| 990 _inputDeviceIndex = index; |
| 991 _inputDeviceIsSpecified = true; |
| 992 |
| 993 return 0; |
| 994 } |
| 995 |
| 996 int32_t AudioDeviceMac::SetRecordingDevice( |
| 997 AudioDeviceModule::WindowsDeviceType /*device*/) { |
| 998 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 999 "WindowsDeviceType not supported"); |
| 1000 return -1; |
| 1001 } |
| 1002 |
| 1003 int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) { |
| 1004 available = true; |
| 1005 |
| 1006 // Try to initialize the playout side |
| 1007 if (InitPlayout() == -1) { |
| 1008 available = false; |
| 1009 } |
| 1010 |
| 1011 // We destroy the IOProc created by InitPlayout() in implDeviceIOProc(). |
| 1012 // We must actually start playout here in order to have the IOProc |
| 1013 // deleted by calling StopPlayout(). |
| 1014 if (StartPlayout() == -1) { |
| 1015 available = false; |
| 1016 } |
| 1017 |
| 1018 // Cancel effect of initialization |
| 1019 if (StopPlayout() == -1) { |
| 1020 available = false; |
| 1021 } |
| 1022 |
| 1023 return 0; |
| 1024 } |
| 1025 |
| 1026 int32_t AudioDeviceMac::RecordingIsAvailable(bool& available) { |
| 1027 available = true; |
| 1028 |
| 1029 // Try to initialize the recording side |
| 1030 if (InitRecording() == -1) { |
| 1031 available = false; |
| 1032 } |
| 1033 |
| 1034 // We destroy the IOProc created by InitRecording() in implInDeviceIOProc(). |
| 1035 // We must actually start recording here in order to have the IOProc |
| 1036 // deleted by calling StopRecording(). |
| 1037 if (StartRecording() == -1) { |
| 1038 available = false; |
| 1039 } |
| 1040 |
| 1041 // Cancel effect of initialization |
| 1042 if (StopRecording() == -1) { |
| 1043 available = false; |
| 1044 } |
| 1045 |
| 1046 return 0; |
| 1047 } |
| 1048 |
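Both availability probes above run a full init/start/stop cycle because, as the comments note, only StopPlayout()/StopRecording() destroy the IOProc that initialization creates. From the caller's side the probe reduces to a sketch like this (hypothetical helper):

    // Hypothetical caller probing capture support before configuration.
    bool CanCapture(AudioDeviceMac* adm) {
      bool available = false;
      if (adm->RecordingIsAvailable(available) == -1) {
        return false;  // the probe itself failed
      }
      return available;  // device was initialized, started and stopped once
    }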
| 1049 int32_t AudioDeviceMac::InitPlayout() { |
| 1050 CriticalSectionScoped lock(&_critSect); |
| 1051 |
| 1052 if (_playing) { |
| 1053 return -1; |
| 1054 } |
| 1055 |
| 1056 if (!_outputDeviceIsSpecified) { |
| 1057 return -1; |
| 1058 } |
| 1059 |
| 1060 if (_playIsInitialized) { |
| 1061 return 0; |
| 1062 } |
| 1063 |
| 1064 // Initialize the speaker (devices might have been added or removed) |
| 1065 if (InitSpeaker() == -1) { |
| 1066 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 1067 " InitSpeaker() failed"); |
| 1068 } |
| 1069 |
| 1070 if (!MicrophoneIsInitialized()) { |
| 1071 // Make this call to check if we are using |
| 1072 // one or two devices (_twoDevices) |
| 1073 bool available = false; |
| 1074 if (MicrophoneIsAvailable(available) == -1) { |
| 1075 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 1076 " MicrophoneIsAvailable() failed"); |
| 1077 } |
| 1078 } |
| 1079 |
| 1080 PaUtil_FlushRingBuffer(_paRenderBuffer); |
| 1081 |
| 1082 OSStatus err = noErr; |
| 1083 UInt32 size = 0; |
| 1084 _renderDelayOffsetSamples = 0; |
| 1085 _renderDelayUs = 0; |
| 1086 _renderLatencyUs = 0; |
| 1087 _renderDeviceIsAlive = 1; |
| 1088 _doStop = false; |
| 1089 |
| 1090 // The internal microphone of a MacBook Pro is located under the left speaker |
| 1091 // grille. When the internal speakers are in use, we want to fully stereo |
| 1092 // pan to the right. |
| 1093 AudioObjectPropertyAddress propertyAddress = { |
| 1094 kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0}; |
| 1095 if (_macBookPro) { |
| 1096 _macBookProPanRight = false; |
| 1097 Boolean hasProperty = |
| 1098 AudioObjectHasProperty(_outputDeviceID, &propertyAddress); |
| 1099 if (hasProperty) { |
| 1100 UInt32 dataSource = 0; |
| 1101 size = sizeof(dataSource); |
| 1102 WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData( |
| 1103 _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource)); |
| 1104 |
| 1105 if (dataSource == 'ispk') { |
| 1106 _macBookProPanRight = true; |
| 1107 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 1108 "MacBook Pro using internal speakers; stereo" |
| 1109 " panning right"); |
| 1110 } else { |
| 1111 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 1112 "MacBook Pro not using internal speakers"); |
| 1113 } |
| 1114 |
| 1115 // Add a listener to determine if the status changes. |
| 1116 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener( |
| 1117 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); |
| 1118 } |
| 1119 } |
| 1120 |
| 1121 // Get current stream description |
| 1122 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; |
| 1123 memset(&_outStreamFormat, 0, sizeof(_outStreamFormat)); |
| 1124 size = sizeof(_outStreamFormat); |
| 1125 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 1126 _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat)); |
| 1127 |
| 1128 if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) { |
| 1129 logCAMsg(kTraceError, kTraceAudioDevice, _id, |
| 1130 "Unacceptable output stream format -> mFormatID", |
| 1131 (const char*)&_outStreamFormat.mFormatID); |
| 1132 return -1; |
| 1133 } |
| 1134 |
| 1135 if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { |
| 1136 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1137 "Too many channels on output device (mChannelsPerFrame = %d)", |
| 1138 _outStreamFormat.mChannelsPerFrame); |
| 1139 return -1; |
| 1140 } |
| 1141 |
| 1142 if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) { |
| 1143 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1144 "Non-interleaved audio data is not supported.", |
| 1145 "AudioHardware streams should not have this format."); |
| 1146 return -1; |
| 1147 } |
| 1148 |
| 1149 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Ouput stream format:"); |
| 1150 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 1151 "mSampleRate = %f, mChannelsPerFrame = %u", |
| 1152 _outStreamFormat.mSampleRate, |
| 1153 _outStreamFormat.mChannelsPerFrame); |
| 1154 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 1155 "mBytesPerPacket = %u, mFramesPerPacket = %u", |
| 1156 _outStreamFormat.mBytesPerPacket, |
| 1157 _outStreamFormat.mFramesPerPacket); |
| 1158 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 1159 "mBytesPerFrame = %u, mBitsPerChannel = %u", |
| 1160 _outStreamFormat.mBytesPerFrame, |
| 1161 _outStreamFormat.mBitsPerChannel); |
| 1162 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "mFormatFlags = %u", |
| 1163 _outStreamFormat.mFormatFlags); |
| 1164 logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", |
| 1165 (const char*)&_outStreamFormat.mFormatID); |
| 1166 |
| 1167 // Our preferred format to work with. |
| 1168 if (_outStreamFormat.mChannelsPerFrame < 2) { |
| 1169 // Disable stereo playout when we only have one channel on the device. |
| 1170 _playChannels = 1; |
| 1171 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 1172 "Stereo playout unavailable on this device"); |
| 1173 } |
| 1174 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat()); |
| 1175 |
| 1176 // Listen for format changes. |
| 1177 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; |
| 1178 WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener( |
| 1179 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); |
| 1180 |
| 1181 // Listen for processor overloads. |
| 1182 propertyAddress.mSelector = kAudioDeviceProcessorOverload; |
| 1183 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener( |
| 1184 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); |
| 1185 |
| 1186 if (_twoDevices || !_recIsInitialized) { |
| 1187 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID( |
| 1188 _outputDeviceID, deviceIOProc, this, &_deviceIOProcID)); |
| 1189 } |
| 1190 |
| 1191 _playIsInitialized = true; |
| 1192 |
| 1193 return 0; |
| 1194 } |
| 1195 |
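The MacBook Pro branch of InitPlayout() reads kAudioDevicePropertyDataSource and compares it with the four-char code 'ispk' (internal speakers) to decide whether to pan fully right, then registers objectListenerProc so the decision tracks data-source changes. Condensed into a standalone sketch (assumes a valid output AudioDeviceID):

    // Sketch: read the current output data source; 'ispk' means the
    // internal speakers. Returns 0 when unsupported or on failure.
    UInt32 GetOutputDataSource(AudioDeviceID device) {
      AudioObjectPropertyAddress addr = {kAudioDevicePropertyDataSource,
                                         kAudioDevicePropertyScopeOutput, 0};
      if (!AudioObjectHasProperty(device, &addr)) {
        return 0;
      }
      UInt32 dataSource = 0;
      UInt32 size = sizeof(dataSource);
      if (AudioObjectGetPropertyData(device, &addr, 0, NULL, &size,
                                     &dataSource) != noErr) {
        return 0;
      }
      return dataSource;
    }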
| 1196 int32_t AudioDeviceMac::InitRecording() { |
| 1197 CriticalSectionScoped lock(&_critSect); |
| 1198 |
| 1199 if (_recording) { |
| 1200 return -1; |
| 1201 } |
| 1202 |
| 1203 if (!_inputDeviceIsSpecified) { |
| 1204 return -1; |
| 1205 } |
| 1206 |
| 1207 if (_recIsInitialized) { |
| 1208 return 0; |
| 1209 } |
| 1210 |
| 1211 // Initialize the microphone (devices might have been added or removed) |
| 1212 if (InitMicrophone() == -1) { |
| 1213 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 1214 " InitMicrophone() failed"); |
| 1215 } |
| 1216 |
| 1217 if (!SpeakerIsInitialized()) { |
| 1218 // Make this call to check if we are using |
| 1219 // one or two devices (_twoDevices) |
| 1220 bool available = false; |
| 1221 if (SpeakerIsAvailable(available) == -1) { |
| 1222 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 1223 " SpeakerIsAvailable() failed"); |
| 1224 } |
| 1225 } |
| 1226 |
| 1227 OSStatus err = noErr; |
| 1228 UInt32 size = 0; |
| 1229 |
| 1230 PaUtil_FlushRingBuffer(_paCaptureBuffer); |
| 1231 |
| 1232 _captureDelayUs = 0; |
| 1233 _captureLatencyUs = 0; |
| 1234 _captureDeviceIsAlive = 1; |
| 1235 _doStopRec = false; |
| 1236 |
| 1237 // Get current stream description |
| 1238 AudioObjectPropertyAddress propertyAddress = { |
| 1239 kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0}; |
| 1240 memset(&_inStreamFormat, 0, sizeof(_inStreamFormat)); |
| 1241 size = sizeof(_inStreamFormat); |
| 1242 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 1243 _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat)); |
| 1244 |
| 1245 if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) { |
| 1246 logCAMsg(kTraceError, kTraceAudioDevice, _id, |
| 1247 "Unacceptable input stream format -> mFormatID", |
| 1248 (const char*)&_inStreamFormat.mFormatID); |
| 1249 return -1; |
| 1250 } |
| 1251 |
| 1252 if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { |
| 1253 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1254 "Too many channels on input device (mChannelsPerFrame = %d)", |
| 1255 _inStreamFormat.mChannelsPerFrame); |
| 1256 return -1; |
| 1257 } |
| 1258 |
| 1259 const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame * |
| 1260 _inStreamFormat.mSampleRate / 100 * |
| 1261 N_BLOCKS_IO; |
| 1262 if (io_block_size_samples > _captureBufSizeSamples) { |
| 1263 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1264 "Input IO block size (%d) is larger than ring buffer (%u)", |
| 1265 io_block_size_samples, _captureBufSizeSamples); |
| 1266 return -1; |
| 1267 } |
| 1268 |
| 1269 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Input stream format:"); |
| 1270 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 1271 " mSampleRate = %f, mChannelsPerFrame = %u", |
| 1272 _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame); |
| 1273 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 1274 " mBytesPerPacket = %u, mFramesPerPacket = %u", |
| 1275 _inStreamFormat.mBytesPerPacket, |
| 1276 _inStreamFormat.mFramesPerPacket); |
| 1277 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 1278 " mBytesPerFrame = %u, mBitsPerChannel = %u", |
| 1279 _inStreamFormat.mBytesPerFrame, _inStreamFormat.mBitsPerChannel); |
| 1280 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " mFormatFlags = %u", |
| 1281 _inStreamFormat.mFormatFlags); |
| 1282 logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", |
| 1283 (const char*)&_inStreamFormat.mFormatID); |
| 1284 |
| 1285 // Our preferred format to work with |
| 1286 if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) { |
| 1287 _inDesiredFormat.mChannelsPerFrame = 2; |
| 1288 } else { |
| 1289 // Disable stereo recording when we only have one channel on the device. |
| 1290 _inDesiredFormat.mChannelsPerFrame = 1; |
| 1291 _recChannels = 1; |
| 1292 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 1293 "Stereo recording unavailable on this device"); |
| 1294 } |
| 1295 |
| 1296 if (_ptrAudioBuffer) { |
| 1297 // Update audio buffer with the selected parameters |
238 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); | 1298 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); |
239 _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); | 1299 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); |
240 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS); | 1300 } |
241 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS); | 1301 |
242 } | 1302 _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC; |
243 | 1303 _inDesiredFormat.mBytesPerPacket = |
244 int32_t AudioDeviceMac::ActiveAudioLayer( | 1304 _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16); |
245 AudioDeviceModule::AudioLayer& audioLayer) const | 1305 _inDesiredFormat.mFramesPerPacket = 1; |
246 { | 1306 _inDesiredFormat.mBytesPerFrame = |
247 audioLayer = AudioDeviceModule::kPlatformDefaultAudio; | 1307 _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16); |
| 1308 _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8; |
| 1309 |
| 1310 _inDesiredFormat.mFormatFlags = |
| 1311 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; |
| 1312 #ifdef WEBRTC_ARCH_BIG_ENDIAN |
| 1313 _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian; |
| 1314 #endif |
| 1315 _inDesiredFormat.mFormatID = kAudioFormatLinearPCM; |
| 1316 |
| 1317 WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat, |
| 1318 &_captureConverter)); |
| 1319 |
| 1320 // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO) |
| 1321 // TODO(xians): investigate this block. |
| 1322 UInt32 bufByteCount = |
| 1323 (UInt32)((_inStreamFormat.mSampleRate / 1000.0) * 10.0 * N_BLOCKS_IO * |
| 1324 _inStreamFormat.mChannelsPerFrame * sizeof(Float32)); |
| 1325 if (_inStreamFormat.mFramesPerPacket != 0) { |
| 1326 if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) { |
| 1327 bufByteCount = |
| 1328 ((UInt32)(bufByteCount / _inStreamFormat.mFramesPerPacket) + 1) * |
| 1329 _inStreamFormat.mFramesPerPacket; |
| 1330 } |
| 1331 } |
| 1332 |
| 1333 // Ensure the buffer size is within the acceptable range provided by the |
| 1334 // device. |
| 1335 propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange; |
| 1336 AudioValueRange range; |
| 1337 size = sizeof(range); |
| 1338 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 1339 _inputDeviceID, &propertyAddress, 0, NULL, &size, &range)); |
| 1340 if (range.mMinimum > bufByteCount) { |
| 1341 bufByteCount = range.mMinimum; |
| 1342 } else if (range.mMaximum < bufByteCount) { |
| 1343 bufByteCount = range.mMaximum; |
| 1344 } |
| 1345 |
| 1346 propertyAddress.mSelector = kAudioDevicePropertyBufferSize; |
| 1347 size = sizeof(bufByteCount); |
| 1348 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
| 1349 _inputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount)); |
| 1350 |
| 1351 // Get capture device latency |
| 1352 propertyAddress.mSelector = kAudioDevicePropertyLatency; |
| 1353 UInt32 latency = 0; |
| 1354 size = sizeof(UInt32); |
| 1355 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 1356 _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency)); |
| 1357 _captureLatencyUs = (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate); |
| 1358 |
| 1359 // Get capture stream latency |
| 1360 propertyAddress.mSelector = kAudioDevicePropertyStreams; |
| 1361 AudioStreamID stream = 0; |
| 1362 size = sizeof(AudioStreamID); |
| 1363 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 1364 _inputDeviceID, &propertyAddress, 0, NULL, &size, &stream)); |
| 1365 propertyAddress.mSelector = kAudioStreamPropertyLatency; |
| 1366 size = sizeof(UInt32); |
| 1367 latency = 0; |
| 1368 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 1369 _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency)); |
| 1370 _captureLatencyUs += |
| 1371 (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate); |
| 1372 |
| 1373 // Listen for format changes |
| 1374 // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged? |
| 1375 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; |
| 1376 WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener( |
| 1377 _inputDeviceID, &propertyAddress, &objectListenerProc, this)); |
| 1378 |
| 1379 // Listen for processor overloads |
| 1380 propertyAddress.mSelector = kAudioDeviceProcessorOverload; |
| 1381 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener( |
| 1382 _inputDeviceID, &propertyAddress, &objectListenerProc, this)); |
| 1383 |
| 1384 if (_twoDevices) { |
| 1385 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID( |
| 1386 _inputDeviceID, inDeviceIOProc, this, &_inDeviceIOProcID)); |
| 1387 } else if (!_playIsInitialized) { |
| 1388 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID( |
| 1389 _inputDeviceID, deviceIOProc, this, &_deviceIOProcID)); |
| 1390 } |
| 1391 |
| 1392 // Mark recording side as initialized |
| 1393 _recIsInitialized = true; |
| 1394 |
| 1395 return 0; |
| 1396 } |
| 1397 |
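InitRecording() sums the device latency and the first stream's latency, both reported by CoreAudio in frames, converting each with 1.0e6 * frames / sample rate; 96 frames at 48000 Hz, for example, is 2000 microseconds. Factored out as a sketch:

    // Frames to microseconds, as computed for _captureLatencyUs above.
    UInt32 FramesToMicroseconds(UInt32 frames, Float64 sampleRate) {
      return (UInt32)((1.0e6 * frames) / sampleRate);  // 96 @ 48 kHz -> 2000
    }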
| 1398 int32_t AudioDeviceMac::StartRecording() { |
| 1399 CriticalSectionScoped lock(&_critSect); |
| 1400 |
| 1401 if (!_recIsInitialized) { |
| 1402 return -1; |
| 1403 } |
| 1404 |
| 1405 if (_recording) { |
248 return 0; | 1406 return 0; |
249 } | 1407 } |
250 | 1408 |
251 int32_t AudioDeviceMac::Init() | 1409 if (!_initialized) { |
252 { | 1410 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
253 | 1411 " Recording worker thread has not been started"); |
254 CriticalSectionScoped lock(&_critSect); | 1412 return -1; |
255 | 1413 } |
256 if (_initialized) | 1414 |
257 { | 1415 RTC_DCHECK(!capture_worker_thread_.get()); |
258 return 0; | 1416 capture_worker_thread_.reset( |
259 } | 1417 new rtc::PlatformThread(RunCapture, this, "CaptureWorkerThread")); |
260 | 1418 RTC_DCHECK(capture_worker_thread_.get()); |
| 1419 capture_worker_thread_->Start(); |
| 1420 capture_worker_thread_->SetPriority(rtc::kRealtimePriority); |
| 1421 |
| 1422 OSStatus err = noErr; |
| 1423 if (_twoDevices) { |
| 1424 WEBRTC_CA_RETURN_ON_ERR( |
| 1425 AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID)); |
| 1426 } else if (!_playing) { |
| 1427 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID)); |
| 1428 } |
| 1429 |
| 1430 _recording = true; |
| 1431 |
| 1432 return 0; |
| 1433 } |
| 1434 |
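StartRecording() hands the capture loop to an rtc::PlatformThread at realtime priority. RunCapture is the static trampoline that PlatformThread's C-style entry point requires; it is defined later in the file, and its usual shape is sketched here:

    // Static trampoline: PlatformThread takes a plain function pointer,
    // so the object is smuggled through the void* argument.
    bool AudioDeviceMac::RunCapture(void* ptrThis) {
      return static_cast<AudioDeviceMac*>(ptrThis)->CaptureWorkerThread();
    }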
| 1435 int32_t AudioDeviceMac::StopRecording() { |
| 1436 CriticalSectionScoped lock(&_critSect); |
| 1437 |
| 1438 if (!_recIsInitialized) { |
| 1439 return 0; |
| 1440 } |
| 1441 |
| 1442 OSStatus err = noErr; |
| 1443 |
| 1444 // Stop device |
| 1445 int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive); |
| 1446 if (_twoDevices) { |
| 1447 if (_recording && captureDeviceIsAlive == 1) { |
| 1448 _recording = false; |
| 1449 _doStopRec = true; // Signal to io proc to stop audio device |
| 1450 _critSect.Leave(); // Cannot be under lock, risk of deadlock |
| 1451 if (kEventTimeout == _stopEventRec.Wait(2000)) { |
| 1452 CriticalSectionScoped critScoped(&_critSect); |
| 1453 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 1454 " Timed out stopping the capture IOProc. " |
| 1455 "We may have failed to detect a device removal."); |
| 1456 |
| 1457 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID)); |
| 1458 WEBRTC_CA_LOG_WARN( |
| 1459 AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID)); |
| 1460 } |
| 1461 _critSect.Enter(); |
| 1462 _doStopRec = false; |
| 1463 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " Recording stopped"); |
| 1464 } |
| 1465 } else { |
| 1466 // We signal a stop for a shared device even when rendering has |
| 1467 // not yet ended. This is to ensure the IOProc will return early as |
| 1468 // intended (by checking |_recording|) before accessing |
| 1469 // resources we free below (e.g. the capture converter). |
| 1470 // |
 | 1471   // In the case of a shared device, the IOProc will verify
| 1472 // rendering has ended before stopping itself. |
| 1473 if (_recording && captureDeviceIsAlive == 1) { |
| 1474 _recording = false; |
| 1475 _doStop = true; // Signal to io proc to stop audio device |
| 1476 _critSect.Leave(); // Cannot be under lock, risk of deadlock |
| 1477 if (kEventTimeout == _stopEvent.Wait(2000)) { |
| 1478 CriticalSectionScoped critScoped(&_critSect); |
| 1479 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 1480 " Timed out stopping the shared IOProc. " |
| 1481 "We may have failed to detect a device removal."); |
| 1482 |
| 1483 // We assume rendering on a shared device has stopped as well if |
| 1484 // the IOProc times out. |
| 1485 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID)); |
| 1486 WEBRTC_CA_LOG_WARN( |
| 1487 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID)); |
| 1488 } |
| 1489 _critSect.Enter(); |
| 1490 _doStop = false; |
| 1491 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
| 1492 " Recording stopped (shared)"); |
| 1493 } |
| 1494 } |
| 1495 |
| 1496 // Setting this signal will allow the worker thread to be stopped. |
| 1497 AtomicSet32(&_captureDeviceIsAlive, 0); |
| 1498 |
| 1499 if (capture_worker_thread_.get()) { |
| 1500 _critSect.Leave(); |
| 1501 capture_worker_thread_->Stop(); |
| 1502 capture_worker_thread_.reset(); |
| 1503 _critSect.Enter(); |
| 1504 } |
| 1505 |
| 1506 WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter)); |
| 1507 |
| 1508 // Remove listeners. |
| 1509 AudioObjectPropertyAddress propertyAddress = { |
| 1510 kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0}; |
| 1511 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( |
| 1512 _inputDeviceID, &propertyAddress, &objectListenerProc, this)); |
| 1513 |
| 1514 propertyAddress.mSelector = kAudioDeviceProcessorOverload; |
| 1515 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( |
| 1516 _inputDeviceID, &propertyAddress, &objectListenerProc, this)); |
| 1517 |
| 1518 _recIsInitialized = false; |
| 1519 _recording = false; |
| 1520 |
| 1521 return 0; |
| 1522 } |
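
StopRecording() above (and StopPlayout() below) use the same shutdown handshake: clear the running flag under the lock, release the lock, wait up to two seconds for the IOProc to acknowledge, and force-stop the device only on timeout. A sketch of that shape, using standard C++ primitives in place of WebRTC's CriticalSectionScoped and EventWrapper (all names illustrative):

#include <chrono>
#include <condition_variable>
#include <mutex>

std::mutex lock_;
std::condition_variable stop_cv_;
bool recording_ = true;    // the IOProc polls this each callback
bool stop_acked_ = false;  // set by the IOProc after it stops the device

// Called from the IOProc once it has observed recording_ == false.
void AckStopFromIOProc() {
  std::lock_guard<std::mutex> guard(lock_);
  stop_acked_ = true;
  stop_cv_.notify_one();
}

bool StopWithTimeout() {
  std::unique_lock<std::mutex> guard(lock_);
  recording_ = false;
  // wait_for releases the mutex while blocked, mirroring the explicit
  // Leave()/Enter() around _stopEventRec.Wait(2000) above.
  bool acked = stop_cv_.wait_for(guard, std::chrono::seconds(2),
                                 [] { return stop_acked_; });
  if (!acked) {
    // Timed out: the device may have been removed; stop it forcibly,
    // as the code above does with AudioDeviceStop/DestroyIOProcID.
  }
  return acked;
}
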
| 1523 |
| 1524 bool AudioDeviceMac::RecordingIsInitialized() const { |
| 1525 return (_recIsInitialized); |
| 1526 } |
| 1527 |
| 1528 bool AudioDeviceMac::Recording() const { |
| 1529 return (_recording); |
| 1530 } |
| 1531 |
| 1532 bool AudioDeviceMac::PlayoutIsInitialized() const { |
| 1533 return (_playIsInitialized); |
| 1534 } |
| 1535 |
| 1536 int32_t AudioDeviceMac::StartPlayout() { |
| 1537 CriticalSectionScoped lock(&_critSect); |
| 1538 |
| 1539 if (!_playIsInitialized) { |
| 1540 return -1; |
| 1541 } |
| 1542 |
| 1543 if (_playing) { |
| 1544 return 0; |
| 1545 } |
| 1546 |
| 1547 RTC_DCHECK(!render_worker_thread_.get()); |
| 1548 render_worker_thread_.reset( |
| 1549 new rtc::PlatformThread(RunRender, this, "RenderWorkerThread")); |
| 1550 render_worker_thread_->Start(); |
| 1551 render_worker_thread_->SetPriority(rtc::kRealtimePriority); |
| 1552 |
| 1553 if (_twoDevices || !_recording) { |
261 OSStatus err = noErr; | 1554 OSStatus err = noErr; |
262 | 1555 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID)); |
263 _isShutDown = false; | 1556 } |
264 | 1557 _playing = true; |
265 // PortAudio ring buffers require an elementCount which is a power of two. | 1558 |
266 if (_renderBufData == NULL) | 1559 return 0; |
267 { | 1560 } |
268 UInt32 powerOfTwo = 1; | 1561 |
269 while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) | 1562 int32_t AudioDeviceMac::StopPlayout() { |
270 { | 1563 CriticalSectionScoped lock(&_critSect); |
271 powerOfTwo <<= 1; | 1564 |
272 } | 1565 if (!_playIsInitialized) { |
273 _renderBufSizeSamples = powerOfTwo; | |
274 _renderBufData = new SInt16[_renderBufSizeSamples]; | |
275 } | |
276 | |
277 if (_paRenderBuffer == NULL) | |
278 { | |
279 _paRenderBuffer = new PaUtilRingBuffer; | |
280 PaRingBufferSize bufSize = -1; | |
281 bufSize = PaUtil_InitializeRingBuffer(_paRenderBuffer, sizeof(SInt16), | |
282 _renderBufSizeSamples, | |
283 _renderBufData); | |
284 if (bufSize == -1) | |
285 { | |
286 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, | |
287 _id, " PaUtil_InitializeRingBuffer() error"); | |
288 return -1; | |
289 } | |
290 } | |
291 | |
292 if (_captureBufData == NULL) | |
293 { | |
294 UInt32 powerOfTwo = 1; | |
295 while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) | |
296 { | |
297 powerOfTwo <<= 1; | |
298 } | |
299 _captureBufSizeSamples = powerOfTwo; | |
300 _captureBufData = new Float32[_captureBufSizeSamples]; | |
301 } | |
302 | |
303 if (_paCaptureBuffer == NULL) | |
304 { | |
305 _paCaptureBuffer = new PaUtilRingBuffer; | |
306 PaRingBufferSize bufSize = -1; | |
307 bufSize = PaUtil_InitializeRingBuffer(_paCaptureBuffer, | |
308 sizeof(Float32), | |
309 _captureBufSizeSamples, | |
310 _captureBufData); | |
311 if (bufSize == -1) | |
312 { | |
313 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, | |
314 _id, " PaUtil_InitializeRingBuffer() error"); | |
315 return -1; | |
316 } | |
317 } | |
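
Both loops above round a sample count up to the next power of two because the PortAudio ring buffer requires a power-of-two element count (it wraps indices with a mask rather than a modulo). The computation as a standalone helper (helper name ours):

#include <cstdint>

// Smallest power of two >= n, for n >= 1; matches the loops above.
uint32_t NextPowerOfTwo(uint32_t n) {
  uint32_t p = 1;
  while (p < n) {
    p <<= 1;  // 1, 2, 4, 8, ... until it reaches or passes n
  }
  return p;
}
// NextPowerOfTwo(480) == 512; NextPowerOfTwo(1024) == 1024.
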
318 | |
319 kern_return_t kernErr = KERN_SUCCESS; | |
320 kernErr = semaphore_create(mach_task_self(), &_renderSemaphore, | |
321 SYNC_POLICY_FIFO, 0); | |
322 if (kernErr != KERN_SUCCESS) | |
323 { | |
324 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, | |
325 " semaphore_create() error: %d", kernErr); | |
326 return -1; | |
327 } | |
328 | |
329 kernErr = semaphore_create(mach_task_self(), &_captureSemaphore, | |
330 SYNC_POLICY_FIFO, 0); | |
331 if (kernErr != KERN_SUCCESS) | |
332 { | |
333 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, | |
334 " semaphore_create() error: %d", kernErr); | |
335 return -1; | |
336 } | |
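
These two Mach semaphores are the handoff points between the IOProcs and the worker threads, which coordinate through them elsewhere in this file (not shown in this hunk). The counterpart calls look roughly like this, error handling elided:

#include <mach/mach.h>
#include <mach/semaphore.h>

void SemaphoreSketch() {
  semaphore_t sem;
  // FIFO wakeup order, initial count 0: the first wait blocks until
  // someone signals.
  if (semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0) !=
      KERN_SUCCESS) {
    return;
  }
  semaphore_signal(sem);  // producer side: "data is ready"
  semaphore_wait(sem);    // consumer side: block until signaled
  semaphore_destroy(mach_task_self(), sem);
}
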
337 | |
338 // Setting RunLoop to NULL here instructs HAL to manage its own thread for | |
339 // notifications. This was the default behaviour on OS X 10.5 and earlier, | |
340 // but now must be explicitly specified. HAL would otherwise try to use the | |
341 // main thread to issue notifications. | |
342 AudioObjectPropertyAddress propertyAddress = { | |
343 kAudioHardwarePropertyRunLoop, | |
344 kAudioObjectPropertyScopeGlobal, | |
345 kAudioObjectPropertyElementMaster }; | |
346 CFRunLoopRef runLoop = NULL; | |
347 UInt32 size = sizeof(CFRunLoopRef); | |
348 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(kAudioObjectSystemObject, | |
349 &propertyAddress, 0, NULL, size, &runLoop)); | |
350 | |
351 // Listen for any device changes. | |
352 propertyAddress.mSelector = kAudioHardwarePropertyDevices; | |
353 WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener(kAudioObjectSystemObject, | |
354 &propertyAddress, &objectListenerProc, this)); | |
355 | |
356 // Determine if this is a MacBook Pro | |
357 _macBookPro = false; | |
358 _macBookProPanRight = false; | |
359 char buf[128]; | |
360 size_t length = sizeof(buf); | |
361 memset(buf, 0, length); | |
362 | |
363 int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0); | |
364 if (intErr != 0) | |
365 { | |
366 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
367 " Error in sysctlbyname(): %d", err); | |
368 } else | |
369 { | |
370 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
371 " Hardware model: %s", buf); | |
372 if (strncmp(buf, "MacBookPro", 10) == 0) | |
373 { | |
374 _macBookPro = true; | |
375 } | |
376 } | |
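
The MacBook Pro check is an ordinary sysctl string query. A standalone sketch (hw.model returns strings such as "MacBookPro11,1"):

#include <cstring>
#include <sys/sysctl.h>

bool IsMacBookPro() {
  char model[128] = {0};
  size_t length = sizeof(model);
  // On success, writes a NUL-terminated model string into 'model'.
  if (sysctlbyname("hw.model", model, &length, NULL, 0) != 0) {
    return false;  // query failed; errno says why
  }
  return strncmp(model, "MacBookPro", 10) == 0;
}
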
377 | |
378 _playWarning = 0; | |
379 _playError = 0; | |
380 _recWarning = 0; | |
381 _recError = 0; | |
382 | |
383 get_mic_volume_counter_ms_ = 0; | |
384 | |
385 _initialized = true; | |
386 | |
387 return 0; | 1566 return 0; |
388 } | 1567 } |
389 | 1568 |
390 int32_t AudioDeviceMac::Terminate() | 1569 OSStatus err = noErr; |
391 { | 1570 |
392 | 1571 int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive); |
393 if (!_initialized) | 1572 if (_playing && renderDeviceIsAlive == 1) { |
394 { | 1573 // We signal a stop for a shared device even when capturing has not |
395 return 0; | 1574 // yet ended. This is to ensure the IOProc will return early as |
396 } | 1575 // intended (by checking |_playing|) before accessing resources we |
397 | 1576 // free below (e.g. the render converter). |
398 if (_recording) | 1577 // |
399 { | 1578 // In the case of a shared device, the IOProc will verify capturing |
400 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1579 // has ended before stopping itself. |
401 " Recording must be stopped"); | 1580 _playing = false; |
402 return -1; | 1581 _doStop = true; // Signal to io proc to stop audio device |
403 } | 1582 _critSect.Leave(); // Cannot be under lock, risk of deadlock |
404 | 1583 if (kEventTimeout == _stopEvent.Wait(2000)) { |
405 if (_playing) | 1584 CriticalSectionScoped critScoped(&_critSect); |
406 { | 1585 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
407 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1586 " Timed out stopping the render IOProc. " |
408 " Playback must be stopped"); | 1587 "We may have failed to detect a device removal."); |
409 return -1; | 1588 |
410 } | 1589 // We assume capturing on a shared device has stopped as well if the |
411 | 1590 // IOProc times out. |
| 1591 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID)); |
| 1592 WEBRTC_CA_LOG_WARN( |
| 1593 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID)); |
| 1594 } |
412 _critSect.Enter(); | 1595 _critSect.Enter(); |
413 | 1596 _doStop = false; |
414 _mixerManager.Close(); | 1597 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Playout stopped"); |
415 | 1598 } |
416 OSStatus err = noErr; | 1599 |
417 int retVal = 0; | 1600 // Setting this signal will allow the worker thread to be stopped. |
418 | 1601 AtomicSet32(&_renderDeviceIsAlive, 0); |
419 AudioObjectPropertyAddress propertyAddress = { | 1602 if (render_worker_thread_.get()) { |
420 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, | |
421 kAudioObjectPropertyElementMaster }; | |
422     WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(kAudioObjectSystemObject, |
423 &propertyAddress, &objectListenerProc, this)); | |
424 | |
425 err = AudioHardwareUnload(); | |
426 if (err != noErr) | |
427 { | |
428 logCAMsg(kTraceError, kTraceAudioDevice, _id, | |
429 "Error in AudioHardwareUnload()", (const char*) &err); | |
430 retVal = -1; | |
431 } | |
432 | |
433 _isShutDown = true; | |
434 _initialized = false; | |
435 _outputDeviceIsSpecified = false; | |
436 _inputDeviceIsSpecified = false; | |
437 | |
438 _critSect.Leave(); | 1603 _critSect.Leave(); |
439 | 1604 render_worker_thread_->Stop(); |
440 return retVal; | 1605 render_worker_thread_.reset(); |
441 } | 1606 _critSect.Enter(); |
442 | 1607 } |
443 bool AudioDeviceMac::Initialized() const | 1608 |
444 { | 1609 WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter)); |
445 return (_initialized); | 1610 |
446 } | 1611 // Remove listeners. |
447 | 1612 AudioObjectPropertyAddress propertyAddress = { |
448 int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available) | 1613 kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, 0}; |
449 { | 1614 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( |
450 | 1615 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); |
451 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); | 1616 |
452 | 1617 propertyAddress.mSelector = kAudioDeviceProcessorOverload; |
453 // Make an attempt to open up the | 1618 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( |
454 // output mixer corresponding to the currently selected output device. | 1619 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); |
455 // | 1620 |
456 if (!wasInitialized && InitSpeaker() == -1) | 1621 if (_macBookPro) { |
457 { | 1622 Boolean hasProperty = |
458 available = false; | 1623 AudioObjectHasProperty(_outputDeviceID, &propertyAddress); |
459 return 0; | 1624 if (hasProperty) { |
460 } | 1625 propertyAddress.mSelector = kAudioDevicePropertyDataSource; |
461 | 1626 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( |
462 // Given that InitSpeaker was successful, we know that a valid speaker | 1627 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); |
463 // exists. | 1628 } |
464 available = true; | 1629 } |
465 | 1630 |
466 // Close the initialized output mixer | 1631 _playIsInitialized = false; |
467 // | 1632 _playing = false; |
468 if (!wasInitialized) | 1633 |
469 { | 1634 return 0; |
470 _mixerManager.CloseSpeaker(); | 1635 } |
471 } | 1636 |
472 | 1637 int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const { |
473 return 0; | 1638 int32_t renderDelayUs = AtomicGet32(&_renderDelayUs); |
474 } | 1639 delayMS = |
475 | 1640 static_cast<uint16_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5); |
476 int32_t AudioDeviceMac::InitSpeaker() | 1641 return 0; |
477 { | 1642 } |
478 | 1643 |
479 CriticalSectionScoped lock(&_critSect); | 1644 int32_t AudioDeviceMac::RecordingDelay(uint16_t& delayMS) const { |
480 | 1645 int32_t captureDelayUs = AtomicGet32(&_captureDelayUs); |
481 if (_playing) | 1646 delayMS = |
482 { | 1647 static_cast<uint16_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5); |
483 return -1; | 1648 return 0; |
484 } | 1649 } |
485 | 1650 |
486 if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) | 1651 bool AudioDeviceMac::Playing() const { |
487 { | 1652 return (_playing); |
488 return -1; | |
489 } | |
490 | |
491 if (_inputDeviceID == _outputDeviceID) | |
492 { | |
493 _twoDevices = false; | |
494 } else | |
495 { | |
496 _twoDevices = true; | |
497 } | |
498 | |
499 if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) | |
500 { | |
501 return -1; | |
502 } | |
503 | |
504 return 0; | |
505 } | |
506 | |
507 int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available) | |
508 { | |
509 | |
510 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); | |
511 | |
512 // Make an attempt to open up the | |
513     // input mixer corresponding to the currently selected input device.  |
514 // | |
515 if (!wasInitialized && InitMicrophone() == -1) | |
516 { | |
517 available = false; | |
518 return 0; | |
519 } | |
520 | |
521 // Given that InitMicrophone was successful, we know that a valid microphone | |
522 // exists. | |
523 available = true; | |
524 | |
525 // Close the initialized input mixer | |
526 // | |
527 if (!wasInitialized) | |
528 { | |
529 _mixerManager.CloseMicrophone(); | |
530 } | |
531 | |
532 return 0; | |
533 } | |
534 | |
535 int32_t AudioDeviceMac::InitMicrophone() | |
536 { | |
537 | |
538 CriticalSectionScoped lock(&_critSect); | |
539 | |
540 if (_recording) | |
541 { | |
542 return -1; | |
543 } | |
544 | |
545 if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) | |
546 { | |
547 return -1; | |
548 } | |
549 | |
550 if (_inputDeviceID == _outputDeviceID) | |
551 { | |
552 _twoDevices = false; | |
553 } else | |
554 { | |
555 _twoDevices = true; | |
556 } | |
557 | |
558 if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) | |
559 { | |
560 return -1; | |
561 } | |
562 | |
563 return 0; | |
564 } | |
565 | |
566 bool AudioDeviceMac::SpeakerIsInitialized() const | |
567 { | |
568 return (_mixerManager.SpeakerIsInitialized()); | |
569 } | |
570 | |
571 bool AudioDeviceMac::MicrophoneIsInitialized() const | |
572 { | |
573 return (_mixerManager.MicrophoneIsInitialized()); | |
574 } | |
575 | |
576 int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) | |
577 { | |
578 | |
579 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); | |
580 | |
581 // Make an attempt to open up the | |
582 // output mixer corresponding to the currently selected output device. | |
583 // | |
584 if (!wasInitialized && InitSpeaker() == -1) | |
585 { | |
586 // If we end up here it means that the selected speaker has no volume | |
587 // control. | |
588 available = false; | |
589 return 0; | |
590 } | |
591 | |
592     // Given that InitSpeaker was successful, we know that a volume control exists |
593 // | |
594 available = true; | |
595 | |
596 // Close the initialized output mixer | |
597 // | |
598 if (!wasInitialized) | |
599 { | |
600 _mixerManager.CloseSpeaker(); | |
601 } | |
602 | |
603 return 0; | |
604 } | |
605 | |
606 int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume) | |
607 { | |
608 | |
609 return (_mixerManager.SetSpeakerVolume(volume)); | |
610 } | |
611 | |
612 int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const | |
613 { | |
614 | |
615 uint32_t level(0); | |
616 | |
617 if (_mixerManager.SpeakerVolume(level) == -1) | |
618 { | |
619 return -1; | |
620 } | |
621 | |
622 volume = level; | |
623 return 0; | |
624 } | |
625 | |
626 int32_t AudioDeviceMac::SetWaveOutVolume(uint16_t volumeLeft, | |
627 uint16_t volumeRight) | |
628 { | |
629 | |
630 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
631 " API call not supported on this platform"); | |
632 return -1; | |
633 } | |
634 | |
635 int32_t | |
636 AudioDeviceMac::WaveOutVolume(uint16_t& /*volumeLeft*/, | |
637 uint16_t& /*volumeRight*/) const | |
638 { | |
639 | |
640 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
641 " API call not supported on this platform"); | |
642 return -1; | |
643 } | |
644 | |
645 int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const | |
646 { | |
647 | |
648 uint32_t maxVol(0); | |
649 | |
650 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) | |
651 { | |
652 return -1; | |
653 } | |
654 | |
655 maxVolume = maxVol; | |
656 return 0; | |
657 } | |
658 | |
659 int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const | |
660 { | |
661 | |
662 uint32_t minVol(0); | |
663 | |
664 if (_mixerManager.MinSpeakerVolume(minVol) == -1) | |
665 { | |
666 return -1; | |
667 } | |
668 | |
669 minVolume = minVol; | |
670 return 0; | |
671 } | |
672 | |
673 int32_t | |
674 AudioDeviceMac::SpeakerVolumeStepSize(uint16_t& stepSize) const | |
675 { | |
676 | |
677 uint16_t delta(0); | |
678 | |
679 if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) | |
680 { | |
681 return -1; | |
682 } | |
683 | |
684 stepSize = delta; | |
685 return 0; | |
686 } | |
687 | |
688 int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) | |
689 { | |
690 | |
691 bool isAvailable(false); | |
692 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); | |
693 | |
694 // Make an attempt to open up the | |
695 // output mixer corresponding to the currently selected output device. | |
696 // | |
697 if (!wasInitialized && InitSpeaker() == -1) | |
698 { | |
699 // If we end up here it means that the selected speaker has no volume | |
700 // control, hence it is safe to state that there is no mute control | |
701 // already at this stage. | |
702 available = false; | |
703 return 0; | |
704 } | |
705 | |
706 // Check if the selected speaker has a mute control | |
707 // | |
708 _mixerManager.SpeakerMuteIsAvailable(isAvailable); | |
709 | |
710 available = isAvailable; | |
711 | |
712 // Close the initialized output mixer | |
713 // | |
714 if (!wasInitialized) | |
715 { | |
716 _mixerManager.CloseSpeaker(); | |
717 } | |
718 | |
719 return 0; | |
720 } | |
721 | |
722 int32_t AudioDeviceMac::SetSpeakerMute(bool enable) | |
723 { | |
724 return (_mixerManager.SetSpeakerMute(enable)); | |
725 } | |
726 | |
727 int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const | |
728 { | |
729 | |
730 bool muted(0); | |
731 | |
732 if (_mixerManager.SpeakerMute(muted) == -1) | |
733 { | |
734 return -1; | |
735 } | |
736 | |
737 enabled = muted; | |
738 return 0; | |
739 } | |
740 | |
741 int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) | |
742 { | |
743 | |
744 bool isAvailable(false); | |
745 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); | |
746 | |
747 // Make an attempt to open up the | |
748 // input mixer corresponding to the currently selected input device. | |
749 // | |
750 if (!wasInitialized && InitMicrophone() == -1) | |
751 { | |
752 // If we end up here it means that the selected microphone has no volume | |
753     // control, hence it is safe to state that there is no mute control   |
754 // already at this stage. | |
755 available = false; | |
756 return 0; | |
757 } | |
758 | |
759 // Check if the selected microphone has a mute control | |
760 // | |
761 _mixerManager.MicrophoneMuteIsAvailable(isAvailable); | |
762 available = isAvailable; | |
763 | |
764 // Close the initialized input mixer | |
765 // | |
766 if (!wasInitialized) | |
767 { | |
768 _mixerManager.CloseMicrophone(); | |
769 } | |
770 | |
771 return 0; | |
772 } | |
773 | |
774 int32_t AudioDeviceMac::SetMicrophoneMute(bool enable) | |
775 { | |
776 return (_mixerManager.SetMicrophoneMute(enable)); | |
777 } | |
778 | |
779 int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const | |
780 { | |
781 | |
782 bool muted(0); | |
783 | |
784 if (_mixerManager.MicrophoneMute(muted) == -1) | |
785 { | |
786 return -1; | |
787 } | |
788 | |
789 enabled = muted; | |
790 return 0; | |
791 } | |
792 | |
793 int32_t AudioDeviceMac::MicrophoneBoostIsAvailable(bool& available) | |
794 { | |
795 | |
796 bool isAvailable(false); | |
797 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); | |
798 | |
799     // Enumerate all available microphones and make an attempt to open up the |
800 // input mixer corresponding to the currently selected input device. | |
801 // | |
802 if (!wasInitialized && InitMicrophone() == -1) | |
803 { | |
804 // If we end up here it means that the selected microphone has no volume | |
805 // control, hence it is safe to state that there is no boost control | |
806 // already at this stage. | |
807 available = false; | |
808 return 0; | |
809 } | |
810 | |
811 // Check if the selected microphone has a boost control | |
812 // | |
813 _mixerManager.MicrophoneBoostIsAvailable(isAvailable); | |
814 available = isAvailable; | |
815 | |
816 // Close the initialized input mixer | |
817 // | |
818 if (!wasInitialized) | |
819 { | |
820 _mixerManager.CloseMicrophone(); | |
821 } | |
822 | |
823 return 0; | |
824 } | |
825 | |
826 int32_t AudioDeviceMac::SetMicrophoneBoost(bool enable) | |
827 { | |
828 | |
829 return (_mixerManager.SetMicrophoneBoost(enable)); | |
830 } | |
831 | |
832 int32_t AudioDeviceMac::MicrophoneBoost(bool& enabled) const | |
833 { | |
834 | |
835 bool onOff(0); | |
836 | |
837 if (_mixerManager.MicrophoneBoost(onOff) == -1) | |
838 { | |
839 return -1; | |
840 } | |
841 | |
842 enabled = onOff; | |
843 return 0; | |
844 } | |
845 | |
846 int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available) | |
847 { | |
848 | |
849 bool isAvailable(false); | |
850 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); | |
851 | |
852 if (!wasInitialized && InitMicrophone() == -1) | |
853 { | |
854 // Cannot open the specified device | |
855 available = false; | |
856 return 0; | |
857 } | |
858 | |
859 // Check if the selected microphone can record stereo | |
860 // | |
861 _mixerManager.StereoRecordingIsAvailable(isAvailable); | |
862 available = isAvailable; | |
863 | |
864 // Close the initialized input mixer | |
865 // | |
866 if (!wasInitialized) | |
867 { | |
868 _mixerManager.CloseMicrophone(); | |
869 } | |
870 | |
871 return 0; | |
872 } | |
873 | |
874 int32_t AudioDeviceMac::SetStereoRecording(bool enable) | |
875 { | |
876 | |
877 if (enable) | |
878 _recChannels = 2; | |
879 else | |
880 _recChannels = 1; | |
881 | |
882 return 0; | |
883 } | |
884 | |
885 int32_t AudioDeviceMac::StereoRecording(bool& enabled) const | |
886 { | |
887 | |
888 if (_recChannels == 2) | |
889 enabled = true; | |
890 else | |
891 enabled = false; | |
892 | |
893 return 0; | |
894 } | |
895 | |
896 int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) | |
897 { | |
898 | |
899 bool isAvailable(false); | |
900 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); | |
901 | |
902 if (!wasInitialized && InitSpeaker() == -1) | |
903 { | |
904 // Cannot open the specified device | |
905 available = false; | |
906 return 0; | |
907 } | |
908 | |
909     // Check if the selected speaker can play out stereo                  |
910 // | |
911 _mixerManager.StereoPlayoutIsAvailable(isAvailable); | |
912 available = isAvailable; | |
913 | |
914     // Close the initialized output mixer                                 |
915 // | |
916 if (!wasInitialized) | |
917 { | |
918 _mixerManager.CloseSpeaker(); | |
919 } | |
920 | |
921 return 0; | |
922 } | |
923 | |
924 int32_t AudioDeviceMac::SetStereoPlayout(bool enable) | |
925 { | |
926 | |
927 if (enable) | |
928 _playChannels = 2; | |
929 else | |
930 _playChannels = 1; | |
931 | |
932 return 0; | |
933 } | |
934 | |
935 int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const | |
936 { | |
937 | |
938 if (_playChannels == 2) | |
939 enabled = true; | |
940 else | |
941 enabled = false; | |
942 | |
943 return 0; | |
944 } | |
945 | |
946 int32_t AudioDeviceMac::SetAGC(bool enable) | |
947 { | |
948 | |
949 _AGC = enable; | |
950 | |
951 return 0; | |
952 } | |
953 | |
954 bool AudioDeviceMac::AGC() const | |
955 { | |
956 | |
957 return _AGC; | |
958 } | |
959 | |
960 int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) | |
961 { | |
962 | |
963 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); | |
964 | |
965 // Make an attempt to open up the | |
966     // input mixer corresponding to the currently selected input device.  |
967 // | |
968 if (!wasInitialized && InitMicrophone() == -1) | |
969 { | |
970 // If we end up here it means that the selected microphone has no volume | |
971 // control. | |
972 available = false; | |
973 return 0; | |
974 } | |
975 | |
976 // Given that InitMicrophone was successful, we know that a volume control | |
977 // exists | |
978 // | |
979 available = true; | |
980 | |
981 // Close the initialized input mixer | |
982 // | |
983 if (!wasInitialized) | |
984 { | |
985 _mixerManager.CloseMicrophone(); | |
986 } | |
987 | |
988 return 0; | |
989 } | |
990 | |
991 int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) | |
992 { | |
993 | |
994 return (_mixerManager.SetMicrophoneVolume(volume)); | |
995 } | |
996 | |
997 int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const | |
998 { | |
999 | |
1000 uint32_t level(0); | |
1001 | |
1002 if (_mixerManager.MicrophoneVolume(level) == -1) | |
1003 { | |
1004 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1005 " failed to retrive current microphone level"); | |
1006 return -1; | |
1007 } | |
1008 | |
1009 volume = level; | |
1010 return 0; | |
1011 } | |
1012 | |
1013 int32_t | |
1014 AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const | |
1015 { | |
1016 | |
1017 uint32_t maxVol(0); | |
1018 | |
1019 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) | |
1020 { | |
1021 return -1; | |
1022 } | |
1023 | |
1024 maxVolume = maxVol; | |
1025 return 0; | |
1026 } | |
1027 | |
1028 int32_t | |
1029 AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const | |
1030 { | |
1031 | |
1032 uint32_t minVol(0); | |
1033 | |
1034 if (_mixerManager.MinMicrophoneVolume(minVol) == -1) | |
1035 { | |
1036 return -1; | |
1037 } | |
1038 | |
1039 minVolume = minVol; | |
1040 return 0; | |
1041 } | |
1042 | |
1043 int32_t | |
1044 AudioDeviceMac::MicrophoneVolumeStepSize(uint16_t& stepSize) const | |
1045 { | |
1046 | |
1047 uint16_t delta(0); | |
1048 | |
1049 if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) | |
1050 { | |
1051 return -1; | |
1052 } | |
1053 | |
1054 stepSize = delta; | |
1055 return 0; | |
1056 } | |
1057 | |
1058 int16_t AudioDeviceMac::PlayoutDevices() | |
1059 { | |
1060 | |
1061 AudioDeviceID playDevices[MaxNumberDevices]; | |
1062 return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices, | |
1063 MaxNumberDevices); | |
1064 } | |
1065 | |
1066 int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) | |
1067 { | |
1068 CriticalSectionScoped lock(&_critSect); | |
1069 | |
1070 if (_playIsInitialized) | |
1071 { | |
1072 return -1; | |
1073 } | |
1074 | |
1075 AudioDeviceID playDevices[MaxNumberDevices]; | |
1076 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput, | |
1077 playDevices, MaxNumberDevices); | |
1078 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1079 " number of availiable waveform-audio output devices is %u", | |
1080 nDevices); | |
1081 | |
1082 if (index > (nDevices - 1)) | |
1083 { | |
1084 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1085 " device index is out of range [0,%u]", (nDevices - 1)); | |
1086 return -1; | |
1087 } | |
1088 | |
1089 _outputDeviceIndex = index; | |
1090 _outputDeviceIsSpecified = true; | |
1091 | |
1092 return 0; | |
1093 } | |
1094 | |
1095 int32_t AudioDeviceMac::SetPlayoutDevice( | |
1096 AudioDeviceModule::WindowsDeviceType /*device*/) | |
1097 { | |
1098 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1099 "WindowsDeviceType not supported"); | |
1100 return -1; | |
1101 } | |
1102 | |
1103 int32_t AudioDeviceMac::PlayoutDeviceName( | |
1104 uint16_t index, | |
1105 char name[kAdmMaxDeviceNameSize], | |
1106 char guid[kAdmMaxGuidSize]) | |
1107 { | |
1108 | |
1109 const uint16_t nDevices(PlayoutDevices()); | |
1110 | |
1111 if ((index > (nDevices - 1)) || (name == NULL)) | |
1112 { | |
1113 return -1; | |
1114 } | |
1115 | |
1116 memset(name, 0, kAdmMaxDeviceNameSize); | |
1117 | |
1118 if (guid != NULL) | |
1119 { | |
1120 memset(guid, 0, kAdmMaxGuidSize); | |
1121 } | |
1122 | |
1123 return GetDeviceName(kAudioDevicePropertyScopeOutput, index, name); | |
1124 } | |
1125 | |
1126 int32_t AudioDeviceMac::RecordingDeviceName( | |
1127 uint16_t index, | |
1128 char name[kAdmMaxDeviceNameSize], | |
1129 char guid[kAdmMaxGuidSize]) | |
1130 { | |
1131 | |
1132 const uint16_t nDevices(RecordingDevices()); | |
1133 | |
1134 if ((index > (nDevices - 1)) || (name == NULL)) | |
1135 { | |
1136 return -1; | |
1137 } | |
1138 | |
1139 memset(name, 0, kAdmMaxDeviceNameSize); | |
1140 | |
1141 if (guid != NULL) | |
1142 { | |
1143 memset(guid, 0, kAdmMaxGuidSize); | |
1144 } | |
1145 | |
1146 return GetDeviceName(kAudioDevicePropertyScopeInput, index, name); | |
1147 } | |
1148 | |
1149 int16_t AudioDeviceMac::RecordingDevices() | |
1150 { | |
1151 | |
1152 AudioDeviceID recDevices[MaxNumberDevices]; | |
1153 return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices, | |
1154 MaxNumberDevices); | |
1155 } | |
1156 | |
1157 int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) | |
1158 { | |
1159 | |
1160 if (_recIsInitialized) | |
1161 { | |
1162 return -1; | |
1163 } | |
1164 | |
1165 AudioDeviceID recDevices[MaxNumberDevices]; | |
1166 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput, | |
1167 recDevices, MaxNumberDevices); | |
1168 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1169 " number of availiable waveform-audio input devices is %u", | |
1170 nDevices); | |
1171 | |
1172 if (index > (nDevices - 1)) | |
1173 { | |
1174 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1175 " device index is out of range [0,%u]", (nDevices - 1)); | |
1176 return -1; | |
1177 } | |
1178 | |
1179 _inputDeviceIndex = index; | |
1180 _inputDeviceIsSpecified = true; | |
1181 | |
1182 return 0; | |
1183 } | |
1184 | |
1185 | |
1186 int32_t | |
1187 AudioDeviceMac::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType /*device*/) |
1188 { | |
1189 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1190 "WindowsDeviceType not supported"); | |
1191 return -1; | |
1192 } | |
1193 | |
1194 int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) | |
1195 { | |
1196 | |
1197 available = true; | |
1198 | |
1199 // Try to initialize the playout side | |
1200 if (InitPlayout() == -1) | |
1201 { | |
1202 available = false; | |
1203 } | |
1204 | |
1205 // We destroy the IOProc created by InitPlayout() in implDeviceIOProc(). | |
1206 // We must actually start playout here in order to have the IOProc | |
1207 // deleted by calling StopPlayout(). | |
1208 if (StartPlayout() == -1) | |
1209 { | |
1210 available = false; | |
1211 } | |
1212 | |
1213 // Cancel effect of initialization | |
1214 if (StopPlayout() == -1) | |
1215 { | |
1216 available = false; | |
1217 } | |
1218 | |
1219 return 0; | |
1220 } | |
1221 | |
1222 int32_t AudioDeviceMac::RecordingIsAvailable(bool& available) | |
1223 { | |
1224 | |
1225 available = true; | |
1226 | |
1227 // Try to initialize the recording side | |
1228 if (InitRecording() == -1) | |
1229 { | |
1230 available = false; | |
1231 } | |
1232 | |
1233 // We destroy the IOProc created by InitRecording() in implInDeviceIOProc(). | |
1234 // We must actually start recording here in order to have the IOProc | |
1235 // deleted by calling StopRecording(). | |
1236 if (StartRecording() == -1) | |
1237 { | |
1238 available = false; | |
1239 } | |
1240 | |
1241 // Cancel effect of initialization | |
1242 if (StopRecording() == -1) | |
1243 { | |
1244 available = false; | |
1245 } | |
1246 | |
1247 return 0; | |
1248 } | |
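
Both availability probes follow the same pattern: initialize, start, and stop the full path, reporting available only if nothing failed. The start/stop pair runs even after a failed init so the IOProc created by the init step is destroyed again. In caller terms, a sketch against this module's own API (ProbeRecording is ours):

// adm is a constructed AudioDeviceMac with a recording device selected.
bool ProbeRecording(webrtc::AudioDeviceMac& adm) {
  bool available = true;
  if (adm.InitRecording() == -1) available = false;
  // Start and stop unconditionally so StopRecording() can destroy the
  // IOProc that InitRecording() created.
  if (adm.StartRecording() == -1) available = false;
  if (adm.StopRecording() == -1) available = false;
  return available;
}
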
1249 | |
1250 int32_t AudioDeviceMac::InitPlayout() | |
1251 { | |
1252 CriticalSectionScoped lock(&_critSect); | |
1253 | |
1254 if (_playing) | |
1255 { | |
1256 return -1; | |
1257 } | |
1258 | |
1259 if (!_outputDeviceIsSpecified) | |
1260 { | |
1261 return -1; | |
1262 } | |
1263 | |
1264 if (_playIsInitialized) | |
1265 { | |
1266 return 0; | |
1267 } | |
1268 | |
1269 // Initialize the speaker (devices might have been added or removed) | |
1270 if (InitSpeaker() == -1) | |
1271 { | |
1272 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1273 " InitSpeaker() failed"); | |
1274 } | |
1275 | |
1276 if (!MicrophoneIsInitialized()) | |
1277 { | |
1278 // Make this call to check if we are using | |
1279 // one or two devices (_twoDevices) | |
1280 bool available = false; | |
1281 if (MicrophoneIsAvailable(available) == -1) | |
1282 { | |
1283 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1284 " MicrophoneIsAvailable() failed"); | |
1285 } | |
1286 } | |
1287 | |
1288 PaUtil_FlushRingBuffer(_paRenderBuffer); | |
1289 | |
1290 OSStatus err = noErr; | |
1291 UInt32 size = 0; | |
1292 _renderDelayOffsetSamples = 0; | |
1293 _renderDelayUs = 0; | |
1294 _renderLatencyUs = 0; | |
1295 _renderDeviceIsAlive = 1; | |
1296 _doStop = false; | |
1297 | |
1298     // The internal microphone of a MacBook Pro is located under the left speaker |
1299 // grille. When the internal speakers are in use, we want to fully stereo | |
1300 // pan to the right. | |
1301 AudioObjectPropertyAddress | |
1302 propertyAddress = { kAudioDevicePropertyDataSource, | |
1303 kAudioDevicePropertyScopeOutput, 0 }; | |
1304 if (_macBookPro) | |
1305 { | |
1306 _macBookProPanRight = false; | |
1307 Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID, | |
1308 &propertyAddress); | |
1309 if (hasProperty) | |
1310 { | |
1311 UInt32 dataSource = 0; | |
1312 size = sizeof(dataSource); | |
1313 WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData(_outputDeviceID, | |
1314 &propertyAddress, 0, NULL, &size, &dataSource)); | |
1315 | |
1316 if (dataSource == 'ispk') | |
1317 { | |
1318 _macBookProPanRight = true; | |
1319 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, | |
1320 _id, | |
1321 "MacBook Pro using internal speakers; stereo" | |
1322 " panning right"); | |
1323 } else | |
1324 { | |
1325 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, | |
1326 _id, "MacBook Pro not using internal speakers"); | |
1327 } | |
1328 | |
1329 // Add a listener to determine if the status changes. | |
1330 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID, | |
1331 &propertyAddress, &objectListenerProc, this)); | |
1332 } | |
1333 } | |
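
Reading the active output data source is a plain property query; the result is a four-character code, and 'ispk' marks the internal speakers. The check above, reduced to a free function (name ours):

#include <CoreAudio/CoreAudio.h>

bool UsesInternalSpeakers(AudioDeviceID device) {
  AudioObjectPropertyAddress addr = {kAudioDevicePropertyDataSource,
                                     kAudioDevicePropertyScopeOutput, 0};
  if (!AudioObjectHasProperty(device, &addr)) {
    return false;  // no selectable data source on this device
  }
  UInt32 dataSource = 0;
  UInt32 size = sizeof(dataSource);
  if (AudioObjectGetPropertyData(device, &addr, 0, NULL, &size,
                                 &dataSource) != noErr) {
    return false;
  }
  return dataSource == 'ispk';  // four-char code for internal speakers
}
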
1334 | |
1335 // Get current stream description | |
1336 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; | |
1337 memset(&_outStreamFormat, 0, sizeof(_outStreamFormat)); | |
1338 size = sizeof(_outStreamFormat); | |
1339 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, | |
1340 &propertyAddress, 0, NULL, &size, &_outStreamFormat)); | |
1341 | |
1342 if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) | |
1343 { | |
1344 logCAMsg(kTraceError, kTraceAudioDevice, _id, | |
1345 "Unacceptable output stream format -> mFormatID", | |
1346 (const char *) &_outStreamFormat.mFormatID); | |
1347 return -1; | |
1348 } | |
1349 | |
1350 if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) | |
1351 { | |
1352 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1353 "Too many channels on output device (mChannelsPerFrame = %d)", | |
1354 _outStreamFormat.mChannelsPerFrame); | |
1355 return -1; | |
1356 } | |
1357 | |
1358 if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) | |
1359 { | |
1360 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1361 "Non-interleaved audio data is not supported.", | |
1362 "AudioHardware streams should not have this format."); | |
1363 return -1; | |
1364 } | |
1365 | |
1366 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1367 "Ouput stream format:"); | |
1368 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1369 "mSampleRate = %f, mChannelsPerFrame = %u", | |
1370 _outStreamFormat.mSampleRate, | |
1371 _outStreamFormat.mChannelsPerFrame); | |
1372 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1373 "mBytesPerPacket = %u, mFramesPerPacket = %u", | |
1374 _outStreamFormat.mBytesPerPacket, | |
1375 _outStreamFormat.mFramesPerPacket); | |
1376 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1377 "mBytesPerFrame = %u, mBitsPerChannel = %u", | |
1378 _outStreamFormat.mBytesPerFrame, | |
1379 _outStreamFormat.mBitsPerChannel); | |
1380 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1381 "mFormatFlags = %u", | |
1382 _outStreamFormat.mFormatFlags); | |
1383 logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", | |
1384 (const char *) &_outStreamFormat.mFormatID); | |
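
logCAMsg() is handed (const char*)&_outStreamFormat.mFormatID because a CoreAudio format ID is a four-character code packed into a UInt32; printing its bytes recovers text such as 'lpcm'. A sketch of that decoding (name ours; the byte swap is the subtle part, since display order only matches memory order on big-endian builds):

#include <CoreFoundation/CFByteOrder.h>
#include <cstdint>
#include <cstdio>

// Print a four-char code, e.g. kAudioFormatLinearPCM -> "lpcm".
void PrintFourCC(uint32_t code) {
  uint32_t big = CFSwapInt32HostToBig(code);  // bytes into display order
  const char* c = reinterpret_cast<const char*>(&big);
  std::printf("%c%c%c%c\n", c[0], c[1], c[2], c[3]);
}
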
1385 | |
1386 // Our preferred format to work with. | |
1387 if (_outStreamFormat.mChannelsPerFrame < 2) | |
1388 { | |
1389 // Disable stereo playout when we only have one channel on the device. | |
1390 _playChannels = 1; | |
1391 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1392 "Stereo playout unavailable on this device"); | |
1393 } | |
1394 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat()); | |
1395 | |
1396 // Listen for format changes. | |
1397 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; | |
1398 WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(_outputDeviceID, | |
1399 &propertyAddress, | |
1400 &objectListenerProc, | |
1401 this)); | |
1402 | |
1403 // Listen for processor overloads. | |
1404 propertyAddress.mSelector = kAudioDeviceProcessorOverload; | |
1405 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_outputDeviceID, | |
1406 &propertyAddress, | |
1407 &objectListenerProc, | |
1408 this)); | |
1409 | |
1410 if (_twoDevices || !_recIsInitialized) | |
1411 { | |
1412 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_outputDeviceID, | |
1413 deviceIOProc, | |
1414 this, | |
1415 &_deviceIOProcID)); | |
1416 } | |
1417 | |
1418 _playIsInitialized = true; | |
1419 | |
1420 return 0; | |
1421 } | |
1422 | |
1423 int32_t AudioDeviceMac::InitRecording() | |
1424 { | |
1425 | |
1426 CriticalSectionScoped lock(&_critSect); | |
1427 | |
1428 if (_recording) | |
1429 { | |
1430 return -1; | |
1431 } | |
1432 | |
1433 if (!_inputDeviceIsSpecified) | |
1434 { | |
1435 return -1; | |
1436 } | |
1437 | |
1438 if (_recIsInitialized) | |
1439 { | |
1440 return 0; | |
1441 } | |
1442 | |
1443 // Initialize the microphone (devices might have been added or removed) | |
1444 if (InitMicrophone() == -1) | |
1445 { | |
1446 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1447 " InitMicrophone() failed"); | |
1448 } | |
1449 | |
1450 if (!SpeakerIsInitialized()) | |
1451 { | |
1452 // Make this call to check if we are using | |
1453 // one or two devices (_twoDevices) | |
1454 bool available = false; | |
1455 if (SpeakerIsAvailable(available) == -1) | |
1456 { | |
1457 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1458 " SpeakerIsAvailable() failed"); | |
1459 } | |
1460 } | |
1461 | |
1462 OSStatus err = noErr; | |
1463 UInt32 size = 0; | |
1464 | |
1465 PaUtil_FlushRingBuffer(_paCaptureBuffer); | |
1466 | |
1467 _captureDelayUs = 0; | |
1468 _captureLatencyUs = 0; | |
1469 _captureDeviceIsAlive = 1; | |
1470 _doStopRec = false; | |
1471 | |
1472 // Get current stream description | |
1473 AudioObjectPropertyAddress | |
1474 propertyAddress = { kAudioDevicePropertyStreamFormat, | |
1475 kAudioDevicePropertyScopeInput, 0 }; | |
1476 memset(&_inStreamFormat, 0, sizeof(_inStreamFormat)); | |
1477 size = sizeof(_inStreamFormat); | |
1478 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, | |
1479 &propertyAddress, 0, NULL, &size, &_inStreamFormat)); | |
1480 | |
1481 if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) | |
1482 { | |
1483 logCAMsg(kTraceError, kTraceAudioDevice, _id, | |
1484 "Unacceptable input stream format -> mFormatID", | |
1485 (const char *) &_inStreamFormat.mFormatID); | |
1486 return -1; | |
1487 } | |
1488 | |
1489 if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) | |
1490 { | |
1491 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1492 "Too many channels on input device (mChannelsPerFrame = %d)", | |
1493 _inStreamFormat.mChannelsPerFrame); | |
1494 return -1; | |
1495 } | |
1496 | |
1497 const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame * | |
1498 _inStreamFormat.mSampleRate / 100 * N_BLOCKS_IO; | |
1499 if (io_block_size_samples > _captureBufSizeSamples) | |
1500 { | |
1501 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1502 "Input IO block size (%d) is larger than ring buffer (%u)", | |
1503 io_block_size_samples, _captureBufSizeSamples); | |
1504 return -1; | |
1505 } | |
1506 | |
1507 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1508 " Input stream format:"); | |
1509 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1510 " mSampleRate = %f, mChannelsPerFrame = %u", | |
1511                  _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame); |
1512 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1513 " mBytesPerPacket = %u, mFramesPerPacket = %u", | |
1514 _inStreamFormat.mBytesPerPacket, | |
1515 _inStreamFormat.mFramesPerPacket); | |
1516 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1517 " mBytesPerFrame = %u, mBitsPerChannel = %u", | |
1518 _inStreamFormat.mBytesPerFrame, | |
1519 _inStreamFormat.mBitsPerChannel); | |
1520 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1521 " mFormatFlags = %u", | |
1522 _inStreamFormat.mFormatFlags); | |
1523 logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", | |
1524 (const char *) &_inStreamFormat.mFormatID); | |
1525 | |
1526 // Our preferred format to work with | |
1527 if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) | |
1528 { | |
1529 _inDesiredFormat.mChannelsPerFrame = 2; | |
1530 } else | |
1531 { | |
1532 // Disable stereo recording when we only have one channel on the device. | |
1533 _inDesiredFormat.mChannelsPerFrame = 1; | |
1534 _recChannels = 1; | |
1535 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
1536 "Stereo recording unavailable on this device"); | |
1537 } | |
1538 | |
1539 if (_ptrAudioBuffer) | |
1540 { | |
1541 // Update audio buffer with the selected parameters | |
1542 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); | |
1543 _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels); | |
1544 } | |
1545 | |
1546 _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC; | |
1547 _inDesiredFormat.mBytesPerPacket = _inDesiredFormat.mChannelsPerFrame | |
1548 * sizeof(SInt16); | |
1549 _inDesiredFormat.mFramesPerPacket = 1; | |
1550 _inDesiredFormat.mBytesPerFrame = _inDesiredFormat.mChannelsPerFrame | |
1551 * sizeof(SInt16); | |
1552 _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8; | |
1553 | |
1554 _inDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | |
1555 | kLinearPCMFormatFlagIsPacked; | |
1556 #ifdef WEBRTC_ARCH_BIG_ENDIAN | |
1557 _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian; | |
1558 #endif | |
1559 _inDesiredFormat.mFormatID = kAudioFormatLinearPCM; | |
1560 | |
1561     WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat, |
1562 &_captureConverter)); | |
1563 | |
1564 // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO) | |
1565 // TODO(xians): investigate this block. | |
1566 UInt32 bufByteCount = (UInt32)((_inStreamFormat.mSampleRate / 1000.0) | |
1567 * 10.0 * N_BLOCKS_IO * _inStreamFormat.mChannelsPerFrame | |
1568 * sizeof(Float32)); | |
1569 if (_inStreamFormat.mFramesPerPacket != 0) | |
1570 { | |
1571 if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) | |
1572 { | |
1573 bufByteCount = ((UInt32)(bufByteCount | |
1574 / _inStreamFormat.mFramesPerPacket) + 1) | |
1575 * _inStreamFormat.mFramesPerPacket; | |
1576 } | |
1577 } | |
1578 | |
1579     // Ensure the buffer size is within the acceptable range provided by the device. |
1580 propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange; | |
1581 AudioValueRange range; | |
1582 size = sizeof(range); | |
1583 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, | |
1584 &propertyAddress, 0, NULL, &size, &range)); | |
1585 if (range.mMinimum > bufByteCount) | |
1586 { | |
1587 bufByteCount = range.mMinimum; | |
1588 } else if (range.mMaximum < bufByteCount) | |
1589 { | |
1590 bufByteCount = range.mMaximum; | |
1591 } | |
1592 | |
1593 propertyAddress.mSelector = kAudioDevicePropertyBufferSize; | |
1594 size = sizeof(bufByteCount); | |
1595 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_inputDeviceID, | |
1596 &propertyAddress, 0, NULL, size, &bufByteCount)); | |
1597 | |
1598 // Get capture device latency | |
1599 propertyAddress.mSelector = kAudioDevicePropertyLatency; | |
1600 UInt32 latency = 0; | |
1601 size = sizeof(UInt32); | |
1602 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, | |
1603 &propertyAddress, 0, NULL, &size, &latency)); | |
1604 _captureLatencyUs = (UInt32)((1.0e6 * latency) | |
1605 / _inStreamFormat.mSampleRate); | |
1606 | |
1607 // Get capture stream latency | |
1608 propertyAddress.mSelector = kAudioDevicePropertyStreams; | |
1609 AudioStreamID stream = 0; | |
1610 size = sizeof(AudioStreamID); | |
1611 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, | |
1612 &propertyAddress, 0, NULL, &size, &stream)); | |
1613 propertyAddress.mSelector = kAudioStreamPropertyLatency; | |
1614 size = sizeof(UInt32); | |
1615 latency = 0; | |
1616 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_inputDeviceID, | |
1617 &propertyAddress, 0, NULL, &size, &latency)); | |
1618 _captureLatencyUs += (UInt32)((1.0e6 * latency) | |
1619 / _inStreamFormat.mSampleRate); | |
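
Both latency queries return a frame count, converted against the stream's sample rate and summed: latency_us = 1e6 * frames / sampleRate. As a helper (name ours):

#include <CoreAudio/CoreAudioTypes.h>

// Convert a latency reported in frames to microseconds.
// e.g. 128 frames at 48 kHz -> (UInt32)(1e6 * 128 / 48000) = 2666 us.
static UInt32 FramesToMicroseconds(UInt32 frames, Float64 sampleRate) {
  return (UInt32)((1.0e6 * frames) / sampleRate);
}
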
1620 | |
1621 // Listen for format changes | |
1622 // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged? | |
1623 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; | |
1624 WEBRTC_CA_RETURN_ON_ERR(AudioObjectAddPropertyListener(_inputDeviceID, | |
1625 &propertyAddress, &objectListenerProc, this)); | |
1626 | |
1627 // Listen for processor overloads | |
1628 propertyAddress.mSelector = kAudioDeviceProcessorOverload; | |
1629 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(_inputDeviceID, | |
1630 &propertyAddress, &objectListenerProc, this)); | |
1631 | |
1632 if (_twoDevices) | |
1633 { | |
1634 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID, | |
1635 inDeviceIOProc, this, &_inDeviceIOProcID)); | |
1636 } else if (!_playIsInitialized) | |
1637 { | |
1638 WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(_inputDeviceID, | |
1639 deviceIOProc, this, &_deviceIOProcID)); | |
1640 } | |
1641 | |
1642 // Mark recording side as initialized | |
1643 _recIsInitialized = true; | |
1644 | |
1645 return 0; | |
1646 } | |
1647 | |
1648 int32_t AudioDeviceMac::StartRecording() | |
1649 { | |
1650 | |
1651 CriticalSectionScoped lock(&_critSect); | |
1652 | |
1653 if (!_recIsInitialized) | |
1654 { | |
1655 return -1; | |
1656 } | |
1657 | |
1658 if (_recording) | |
1659 { | |
1660 return 0; | |
1661 } | |
1662 | |
1663 if (!_initialized) | |
1664 { | |
1665 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
1666 " Recording worker thread has not been started"); | |
1667 return -1; | |
1668 } | |
1669 | |
1670 RTC_DCHECK(!capture_worker_thread_.get()); | |
1671 capture_worker_thread_.reset( | |
1672 new rtc::PlatformThread(RunCapture, this, "CaptureWorkerThread")); | |
1673 RTC_DCHECK(capture_worker_thread_.get()); | |
1674 capture_worker_thread_->Start(); | |
1675 capture_worker_thread_->SetPriority(rtc::kRealtimePriority); | |
1676 | |
1677 OSStatus err = noErr; | |
1678 if (_twoDevices) | |
1679 { | |
1680         WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID)); |
1681 } else if (!_playing) | |
1682 { | |
1683         WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID)); |
1684 } | |
1685 | |
1686 _recording = true; | |
1687 | |
1688 return 0; | |
1689 } | |
1690 | |
1691 int32_t AudioDeviceMac::StopRecording() | |
1692 { | |
1693 | |
1694 CriticalSectionScoped lock(&_critSect); | |
1695 | |
1696 if (!_recIsInitialized) | |
1697 { | |
1698 return 0; | |
1699 } | |
1700 | |
1701 OSStatus err = noErr; | |
1702 | |
1703 // Stop device | |
1704 int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive); | |
1705 if (_twoDevices) | |
1706 { | |
1707 if (_recording && captureDeviceIsAlive == 1) | |
1708 { | |
1709 _recording = false; | |
1710 _doStopRec = true; // Signal to io proc to stop audio device | |
1711 _critSect.Leave(); // Cannot be under lock, risk of deadlock | |
1712 if (kEventTimeout == _stopEventRec.Wait(2000)) | |
1713 { | |
1714 CriticalSectionScoped critScoped(&_critSect); | |
1715 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1716 " Timed out stopping the capture IOProc. " | |
1717 "We may have failed to detect a device removal."); | |
1718 | |
1719 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, | |
1720 _inDeviceIOProcID)); | |
1721 WEBRTC_CA_LOG_WARN( | |
1722 AudioDeviceDestroyIOProcID(_inputDeviceID, | |
1723 _inDeviceIOProcID)); | |
1724 } | |
1725 _critSect.Enter(); | |
1726 _doStopRec = false; | |
1727 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
1728 " Recording stopped"); | |
1729 } | |
1730 } | |
1731 else | |
1732 { | |
1733 // We signal a stop for a shared device even when rendering has | |
1734 // not yet ended. This is to ensure the IOProc will return early as | |
1735 // intended (by checking |_recording|) before accessing | |
1736 // resources we free below (e.g. the capture converter). | |
1737 // | |
1738     // In the case of a shared device, the IOProc will verify            |
1739 // rendering has ended before stopping itself. | |
1740 if (_recording && captureDeviceIsAlive == 1) | |
1741 { | |
1742 _recording = false; | |
1743 _doStop = true; // Signal to io proc to stop audio device | |
1744 _critSect.Leave(); // Cannot be under lock, risk of deadlock | |
1745 if (kEventTimeout == _stopEvent.Wait(2000)) | |
1746 { | |
1747 CriticalSectionScoped critScoped(&_critSect); | |
1748 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1749 " Timed out stopping the shared IOProc. " | |
1750 "We may have failed to detect a device removal."); | |
1751 | |
1752 // We assume rendering on a shared device has stopped as well if | |
1753 // the IOProc times out. | |
1754 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, | |
1755 _deviceIOProcID)); | |
1756 WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID, | |
1757 _deviceIOProcID)); | |
1758 } | |
1759 _critSect.Enter(); | |
1760 _doStop = false; | |
1761 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
1762 " Recording stopped (shared)"); | |
1763 } | |
1764 } | |
1765 | |
1766 // Setting this signal will allow the worker thread to be stopped. | |
1767 AtomicSet32(&_captureDeviceIsAlive, 0); | |
1768 | |
1769 if (capture_worker_thread_.get()) { | |
1770 _critSect.Leave(); | |
1771 capture_worker_thread_->Stop(); | |
1772 capture_worker_thread_.reset(); | |
1773 _critSect.Enter(); | |
1774 } | |
1775 | |
1776 WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter)); | |
1777 | |
1778 // Remove listeners. | |
1779 AudioObjectPropertyAddress | |
1780 propertyAddress = { kAudioDevicePropertyStreamFormat, | |
1781 kAudioDevicePropertyScopeInput, 0 }; | |
1782 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID, | |
1783 &propertyAddress, &objectListenerProc, this)); | |
1784 | |
1785 propertyAddress.mSelector = kAudioDeviceProcessorOverload; | |
1786 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_inputDeviceID, | |
1787 &propertyAddress, &objectListenerProc, this)); | |
1788 | |
1789 _recIsInitialized = false; | |
1790 _recording = false; | |
1791 | |
1792 return 0; | |
1793 } | |
1794 | |
1795 bool AudioDeviceMac::RecordingIsInitialized() const | |
1796 { | |
1797 return (_recIsInitialized); | |
1798 } | |
1799 | |
1800 bool AudioDeviceMac::Recording() const | |
1801 { | |
1802 return (_recording); | |
1803 } | |
1804 | |
1805 bool AudioDeviceMac::PlayoutIsInitialized() const | |
1806 { | |
1807 return (_playIsInitialized); | |
1808 } | |
1809 | |
1810 int32_t AudioDeviceMac::StartPlayout() | |
1811 { | |
1812 | |
1813 CriticalSectionScoped lock(&_critSect); | |
1814 | |
1815 if (!_playIsInitialized) | |
1816 { | |
1817 return -1; | |
1818 } | |
1819 | |
1820 if (_playing) | |
1821 { | |
1822 return 0; | |
1823 } | |
1824 | |
1825 RTC_DCHECK(!render_worker_thread_.get()); | |
1826 render_worker_thread_.reset( | |
1827 new rtc::PlatformThread(RunRender, this, "RenderWorkerThread")); | |
1828 render_worker_thread_->Start(); | |
1829 render_worker_thread_->SetPriority(rtc::kRealtimePriority); | |
1830 | |
1831 if (_twoDevices || !_recording) | |
1832 { | |
1833 OSStatus err = noErr; | |
1834         WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID)); |
1835 } | |
1836 _playing = true; | |
1837 | |
1838 return 0; | |
1839 } | |
1840 | |
1841 int32_t AudioDeviceMac::StopPlayout() | |
1842 { | |
1843 | |
1844 CriticalSectionScoped lock(&_critSect); | |
1845 | |
1846 if (!_playIsInitialized) | |
1847 { | |
1848 return 0; | |
1849 } | |
1850 | |
1851 OSStatus err = noErr; | |
1852 | |
1853 int32_t renderDeviceIsAlive = AtomicGet32(&_renderDeviceIsAlive); | |
1854 if (_playing && renderDeviceIsAlive == 1) | |
1855 { | |
1856 // We signal a stop for a shared device even when capturing has not | |
1857 // yet ended. This is to ensure the IOProc will return early as | |
1858 // intended (by checking |_playing|) before accessing resources we | |
1859 // free below (e.g. the render converter). | |
1860 // | |
1861 // In the case of a shared device, the IOProc will verify capturing | |
1862 // has ended before stopping itself. | |
1863 _playing = false; | |
1864 _doStop = true; // Signal to io proc to stop audio device | |
1865 _critSect.Leave(); // Cannot be under lock, risk of deadlock | |
1866 if (kEventTimeout == _stopEvent.Wait(2000)) | |
1867 { | |
1868 CriticalSectionScoped critScoped(&_critSect); | |
1869 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
1870 " Timed out stopping the render IOProc. " | |
1871 "We may have failed to detect a device removal."); | |
1872 | |
1873 // We assume capturing on a shared device has stopped as well if the | |
1874 // IOProc times out. | |
1875 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, | |
1876 _deviceIOProcID)); | |
1877 WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID, | |
1878 _deviceIOProcID)); | |
1879 } | |
1880 _critSect.Enter(); | |
1881 _doStop = false; | |
1882 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
1883 "Playout stopped"); | |
1884 } | |
1885 | |
1886 // Setting this signal will allow the worker thread to be stopped. | |
1887 AtomicSet32(&_renderDeviceIsAlive, 0); | |
1888 if (render_worker_thread_.get()) { | |
1889 _critSect.Leave(); | |
1890 render_worker_thread_->Stop(); | |
1891 render_worker_thread_.reset(); | |
1892 _critSect.Enter(); | |
1893 } | |
1894 | |
1895 WEBRTC_CA_LOG_WARN(AudioConverterDispose(_renderConverter)); | |
1896 | |
1897 // Remove listeners. | |
1898 AudioObjectPropertyAddress propertyAddress = { | |
1899 kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, | |
1900 0 }; | |
1901 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID, | |
1902 &propertyAddress, &objectListenerProc, this)); | |
1903 | |
1904 propertyAddress.mSelector = kAudioDeviceProcessorOverload; | |
1905 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID, | |
1906 &propertyAddress, &objectListenerProc, this)); | |
1907 | |
1908 if (_macBookPro) | |
1909 { | |
1910 Boolean hasProperty = AudioObjectHasProperty(_outputDeviceID, | |
1911 &propertyAddress); | |
1912 if (hasProperty) | |
1913 { | |
1914 propertyAddress.mSelector = kAudioDevicePropertyDataSource; | |
1915             WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(_outputDeviceID, | 
1916 &propertyAddress, &objectListenerProc, this)); | |
1917 } | |
1918 } | |
1919 | |
1920 _playIsInitialized = false; | |
1921 _playing = false; | |
1922 | |
1923 return 0; | |
1924 } | |
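
The stop path above is a signal-and-wait handshake: StopPlayout() flips
_playing and _doStop while holding the lock, deliberately releases the lock,
then waits up to two seconds for the IOProc to acknowledge the stop, so the
real-time callback is never blocked against the thread tearing it down. A
minimal sketch of the same handshake using standard C++ primitives (type and
member names here are mine; the real code uses CriticalSectionScoped,
EventWrapper and atomics):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    struct StopHandshake {
      std::mutex lock;
      std::condition_variable stopped;
      bool playing = true;
      bool do_stop = false;
      bool io_proc_done = false;

      // Control thread (compare StopPlayout()).
      bool RequestStop() {
        std::unique_lock<std::mutex> guard(lock);
        playing = false;  // The IOProc checks this and returns early.
        do_stop = true;   // The IOProc performs the actual device stop.
        // wait_for() releases the mutex while blocked, mirroring the
        // explicit Leave()/Wait()/Enter() dance above.
        bool ok = stopped.wait_for(guard, std::chrono::seconds(2),
                                   [this] { return io_proc_done; });
        do_stop = false;
        return ok;  // false == timeout, e.g. the device was unplugged.
      }

      // Audio callback (compare implDeviceIOProc()).
      void OnIoProc() {
        std::lock_guard<std::mutex> guard(lock);
        if (do_stop) {
          io_proc_done = true;
          stopped.notify_one();
        }
      }
    };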
1925 | |
1926 int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const | |
1927 { | |
1928 int32_t renderDelayUs = AtomicGet32(&_renderDelayUs); | |
1929 delayMS = static_cast<uint16_t> (1e-3 * (renderDelayUs + _renderLatencyUs) + | |
1930 0.5); | |
1931 return 0; | |
1932 } | |
1933 | |
1934 int32_t AudioDeviceMac::RecordingDelay(uint16_t& delayMS) const | |
1935 { | |
1936 int32_t captureDelayUs = AtomicGet32(&_captureDelayUs); | |
1937 delayMS = static_cast<uint16_t> (1e-3 * (captureDelayUs + | |
1938 _captureLatencyUs) + 0.5); | |
1939 return 0; | |
1940 } | |
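
PlayoutDelay() and RecordingDelay() both report the sum of the measured
queueing delay and the fixed device/stream latency, converted from
microseconds to milliseconds with round-to-nearest. A worked example of the
conversion (values are illustrative):

    #include <cstdint>
    #include <cstdio>

    // 1e-3 scales microseconds to milliseconds; + 0.5 rounds to nearest
    // before the truncating cast.
    static uint16_t ToMs(int32_t delayUs, int32_t latencyUs) {
      return static_cast<uint16_t>(1e-3 * (delayUs + latencyUs) + 0.5);
    }

    int main() {
      std::printf("%u\n", static_cast<unsigned>(ToMs(25000, 3400)));  // 28
      std::printf("%u\n", static_cast<unsigned>(ToMs(25000, 3600)));  // 29
      return 0;
    }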
1941 | |
1942 bool AudioDeviceMac::Playing() const | |
1943 { | |
1944 return (_playing); | |
1945 } | 1653 } |
1946 | 1654 |
1947 int32_t AudioDeviceMac::SetPlayoutBuffer( | 1655 int32_t AudioDeviceMac::SetPlayoutBuffer( |
1948 const AudioDeviceModule::BufferType type, | 1656 const AudioDeviceModule::BufferType type, |
1949 uint16_t sizeMS) | 1657 uint16_t sizeMS) { |
1950 { | 1658 if (type != AudioDeviceModule::kFixedBufferSize) { |
1951 | 1659 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
1952 if (type != AudioDeviceModule::kFixedBufferSize) | 1660 " Adaptive buffer size not supported on this platform"); |
1953 { | 1661 return -1; |
1954 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1662 } |
1955 " Adaptive buffer size not supported on this platform"); | 1663 |
1956 return -1; | 1664 _playBufType = type; |
1957 } | 1665 _playBufDelayFixed = sizeMS; |
1958 | 1666 return 0; |
1959 _playBufType = type; | 1667 } |
1960 _playBufDelayFixed = sizeMS; | 1668 |
1961 return 0; | 1669 int32_t AudioDeviceMac::PlayoutBuffer(AudioDeviceModule::BufferType& type, |
1962 } | 1670 uint16_t& sizeMS) const { |
1963 | 1671 type = _playBufType; |
1964 int32_t AudioDeviceMac::PlayoutBuffer( | 1672 sizeMS = _playBufDelayFixed; |
1965 AudioDeviceModule::BufferType& type, | 1673 |
1966 uint16_t& sizeMS) const | 1674 return 0; |
1967 { | |
1968 | |
1969 type = _playBufType; | |
1970 sizeMS = _playBufDelayFixed; | |
1971 | |
1972 return 0; | |
1973 } | 1675 } |
1974 | 1676 |
1975 // Not implemented for Mac. | 1677 // Not implemented for Mac. |
1976 int32_t AudioDeviceMac::CPULoad(uint16_t& /*load*/) const | 1678 int32_t AudioDeviceMac::CPULoad(uint16_t& /*load*/) const { |
1977 { | 1679 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
1978 | 1680 " API call not supported on this platform"); |
1979 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1681 |
1980 " API call not supported on this platform"); | 1682 return -1; |
1981 | 1683 } |
1982 return -1; | 1684 |
1983 } | 1685 bool AudioDeviceMac::PlayoutWarning() const { |
1984 | 1686 return (_playWarning > 0); |
1985 bool AudioDeviceMac::PlayoutWarning() const | 1687 } |
1986 { | 1688 |
1987 return (_playWarning > 0); | 1689 bool AudioDeviceMac::PlayoutError() const { |
1988 } | 1690 return (_playError > 0); |
1989 | 1691 } |
1990 bool AudioDeviceMac::PlayoutError() const | 1692 |
1991 { | 1693 bool AudioDeviceMac::RecordingWarning() const { |
1992 return (_playError > 0); | 1694 return (_recWarning > 0); |
1993 } | 1695 } |
1994 | 1696 |
1995 bool AudioDeviceMac::RecordingWarning() const | 1697 bool AudioDeviceMac::RecordingError() const { |
1996 { | 1698 return (_recError > 0); |
1997 return (_recWarning > 0); | 1699 } |
1998 } | 1700 |
1999 | 1701 void AudioDeviceMac::ClearPlayoutWarning() { |
2000 bool AudioDeviceMac::RecordingError() const | 1702 _playWarning = 0; |
2001 { | 1703 } |
2002 return (_recError > 0); | 1704 |
2003 } | 1705 void AudioDeviceMac::ClearPlayoutError() { |
2004 | 1706 _playError = 0; |
2005 void AudioDeviceMac::ClearPlayoutWarning() | 1707 } |
2006 { | 1708 |
2007 _playWarning = 0; | 1709 void AudioDeviceMac::ClearRecordingWarning() { |
2008 } | 1710 _recWarning = 0; |
2009 | 1711 } |
2010 void AudioDeviceMac::ClearPlayoutError() | 1712 |
2011 { | 1713 void AudioDeviceMac::ClearRecordingError() { |
2012 _playError = 0; | 1714 _recError = 0; |
2013 } | |
2014 | |
2015 void AudioDeviceMac::ClearRecordingWarning() | |
2016 { | |
2017 _recWarning = 0; | |
2018 } | |
2019 | |
2020 void AudioDeviceMac::ClearRecordingError() | |
2021 { | |
2022 _recError = 0; | |
2023 } | 1715 } |
2024 | 1716 |
2025 // ============================================================================ | 1717 // ============================================================================ |
2026 // Private Methods | 1718 // Private Methods |
2027 // ============================================================================ | 1719 // ============================================================================ |
2028 | 1720 |
2029 int32_t | 1721 int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope, |
2030 AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope, | 1722 AudioDeviceID scopedDeviceIds[], |
2031 AudioDeviceID scopedDeviceIds[], | 1723 const uint32_t deviceListLength) { |
2032 const uint32_t deviceListLength) | 1724 OSStatus err = noErr; |
2033 { | 1725 |
2034 OSStatus err = noErr; | 1726 AudioObjectPropertyAddress propertyAddress = { |
2035 | 1727 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, |
| 1728 kAudioObjectPropertyElementMaster}; |
| 1729 UInt32 size = 0; |
| 1730 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize( |
| 1731 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size)); |
| 1732 if (size == 0) { |
| 1733 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "No devices"); |
| 1734 return 0; |
| 1735 } |
| 1736 |
| 1737 AudioDeviceID* deviceIds = (AudioDeviceID*)malloc(size); |
| 1738 UInt32 numberDevices = size / sizeof(AudioDeviceID); |
| 1739 AudioBufferList* bufferList = NULL; |
| 1740 UInt32 numberScopedDevices = 0; |
| 1741 |
| 1742 // First check if there is a default device and list it |
| 1743 UInt32 hardwareProperty = 0; |
| 1744 if (scope == kAudioDevicePropertyScopeOutput) { |
| 1745 hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice; |
| 1746 } else { |
| 1747 hardwareProperty = kAudioHardwarePropertyDefaultInputDevice; |
| 1748 } |
| 1749 |
| 1750 AudioObjectPropertyAddress propertyAddressDefault = { |
| 1751 hardwareProperty, kAudioObjectPropertyScopeGlobal, |
| 1752 kAudioObjectPropertyElementMaster}; |
| 1753 |
| 1754 AudioDeviceID usedID; |
| 1755 UInt32 uintSize = sizeof(UInt32); |
| 1756 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, |
| 1757 &propertyAddressDefault, 0, |
| 1758 NULL, &uintSize, &usedID)); |
| 1759 if (usedID != kAudioDeviceUnknown) { |
| 1760 scopedDeviceIds[numberScopedDevices] = usedID; |
| 1761 numberScopedDevices++; |
| 1762 } else { |
| 1763 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 1764 "GetNumberDevices(): Default device unknown"); |
| 1765 } |
| 1766 |
| 1767 // Then list the rest of the devices |
| 1768 bool listOK = true; |
| 1769 |
| 1770 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData( |
| 1771 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, deviceIds)); |
| 1772 if (err != noErr) { |
| 1773 listOK = false; |
| 1774 } else { |
| 1775 propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration; |
| 1776 propertyAddress.mScope = scope; |
| 1777 propertyAddress.mElement = 0; |
| 1778 for (UInt32 i = 0; i < numberDevices; i++) { |
| 1779 // Check for input channels |
| 1780 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize( |
| 1781 deviceIds[i], &propertyAddress, 0, NULL, &size)); |
| 1782 if (err == kAudioHardwareBadDeviceError) { |
| 1783 // This device doesn't actually exist; continue iterating. |
| 1784 continue; |
| 1785 } else if (err != noErr) { |
| 1786 listOK = false; |
| 1787 break; |
| 1788 } |
| 1789 |
| 1790 bufferList = (AudioBufferList*)malloc(size); |
| 1791 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData( |
| 1792 deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList)); |
| 1793 if (err != noErr) { |
| 1794 listOK = false; |
| 1795 break; |
| 1796 } |
| 1797 |
| 1798 if (bufferList->mNumberBuffers > 0) { |
| 1799 if (numberScopedDevices >= deviceListLength) { |
| 1800 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1801 "Device list is not long enough"); |
| 1802 listOK = false; |
| 1803 break; |
| 1804 } |
| 1805 |
| 1806 scopedDeviceIds[numberScopedDevices] = deviceIds[i]; |
| 1807 numberScopedDevices++; |
| 1808 } |
| 1809 |
| 1810 free(bufferList); |
| 1811 bufferList = NULL; |
| 1812 } // for |
| 1813 } |
| 1814 |
| 1815 if (!listOK) { |
| 1816 if (deviceIds) { |
| 1817 free(deviceIds); |
| 1818 deviceIds = NULL; |
| 1819 } |
| 1820 |
| 1821 if (bufferList) { |
| 1822 free(bufferList); |
| 1823 bufferList = NULL; |
| 1824 } |
| 1825 |
| 1826 return -1; |
| 1827 } |
| 1828 |
| 1829 // Happy ending |
| 1830 if (deviceIds) { |
| 1831 free(deviceIds); |
| 1832 deviceIds = NULL; |
| 1833 } |
| 1834 |
| 1835 return numberScopedDevices; |
| 1836 } |
| 1837 |
| 1838 int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope, |
| 1839 const uint16_t index, |
| 1840 char* name) { |
| 1841 OSStatus err = noErr; |
| 1842 UInt32 len = kAdmMaxDeviceNameSize; |
| 1843 AudioDeviceID deviceIds[MaxNumberDevices]; |
| 1844 |
| 1845 int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices); |
| 1846 if (numberDevices < 0) { |
| 1847 return -1; |
| 1848 } else if (numberDevices == 0) { |
| 1849 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "No devices"); |
| 1850 return -1; |
| 1851 } |
| 1852 |
| 1853 // If the number is below the number of devices, assume it's "WEBRTC ID" |
| 1854 // otherwise assume it's a CoreAudio ID |
| 1855 AudioDeviceID usedID; |
| 1856 |
| 1857 // Check if there is a default device |
| 1858 bool isDefaultDevice = false; |
| 1859 if (index == 0) { |
| 1860 UInt32 hardwareProperty = 0; |
| 1861 if (scope == kAudioDevicePropertyScopeOutput) { |
| 1862 hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice; |
| 1863 } else { |
| 1864 hardwareProperty = kAudioHardwarePropertyDefaultInputDevice; |
| 1865 } |
2036 AudioObjectPropertyAddress propertyAddress = { | 1866 AudioObjectPropertyAddress propertyAddress = { |
2037 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, | 1867 hardwareProperty, kAudioObjectPropertyScopeGlobal, |
2038 kAudioObjectPropertyElementMaster }; | 1868 kAudioObjectPropertyElementMaster}; |
2039 UInt32 size = 0; | 1869 UInt32 size = sizeof(UInt32); |
2040     WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, | 1870   WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
2041 &propertyAddress, 0, NULL, &size)); | 1871 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID)); |
2042 if (size == 0) | 1872 if (usedID == kAudioDeviceUnknown) { |
2043 { | 1873 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
2044 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1874 "GetDeviceName(): Default device unknown"); |
2045 "No devices"); | 1875 } else { |
2046 return 0; | 1876 isDefaultDevice = true; |
2047 } | 1877 } |
2048 | 1878 } |
2049 AudioDeviceID* deviceIds = (AudioDeviceID*) malloc(size); | 1879 |
2050 UInt32 numberDevices = size / sizeof(AudioDeviceID); | 1880 AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName, |
2051 AudioBufferList* bufferList = NULL; | 1881 scope, 0}; |
2052 UInt32 numberScopedDevices = 0; | 1882 |
2053 | 1883 if (isDefaultDevice) { |
2054 // First check if there is a default device and list it | 1884 char devName[len]; |
2055 UInt32 hardwareProperty = 0; | 1885 |
2056 if (scope == kAudioDevicePropertyScopeOutput) | 1886 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, &propertyAddress, |
2057 { | 1887 0, NULL, &len, devName)); |
2058 hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice; | 1888 |
2059 } else | 1889 sprintf(name, "default (%s)", devName); |
2060 { | 1890 } else { |
2061 hardwareProperty = kAudioHardwarePropertyDefaultInputDevice; | 1891 if (index < numberDevices) { |
2062 } | 1892 usedID = deviceIds[index]; |
2063 | 1893 } else { |
2064 AudioObjectPropertyAddress | 1894 usedID = index; |
2065 propertyAddressDefault = { hardwareProperty, | 1895 } |
2066 kAudioObjectPropertyScopeGlobal, | 1896 |
2067 kAudioObjectPropertyElementMaster }; | 1897 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, &propertyAddress, |
2068 | 1898 0, NULL, &len, name)); |
2069 AudioDeviceID usedID; | 1899 } |
2070 UInt32 uintSize = sizeof(UInt32); | 1900 |
2071 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, | 1901 return 0; |
2072 &propertyAddressDefault, 0, NULL, &uintSize, &usedID)); | |
2073 if (usedID != kAudioDeviceUnknown) | |
2074 { | |
2075 scopedDeviceIds[numberScopedDevices] = usedID; | |
2076 numberScopedDevices++; | |
2077 } else | |
2078 { | |
2079 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
2080 "GetNumberDevices(): Default device unknown"); | |
2081 } | |
2082 | |
2083 // Then list the rest of the devices | |
2084 bool listOK = true; | |
2085 | |
2086 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, | |
2087 &propertyAddress, 0, NULL, &size, deviceIds)); | |
2088 if (err != noErr) | |
2089 { | |
2090 listOK = false; | |
2091 } else | |
2092 { | |
2093 propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration; | |
2094 propertyAddress.mScope = scope; | |
2095 propertyAddress.mElement = 0; | |
2096 for (UInt32 i = 0; i < numberDevices; i++) | |
2097 { | |
2098 // Check for input channels | |
2099 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(deviceIds[i], | |
2100 &propertyAddress, 0, NULL, &size)); | |
2101 if (err == kAudioHardwareBadDeviceError) | |
2102 { | |
2103 // This device doesn't actually exist; continue iterating. | |
2104 continue; | |
2105 } else if (err != noErr) | |
2106 { | |
2107 listOK = false; | |
2108 break; | |
2109 } | |
2110 | |
2111 bufferList = (AudioBufferList*) malloc(size); | |
2112 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(deviceIds[i], | |
2113 &propertyAddress, 0, NULL, &size, bufferList)); | |
2114 if (err != noErr) | |
2115 { | |
2116 listOK = false; | |
2117 break; | |
2118 } | |
2119 | |
2120 if (bufferList->mNumberBuffers > 0) | |
2121 { | |
2122 if (numberScopedDevices >= deviceListLength) | |
2123 { | |
2124 WEBRTC_TRACE(kTraceError, | |
2125 kTraceAudioDevice, _id, | |
2126 "Device list is not long enough"); | |
2127 listOK = false; | |
2128 break; | |
2129 } | |
2130 | |
2131 scopedDeviceIds[numberScopedDevices] = deviceIds[i]; | |
2132 numberScopedDevices++; | |
2133 } | |
2134 | |
2135 free(bufferList); | |
2136 bufferList = NULL; | |
2137 } // for | |
2138 } | |
2139 | |
2140 if (!listOK) | |
2141 { | |
2142 if (deviceIds) | |
2143 { | |
2144 free(deviceIds); | |
2145 deviceIds = NULL; | |
2146 } | |
2147 | |
2148 if (bufferList) | |
2149 { | |
2150 free(bufferList); | |
2151 bufferList = NULL; | |
2152 } | |
2153 | |
2154 return -1; | |
2155 } | |
2156 | |
2157 // Happy ending | |
2158 if (deviceIds) | |
2159 { | |
2160 free(deviceIds); | |
2161 deviceIds = NULL; | |
2162 } | |
2163 | |
2164 return numberScopedDevices; | |
2165 } | |
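
GetNumberDevices() wraps the standard CoreAudio size-query-then-fetch
pattern: ask AudioObjectGetPropertyDataSize() how many bytes the device list
occupies, allocate, then fetch with AudioObjectGetPropertyData(). A
standalone sketch of just that pattern, without the default-device-first
ordering or the per-device channel filtering above (build on macOS with
clang++ -framework CoreAudio):

    #include <CoreAudio/CoreAudio.h>
    #include <cstdio>
    #include <vector>

    int main() {
      AudioObjectPropertyAddress addr = {kAudioHardwarePropertyDevices,
                                         kAudioObjectPropertyScopeGlobal,
                                         kAudioObjectPropertyElementMaster};
      UInt32 size = 0;
      if (AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &addr, 0,
                                         NULL, &size) != noErr || size == 0) {
        return 1;  // query failed or no devices present
      }
      std::vector<AudioDeviceID> ids(size / sizeof(AudioDeviceID));
      if (AudioObjectGetPropertyData(kAudioObjectSystemObject, &addr, 0, NULL,
                                     &size, ids.data()) != noErr) {
        return 1;
      }
      std::printf("%zu audio devices\n", ids.size());
      return 0;
    }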
2166 | |
2167 int32_t | |
2168 AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope, | |
2169 const uint16_t index, | |
2170 char* name) | |
2171 { | |
2172 OSStatus err = noErr; | |
2173 UInt32 len = kAdmMaxDeviceNameSize; | |
2174 AudioDeviceID deviceIds[MaxNumberDevices]; | |
2175 | |
2176 int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices); | |
2177 if (numberDevices < 0) | |
2178 { | |
2179 return -1; | |
2180 } else if (numberDevices == 0) | |
2181 { | |
2182 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
2183 "No devices"); | |
2184 return -1; | |
2185 } | |
2186 | |
2187 // If the number is below the number of devices, assume it's "WEBRTC ID" | |
2188 // otherwise assume it's a CoreAudio ID | |
2189 AudioDeviceID usedID; | |
2190 | |
2191 // Check if there is a default device | |
2192 bool isDefaultDevice = false; | |
2193 if (index == 0) | |
2194 { | |
2195 UInt32 hardwareProperty = 0; | |
2196 if (scope == kAudioDevicePropertyScopeOutput) | |
2197 { | |
2198 hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice; | |
2199 } else | |
2200 { | |
2201 hardwareProperty = kAudioHardwarePropertyDefaultInputDevice; | |
2202 } | |
2203 AudioObjectPropertyAddress propertyAddress = { hardwareProperty, | |
2204 kAudioObjectPropertyScopeGlobal, | |
2205 kAudioObjectPropertyElementMaster }; | |
2206 UInt32 size = sizeof(UInt32); | |
2207         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, | 
2208 &propertyAddress, 0, NULL, &size, &usedID)); | |
2209 if (usedID == kAudioDeviceUnknown) | |
2210 { | |
2211 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
2212 "GetDeviceName(): Default device unknown"); | |
2213 } else | |
2214 { | |
2215 isDefaultDevice = true; | |
2216 } | |
2217 } | |
2218 | |
2219 AudioObjectPropertyAddress propertyAddress = { | |
2220 kAudioDevicePropertyDeviceName, scope, 0 }; | |
2221 | |
2222 if (isDefaultDevice) | |
2223 { | |
2224 char devName[len]; | |
2225 | |
2226 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, | |
2227 &propertyAddress, 0, NULL, &len, devName)); | |
2228 | |
2229 sprintf(name, "default (%s)", devName); | |
2230 } else | |
2231 { | |
2232 if (index < numberDevices) | |
2233 { | |
2234 usedID = deviceIds[index]; | |
2235 } else | |
2236 { | |
2237 usedID = index; | |
2238 } | |
2239 | |
2240 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(usedID, | |
2241 &propertyAddress, 0, NULL, &len, name)); | |
2242 } | |
2243 | |
2244 return 0; | |
2245 } | 1902 } |
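
The name lookup above uses kAudioDevicePropertyDeviceName, a C-string
property (long since deprecated in favor of the CFString-based
kAudioObjectPropertyName, but still functional). A sketch of the lookup in
isolation; DeviceName() is my wrapper and the output scope is assumed:

    #include <CoreAudio/CoreAudio.h>

    static bool DeviceName(AudioDeviceID dev, char* name, UInt32 len) {
      AudioObjectPropertyAddress addr = {kAudioDevicePropertyDeviceName,
                                         kAudioDevicePropertyScopeOutput, 0};
      // On success CoreAudio writes at most `len` bytes into `name`.
      return AudioObjectGetPropertyData(dev, &addr, 0, NULL, &len, name) ==
             noErr;
    }

Note that the sprintf("default (%s)", ...) call above assumes the caller's
buffer has room for the decoration on top of kAdmMaxDeviceNameSize.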
2246 | 1903 |
2247 int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex, | 1904 int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex, |
2248 AudioDeviceID& deviceId, | 1905 AudioDeviceID& deviceId, |
2249 const bool isInput) | 1906 const bool isInput) { |
2250 { | 1907 OSStatus err = noErr; |
2251 OSStatus err = noErr; | 1908 UInt32 size = 0; |
2252 UInt32 size = 0; | 1909 AudioObjectPropertyScope deviceScope; |
2253 AudioObjectPropertyScope deviceScope; | 1910 AudioObjectPropertySelector defaultDeviceSelector; |
2254 AudioObjectPropertySelector defaultDeviceSelector; | 1911 AudioDeviceID deviceIds[MaxNumberDevices]; |
2255 AudioDeviceID deviceIds[MaxNumberDevices]; | 1912 |
2256 | 1913 if (isInput) { |
2257 if (isInput) | 1914 deviceScope = kAudioDevicePropertyScopeInput; |
2258 { | 1915 defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice; |
2259 deviceScope = kAudioDevicePropertyScopeInput; | 1916 } else { |
2260 defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice; | 1917 deviceScope = kAudioDevicePropertyScopeOutput; |
2261 } else | 1918 defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice; |
2262 { | 1919 } |
2263 deviceScope = kAudioDevicePropertyScopeOutput; | 1920 |
2264 defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice; | 1921 AudioObjectPropertyAddress propertyAddress = { |
2265 } | 1922 defaultDeviceSelector, kAudioObjectPropertyScopeGlobal, |
2266 | 1923 kAudioObjectPropertyElementMaster}; |
2267 AudioObjectPropertyAddress | 1924 |
2268 propertyAddress = { defaultDeviceSelector, | 1925 // Get the actual device IDs |
2269 kAudioObjectPropertyScopeGlobal, | 1926 int numberDevices = |
2270 kAudioObjectPropertyElementMaster }; | 1927 GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices); |
2271 | 1928 if (numberDevices < 0) { |
2272 // Get the actual device IDs | 1929 return -1; |
2273 int numberDevices = GetNumberDevices(deviceScope, deviceIds, | 1930 } else if (numberDevices == 0) { |
2274 MaxNumberDevices); | 1931 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
2275 if (numberDevices < 0) | 1932 "InitDevice(): No devices"); |
2276 { | 1933 return -1; |
2277 return -1; | 1934 } |
2278 } else if (numberDevices == 0) | 1935 |
2279 { | 1936 bool isDefaultDevice = false; |
2280 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1937 deviceId = kAudioDeviceUnknown; |
2281 "InitDevice(): No devices"); | 1938 if (userDeviceIndex == 0) { |
2282 return -1; | 1939 // Try to use default system device |
2283 } | 1940 size = sizeof(AudioDeviceID); |
2284 | 1941 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
2285 bool isDefaultDevice = false; | 1942 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId)); |
2286 deviceId = kAudioDeviceUnknown; | 1943 if (deviceId == kAudioDeviceUnknown) { |
2287 if (userDeviceIndex == 0) | 1944 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
2288 { | 1945 " No default device exists"); |
2289 // Try to use default system device | 1946 } else { |
2290 size = sizeof(AudioDeviceID); | 1947 isDefaultDevice = true; |
2291         WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, | 1948     }
2292 &propertyAddress, 0, NULL, &size, &deviceId)); | 1949 } |
2293 if (deviceId == kAudioDeviceUnknown) | 1950 |
2294 { | 1951 if (!isDefaultDevice) { |
2295 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1952 deviceId = deviceIds[userDeviceIndex]; |
2296 " No default device exists"); | 1953 } |
2297 } else | 1954 |
2298 { | 1955 // Obtain device name and manufacturer for logging. |
2299 isDefaultDevice = true; | 1956 // Also use this as a test to ensure a user-set device ID is valid. |
2300 } | 1957 char devName[128]; |
2301 } | 1958 char devManf[128]; |
2302 | 1959 memset(devName, 0, sizeof(devName)); |
2303 if (!isDefaultDevice) | 1960 memset(devManf, 0, sizeof(devManf)); |
2304 { | 1961 |
2305 deviceId = deviceIds[userDeviceIndex]; | 1962 propertyAddress.mSelector = kAudioDevicePropertyDeviceName; |
2306 } | 1963 propertyAddress.mScope = deviceScope; |
2307 | 1964 propertyAddress.mElement = 0; |
2308 // Obtain device name and manufacturer for logging. | 1965 size = sizeof(devName); |
2309 // Also use this as a test to ensure a user-set device ID is valid. | 1966 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress, |
2310 char devName[128]; | 1967 0, NULL, &size, devName)); |
2311 char devManf[128]; | 1968 |
2312 memset(devName, 0, sizeof(devName)); | 1969 propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer; |
2313 memset(devManf, 0, sizeof(devManf)); | 1970 size = sizeof(devManf); |
2314 | 1971 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress, |
2315 propertyAddress.mSelector = kAudioDevicePropertyDeviceName; | 1972 0, NULL, &size, devManf)); |
2316 propertyAddress.mScope = deviceScope; | 1973 |
2317 propertyAddress.mElement = 0; | 1974 if (isInput) { |
2318 size = sizeof(devName); | 1975 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Input device: %s %s", |
2319 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, | 1976 devManf, devName); |
2320 &propertyAddress, 0, NULL, &size, devName)); | 1977 } else { |
2321 | 1978 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Output device: %s %s", |
2322 propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer; | 1979 devManf, devName); |
2323 size = sizeof(devManf); | 1980 } |
2324 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, | 1981 |
2325 &propertyAddress, 0, NULL, &size, devManf)); | 1982 return 0; |
2326 | 1983 } |
2327 if (isInput) | 1984 |
2328 { | 1985 OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() { |
2329 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1986 // Our preferred format to work with. |
2330 " Input device: %s %s", devManf, devName); | 1987 _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC; |
2331 } else | 1988 _outDesiredFormat.mChannelsPerFrame = _playChannels; |
2332 { | 1989 |
2333 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1990 if (_ptrAudioBuffer) { |
2334 " Output device: %s %s", devManf, devName); | 1991 // Update audio buffer with the selected parameters. |
2335 } | 1992 _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); |
2336 | 1993 _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels); |
2337 return 0; | 1994 } |
2338 } | 1995 |
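
Index 0 is reserved for "whatever the system default is": InitDevice() first
asks the HAL for the default input or output device and only falls back to
the enumerated table for other indices. The default-device query in
isolation (the function name is mine):

    #include <CoreAudio/CoreAudio.h>

    static AudioDeviceID DefaultOutputDevice() {
      AudioObjectPropertyAddress addr = {
          kAudioHardwarePropertyDefaultOutputDevice,
          kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster};
      AudioDeviceID dev = kAudioDeviceUnknown;  // stays Unknown on failure
      UInt32 size = sizeof(dev);
      AudioObjectGetPropertyData(kAudioObjectSystemObject, &addr, 0, NULL,
                                 &size, &dev);
      return dev;
    }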
2339 | 1996 _renderDelayOffsetSamples = _renderBufSizeSamples - |
2340 OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() | 1997 N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * |
2341 { | 1998 _outDesiredFormat.mChannelsPerFrame; |
2342 // Our preferred format to work with. | 1999 |
2343 _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC; | 2000 _outDesiredFormat.mBytesPerPacket = |
2344 _outDesiredFormat.mChannelsPerFrame = _playChannels; | 2001 _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16); |
2345 | 2002 // In uncompressed audio, a packet is one frame. |
2346 if (_ptrAudioBuffer) | 2003 _outDesiredFormat.mFramesPerPacket = 1; |
2347 { | 2004 _outDesiredFormat.mBytesPerFrame = |
2348 // Update audio buffer with the selected parameters. | 2005 _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16); |
2349 _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); | 2006 _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8; |
2350 _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels); | 2007 |
2351 } | 2008 _outDesiredFormat.mFormatFlags = |
2352 | 2009 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; |
2353 _renderDelayOffsetSamples = _renderBufSizeSamples - N_BUFFERS_OUT * | |
2354 ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame; | |
2355 | |
2356 _outDesiredFormat.mBytesPerPacket = _outDesiredFormat.mChannelsPerFrame * | |
2357 sizeof(SInt16); | |
2358 // In uncompressed audio, a packet is one frame. | |
2359 _outDesiredFormat.mFramesPerPacket = 1; | |
2360 _outDesiredFormat.mBytesPerFrame = _outDesiredFormat.mChannelsPerFrame * | |
2361 sizeof(SInt16); | |
2362 _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8; | |
2363 | |
2364 _outDesiredFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | | |
2365 kLinearPCMFormatFlagIsPacked; | |
2366 #ifdef WEBRTC_ARCH_BIG_ENDIAN | 2010 #ifdef WEBRTC_ARCH_BIG_ENDIAN |
2367 _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian; | 2011 _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian; |
2368 #endif | 2012 #endif |
2369 _outDesiredFormat.mFormatID = kAudioFormatLinearPCM; | 2013 _outDesiredFormat.mFormatID = kAudioFormatLinearPCM; |
2370 | 2014 |
2371 OSStatus err = noErr; | 2015 OSStatus err = noErr; |
2372 WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_outDesiredFormat, | 2016 WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew( |
2373 &_outStreamFormat, | 2017 &_outDesiredFormat, &_outStreamFormat, &_renderConverter)); |
2374 &_renderConverter)); | 2018 |
2375 | 2019 // Try to set buffer size to desired value (_playBufDelayFixed). |
2376 // Try to set buffer size to desired value (_playBufDelayFixed). | 2020 UInt32 bufByteCount = static_cast<UInt32>( |
2377 UInt32 bufByteCount = static_cast<UInt32> ((_outStreamFormat.mSampleRate / | 2021 (_outStreamFormat.mSampleRate / 1000.0) * _playBufDelayFixed * |
2378 1000.0) * | 2022 _outStreamFormat.mChannelsPerFrame * sizeof(Float32)); |
2379 _playBufDelayFixed * | 2023 if (_outStreamFormat.mFramesPerPacket != 0) { |
2380 _outStreamFormat.mChannelsPerFrame * | 2024 if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) { |
2381 sizeof(Float32)); | 2025 bufByteCount = (static_cast<UInt32>(bufByteCount / |
2382 if (_outStreamFormat.mFramesPerPacket != 0) | 2026 _outStreamFormat.mFramesPerPacket) + |
2383 { | 2027 1) * |
2384 if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) | 2028 _outStreamFormat.mFramesPerPacket; |
2385 { | 2029 } |
2386 bufByteCount = (static_cast<UInt32> (bufByteCount / | 2030 } |
2387 _outStreamFormat.mFramesPerPacket) + 1) * | 2031 |
2388 _outStreamFormat.mFramesPerPacket; | 2032 // Ensure the buffer size is within the range provided by the device. |
2389 } | 2033 AudioObjectPropertyAddress propertyAddress = { |
2390 } | 2034 kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0}; |
2391 | 2035 propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange; |
2392 // Ensure the buffer size is within the range provided by the device. | 2036 AudioValueRange range; |
2393 AudioObjectPropertyAddress propertyAddress = | 2037 UInt32 size = sizeof(range); |
2394 {kAudioDevicePropertyDataSource, | 2038 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
2395 kAudioDevicePropertyScopeOutput, | 2039 _outputDeviceID, &propertyAddress, 0, NULL, &size, &range)); |
2396 0}; | 2040 if (range.mMinimum > bufByteCount) { |
2397 propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange; | 2041 bufByteCount = range.mMinimum; |
2398 AudioValueRange range; | 2042 } else if (range.mMaximum < bufByteCount) { |
2399 UInt32 size = sizeof(range); | 2043 bufByteCount = range.mMaximum; |
2400 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, | 2044 } |
2401 &propertyAddress, | 2045 |
2402 0, | 2046 propertyAddress.mSelector = kAudioDevicePropertyBufferSize; |
2403 NULL, | 2047 size = sizeof(bufByteCount); |
2404 &size, | 2048 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
2405 &range)); | 2049 _outputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount)); |
2406 if (range.mMinimum > bufByteCount) | 2050 |
2407 { | 2051 // Get render device latency. |
2408 bufByteCount = range.mMinimum; | 2052 propertyAddress.mSelector = kAudioDevicePropertyLatency; |
2409 } else if (range.mMaximum < bufByteCount) | 2053 UInt32 latency = 0; |
2410 { | 2054 size = sizeof(UInt32); |
2411 bufByteCount = range.mMaximum; | 2055 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
2412 } | 2056 _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency)); |
2413 | 2057 _renderLatencyUs = |
2414 propertyAddress.mSelector = kAudioDevicePropertyBufferSize; | 2058 static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate); |
2415 size = sizeof(bufByteCount); | 2059 |
2416 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(_outputDeviceID, | 2060 // Get render stream latency. |
2417 &propertyAddress, | 2061 propertyAddress.mSelector = kAudioDevicePropertyStreams; |
2418 0, | 2062 AudioStreamID stream = 0; |
2419 NULL, | 2063 size = sizeof(AudioStreamID); |
2420 size, | 2064 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
2421 &bufByteCount)); | 2065 _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream)); |
2422 | 2066 propertyAddress.mSelector = kAudioStreamPropertyLatency; |
2423 // Get render device latency. | 2067 size = sizeof(UInt32); |
2424 propertyAddress.mSelector = kAudioDevicePropertyLatency; | 2068 latency = 0; |
2425 UInt32 latency = 0; | 2069 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
2426 size = sizeof(UInt32); | 2070 _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency)); |
2427 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, | 2071 _renderLatencyUs += |
2428 &propertyAddress, | 2072 static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate); |
2429 0, | 2073 |
2430 NULL, | 2074 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
2431 &size, | 2075 " initial playout status: _renderDelayOffsetSamples=%d," |
2432 &latency)); | 2076 " _renderDelayUs=%d, _renderLatencyUs=%d", |
2433 _renderLatencyUs = static_cast<uint32_t> ((1.0e6 * latency) / | 2077 _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs); |
2434 _outStreamFormat.mSampleRate); | 2078 return 0; |
2435 | |
2436 // Get render stream latency. | |
2437 propertyAddress.mSelector = kAudioDevicePropertyStreams; | |
2438 AudioStreamID stream = 0; | |
2439 size = sizeof(AudioStreamID); | |
2440 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, | |
2441 &propertyAddress, | |
2442 0, | |
2443 NULL, | |
2444 &size, | |
2445 &stream)); | |
2446 propertyAddress.mSelector = kAudioStreamPropertyLatency; | |
2447 size = sizeof(UInt32); | |
2448 latency = 0; | |
2449 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(_outputDeviceID, | |
2450 &propertyAddress, | |
2451 0, | |
2452 NULL, | |
2453 &size, | |
2454 &latency)); | |
2455 _renderLatencyUs += static_cast<uint32_t> ((1.0e6 * latency) / | |
2456 _outStreamFormat.mSampleRate); | |
2457 | |
2458 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
2459 " initial playout status: _renderDelayOffsetSamples=%d," | |
2460 " _renderDelayUs=%d, _renderLatencyUs=%d", | |
2461 _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs); | |
2462 return 0; | |
2463 } | 2079 } |
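
SetDesiredPlayoutFormat() applies two numeric fix-ups to the requested
buffer size: round it up to a whole multiple of the stream's packet quantum
(a no-op for linear PCM, where mFramesPerPacket is 1), then clamp it into
the device-reported kAudioDevicePropertyBufferSizeRange. It also converts
the frame-denominated device and stream latencies into microseconds. A
compact sketch of both calculations (function and parameter names are mine,
values illustrative):

    #include <cstdint>

    static uint32_t FixUpBufferSize(uint32_t bytes, uint32_t quantum,
                                    uint32_t devMin, uint32_t devMax) {
      if (quantum != 0 && bytes % quantum != 0) {
        bytes = (bytes / quantum + 1) * quantum;  // round up, never down
      }
      if (bytes < devMin) bytes = devMin;  // clamp into the device range
      if (bytes > devMax) bytes = devMax;
      return bytes;
    }

    // Latency arrives in frames; 1e6 * frames / sampleRate converts it to
    // microseconds. E.g. 512 frames at 48 kHz -> 10666 us.
    static uint32_t FramesToUs(uint32_t frames, double sampleRate) {
      return static_cast<uint32_t>(1.0e6 * frames / sampleRate);
    }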
2464 | 2080 |
2465 OSStatus AudioDeviceMac::objectListenerProc( | 2081 OSStatus AudioDeviceMac::objectListenerProc( |
2466 AudioObjectID objectId, | 2082 AudioObjectID objectId, |
2467 UInt32 numberAddresses, | 2083 UInt32 numberAddresses, |
2468 const AudioObjectPropertyAddress addresses[], | 2084 const AudioObjectPropertyAddress addresses[], |
2469 void* clientData) | 2085 void* clientData) { |
2470 { | 2086 AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData; |
2471 AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData; | 2087 RTC_DCHECK(ptrThis != NULL); |
2472 RTC_DCHECK(ptrThis != NULL); | 2088 |
2473 | 2089 ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses); |
2474 ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses); | 2090 |
2475 | 2091 // AudioObjectPropertyListenerProc functions are supposed to return 0 |
2476 // AudioObjectPropertyListenerProc functions are supposed to return 0 | 2092 return 0; |
2477 return 0; | |
2478 } | 2093 } |
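
objectListenerProc() is the classic static-trampoline shape for C callbacks:
the object pointer rides along as clientData and is cast back on entry. The
registration side, which this file performs elsewhere with
AudioObjectAddPropertyListener(), looks like this in isolation (Listener and
its members are mine):

    #include <CoreAudio/CoreAudio.h>

    struct Listener {
      static OSStatus Proc(AudioObjectID id, UInt32 n,
                           const AudioObjectPropertyAddress addrs[],
                           void* clientData) {
        static_cast<Listener*>(clientData)->OnChange(id, n, addrs);
        return 0;  // listener procs are expected to return 0
      }
      void OnChange(AudioObjectID, UInt32,
                    const AudioObjectPropertyAddress[]) {}

      OSStatus Attach(AudioObjectID device) {
        AudioObjectPropertyAddress addr = {kAudioDeviceProcessorOverload,
                                           kAudioObjectPropertyScopeGlobal,
                                           kAudioObjectPropertyElementMaster};
        return AudioObjectAddPropertyListener(device, &addr, &Proc, this);
      }
    };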
2479 | 2094 |
2480 OSStatus AudioDeviceMac::implObjectListenerProc( | 2095 OSStatus AudioDeviceMac::implObjectListenerProc( |
2481 const AudioObjectID objectId, | 2096 const AudioObjectID objectId, |
2482 const UInt32 numberAddresses, | 2097 const UInt32 numberAddresses, |
2483 const AudioObjectPropertyAddress addresses[]) | 2098 const AudioObjectPropertyAddress addresses[]) { |
2484 { | 2099 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
2485 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | 2100 "AudioDeviceMac::implObjectListenerProc()"); |
2486 "AudioDeviceMac::implObjectListenerProc()"); | 2101 |
2487 | 2102 for (UInt32 i = 0; i < numberAddresses; i++) { |
2488 for (UInt32 i = 0; i < numberAddresses; i++) | 2103 if (addresses[i].mSelector == kAudioHardwarePropertyDevices) { |
2489 { | 2104 HandleDeviceChange(); |
2490 if (addresses[i].mSelector == kAudioHardwarePropertyDevices) | 2105 } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) { |
2491 { | 2106 HandleStreamFormatChange(objectId, addresses[i]); |
2492 HandleDeviceChange(); | 2107 } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) { |
2493 } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) | 2108 HandleDataSourceChange(objectId, addresses[i]); |
2494 { | 2109 } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) { |
2495 HandleStreamFormatChange(objectId, addresses[i]); | 2110 HandleProcessorOverload(addresses[i]); |
2496 } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) | 2111 } |
2497 { | 2112 } |
2498 HandleDataSourceChange(objectId, addresses[i]); | 2113 |
2499 } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) | 2114 return 0; |
2500 { | 2115 } |
2501 HandleProcessorOverload(addresses[i]); | 2116 |
2502 } | 2117 int32_t AudioDeviceMac::HandleDeviceChange() { |
2503 } | 2118 OSStatus err = noErr; |
2504 | 2119 |
2505 return 0; | 2120 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
2506 } | 2121 "kAudioHardwarePropertyDevices"); |
2507 | 2122 |
2508 int32_t AudioDeviceMac::HandleDeviceChange() | 2123 // A device has changed. Check if our registered devices have been removed. |
2509 { | 2124 // Ensure the devices have been initialized, meaning the IDs are valid. |
2510 OSStatus err = noErr; | 2125 if (MicrophoneIsInitialized()) { |
2511 | 2126 AudioObjectPropertyAddress propertyAddress = { |
2512 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | 2127 kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0}; |
2513 "kAudioHardwarePropertyDevices"); | 2128 UInt32 deviceIsAlive = 1; |
2514 | 2129 UInt32 size = sizeof(UInt32); |
2515 // A device has changed. Check if our registered devices have been removed. | 2130 err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL, |
2516 // Ensure the devices have been initialized, meaning the IDs are valid. | 2131 &size, &deviceIsAlive); |
2517 if (MicrophoneIsInitialized()) | 2132 |
2518 { | 2133 if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) { |
2519 AudioObjectPropertyAddress propertyAddress = { | 2134 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
2520 kAudioDevicePropertyDeviceIsAlive, | 2135 "Capture device is not alive (probably removed)"); |
2521 kAudioDevicePropertyScopeInput, 0 }; | 2136 AtomicSet32(&_captureDeviceIsAlive, 0); |
2522 UInt32 deviceIsAlive = 1; | 2137 _mixerManager.CloseMicrophone(); |
2523 UInt32 size = sizeof(UInt32); | 2138 if (_recError == 1) { |
2524 err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, | 2139 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
2525 NULL, &size, &deviceIsAlive); | 2140 " pending recording error exists"); |
2526 | 2141 } |
2527 if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) | 2142 _recError = 1; // triggers callback from module process thread |
2528 { | 2143 } else if (err != noErr) { |
2529 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 2144 logCAMsg(kTraceError, kTraceAudioDevice, _id, |
2530 "Capture device is not alive (probably removed)"); | 2145 "Error in AudioDeviceGetProperty()", (const char*)&err); |
2531 AtomicSet32(&_captureDeviceIsAlive, 0); | 2146 return -1; |
2532 _mixerManager.CloseMicrophone(); | 2147 } |
2533 if (_recError == 1) | 2148 } |
2534 { | 2149 |
2535 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, | 2150 if (SpeakerIsInitialized()) { |
2536 _id, " pending recording error exists"); | 2151 AudioObjectPropertyAddress propertyAddress = { |
2537 } | 2152 kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0}; |
2538 _recError = 1; // triggers callback from module process thread | 2153 UInt32 deviceIsAlive = 1; |
2539 } else if (err != noErr) | 2154 UInt32 size = sizeof(UInt32); |
2540 { | 2155 err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL, |
2541 logCAMsg(kTraceError, kTraceAudioDevice, _id, | 2156 &size, &deviceIsAlive); |
2542 "Error in AudioDeviceGetProperty()", (const char*) &err); | 2157 |
2543 return -1; | 2158 if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) { |
2544 } | 2159 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
2545 } | 2160 "Render device is not alive (probably removed)"); |
2546 | 2161 AtomicSet32(&_renderDeviceIsAlive, 0); |
2547 if (SpeakerIsInitialized()) | 2162 _mixerManager.CloseSpeaker(); |
2548 { | 2163 if (_playError == 1) { |
2549 AudioObjectPropertyAddress propertyAddress = { | 2164 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
2550 kAudioDevicePropertyDeviceIsAlive, | 2165 " pending playout error exists"); |
2551 kAudioDevicePropertyScopeOutput, 0 }; | 2166 } |
2552 UInt32 deviceIsAlive = 1; | 2167 _playError = 1; // triggers callback from module process thread |
2553 UInt32 size = sizeof(UInt32); | 2168 } else if (err != noErr) { |
2554 err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, | 2169 logCAMsg(kTraceError, kTraceAudioDevice, _id, |
2555 NULL, &size, &deviceIsAlive); | 2170 "Error in AudioDeviceGetProperty()", (const char*)&err); |
2556 | 2171 return -1; |
2557 if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) | 2172 } |
2558 { | 2173 } |
2559 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 2174 |
2560 "Render device is not alive (probably removed)"); | 2175 return 0; |
2561 AtomicSet32(&_renderDeviceIsAlive, 0); | |
2562 _mixerManager.CloseSpeaker(); | |
2563 if (_playError == 1) | |
2564 { | |
2565 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, | |
2566 _id, " pending playout error exists"); | |
2567 } | |
2568 _playError = 1; // triggers callback from module process thread | |
2569 } else if (err != noErr) | |
2570 { | |
2571 logCAMsg(kTraceError, kTraceAudioDevice, _id, | |
2572 "Error in AudioDeviceGetProperty()", (const char*) &err); | |
2573 return -1; | |
2574 } | |
2575 } | |
2576 | |
2577 return 0; | |
2578 } | 2176 } |
2579 | 2177 |
2580 int32_t AudioDeviceMac::HandleStreamFormatChange( | 2178 int32_t AudioDeviceMac::HandleStreamFormatChange( |
2581 const AudioObjectID objectId, | 2179 const AudioObjectID objectId, |
2582 const AudioObjectPropertyAddress propertyAddress) | 2180 const AudioObjectPropertyAddress propertyAddress) { |
2583 { | 2181 OSStatus err = noErr; |
2584 OSStatus err = noErr; | 2182 |
2585 | 2183 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Stream format changed"); |
2586 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | 2184 |
2587 "Stream format changed"); | 2185 if (objectId != _inputDeviceID && objectId != _outputDeviceID) { |
2588 | |
2589 if (objectId != _inputDeviceID && objectId != _outputDeviceID) | |
2590 { | |
2591 return 0; | |
2592 } | |
2593 | |
2594 // Get the new device format | |
2595 AudioStreamBasicDescription streamFormat; | |
2596 UInt32 size = sizeof(streamFormat); | |
2597 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId, | |
2598 &propertyAddress, 0, NULL, &size, &streamFormat)); | |
2599 | |
2600 if (streamFormat.mFormatID != kAudioFormatLinearPCM) | |
2601 { | |
2602 logCAMsg(kTraceError, kTraceAudioDevice, _id, | |
2603 "Unacceptable input stream format -> mFormatID", | |
2604 (const char *) &streamFormat.mFormatID); | |
2605 return -1; | |
2606 } | |
2607 | |
2608 if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) | |
2609 { | |
2610 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
2611 "Too many channels on device (mChannelsPerFrame = %d)", | |
2612 streamFormat.mChannelsPerFrame); | |
2613 return -1; | |
2614 } | |
2615 | |
2616 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
2617 "Stream format:"); | |
2618 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
2619 "mSampleRate = %f, mChannelsPerFrame = %u", | |
2620 streamFormat.mSampleRate, streamFormat.mChannelsPerFrame); | |
2621 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
2622 "mBytesPerPacket = %u, mFramesPerPacket = %u", | |
2623 streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket); | |
2624 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
2625 "mBytesPerFrame = %u, mBitsPerChannel = %u", | |
2626 streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel); | |
2627 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
2628 "mFormatFlags = %u", | |
2629 streamFormat.mFormatFlags); | |
2630 logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", | |
2631 (const char *) &streamFormat.mFormatID); | |
2632 | |
2633 if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) | |
2634 { | |
2635 const int io_block_size_samples = streamFormat.mChannelsPerFrame * | |
2636 streamFormat.mSampleRate / 100 * N_BLOCKS_IO; | |
2637 if (io_block_size_samples > _captureBufSizeSamples) | |
2638 { | |
2639 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
2640 "Input IO block size (%d) is larger than ring buffer (%u)", | |
2641 io_block_size_samples, _captureBufSizeSamples); | |
2642 return -1; | |
2643 | |
2644 } | |
2645 | |
2646 memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat)); | |
2647 | |
2648 if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) | |
2649 { | |
2650 _inDesiredFormat.mChannelsPerFrame = 2; | |
2651 } else | |
2652 { | |
2653             // Disable stereo recording when we only have one channel on the device. | 
2654 _inDesiredFormat.mChannelsPerFrame = 1; | |
2655 _recChannels = 1; | |
2656 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
2657 "Stereo recording unavailable on this device"); | |
2658 } | |
2659 | |
2660 if (_ptrAudioBuffer) | |
2661 { | |
2662 // Update audio buffer with the selected parameters | |
2663 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); | |
2664 _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels); | |
2665 } | |
2666 | |
2667 // Recreate the converter with the new format | |
2668 // TODO(xians): make this thread safe | |
2669 WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter)); | |
2670 | |
2671         WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat, | 
2672 &_captureConverter)); | |
2673 } else | |
2674 { | |
2675 memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat)); | |
2676 | |
2677 // Our preferred format to work with | |
2678 if (_outStreamFormat.mChannelsPerFrame < 2) | |
2679 { | |
2680 _playChannels = 1; | |
2681 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
2682 "Stereo playout unavailable on this device"); | |
2683 } | |
2684 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat()); | |
2685 } | |
2686 return 0; | 2186 return 0; |
| 2187 } |
| 2188 |
| 2189 // Get the new device format |
| 2190 AudioStreamBasicDescription streamFormat; |
| 2191 UInt32 size = sizeof(streamFormat); |
| 2192 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 2193 objectId, &propertyAddress, 0, NULL, &size, &streamFormat)); |
| 2194 |
| 2195 if (streamFormat.mFormatID != kAudioFormatLinearPCM) { |
| 2196 logCAMsg(kTraceError, kTraceAudioDevice, _id, |
| 2197 "Unacceptable input stream format -> mFormatID", |
| 2198 (const char*)&streamFormat.mFormatID); |
| 2199 return -1; |
| 2200 } |
| 2201 |
| 2202 if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { |
| 2203 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2204 "Too many channels on device (mChannelsPerFrame = %d)", |
| 2205 streamFormat.mChannelsPerFrame); |
| 2206 return -1; |
| 2207 } |
| 2208 |
| 2209 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Stream format:"); |
| 2210 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 2211 "mSampleRate = %f, mChannelsPerFrame = %u", |
| 2212 streamFormat.mSampleRate, streamFormat.mChannelsPerFrame); |
| 2213 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 2214 "mBytesPerPacket = %u, mFramesPerPacket = %u", |
| 2215 streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket); |
| 2216 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 2217 "mBytesPerFrame = %u, mBitsPerChannel = %u", |
| 2218 streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel); |
| 2219 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "mFormatFlags = %u", |
| 2220 streamFormat.mFormatFlags); |
| 2221 logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", |
| 2222 (const char*)&streamFormat.mFormatID); |
| 2223 |
| 2224 if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) { |
| 2225 const int io_block_size_samples = streamFormat.mChannelsPerFrame * |
| 2226 streamFormat.mSampleRate / 100 * |
| 2227 N_BLOCKS_IO; |
| 2228 if (io_block_size_samples > _captureBufSizeSamples) { |
| 2229 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2230 "Input IO block size (%d) is larger than ring buffer (%u)", |
| 2231 io_block_size_samples, _captureBufSizeSamples); |
| 2232 return -1; |
| 2233 } |
| 2234 |
| 2235 memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat)); |
| 2236 |
| 2237 if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) { |
| 2238 _inDesiredFormat.mChannelsPerFrame = 2; |
| 2239 } else { |
| 2240 // Disable stereo recording when we only have one channel on the device. |
| 2241 _inDesiredFormat.mChannelsPerFrame = 1; |
| 2242 _recChannels = 1; |
| 2243 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 2244 "Stereo recording unavailable on this device"); |
| 2245 } |
| 2246 |
| 2247 if (_ptrAudioBuffer) { |
| 2248 // Update audio buffer with the selected parameters |
| 2249 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); |
| 2250 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); |
| 2251 } |
| 2252 |
| 2253 // Recreate the converter with the new format |
| 2254 // TODO(xians): make this thread safe |
| 2255 WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter)); |
| 2256 |
| 2257 WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat, |
| 2258 &_captureConverter)); |
| 2259 } else { |
| 2260 memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat)); |
| 2261 |
| 2262 // Our preferred format to work with |
| 2263 if (_outStreamFormat.mChannelsPerFrame < 2) { |
| 2264 _playChannels = 1; |
| 2265 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 2266 "Stereo playout unavailable on this device"); |
| 2267 } |
| 2268 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat()); |
| 2269 } |
| 2270 return 0; |
2687 } | 2271 } |
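
The ring-buffer guard in the input branch above is worth a worked example:
io_block_size_samples is channels * (sampleRate / 100) * N_BLOCKS_IO, i.e.
N_BLOCKS_IO blocks of 10 ms of audio. Assuming N_BLOCKS_IO is 2 (an
assumption; the actual constant lives in the header), a stereo 48 kHz stream
needs 2 * 480 * 2 = 1920 samples, and the new format is rejected if that
exceeds _captureBufSizeSamples:

    #include <cstdio>

    int main() {
      const int kBlocksIo = 2;       // assumed value of N_BLOCKS_IO
      const int channels = 2;        // stereo input
      const double sampleRate = 48000.0;
      const int ioBlockSizeSamples =
          static_cast<int>(channels * sampleRate / 100 * kBlocksIo);
      std::printf("%d samples per IO block\n", ioBlockSizeSamples);  // 1920
      return 0;
    }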
2688 | 2272 |
2689 int32_t AudioDeviceMac::HandleDataSourceChange( | 2273 int32_t AudioDeviceMac::HandleDataSourceChange( |
2690 const AudioObjectID objectId, | 2274 const AudioObjectID objectId, |
2691 const AudioObjectPropertyAddress propertyAddress) | 2275 const AudioObjectPropertyAddress propertyAddress) { |
2692 { | 2276 OSStatus err = noErr; |
2693 OSStatus err = noErr; | 2277 |
2694 | 2278 if (_macBookPro && |
2695 if (_macBookPro && propertyAddress.mScope | 2279 propertyAddress.mScope == kAudioDevicePropertyScopeOutput) { |
2696 == kAudioDevicePropertyScopeOutput) | 2280 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Data source changed"); |
2697 { | 2281 |
2698 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | 2282 _macBookProPanRight = false; |
2699 "Data source changed"); | 2283 UInt32 dataSource = 0; |
2700 | 2284 UInt32 size = sizeof(UInt32); |
2701 _macBookProPanRight = false; | 2285 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
2702 UInt32 dataSource = 0; | 2286 objectId, &propertyAddress, 0, NULL, &size, &dataSource)); |
2703 UInt32 size = sizeof(UInt32); | 2287 if (dataSource == 'ispk') { |
2704 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(objectId, | 2288 _macBookProPanRight = true; |
2705 &propertyAddress, 0, NULL, &size, &dataSource)); | 2289 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
2706 if (dataSource == 'ispk') | 2290 "MacBook Pro using internal speakers; stereo panning right"); |
2707 { | 2291 } else { |
2708 _macBookProPanRight = true; | 2292 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
2709 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 2293 "MacBook Pro not using internal speakers"); |
2710                          "MacBook Pro using internal speakers; stereo panning right"); | 2294     }
2711 } else | 2295 } |
2712 { | 2296 |
2713 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 2297 return 0; |
2714 "MacBook Pro not using internal speakers"); | |
2715 } | |
2716 } | |
2717 | |
2718 return 0; | |
2719 } | 2298 } |
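
The 'ispk' literal above is a FourCC data-source code (internal speakers):
CoreAudio packs four ASCII characters into a UInt32, so the check is plain
integer equality. The packing made explicit (FourCC() is mine):

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t FourCC(char a, char b, char c, char d) {
      return (uint32_t(uint8_t(a)) << 24) | (uint32_t(uint8_t(b)) << 16) |
             (uint32_t(uint8_t(c)) << 8) | uint32_t(uint8_t(d));
    }

    int main() {
      // Prints 0x6973706b, the value of the multi-char literal 'ispk'.
      std::printf("0x%08x\n",
                  static_cast<unsigned>(FourCC('i', 's', 'p', 'k')));
      return 0;
    }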
2720 int32_t AudioDeviceMac::HandleProcessorOverload( | 2299 int32_t AudioDeviceMac::HandleProcessorOverload( |
2721 const AudioObjectPropertyAddress propertyAddress) | 2300 const AudioObjectPropertyAddress propertyAddress) { |
2722 { | 2301 // TODO(xians): we probably want to notify the user in some way of the |
2723 // TODO(xians): we probably want to notify the user in some way of the | 2302 // overload. However, the Windows interpretations of these errors seem to |
2724 // overload. However, the Windows interpretations of these errors seem to | 2303 // be more severe than what ProcessorOverload is thrown for. |
2725 // be more severe than what ProcessorOverload is thrown for. | 2304 // |
2726 // | 2305 // We don't log the notification, as it's sent from the HAL's IO thread. We |
2727 // We don't log the notification, as it's sent from the HAL's IO thread. We | 2306 // don't want to slow it down even further. |
2728 // don't want to slow it down even further. | 2307 if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) { |
2729 if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) | 2308 // WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor |
2730 { | 2309 // overload"); |
2731 //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor | 2310 //_callback->ProblemIsReported( |
2732 // overload"); | 2311 // SndCardStreamObserver::ERecordingProblem); |
2733 //_callback->ProblemIsReported( | 2312 } else { |
2734 // SndCardStreamObserver::ERecordingProblem); | 2313 // WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
2735 } else | 2314 // "Render processor overload"); |
2736 { | 2315 //_callback->ProblemIsReported( |
2737 //WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 2316 // SndCardStreamObserver::EPlaybackProblem); |
2738 // "Render processor overload"); | 2317 } |
2739 //_callback->ProblemIsReported( | 2318 |
2740 // SndCardStreamObserver::EPlaybackProblem); | 2319 return 0; |
2741 } | |
2742 | |
2743 return 0; | |
2744 } | 2320 } |
2745 | 2321 |
2746 // ============================================================================ | 2322 // ============================================================================ |
2747 // Thread Methods | 2323 // Thread Methods |
2748 // ============================================================================ | 2324 // ============================================================================ |
2749 | 2325 |
2750 OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID, const AudioTimeStamp*, | 2326 OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID, |
| 2327 const AudioTimeStamp*, |
2751 const AudioBufferList* inputData, | 2328 const AudioBufferList* inputData, |
2752 const AudioTimeStamp* inputTime, | 2329 const AudioTimeStamp* inputTime, |
2753 AudioBufferList* outputData, | 2330 AudioBufferList* outputData, |
2754 const AudioTimeStamp* outputTime, | 2331 const AudioTimeStamp* outputTime, |
2755 void *clientData) | 2332 void* clientData) { |
2756 { | 2333 AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData; |
2757 AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData; | 2334 RTC_DCHECK(ptrThis != NULL); |
2758 RTC_DCHECK(ptrThis != NULL); | 2335 |
2759 | 2336 ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime); |
2760 ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime); | 2337 |
2761 | 2338 // AudioDeviceIOProc functions are supposed to return 0 |
2762 // AudioDeviceIOProc functions are supposed to return 0 | 2339 return 0; |
2763 return 0; | |
2764 } | 2340 } |
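
deviceIOProc() above is the render-side trampoline; what this hunk does not
show is how it gets attached. Registration goes through
AudioDeviceCreateIOProcID(), which hands the object pointer back as
clientData on every callback, and AudioDeviceStart()/AudioDeviceStop() then
drive the proc by its ID. A minimal sketch (Player and its members are
mine):

    #include <CoreAudio/CoreAudio.h>

    struct Player {
      AudioDeviceIOProcID proc_id = nullptr;

      static OSStatus IOProc(AudioObjectID, const AudioTimeStamp*,
                             const AudioBufferList*, const AudioTimeStamp*,
                             AudioBufferList* output, const AudioTimeStamp*,
                             void* clientData) {
        static_cast<Player*>(clientData)->Render(output);
        return 0;  // AudioDeviceIOProc functions are supposed to return 0
      }
      void Render(AudioBufferList*) { /* fill the output buffers */ }

      OSStatus Start(AudioDeviceID device) {
        OSStatus err = AudioDeviceCreateIOProcID(device, &IOProc, this,
                                                 &proc_id);
        if (err != noErr) return err;
        return AudioDeviceStart(device, proc_id);
      }
    };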
2765 | 2341 |
2766 OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef, | 2342 OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef, |
2767 UInt32 *numberDataPackets, | 2343 UInt32* numberDataPackets, |
2768 AudioBufferList *data, | 2344 AudioBufferList* data, |
2769 AudioStreamPacketDescription **, | 2345 AudioStreamPacketDescription**, |
2770 void *userData) | 2346 void* userData) { |
2771 { | 2347 AudioDeviceMac* ptrThis = (AudioDeviceMac*)userData; |
2772 AudioDeviceMac *ptrThis = (AudioDeviceMac *) userData; | 2348 RTC_DCHECK(ptrThis != NULL); |
2773 RTC_DCHECK(ptrThis != NULL); | 2349 |
2774 | 2350 return ptrThis->implOutConverterProc(numberDataPackets, data); |
2775 return ptrThis->implOutConverterProc(numberDataPackets, data); | 2351 } |
2776 } | 2352 |
2777 | 2353 OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID, |
2778 OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID, const AudioTimeStamp*, | 2354 const AudioTimeStamp*, |
2779 const AudioBufferList* inputData, | 2355 const AudioBufferList* inputData, |
2780 const AudioTimeStamp* inputTime, | 2356 const AudioTimeStamp* inputTime, |
2781 AudioBufferList*, | 2357 AudioBufferList*, |
2782 const AudioTimeStamp*, void* clientData) | 2358 const AudioTimeStamp*, |
2783 { | 2359 void* clientData) { |
2784 AudioDeviceMac *ptrThis = (AudioDeviceMac *) clientData; | 2360 AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData; |
2785 RTC_DCHECK(ptrThis != NULL); | 2361 RTC_DCHECK(ptrThis != NULL); |
2786 | 2362 |
2787 ptrThis->implInDeviceIOProc(inputData, inputTime); | 2363 ptrThis->implInDeviceIOProc(inputData, inputTime); |
2788 | 2364 |
2789 // AudioDeviceIOProc functions are supposed to return 0 | 2365 // AudioDeviceIOProc functions are supposed to return 0 |
2790 return 0; | 2366 return 0; |
2791 } | 2367 } |
2792 | 2368 |
2793 OSStatus AudioDeviceMac::inConverterProc( | 2369 OSStatus AudioDeviceMac::inConverterProc( |
2794 AudioConverterRef, | 2370 AudioConverterRef, |
2795 UInt32 *numberDataPackets, | 2371 UInt32* numberDataPackets, |
2796 AudioBufferList *data, | 2372 AudioBufferList* data, |
2797 AudioStreamPacketDescription ** /*dataPacketDescription*/, | 2373 AudioStreamPacketDescription** /*dataPacketDescription*/, |
2798 void *userData) | 2374 void* userData) { |
2799 { | 2375 AudioDeviceMac* ptrThis = static_cast<AudioDeviceMac*>(userData); |
2800 AudioDeviceMac *ptrThis = static_cast<AudioDeviceMac*> (userData); | 2376 RTC_DCHECK(ptrThis != NULL); |
2801 RTC_DCHECK(ptrThis != NULL); | 2377 |
2802 | 2378 return ptrThis->implInConverterProc(numberDataPackets, data); |
2803 return ptrThis->implInConverterProc(numberDataPackets, data); | 2379 } |
2804 } | 2380 |
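The four static functions above are trampolines: CoreAudio's C callback API cannot call a C++ member function, so the object pointer rides along in the void* clientData/userData argument and is cast back on entry. A minimal sketch of the pattern (MyHandler and OnInput are illustrative names, not part of this CL):

    #include <CoreAudio/CoreAudio.h>

    class MyHandler {
     public:
      OSStatus OnInput(const AudioBufferList* input) {
        // ... consume samples ...
        return 0;
      }
      static OSStatus IOProc(AudioDeviceID /*device*/,
                             const AudioTimeStamp* /*now*/,
                             const AudioBufferList* inputData,
                             const AudioTimeStamp* /*inputTime*/,
                             AudioBufferList* /*outputData*/,
                             const AudioTimeStamp* /*outputTime*/,
                             void* clientData) {
        // clientData is the pointer handed to AudioDeviceCreateIOProcID().
        return static_cast<MyHandler*>(clientData)->OnInput(inputData);
      }
    };
    // Registration: AudioDeviceCreateIOProcID(device, &MyHandler::IOProc,
    //                                         this, &ioProcID);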
2805 | 2381 OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData, |
2806 OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList *inputData, | 2382 const AudioTimeStamp* inputTime, |
2807 const AudioTimeStamp *inputTime, | 2383 AudioBufferList* outputData, |
2808 AudioBufferList *outputData, | 2384 const AudioTimeStamp* outputTime) { |
2809 const AudioTimeStamp *outputTime) | 2385 OSStatus err = noErr; |
2810 { | 2386 UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime); |
2811 OSStatus err = noErr; | 2387 UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); |
2812 UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime); | 2388 |
2813 UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); | 2389 if (!_twoDevices && _recording) { |
2814 | 2390 implInDeviceIOProc(inputData, inputTime); |
2815 if (!_twoDevices && _recording) | 2391 } |
2816 { | 2392 |
2817 implInDeviceIOProc(inputData, inputTime); | 2393 // Check if we should close down audio device |
2818 } | 2394 // Double-checked locking optimization to remove locking overhead |
2819 | 2395 if (_doStop) { |
2820 // Check if we should close down audio device | 2396 _critSect.Enter(); |
2821 // Double-checked locking optimization to remove locking overhead | 2397 if (_doStop) { |
2822 if (_doStop) | 2398 if (_twoDevices || (!_recording && !_playing)) { |
2823 { | 2399 // In the case of a shared device, the single driving ioProc |
2824 _critSect.Enter(); | 2400 // is stopped here |
2825 if (_doStop) | 2401 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID)); |
2826 { | 2402 WEBRTC_CA_LOG_WARN( |
2827 if (_twoDevices || (!_recording && !_playing)) | 2403 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID)); |
2828 { | 2404 if (err == noErr) { |
2829 // In the case of a shared device, the single driving ioProc | 2405 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
2830 // is stopped here | 2406 " Playout or shared device stopped"); |
2831 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, | |
2832 _deviceIOProcID)); | |
2833 WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_outputDeviceID, | |
2834 _deviceIOProcID)); | |
2835 if (err == noErr) | |
2836 { | |
2837 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, | |
2838 _id, " Playout or shared device stopped"); | |
2839 } | |
2840 } | |
2841 | |
2842 _doStop = false; | |
2843 _stopEvent.Set(); | |
2844 _critSect.Leave(); | |
2845 return 0; | |
2846 } | 2407 } |
2847 _critSect.Leave(); | 2408 } |
2848 } | 2409 |
2849 | 2410 _doStop = false; |
2850 if (!_playing) | 2411 _stopEvent.Set(); |
2851 { | 2412 _critSect.Leave(); |
2852 // This can be the case when a shared device is capturing but not | 2413 return 0; |
2853 // rendering. We allow the checks above before returning to avoid a | 2414 } |
2854 // timeout when capturing is stopped. | 2415 _critSect.Leave(); |
2855 return 0; | 2416 } |
2856 } | 2417 |
2857 | 2418 if (!_playing) { |
2858 RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0); | 2419 // This can be the case when a shared device is capturing but not |
2859 UInt32 size = outputData->mBuffers->mDataByteSize | 2420 // rendering. We allow the checks above before returning to avoid a |
2860 / _outStreamFormat.mBytesPerFrame; | 2421 // timeout when capturing is stopped. |
2861 | 2422 return 0; |
2862 // TODO(xians): signal an error somehow? | 2423 } |
2863 err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc, | 2424 |
2864 this, &size, outputData, NULL); | 2425 RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0); |
2865 if (err != noErr) | 2426 UInt32 size = |
2866 { | 2427 outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame; |
2867 if (err == 1) | 2428 |
2868 { | 2429 // TODO(xians): signal an error somehow? |
2869 // This is our own error. | 2430 err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc, |
2870 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2431 this, &size, outputData, NULL); |
2871 " Error in AudioConverterFillComplexBuffer()"); | 2432 if (err != noErr) { |
2872 return 1; | 2433 if (err == 1) { |
2873 } else | 2434 // This is our own error. |
2874 { | 2435 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
2875 logCAMsg(kTraceError, kTraceAudioDevice, _id, | 2436 " Error in AudioConverterFillComplexBuffer()"); |
2876 "Error in AudioConverterFillComplexBuffer()", | 2437 return 1; |
2877 (const char *) &err); | 2438 } else { |
2878 return 1; | 2439 logCAMsg(kTraceError, kTraceAudioDevice, _id, |
| 2440 "Error in AudioConverterFillComplexBuffer()", (const char*)&err); |
| 2441 return 1; |
| 2442 } |
| 2443 } |
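(The err == 1 branch above is this module's own sentinel, not a CoreAudio status: the converter input procs below return 1 to abort the pull, and AudioConverterFillComplexBuffer() hands the input proc's return value back to its caller, so a dead device or a semaphore failure surfaces here as err == 1.)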
| 2444 |
| 2445 PaRingBufferSize bufSizeSamples = |
| 2446 PaUtil_GetRingBufferReadAvailable(_paRenderBuffer); |
| 2447 |
| 2448 int32_t renderDelayUs = |
| 2449 static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5); |
| 2450 renderDelayUs += static_cast<int32_t>( |
| 2451 (1.0e6 * bufSizeSamples) / _outDesiredFormat.mChannelsPerFrame / |
| 2452 _outDesiredFormat.mSampleRate + |
| 2453 0.5); |
| 2454 |
| 2455 AtomicSet32(&_renderDelayUs, renderDelayUs); |
| 2456 |
| 2457 return 0; |
| 2458 } |
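The _doStop handling in implDeviceIOProc is double-checked locking: the flag is tested once without _critSect so the common case (no stop pending) costs nothing on the realtime thread, then re-tested under the lock before anything is torn down. Reduced to its skeleton (StopAndTearDown() is an illustrative placeholder; this is only safe because the flag is written while holding the same lock):

    if (_doStop) {          // cheap unlocked check, true only around shutdown
      _critSect.Enter();
      if (_doStop) {        // confirm under the lock before acting
        StopAndTearDown();  // AudioDeviceStop() + AudioDeviceDestroyIOProcID()
        _doStop = false;
        _stopEvent.Set();   // unblock the thread waiting in StopPlayout()
      }
      _critSect.Leave();
    }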
| 2459 |
| 2460 OSStatus AudioDeviceMac::implOutConverterProc(UInt32* numberDataPackets, |
| 2461 AudioBufferList* data) { |
| 2462 RTC_DCHECK(data->mNumberBuffers == 1); |
| 2463 PaRingBufferSize numSamples = |
| 2464 *numberDataPackets * _outDesiredFormat.mChannelsPerFrame; |
| 2465 |
| 2466 data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame; |
| 2467 // Always give the converter as much as it wants, zero padding as required. |
| 2468 data->mBuffers->mDataByteSize = |
| 2469 *numberDataPackets * _outDesiredFormat.mBytesPerPacket; |
| 2470 data->mBuffers->mData = _renderConvertData; |
| 2471 memset(_renderConvertData, 0, sizeof(_renderConvertData)); |
| 2472 |
| 2473 PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples); |
| 2474 |
| 2475 kern_return_t kernErr = semaphore_signal_all(_renderSemaphore); |
| 2476 if (kernErr != KERN_SUCCESS) { |
| 2477 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2478 " semaphore_signal_all() error: %d", kernErr); |
| 2479 return 1; |
| 2480 } |
| 2481 |
| 2482 return 0; |
| 2483 } |
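Also worth noting in implOutConverterProc: the converter is always promised *numberDataPackets packets, and _renderConvertData is zeroed first, so a ring-buffer underrun plays out as silence rather than stale samples. Since PaUtil_ReadRingBuffer() returns the element count actually read, underruns could be made observable; a hypothetical variant (not in this CL):

    PaRingBufferSize read =
        PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);
    if (read < numSamples) {
      // (numSamples - read) samples stayed zero and will render as silence;
      // a counter or trace here would make underruns visible.
    }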
| 2484 |
| 2485 OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData, |
| 2486 const AudioTimeStamp* inputTime) { |
| 2487 OSStatus err = noErr; |
| 2488 UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime); |
| 2489 UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); |
| 2490 |
| 2491 // Check if we should close down audio device |
| 2492 // Double-checked locking optimization to remove locking overhead |
| 2493 if (_doStopRec) { |
| 2494 _critSect.Enter(); |
| 2495 if (_doStopRec) { |
| 2496 // This will be signalled only when a shared device is not in use. |
| 2497 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID)); |
| 2498 WEBRTC_CA_LOG_WARN( |
| 2499 AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID)); |
| 2500 if (err == noErr) { |
| 2501 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
| 2502 " Recording device stopped"); |
| 2503 } |
| 2504 |
| 2505 _doStopRec = false; |
| 2506 _stopEventRec.Set(); |
| 2507 _critSect.Leave(); |
| 2508 return 0; |
| 2509 } |
| 2510 _critSect.Leave(); |
| 2511 } |
| 2512 |
| 2513 if (!_recording) { |
| 2514 // Allow above checks to avoid a timeout on stopping capture. |
| 2515 return 0; |
| 2516 } |
| 2517 |
| 2518 PaRingBufferSize bufSizeSamples = |
| 2519 PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer); |
| 2520 |
| 2521 int32_t captureDelayUs = |
| 2522 static_cast<int32_t>(1e-3 * (nowNs - inputTimeNs) + 0.5); |
| 2523 captureDelayUs += static_cast<int32_t>((1.0e6 * bufSizeSamples) / |
| 2524 _inStreamFormat.mChannelsPerFrame / |
| 2525 _inStreamFormat.mSampleRate + |
| 2526 0.5); |
| 2527 |
| 2528 AtomicSet32(&_captureDelayUs, captureDelayUs); |
| 2529 |
| 2530 RTC_DCHECK(inputData->mNumberBuffers == 1); |
| 2531 PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize * |
| 2532 _inStreamFormat.mChannelsPerFrame / |
| 2533 _inStreamFormat.mBytesPerPacket; |
| 2534 PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData, |
| 2535 numSamples); |
| 2536 |
| 2537 kern_return_t kernErr = semaphore_signal_all(_captureSemaphore); |
| 2538 if (kernErr != KERN_SUCCESS) { |
| 2539 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2540 " semaphore_signal_all() error: %d", kernErr); |
| 2541 } |
| 2542 |
| 2543 return err; |
| 2544 } |
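The capture delay computed above is the age of the hardware timestamp plus the backlog waiting in the ring buffer. A worked example with illustrative numbers (48 kHz stereo):

    //   timestamp age: nowNs - inputTimeNs = 2'000'000 ns  -> 2000 us
    //   ring backlog : 480 samples / 2 ch / 48000 Hz       -> 5000 us
    //   captureDelayUs ~ 7000 us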
| 2545 |
| 2546 OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets, |
| 2547 AudioBufferList* data) { |
| 2548 RTC_DCHECK(data->mNumberBuffers == 1); |
| 2549 PaRingBufferSize numSamples = |
| 2550 *numberDataPackets * _inStreamFormat.mChannelsPerFrame; |
| 2551 |
| 2552 while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) { |
| 2553 mach_timespec_t timeout; |
| 2554 timeout.tv_sec = 0; |
| 2555 timeout.tv_nsec = TIMER_PERIOD_MS; |
| 2556 |
| 2557 kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout); |
| 2558 if (kernErr == KERN_OPERATION_TIMED_OUT) { |
| 2559 int32_t signal = AtomicGet32(&_captureDeviceIsAlive); |
| 2560 if (signal == 0) { |
| 2561 // The capture device is no longer alive; stop the worker thread. |
| 2562 *numberDataPackets = 0; |
| 2563 return 1; |
| 2564 } |
| 2565 } else if (kernErr != KERN_SUCCESS) { |
| 2566 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2567 " semaphore_wait() error: %d", kernErr); |
| 2568 } |
| 2569 } |
| 2570 |
| 2571 // Pass the read pointer directly to the converter to avoid a memcpy. |
| 2572 void* dummyPtr; |
| 2573 PaRingBufferSize dummySize; |
| 2574 PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples, |
| 2575 &data->mBuffers->mData, &numSamples, |
| 2576 &dummyPtr, &dummySize); |
| 2577 PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples); |
| 2578 |
| 2579 data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame; |
| 2580 *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame; |
| 2581 data->mBuffers->mDataByteSize = |
| 2582 *numberDataPackets * _inStreamFormat.mBytesPerPacket; |
| 2583 |
| 2584 return 0; |
| 2585 } |
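Two details in the wait loop above: semaphore_timedwait() takes a relative mach_timespec_t, and TIMER_PERIOD_MS is assigned to tv_nsec, so the constant is presumably a nanosecond quantity despite its name; also, the trace text says semaphore_wait() where semaphore_timedwait() is meant. The consumer half of the pattern, sketched with illustrative names (ringBuffer, wanted, sem, kWaitNs, ProducerAlive are assumptions, not from this file):

    while (PaUtil_GetRingBufferReadAvailable(ringBuffer) < wanted) {
      mach_timespec_t timeout = {0, kWaitNs};  // relative timeout, in ns
      kern_return_t kr = semaphore_timedwait(sem, timeout);
      if (kr == KERN_OPERATION_TIMED_OUT && !ProducerAlive())
        return 1;  // producer is gone; abort instead of spinning forever
    }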
| 2586 |
| 2587 bool AudioDeviceMac::RunRender(void* ptrThis) { |
| 2588 return static_cast<AudioDeviceMac*>(ptrThis)->RenderWorkerThread(); |
| 2589 } |
| 2590 |
| 2591 bool AudioDeviceMac::RenderWorkerThread() { |
| 2592 PaRingBufferSize numSamples = |
| 2593 ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame; |
| 2594 while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer) - |
| 2595 _renderDelayOffsetSamples < |
| 2596 numSamples) { |
| 2597 mach_timespec_t timeout; |
| 2598 timeout.tv_sec = 0; |
| 2599 timeout.tv_nsec = TIMER_PERIOD_MS; |
| 2600 |
| 2601 kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout); |
| 2602 if (kernErr == KERN_OPERATION_TIMED_OUT) { |
| 2603 int32_t signal = AtomicGet32(&_renderDeviceIsAlive); |
| 2604 if (signal == 0) { |
| 2605 // The render device is no longer alive; stop the worker thread. |
| 2606 return false; |
| 2607 } |
| 2608 } else if (kernErr != KERN_SUCCESS) { |
| 2609 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2610 " semaphore_timedwait() error: %d", kernErr); |
| 2611 } |
| 2612 } |
| 2613 |
| 2614 int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES]; |
| 2615 |
| 2616 if (!_ptrAudioBuffer) { |
| 2617 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2618 " capture AudioBuffer is invalid"); |
| 2619 return false; |
| 2620 } |
| 2621 |
| 2622 // Ask for new PCM data to be played out using the AudioDeviceBuffer. |
| 2623 uint32_t nSamples = |
| 2624 _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES); |
| 2625 |
| 2626 nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer); |
| 2627 if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) { |
| 2628 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2629 " invalid number of output samples(%d)", nSamples); |
| 2630 } |
| 2631 |
| 2632 uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame; |
| 2633 |
| 2634 SInt16* pPlayBuffer = (SInt16*)&playBuffer; |
| 2635 if (_macBookProPanRight && (_playChannels == 2)) { |
| 2636 // Mix entirely into the right channel and zero the left channel. |
| 2637 SInt32 sampleInt32 = 0; |
| 2638 for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) { |
| 2639 sampleInt32 = pPlayBuffer[sampleIdx]; |
| 2640 sampleInt32 += pPlayBuffer[sampleIdx + 1]; |
| 2641 sampleInt32 /= 2; |
| 2642 |
| 2643 if (sampleInt32 > 32767) { |
| 2644 sampleInt32 = 32767; |
| 2645 } else if (sampleInt32 < -32768) { |
| 2646 sampleInt32 = -32768; |
| 2647 } |
| 2648 |
| 2649 pPlayBuffer[sampleIdx] = 0; |
| 2650 pPlayBuffer[sampleIdx + 1] = static_cast<SInt16>(sampleInt32); |
| 2651 } |
| 2652 } |
| 2653 |
| 2654 PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples); |
| 2655 |
| 2656 return true; |
| 2657 } |
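The _macBookProPanRight branch downmixes each stereo frame to mono and emits it on the right channel only (a MacBook Pro specific workaround configured elsewhere in this file). One frame with illustrative values:

    //   L = 1000, R = -3000
    //   mono = (1000 + (-3000)) / 2 = -1000   (then clamped to int16 range)
    //   frame written back as {L: 0, R: -1000}

Incidentally, the " capture AudioBuffer is invalid" trace earlier in this render path looks like a copy/paste from the capture side.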
| 2658 |
| 2659 bool AudioDeviceMac::RunCapture(void* ptrThis) { |
| 2660 return static_cast<AudioDeviceMac*>(ptrThis)->CaptureWorkerThread(); |
| 2661 } |
| 2662 |
| 2663 bool AudioDeviceMac::CaptureWorkerThread() { |
| 2664 OSStatus err = noErr; |
| 2665 UInt32 noRecSamples = |
| 2666 ENGINE_REC_BUF_SIZE_IN_SAMPLES * _inDesiredFormat.mChannelsPerFrame; |
| 2667 SInt16 recordBuffer[noRecSamples]; |
| 2668 UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES; |
| 2669 |
| 2670 AudioBufferList engineBuffer; |
| 2671 engineBuffer.mNumberBuffers = 1; // Interleaved channels. |
| 2672 engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame; |
| 2673 engineBuffer.mBuffers->mDataByteSize = |
| 2674 _inDesiredFormat.mBytesPerPacket * noRecSamples; |
| 2675 engineBuffer.mBuffers->mData = recordBuffer; |
| 2676 |
| 2677 err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc, |
| 2678 this, &size, &engineBuffer, NULL); |
| 2679 if (err != noErr) { |
| 2680 if (err == 1) { |
| 2681 // This is our own error. |
| 2682 return false; |
| 2683 } else { |
| 2684 logCAMsg(kTraceError, kTraceAudioDevice, _id, |
| 2685 "Error in AudioConverterFillComplexBuffer()", (const char*)&err); |
| 2686 return false; |
| 2687 } |
| 2688 } |
| 2689 |
| 2690 // TODO(xians): what if the returned size is incorrect? |
| 2691 if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) { |
| 2692 uint32_t currentMicLevel(0); |
| 2693 uint32_t newMicLevel(0); |
| 2694 int32_t msecOnPlaySide; |
| 2695 int32_t msecOnRecordSide; |
| 2696 |
| 2697 int32_t captureDelayUs = AtomicGet32(&_captureDelayUs); |
| 2698 int32_t renderDelayUs = AtomicGet32(&_renderDelayUs); |
| 2699 |
| 2700 msecOnPlaySide = |
| 2701 static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5); |
| 2702 msecOnRecordSide = |
| 2703 static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5); |
| 2704 |
| 2705 if (!_ptrAudioBuffer) { |
| 2706 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2707 " capture AudioBuffer is invalid"); |
| 2708 return false; |
| 2709 } |
| 2710 |
| 2711 // store the recorded buffer (no action will be taken if the |
| 2712 // #recorded samples is not a full buffer) |
| 2713 _ptrAudioBuffer->SetRecordedBuffer((int8_t*)&recordBuffer, (uint32_t)size); |
| 2714 |
| 2715 if (AGC()) { |
| 2716 // Use mod to ensure we check the volume on the first pass. |
| 2717 if (get_mic_volume_counter_ms_ % kGetMicVolumeIntervalMs == 0) { |
| 2718 get_mic_volume_counter_ms_ = 0; |
| 2719 // store current mic level in the audio buffer if AGC is enabled |
| 2720 if (MicrophoneVolume(currentMicLevel) == 0) { |
| 2721 // this call does not affect the actual microphone volume |
| 2722 _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel); |
2879 } | 2723 } |
2880 } | 2724 } |
2881 | 2725 get_mic_volume_counter_ms_ += kBufferSizeMs; |
2882 PaRingBufferSize bufSizeSamples = | 2726 } |
2883 PaUtil_GetRingBufferReadAvailable(_paRenderBuffer); | 2727 |
2884 | 2728 _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, 0); |
2885 int32_t renderDelayUs = static_cast<int32_t> (1e-3 * (outputTimeNs - nowNs) | 2729 |
2886 + 0.5); | 2730 _ptrAudioBuffer->SetTypingStatus(KeyPressed()); |
2887 renderDelayUs += static_cast<int32_t> ((1.0e6 * bufSizeSamples) | 2731 |
2888 / _outDesiredFormat.mChannelsPerFrame / _outDesiredFormat.mSampleRate | 2732 // deliver recorded samples at specified sample rate, mic level etc. |
2889 + 0.5); | 2733 // to the observer using callback |
2890 | 2734 _ptrAudioBuffer->DeliverRecordedData(); |
2891 AtomicSet32(&_renderDelayUs, renderDelayUs); | 2735 |
2892 | 2736 if (AGC()) { |
2893 return 0; | 2737 newMicLevel = _ptrAudioBuffer->NewMicLevel(); |
2894 } | 2738 if (newMicLevel != 0) { |
2895 | 2739 // The VQE will only deliver non-zero microphone levels when |
2896 OSStatus AudioDeviceMac::implOutConverterProc(UInt32 *numberDataPackets, | 2740 // a change is needed. |
2897 AudioBufferList *data) | 2741 // Set this new mic level (received from the observer as return |
2898 { | 2742 // value in the callback). |
2899 RTC_DCHECK(data->mNumberBuffers == 1); | 2743 WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, |
2900 PaRingBufferSize numSamples = *numberDataPackets | 2744 " AGC change of volume: old=%u => new=%u", |
2901 * _outDesiredFormat.mChannelsPerFrame; | 2745 currentMicLevel, newMicLevel); |
2902 | 2746 if (SetMicrophoneVolume(newMicLevel) == -1) { |
2903 data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame; | 2747 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
2904 // Always give the converter as much as it wants, zero padding as required. | 2748 " the required modification of the microphone " |
2905 data->mBuffers->mDataByteSize = *numberDataPackets | 2749 "volume failed"); |
2906 * _outDesiredFormat.mBytesPerPacket; | |
2907 data->mBuffers->mData = _renderConvertData; | |
2908 memset(_renderConvertData, 0, sizeof(_renderConvertData)); | |
2909 | |
2910 PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples); | |
2911 | |
2912 kern_return_t kernErr = semaphore_signal_all(_renderSemaphore); | |
2913 if (kernErr != KERN_SUCCESS) | |
2914 { | |
2915 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
2916 " semaphore_signal_all() error: %d", kernErr); | |
2917 return 1; | |
2918 } | |
2919 | |
2920 return 0; | |
2921 } | |
2922 | |
2923 OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList *inputData, | |
2924 const AudioTimeStamp *inputTime) | |
2925 { | |
2926 OSStatus err = noErr; | |
2927 UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime); | |
2928 UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); | |
2929 | |
2930 // Check if we should close down audio device | |
2931 // Double-checked locking optimization to remove locking overhead | |
2932 if (_doStopRec) | |
2933 { | |
2934 _critSect.Enter(); | |
2935 if (_doStopRec) | |
2936 { | |
2937 // This will be signalled only when a shared device is not in use. | |
2938             WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID)); |
2939 WEBRTC_CA_LOG_WARN(AudioDeviceDestroyIOProcID(_inputDeviceID, | |
2940 _inDeviceIOProcID)); | |
2941 if (err == noErr) | |
2942 { | |
2943 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, | |
2944 _id, " Recording device stopped"); | |
2945 } | |
2946 | |
2947 _doStopRec = false; | |
2948 _stopEventRec.Set(); | |
2949 _critSect.Leave(); | |
2950 return 0; | |
2951 } | 2750 } |
2952 _critSect.Leave(); | 2751 } |
2953 } | 2752 } |
2954 | 2753 } |
2955 if (!_recording) | 2754 |
2956 { | 2755 return true; |
2957 // Allow above checks to avoid a timeout on stopping capture. | |
2958 return 0; | |
2959 } | |
2960 | |
2961 PaRingBufferSize bufSizeSamples = | |
2962 PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer); | |
2963 | |
2964 int32_t captureDelayUs = static_cast<int32_t> (1e-3 * (nowNs - inputTimeNs) | |
2965 + 0.5); | |
2966 captureDelayUs | |
2967 += static_cast<int32_t> ((1.0e6 * bufSizeSamples) | |
2968 / _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mSampleRate | |
2969 + 0.5); | |
2970 | |
2971 AtomicSet32(&_captureDelayUs, captureDelayUs); | |
2972 | |
2973 RTC_DCHECK(inputData->mNumberBuffers == 1); | |
2974 PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize | |
2975 * _inStreamFormat.mChannelsPerFrame / _inStreamFormat.mBytesPerPacket; | |
2976 PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData, | |
2977 numSamples); | |
2978 | |
2979 kern_return_t kernErr = semaphore_signal_all(_captureSemaphore); | |
2980 if (kernErr != KERN_SUCCESS) | |
2981 { | |
2982 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
2983 " semaphore_signal_all() error: %d", kernErr); | |
2984 } | |
2985 | |
2986 return err; | |
2987 } | |
2988 | |
2989 OSStatus AudioDeviceMac::implInConverterProc(UInt32 *numberDataPackets, | |
2990 AudioBufferList *data) | |
2991 { | |
2992 RTC_DCHECK(data->mNumberBuffers == 1); | |
2993 PaRingBufferSize numSamples = *numberDataPackets | |
2994 * _inStreamFormat.mChannelsPerFrame; | |
2995 | |
2996 while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) | |
2997 { | |
2998 mach_timespec_t timeout; | |
2999 timeout.tv_sec = 0; | |
3000 timeout.tv_nsec = TIMER_PERIOD_MS; | |
3001 | |
3002 kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout); | |
3003 if (kernErr == KERN_OPERATION_TIMED_OUT) | |
3004 { | |
3005 int32_t signal = AtomicGet32(&_captureDeviceIsAlive); | |
3006 if (signal == 0) | |
3007 { | |
3008                 // The capture device is no longer alive; stop the worker thread. |
3009 *numberDataPackets = 0; | |
3010 return 1; | |
3011 } | |
3012 } else if (kernErr != KERN_SUCCESS) | |
3013 { | |
3014 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
3015 " semaphore_wait() error: %d", kernErr); | |
3016 } | |
3017 } | |
3018 | |
3019 // Pass the read pointer directly to the converter to avoid a memcpy. | |
3020 void* dummyPtr; | |
3021 PaRingBufferSize dummySize; | |
3022 PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples, | |
3023 &data->mBuffers->mData, &numSamples, | |
3024 &dummyPtr, &dummySize); | |
3025 PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples); | |
3026 | |
3027 data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame; | |
3028 *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame; | |
3029 data->mBuffers->mDataByteSize = *numberDataPackets | |
3030 * _inStreamFormat.mBytesPerPacket; | |
3031 | |
3032 return 0; | |
3033 } | |
3034 | |
3035 bool AudioDeviceMac::RunRender(void* ptrThis) | |
3036 { | |
3037 return static_cast<AudioDeviceMac*> (ptrThis)->RenderWorkerThread(); | |
3038 } | |
3039 | |
3040 bool AudioDeviceMac::RenderWorkerThread() | |
3041 { | |
3042 PaRingBufferSize numSamples = ENGINE_PLAY_BUF_SIZE_IN_SAMPLES | |
3043 * _outDesiredFormat.mChannelsPerFrame; | |
3044 while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer) | |
3045 - _renderDelayOffsetSamples < numSamples) | |
3046 { | |
3047 mach_timespec_t timeout; | |
3048 timeout.tv_sec = 0; | |
3049 timeout.tv_nsec = TIMER_PERIOD_MS; | |
3050 | |
3051 kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout); | |
3052 if (kernErr == KERN_OPERATION_TIMED_OUT) | |
3053 { | |
3054 int32_t signal = AtomicGet32(&_renderDeviceIsAlive); | |
3055 if (signal == 0) | |
3056 { | |
3057 // The render device is no longer alive; stop the worker thread. | |
3058 return false; | |
3059 } | |
3060 } else if (kernErr != KERN_SUCCESS) | |
3061 { | |
3062 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
3063 " semaphore_timedwait() error: %d", kernErr); | |
3064 } | |
3065 } | |
3066 | |
3067 int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES]; | |
3068 | |
3069 if (!_ptrAudioBuffer) | |
3070 { | |
3071 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
3072 " capture AudioBuffer is invalid"); | |
3073 return false; | |
3074 } | |
3075 | |
3076 // Ask for new PCM data to be played out using the AudioDeviceBuffer. | |
3077 uint32_t nSamples = | |
3078 _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES); | |
3079 | |
3080 nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer); | |
3081 if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) | |
3082 { | |
3083 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
3084 " invalid number of output samples(%d)", nSamples); | |
3085 } | |
3086 | |
3087 uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame; | |
3088 | |
3089 SInt16 *pPlayBuffer = (SInt16 *) &playBuffer; | |
3090 if (_macBookProPanRight && (_playChannels == 2)) | |
3091 { | |
3092 // Mix entirely into the right channel and zero the left channel. | |
3093 SInt32 sampleInt32 = 0; | |
3094 for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx | |
3095 += 2) | |
3096 { | |
3097 sampleInt32 = pPlayBuffer[sampleIdx]; | |
3098 sampleInt32 += pPlayBuffer[sampleIdx + 1]; | |
3099 sampleInt32 /= 2; | |
3100 | |
3101 if (sampleInt32 > 32767) | |
3102 { | |
3103 sampleInt32 = 32767; | |
3104 } else if (sampleInt32 < -32768) | |
3105 { | |
3106 sampleInt32 = -32768; | |
3107 } | |
3108 | |
3109 pPlayBuffer[sampleIdx] = 0; | |
3110 pPlayBuffer[sampleIdx + 1] = static_cast<SInt16> (sampleInt32); | |
3111 } | |
3112 } | |
3113 | |
3114 PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples); | |
3115 | |
3116 return true; | |
3117 } | |
3118 | |
3119 bool AudioDeviceMac::RunCapture(void* ptrThis) | |
3120 { | |
3121 return static_cast<AudioDeviceMac*> (ptrThis)->CaptureWorkerThread(); | |
3122 } | |
3123 | |
3124 bool AudioDeviceMac::CaptureWorkerThread() | |
3125 { | |
3126 OSStatus err = noErr; | |
3127 UInt32 noRecSamples = ENGINE_REC_BUF_SIZE_IN_SAMPLES | |
3128 * _inDesiredFormat.mChannelsPerFrame; | |
3129 SInt16 recordBuffer[noRecSamples]; | |
3130 UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES; | |
3131 | |
3132 AudioBufferList engineBuffer; | |
3133 engineBuffer.mNumberBuffers = 1; // Interleaved channels. | |
3134 engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame; | |
3135 engineBuffer.mBuffers->mDataByteSize = _inDesiredFormat.mBytesPerPacket | |
3136 * noRecSamples; | |
3137 engineBuffer.mBuffers->mData = recordBuffer; | |
3138 | |
3139 err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc, | |
3140 this, &size, &engineBuffer, NULL); | |
3141 if (err != noErr) | |
3142 { | |
3143 if (err == 1) | |
3144 { | |
3145 // This is our own error. | |
3146 return false; | |
3147 } else | |
3148 { | |
3149 logCAMsg(kTraceError, kTraceAudioDevice, _id, | |
3150 "Error in AudioConverterFillComplexBuffer()", | |
3151 (const char *) &err); | |
3152 return false; | |
3153 } | |
3154 } | |
3155 | |
3156 // TODO(xians): what if the returned size is incorrect? | |
3157 if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) | |
3158 { | |
3159 uint32_t currentMicLevel(0); | |
3160 uint32_t newMicLevel(0); | |
3161 int32_t msecOnPlaySide; | |
3162 int32_t msecOnRecordSide; | |
3163 | |
3164 int32_t captureDelayUs = AtomicGet32(&_captureDelayUs); | |
3165 int32_t renderDelayUs = AtomicGet32(&_renderDelayUs); | |
3166 | |
3167 msecOnPlaySide = static_cast<int32_t> (1e-3 * (renderDelayUs + | |
3168 _renderLatencyUs) + 0.5); | |
3169 msecOnRecordSide = static_cast<int32_t> (1e-3 * (captureDelayUs + | |
3170 _captureLatencyUs) + | |
3171 0.5); | |
3172 | |
3173 if (!_ptrAudioBuffer) | |
3174 { | |
3175 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
3176 " capture AudioBuffer is invalid"); | |
3177 return false; | |
3178 } | |
3179 | |
3180 // store the recorded buffer (no action will be taken if the | |
3181 // #recorded samples is not a full buffer) | |
3182 _ptrAudioBuffer->SetRecordedBuffer((int8_t*) &recordBuffer, | |
3183 (uint32_t) size); | |
3184 | |
3185 if (AGC()) | |
3186 { | |
3187 // Use mod to ensure we check the volume on the first pass. | |
3188 if (get_mic_volume_counter_ms_ % kGetMicVolumeIntervalMs == 0) { | |
3189 get_mic_volume_counter_ms_ = 0; | |
3190 // store current mic level in the audio buffer if AGC is enabled | |
3191 if (MicrophoneVolume(currentMicLevel) == 0) | |
3192 { | |
3193 // this call does not affect the actual microphone volume | |
3194 _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel); | |
3195 } | |
3196 } | |
3197 get_mic_volume_counter_ms_ += kBufferSizeMs; | |
3198 } | |
3199 | |
3200 _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, 0); | |
3201 | |
3202 _ptrAudioBuffer->SetTypingStatus(KeyPressed()); | |
3203 | |
3204 // deliver recorded samples at specified sample rate, mic level etc. | |
3205 // to the observer using callback | |
3206 _ptrAudioBuffer->DeliverRecordedData(); | |
3207 | |
3208 if (AGC()) | |
3209 { | |
3210 newMicLevel = _ptrAudioBuffer->NewMicLevel(); | |
3211 if (newMicLevel != 0) | |
3212 { | |
3213 // The VQE will only deliver non-zero microphone levels when | |
3214 // a change is needed. | |
3215 // Set this new mic level (received from the observer as return | |
3216 // value in the callback). | |
3217 WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, | |
3218 _id, " AGC change of volume: old=%u => new=%u", | |
3219 currentMicLevel, newMicLevel); | |
3220 if (SetMicrophoneVolume(newMicLevel) == -1) | |
3221 { | |
3222 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
3223                              " the required modification of the microphone " |
3224 "volume failed"); | |
3225 } | |
3226 } | |
3227 } | |
3228 } | |
3229 | |
3230 return true; | |
3231 } | 2756 } |
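The AGC handling in CaptureWorkerThread is a round trip through the voice engine: sample the current hardware level (rate limited by get_mic_volume_counter_ms_), attach it to the recorded buffer, deliver, then apply any non-zero suggestion that comes back. Condensed (all calls below exist on this class or on AudioDeviceBuffer):

    uint32_t currentLevel = 0;
    if (AGC() && MicrophoneVolume(currentLevel) == 0)
      _ptrAudioBuffer->SetCurrentMicLevel(currentLevel);  // report only
    _ptrAudioBuffer->DeliverRecordedData();     // VQE may choose a new level
    uint32_t newLevel = AGC() ? _ptrAudioBuffer->NewMicLevel() : 0;
    if (newLevel != 0)
      SetMicrophoneVolume(newLevel);            // apply the suggestion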
3232 | 2757 |
3233 bool AudioDeviceMac::KeyPressed() { | 2758 bool AudioDeviceMac::KeyPressed() { |
3234 bool key_down = false; | 2759 bool key_down = false; |
3235 // Loop through all Mac virtual key constant values. | 2760 // Loop through all Mac virtual key constant values. |
3236 for (unsigned int key_index = 0; | 2761 for (unsigned int key_index = 0; key_index < arraysize(prev_key_state_); |
3237 key_index < arraysize(prev_key_state_); | 2762 ++key_index) { |
3238 ++key_index) { | 2763 bool keyState = |
3239 bool keyState = CGEventSourceKeyState( | 2764 CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index); |
3240 kCGEventSourceStateHIDSystemState, | |
3241 key_index); | |
3242 // A false -> true change in keymap means a key is pressed. | 2765 // A false -> true change in keymap means a key is pressed. |
3243 key_down |= (keyState && !prev_key_state_[key_index]); | 2766 key_down |= (keyState && !prev_key_state_[key_index]); |
3244 // Save current state. | 2767 // Save current state. |
3245 prev_key_state_[key_index] = keyState; | 2768 prev_key_state_[key_index] = keyState; |
3246 } | 2769 } |
3247 return key_down; | 2770 return key_down; |
3248 } | 2771 } |
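KeyPressed() reports edges, not levels: a virtual key contributes only on a false -> true transition since the previous scan, so a key held across several polls is counted once.

    //   prev  now   -> contributes to key_down?
    //   0     1     -> yes (fresh press)
    //   1     1     -> no  (still held)
    //   x     0     -> no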
3249 } // namespace webrtc | 2772 } // namespace webrtc |