| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_device/mac/audio_device_mac.h" | 11 #include "webrtc/modules/audio_device/mac/audio_device_mac.h" |
| 12 #include "webrtc/modules/audio_device/audio_device_config.h" | 12 #include "webrtc/modules/audio_device/audio_device_config.h" |
| 13 #include "webrtc/modules/audio_device/mac/portaudio/pa_ringbuffer.h" | 13 #include "webrtc/modules/audio_device/mac/portaudio/pa_ringbuffer.h" |
| 14 #include "webrtc/rtc_base/arraysize.h" | 14 #include "webrtc/rtc_base/arraysize.h" |
| 15 #include "webrtc/rtc_base/checks.h" | 15 #include "webrtc/rtc_base/checks.h" |
| 16 #include "webrtc/rtc_base/logging.h" | |
| 17 #include "webrtc/rtc_base/platform_thread.h" | 16 #include "webrtc/rtc_base/platform_thread.h" |
| 18 #include "webrtc/system_wrappers/include/event_wrapper.h" | 17 #include "webrtc/system_wrappers/include/event_wrapper.h" |
| 19 #include "webrtc/system_wrappers/include/trace.h" | |
| 20 | 18 |
| 21 #include <ApplicationServices/ApplicationServices.h> | 19 #include <ApplicationServices/ApplicationServices.h> |
| 22 #include <libkern/OSAtomic.h> // OSAtomicCompareAndSwap() | 20 #include <libkern/OSAtomic.h> // OSAtomicCompareAndSwap() |
| 23 #include <mach/mach.h> // mach_task_self() | 21 #include <mach/mach.h> // mach_task_self() |
| 24 #include <sys/sysctl.h> // sysctlbyname() | 22 #include <sys/sysctl.h> // sysctlbyname() |
| 25 | 23 |
| 26 namespace webrtc { | 24 namespace webrtc { |
| 27 | 25 |
| 28 #define WEBRTC_CA_RETURN_ON_ERR(expr) \ | 26 #define WEBRTC_CA_RETURN_ON_ERR(expr) \ |
| 29 do { \ | 27 do { \ |
| 30 err = expr; \ | 28 err = expr; \ |
| 31 if (err != noErr) { \ | 29 if (err != noErr) { \ |
| 32 logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \ | 30 logCAMsg(rtc::LS_ERROR, "Error in " #expr, \ |
| 33 (const char*) & err); \ | 31 (const char*) & err); \ |
| 34 return -1; \ | 32 return -1; \ |
| 35 } \ | 33 } \ |
| 36 } while (0) | 34 } while (0) |
| 37 | 35 |
| 38 #define WEBRTC_CA_LOG_ERR(expr) \ | 36 #define WEBRTC_CA_LOG_ERR(expr) \ |
| 39 do { \ | 37 do { \ |
| 40 err = expr; \ | 38 err = expr; \ |
| 41 if (err != noErr) { \ | 39 if (err != noErr) { \ |
| 42 logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \ | 40 logCAMsg(rtc::LS_ERROR, "Error in " #expr, \ |
| 43 (const char*) & err); \ | 41 (const char*) & err); \ |
| 44 } \ | 42 } \ |
| 45 } while (0) | 43 } while (0) |
| 46 | 44 |
| 47 #define WEBRTC_CA_LOG_WARN(expr) \ | 45 #define WEBRTC_CA_LOG_WARN(expr) \ |
| 48 do { \ | 46 do { \ |
| 49 err = expr; \ | 47 err = expr; \ |
| 50 if (err != noErr) { \ | 48 if (err != noErr) { \ |
| 51 logCAMsg(kTraceWarning, kTraceAudioDevice, _id, "Error in " #expr, \ | 49 logCAMsg(rtc::LS_WARNING, "Error in " #expr, \ |
| 52 (const char*) & err); \ | 50 (const char*) & err); \ |
| 53 } \ | 51 } \ |
| 54 } while (0) | 52 } while (0) |
| 55 | 53 |
| 56 enum { MaxNumberDevices = 64 }; | 54 enum { MaxNumberDevices = 64 }; |
| 57 | 55 |
| 58 void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) { | 56 void AudioDeviceMac::AtomicSet32(int32_t* theValue, int32_t newValue) { |
| 59 while (1) { | 57 while (1) { |
| 60 int32_t oldValue = *theValue; | 58 int32_t oldValue = *theValue; |
| 61 if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue) == true) { | 59 if (OSAtomicCompareAndSwap32Barrier(oldValue, newValue, theValue) == true) { |
| 62 return; | 60 return; |
| 63 } | 61 } |
| 64 } | 62 } |
| 65 } | 63 } |
| 66 | 64 |
| 67 int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) { | 65 int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) { |
| 68 while (1) { | 66 while (1) { |
| 69 int32_t value = *theValue; | 67 int32_t value = *theValue; |
| 70 if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true) { | 68 if (OSAtomicCompareAndSwap32Barrier(value, value, theValue) == true) { |
| 71 return value; | 69 return value; |
| 72 } | 70 } |
| 73 } | 71 } |
| 74 } | 72 } |
| 75 | 73 |
| 76 // CoreAudio errors are best interpreted as four character strings. | 74 // CoreAudio errors are best interpreted as four character strings. |
| 77 void AudioDeviceMac::logCAMsg(const TraceLevel level, | 75 void AudioDeviceMac::logCAMsg(const rtc::LoggingSeverity sev, |
| 78 const TraceModule module, | |
| 79 const int32_t id, | |
| 80 const char* msg, | 76 const char* msg, |
| 81 const char* err) { | 77 const char* err) { |
| 82 RTC_DCHECK(msg != NULL); | 78 RTC_DCHECK(msg != NULL); |
| 83 RTC_DCHECK(err != NULL); | 79 RTC_DCHECK(err != NULL); |
| 84 | 80 |
| 85 #ifdef WEBRTC_ARCH_BIG_ENDIAN | 81 #ifdef WEBRTC_ARCH_BIG_ENDIAN |
| 86 WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err); | 82 switch (sev) { |
| 83 case rtc::LS_ERROR: |
| 84 LOG(LS_ERROR) << msg << ": " << err[0] << err[1] << err[2] << err[3]; |
| 85 break; |
| 86 case rtc::LS_WARNING: |
| 87 LOG(LS_WARNING) << msg << ": " << err[0] << err[1] << err[2] << err[3]; |
| 88 break; |
| 89 case rtc::LS_VERBOSE: |
| 90 LOG(LS_VERBOSE) << msg << ": " << err[0] << err[1] << err[2] << err[3]; |
| 91 break; |
| 92 default: |
| 93 break; |
| 94 } |
| 87 #else | 95 #else |
| 88 // We need to flip the characters in this case. | 96 // We need to flip the characters in this case. |
| 89 WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err + 2, | 97 switch (sev) { |
| 90 err + 1, err); | 98 case rtc::LS_ERROR: |
| 99 LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0]; |
| 100 break; |
| 101 case rtc::LS_WARNING: |
| 102 LOG(LS_WARNING) << msg << ": " << err[3] << err[2] << err[1] << err[0]; |
| 103 break; |
| 104 case rtc::LS_VERBOSE: |
| 105 LOG(LS_VERBOSE) << msg << ": " << err[3] << err[2] << err[1] << err[0]; |
| 106 break; |
| 107 default: |
| 108 break; |
| 109 } |
| 91 #endif | 110 #endif |
| 92 } | 111 } |
| 93 | 112 |
| 94 AudioDeviceMac::AudioDeviceMac(const int32_t id) | 113 AudioDeviceMac::AudioDeviceMac() |
| 95 : _ptrAudioBuffer(NULL), | 114 : _ptrAudioBuffer(NULL), |
| 96 _stopEventRec(*EventWrapper::Create()), | 115 _stopEventRec(*EventWrapper::Create()), |
| 97 _stopEvent(*EventWrapper::Create()), | 116 _stopEvent(*EventWrapper::Create()), |
| 98 _id(id), | 117 _mixerManager(), |
| 99 _mixerManager(id), | |
| 100 _inputDeviceIndex(0), | 118 _inputDeviceIndex(0), |
| 101 _outputDeviceIndex(0), | 119 _outputDeviceIndex(0), |
| 102 _inputDeviceID(kAudioObjectUnknown), | 120 _inputDeviceID(kAudioObjectUnknown), |
| 103 _outputDeviceID(kAudioObjectUnknown), | 121 _outputDeviceID(kAudioObjectUnknown), |
| 104 _inputDeviceIsSpecified(false), | 122 _inputDeviceIsSpecified(false), |
| 105 _outputDeviceIsSpecified(false), | 123 _outputDeviceIsSpecified(false), |
| 106 _recChannels(N_REC_CHANNELS), | 124 _recChannels(N_REC_CHANNELS), |
| 107 _playChannels(N_PLAY_CHANNELS), | 125 _playChannels(N_PLAY_CHANNELS), |
| 108 _captureBufData(NULL), | 126 _captureBufData(NULL), |
| 109 _renderBufData(NULL), | 127 _renderBufData(NULL), |
| (...skipping 21 matching lines...) Expand all Loading... |
| 131 _playWarning(0), | 149 _playWarning(0), |
| 132 _playError(0), | 150 _playError(0), |
| 133 _recWarning(0), | 151 _recWarning(0), |
| 134 _recError(0), | 152 _recError(0), |
| 135 _paCaptureBuffer(NULL), | 153 _paCaptureBuffer(NULL), |
| 136 _paRenderBuffer(NULL), | 154 _paRenderBuffer(NULL), |
| 137 _captureBufSizeSamples(0), | 155 _captureBufSizeSamples(0), |
| 138 _renderBufSizeSamples(0), | 156 _renderBufSizeSamples(0), |
| 139 prev_key_state_(), | 157 prev_key_state_(), |
| 140 get_mic_volume_counter_ms_(0) { | 158 get_mic_volume_counter_ms_(0) { |
| 141 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__); | 159 LOG(LS_INFO) << __FUNCTION__ << " created"; |
| 142 | 160 |
| 143 RTC_DCHECK(&_stopEvent != NULL); | 161 RTC_DCHECK(&_stopEvent != NULL); |
| 144 RTC_DCHECK(&_stopEventRec != NULL); | 162 RTC_DCHECK(&_stopEventRec != NULL); |
| 145 | 163 |
| 146 memset(_renderConvertData, 0, sizeof(_renderConvertData)); | 164 memset(_renderConvertData, 0, sizeof(_renderConvertData)); |
| 147 memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription)); | 165 memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription)); |
| 148 memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); | 166 memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); |
| 149 memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription)); | 167 memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription)); |
| 150 memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); | 168 memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); |
| 151 } | 169 } |
| 152 | 170 |
| 153 AudioDeviceMac::~AudioDeviceMac() { | 171 AudioDeviceMac::~AudioDeviceMac() { |
| 154 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", | 172 LOG(LS_INFO) << __FUNCTION__ << " destroyed"; |
| 155 __FUNCTION__); | |
| 156 | 173 |
| 157 if (!_isShutDown) { | 174 if (!_isShutDown) { |
| 158 Terminate(); | 175 Terminate(); |
| 159 } | 176 } |
| 160 | 177 |
| 161 RTC_DCHECK(!capture_worker_thread_.get()); | 178 RTC_DCHECK(!capture_worker_thread_.get()); |
| 162 RTC_DCHECK(!render_worker_thread_.get()); | 179 RTC_DCHECK(!render_worker_thread_.get()); |
| 163 | 180 |
| 164 if (_paRenderBuffer) { | 181 if (_paRenderBuffer) { |
| 165 delete _paRenderBuffer; | 182 delete _paRenderBuffer; |
| (...skipping 11 matching lines...) Expand all Loading... |
| 177 } | 194 } |
| 178 | 195 |
| 179 if (_captureBufData) { | 196 if (_captureBufData) { |
| 180 delete[] _captureBufData; | 197 delete[] _captureBufData; |
| 181 _captureBufData = NULL; | 198 _captureBufData = NULL; |
| 182 } | 199 } |
| 183 | 200 |
| 184 kern_return_t kernErr = KERN_SUCCESS; | 201 kern_return_t kernErr = KERN_SUCCESS; |
| 185 kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore); | 202 kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore); |
| 186 if (kernErr != KERN_SUCCESS) { | 203 if (kernErr != KERN_SUCCESS) { |
| 187 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 204 LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr; |
| 188 " semaphore_destroy() error: %d", kernErr); | |
| 189 } | 205 } |
| 190 | 206 |
| 191 kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore); | 207 kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore); |
| 192 if (kernErr != KERN_SUCCESS) { | 208 if (kernErr != KERN_SUCCESS) { |
| 193 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 209 LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr; |
| 194 " semaphore_destroy() error: %d", kernErr); | |
| 195 } | 210 } |
| 196 | 211 |
| 197 delete &_stopEvent; | 212 delete &_stopEvent; |
| 198 delete &_stopEventRec; | 213 delete &_stopEventRec; |
| 199 } | 214 } |
| 200 | 215 |
| 201 // ============================================================================ | 216 // ============================================================================ |
| 202 // API | 217 // API |
| 203 // ============================================================================ | 218 // ============================================================================ |
| 204 | 219 |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 240 _renderBufSizeSamples = powerOfTwo; | 255 _renderBufSizeSamples = powerOfTwo; |
| 241 _renderBufData = new SInt16[_renderBufSizeSamples]; | 256 _renderBufData = new SInt16[_renderBufSizeSamples]; |
| 242 } | 257 } |
| 243 | 258 |
| 244 if (_paRenderBuffer == NULL) { | 259 if (_paRenderBuffer == NULL) { |
| 245 _paRenderBuffer = new PaUtilRingBuffer; | 260 _paRenderBuffer = new PaUtilRingBuffer; |
| 246 PaRingBufferSize bufSize = -1; | 261 PaRingBufferSize bufSize = -1; |
| 247 bufSize = PaUtil_InitializeRingBuffer( | 262 bufSize = PaUtil_InitializeRingBuffer( |
| 248 _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData); | 263 _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData); |
| 249 if (bufSize == -1) { | 264 if (bufSize == -1) { |
| 250 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, | 265 LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error"; |
| 251 " PaUtil_InitializeRingBuffer() error"); | |
| 252 return InitStatus::PLAYOUT_ERROR; | 266 return InitStatus::PLAYOUT_ERROR; |
| 253 } | 267 } |
| 254 } | 268 } |
| 255 | 269 |
| 256 if (_captureBufData == NULL) { | 270 if (_captureBufData == NULL) { |
| 257 UInt32 powerOfTwo = 1; | 271 UInt32 powerOfTwo = 1; |
| 258 while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) { | 272 while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) { |
| 259 powerOfTwo <<= 1; | 273 powerOfTwo <<= 1; |
| 260 } | 274 } |
| 261 _captureBufSizeSamples = powerOfTwo; | 275 _captureBufSizeSamples = powerOfTwo; |
| 262 _captureBufData = new Float32[_captureBufSizeSamples]; | 276 _captureBufData = new Float32[_captureBufSizeSamples]; |
| 263 } | 277 } |
| 264 | 278 |
| 265 if (_paCaptureBuffer == NULL) { | 279 if (_paCaptureBuffer == NULL) { |
| 266 _paCaptureBuffer = new PaUtilRingBuffer; | 280 _paCaptureBuffer = new PaUtilRingBuffer; |
| 267 PaRingBufferSize bufSize = -1; | 281 PaRingBufferSize bufSize = -1; |
| 268 bufSize = | 282 bufSize = |
| 269 PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32), | 283 PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32), |
| 270 _captureBufSizeSamples, _captureBufData); | 284 _captureBufSizeSamples, _captureBufData); |
| 271 if (bufSize == -1) { | 285 if (bufSize == -1) { |
| 272 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, | 286 LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error"; |
| 273 " PaUtil_InitializeRingBuffer() error"); | |
| 274 return InitStatus::RECORDING_ERROR; | 287 return InitStatus::RECORDING_ERROR; |
| 275 } | 288 } |
| 276 } | 289 } |
| 277 | 290 |
| 278 kern_return_t kernErr = KERN_SUCCESS; | 291 kern_return_t kernErr = KERN_SUCCESS; |
| 279 kernErr = semaphore_create(mach_task_self(), &_renderSemaphore, | 292 kernErr = semaphore_create(mach_task_self(), &_renderSemaphore, |
| 280 SYNC_POLICY_FIFO, 0); | 293 SYNC_POLICY_FIFO, 0); |
| 281 if (kernErr != KERN_SUCCESS) { | 294 if (kernErr != KERN_SUCCESS) { |
| 282 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, | 295 LOG(LS_ERROR) << "semaphore_create() error: " << kernErr; |
| 283 " semaphore_create() error: %d", kernErr); | |
| 284 return InitStatus::OTHER_ERROR; | 296 return InitStatus::OTHER_ERROR; |
| 285 } | 297 } |
| 286 | 298 |
| 287 kernErr = semaphore_create(mach_task_self(), &_captureSemaphore, | 299 kernErr = semaphore_create(mach_task_self(), &_captureSemaphore, |
| 288 SYNC_POLICY_FIFO, 0); | 300 SYNC_POLICY_FIFO, 0); |
| 289 if (kernErr != KERN_SUCCESS) { | 301 if (kernErr != KERN_SUCCESS) { |
| 290 WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, | 302 LOG(LS_ERROR) << "semaphore_create() error: " << kernErr; |
| 291 " semaphore_create() error: %d", kernErr); | |
| 292 return InitStatus::OTHER_ERROR; | 303 return InitStatus::OTHER_ERROR; |
| 293 } | 304 } |
| 294 | 305 |
| 295 // Setting RunLoop to NULL here instructs HAL to manage its own thread for | 306 // Setting RunLoop to NULL here instructs HAL to manage its own thread for |
| 296 // notifications. This was the default behaviour on OS X 10.5 and earlier, | 307 // notifications. This was the default behaviour on OS X 10.5 and earlier, |
| 297 // but now must be explicitly specified. HAL would otherwise try to use the | 308 // but now must be explicitly specified. HAL would otherwise try to use the |
| 298 // main thread to issue notifications. | 309 // main thread to issue notifications. |
| 299 AudioObjectPropertyAddress propertyAddress = { | 310 AudioObjectPropertyAddress propertyAddress = { |
| 300 kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal, | 311 kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal, |
| 301 kAudioObjectPropertyElementMaster}; | 312 kAudioObjectPropertyElementMaster}; |
| (...skipping 14 matching lines...) Expand all Loading... |
| 316 | 327 |
| 317 // Determine if this is a MacBook Pro | 328 // Determine if this is a MacBook Pro |
| 318 _macBookPro = false; | 329 _macBookPro = false; |
| 319 _macBookProPanRight = false; | 330 _macBookProPanRight = false; |
| 320 char buf[128]; | 331 char buf[128]; |
| 321 size_t length = sizeof(buf); | 332 size_t length = sizeof(buf); |
| 322 memset(buf, 0, length); | 333 memset(buf, 0, length); |
| 323 | 334 |
| 324 int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0); | 335 int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0); |
| 325 if (intErr != 0) { | 336 if (intErr != 0) { |
| 326 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 337 LOG(LS_ERROR) << "Error in sysctlbyname(): " << intErr; |
| 327 " Error in sysctlbyname(): %d", err); | |
| 328 } else { | 338 } else { |
| 329 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Hardware model: %s", | 339 LOG(LS_VERBOSE) << "Hardware model: " << buf; |
| 330 buf); | |
| 331 if (strncmp(buf, "MacBookPro", 10) == 0) { | 340 if (strncmp(buf, "MacBookPro", 10) == 0) { |
| 332 _macBookPro = true; | 341 _macBookPro = true; |
| 333 } | 342 } |
| 334 } | 343 } |
| 335 | 344 |
| 336 _playWarning = 0; | 345 _playWarning = 0; |
| 337 _playError = 0; | 346 _playError = 0; |
| 338 _recWarning = 0; | 347 _recWarning = 0; |
| 339 _recError = 0; | 348 _recError = 0; |
| 340 | 349 |
| 341 get_mic_volume_counter_ms_ = 0; | 350 get_mic_volume_counter_ms_ = 0; |
| 342 | 351 |
| 343 _initialized = true; | 352 _initialized = true; |
| 344 | 353 |
| 345 return InitStatus::OK; | 354 return InitStatus::OK; |
| 346 } | 355 } |
| 347 | 356 |
| 348 int32_t AudioDeviceMac::Terminate() { | 357 int32_t AudioDeviceMac::Terminate() { |
| 349 if (!_initialized) { | 358 if (!_initialized) { |
| 350 return 0; | 359 return 0; |
| 351 } | 360 } |
| 352 | 361 |
| 353 if (_recording) { | 362 if (_recording) { |
| 354 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 363 LOG(LS_ERROR) << "Recording must be stopped"; |
| 355 " Recording must be stopped"); | |
| 356 return -1; | 364 return -1; |
| 357 } | 365 } |
| 358 | 366 |
| 359 if (_playing) { | 367 if (_playing) { |
| 360 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 368 LOG(LS_ERROR) << "Playback must be stopped"; |
| 361 " Playback must be stopped"); | |
| 362 return -1; | 369 return -1; |
| 363 } | 370 } |
| 364 | 371 |
| 365 _critSect.Enter(); | 372 _critSect.Enter(); |
| 366 | 373 |
| 367 _mixerManager.Close(); | 374 _mixerManager.Close(); |
| 368 | 375 |
| 369 OSStatus err = noErr; | 376 OSStatus err = noErr; |
| 370 int retVal = 0; | 377 int retVal = 0; |
| 371 | 378 |
| 372 AudioObjectPropertyAddress propertyAddress = { | 379 AudioObjectPropertyAddress propertyAddress = { |
| 373 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, | 380 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, |
| 374 kAudioObjectPropertyElementMaster}; | 381 kAudioObjectPropertyElementMaster}; |
| 375 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( | 382 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( |
| 376 kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this)); | 383 kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this)); |
| 377 | 384 |
| 378 err = AudioHardwareUnload(); | 385 err = AudioHardwareUnload(); |
| 379 if (err != noErr) { | 386 if (err != noErr) { |
| 380 logCAMsg(kTraceError, kTraceAudioDevice, _id, | 387 logCAMsg(rtc::LS_ERROR, |
| 381 "Error in AudioHardwareUnload()", (const char*)&err); | 388 "Error in AudioHardwareUnload()", (const char*)&err); |
| 382 retVal = -1; | 389 retVal = -1; |
| 383 } | 390 } |
| 384 | 391 |
| 385 _isShutDown = true; | 392 _isShutDown = true; |
| 386 _initialized = false; | 393 _initialized = false; |
| 387 _outputDeviceIsSpecified = false; | 394 _outputDeviceIsSpecified = false; |
| 388 _inputDeviceIsSpecified = false; | 395 _inputDeviceIsSpecified = false; |
| 389 | 396 |
| 390 _critSect.Leave(); | 397 _critSect.Leave(); |
| (...skipping 145 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 536 if (_mixerManager.SpeakerVolume(level) == -1) { | 543 if (_mixerManager.SpeakerVolume(level) == -1) { |
| 537 return -1; | 544 return -1; |
| 538 } | 545 } |
| 539 | 546 |
| 540 volume = level; | 547 volume = level; |
| 541 return 0; | 548 return 0; |
| 542 } | 549 } |
| 543 | 550 |
| 544 int32_t AudioDeviceMac::SetWaveOutVolume(uint16_t volumeLeft, | 551 int32_t AudioDeviceMac::SetWaveOutVolume(uint16_t volumeLeft, |
| 545 uint16_t volumeRight) { | 552 uint16_t volumeRight) { |
| 546 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 553 LOG(LS_WARNING) << "API call not supported on this platform"; |
| 547 " API call not supported on this platform"); | |
| 548 return -1; | 554 return -1; |
| 549 } | 555 } |
| 550 | 556 |
| 551 int32_t AudioDeviceMac::WaveOutVolume(uint16_t& /*volumeLeft*/, | 557 int32_t AudioDeviceMac::WaveOutVolume(uint16_t& /*volumeLeft*/, |
| 552 uint16_t& /*volumeRight*/) const { | 558 uint16_t& /*volumeRight*/) const { |
| 553 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 559 LOG(LS_WARNING) << "API call not supported on this platform"; |
| 554 " API call not supported on this platform"); | |
| 555 return -1; | 560 return -1; |
| 556 } | 561 } |
| 557 | 562 |
| 558 int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const { | 563 int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const { |
| 559 uint32_t maxVol(0); | 564 uint32_t maxVol(0); |
| 560 | 565 |
| 561 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { | 566 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { |
| 562 return -1; | 567 return -1; |
| 563 } | 568 } |
| 564 | 569 |
| (...skipping 278 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 843 } | 848 } |
| 844 | 849 |
| 845 int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) { | 850 int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) { |
| 846 return (_mixerManager.SetMicrophoneVolume(volume)); | 851 return (_mixerManager.SetMicrophoneVolume(volume)); |
| 847 } | 852 } |
| 848 | 853 |
| 849 int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const { | 854 int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const { |
| 850 uint32_t level(0); | 855 uint32_t level(0); |
| 851 | 856 |
| 852 if (_mixerManager.MicrophoneVolume(level) == -1) { | 857 if (_mixerManager.MicrophoneVolume(level) == -1) { |
| 853 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 858 LOG(LS_WARNING) << "failed to retrieve current microphone level"; |
| 854 " failed to retrive current microphone level"); | |
| 855 return -1; | 859 return -1; |
| 856 } | 860 } |
| 857 | 861 |
| 858 volume = level; | 862 volume = level; |
| 859 return 0; | 863 return 0; |
| 860 } | 864 } |
| 861 | 865 |
| 862 int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const { | 866 int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const { |
| 863 uint32_t maxVol(0); | 867 uint32_t maxVol(0); |
| 864 | 868 |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 901 int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) { | 905 int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) { |
| 902 rtc::CritScope lock(&_critSect); | 906 rtc::CritScope lock(&_critSect); |
| 903 | 907 |
| 904 if (_playIsInitialized) { | 908 if (_playIsInitialized) { |
| 905 return -1; | 909 return -1; |
| 906 } | 910 } |
| 907 | 911 |
| 908 AudioDeviceID playDevices[MaxNumberDevices]; | 912 AudioDeviceID playDevices[MaxNumberDevices]; |
| 909 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput, | 913 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput, |
| 910 playDevices, MaxNumberDevices); | 914 playDevices, MaxNumberDevices); |
| 911 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 915 LOG(LS_VERBOSE) << "number of available waveform-audio output devices is " |
| 912 " number of availiable waveform-audio output devices is %u", | 916 << nDevices; |
| 913 nDevices); | |
| 914 | 917 |
| 915 if (index > (nDevices - 1)) { | 918 if (index > (nDevices - 1)) { |
| 916 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 919 LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1) |
| 917 " device index is out of range [0,%u]", (nDevices - 1)); | 920 << "]"; |
| 918 return -1; | 921 return -1; |
| 919 } | 922 } |
| 920 | 923 |
| 921 _outputDeviceIndex = index; | 924 _outputDeviceIndex = index; |
| 922 _outputDeviceIsSpecified = true; | 925 _outputDeviceIsSpecified = true; |
| 923 | 926 |
| 924 return 0; | 927 return 0; |
| 925 } | 928 } |
| 926 | 929 |
| 927 int32_t AudioDeviceMac::SetPlayoutDevice( | 930 int32_t AudioDeviceMac::SetPlayoutDevice( |
| 928 AudioDeviceModule::WindowsDeviceType /*device*/) { | 931 AudioDeviceModule::WindowsDeviceType /*device*/) { |
| 929 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 932 LOG(LS_ERROR) << "WindowsDeviceType not supported"; |
| 930 "WindowsDeviceType not supported"); | |
| 931 return -1; | 933 return -1; |
| 932 } | 934 } |
| 933 | 935 |
| 934 int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index, | 936 int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index, |
| 935 char name[kAdmMaxDeviceNameSize], | 937 char name[kAdmMaxDeviceNameSize], |
| 936 char guid[kAdmMaxGuidSize]) { | 938 char guid[kAdmMaxGuidSize]) { |
| 937 const uint16_t nDevices(PlayoutDevices()); | 939 const uint16_t nDevices(PlayoutDevices()); |
| 938 | 940 |
| 939 if ((index > (nDevices - 1)) || (name == NULL)) { | 941 if ((index > (nDevices - 1)) || (name == NULL)) { |
| 940 return -1; | 942 return -1; |
| (...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 974 } | 976 } |
| 975 | 977 |
| 976 int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) { | 978 int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) { |
| 977 if (_recIsInitialized) { | 979 if (_recIsInitialized) { |
| 978 return -1; | 980 return -1; |
| 979 } | 981 } |
| 980 | 982 |
| 981 AudioDeviceID recDevices[MaxNumberDevices]; | 983 AudioDeviceID recDevices[MaxNumberDevices]; |
| 982 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput, | 984 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput, |
| 983 recDevices, MaxNumberDevices); | 985 recDevices, MaxNumberDevices); |
| 984 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 986 LOG(LS_VERBOSE) << "number of available waveform-audio input devices is " |
| 985 " number of availiable waveform-audio input devices is %u", | 987 << nDevices; |
| 986 nDevices); | |
| 987 | 988 |
| 988 if (index > (nDevices - 1)) { | 989 if (index > (nDevices - 1)) { |
| 989 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 990 LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1) |
| 990 " device index is out of range [0,%u]", (nDevices - 1)); | 991 << "]"; |
| 991 return -1; | 992 return -1; |
| 992 } | 993 } |
| 993 | 994 |
| 994 _inputDeviceIndex = index; | 995 _inputDeviceIndex = index; |
| 995 _inputDeviceIsSpecified = true; | 996 _inputDeviceIsSpecified = true; |
| 996 | 997 |
| 997 return 0; | 998 return 0; |
| 998 } | 999 } |
| 999 | 1000 |
| 1000 int32_t AudioDeviceMac::SetRecordingDevice( | 1001 int32_t AudioDeviceMac::SetRecordingDevice( |
| 1001 AudioDeviceModule::WindowsDeviceType /*device*/) { | 1002 AudioDeviceModule::WindowsDeviceType /*device*/) { |
| 1002 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1003 LOG(LS_ERROR) << "WindowsDeviceType not supported"; |
| 1003 "WindowsDeviceType not supported"); | |
| 1004 return -1; | 1004 return -1; |
| 1005 } | 1005 } |
| 1006 | 1006 |
| 1007 int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) { | 1007 int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) { |
| 1008 available = true; | 1008 available = true; |
| 1009 | 1009 |
| 1010 // Try to initialize the playout side | 1010 // Try to initialize the playout side |
| 1011 if (InitPlayout() == -1) { | 1011 if (InitPlayout() == -1) { |
| 1012 available = false; | 1012 available = false; |
| 1013 } | 1013 } |
| (...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1060 if (!_outputDeviceIsSpecified) { | 1060 if (!_outputDeviceIsSpecified) { |
| 1061 return -1; | 1061 return -1; |
| 1062 } | 1062 } |
| 1063 | 1063 |
| 1064 if (_playIsInitialized) { | 1064 if (_playIsInitialized) { |
| 1065 return 0; | 1065 return 0; |
| 1066 } | 1066 } |
| 1067 | 1067 |
| 1068 // Initialize the speaker (devices might have been added or removed) | 1068 // Initialize the speaker (devices might have been added or removed) |
| 1069 if (InitSpeaker() == -1) { | 1069 if (InitSpeaker() == -1) { |
| 1070 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1070 LOG(LS_WARNING) << "InitSpeaker() failed"; |
| 1071 " InitSpeaker() failed"); | |
| 1072 } | 1071 } |
| 1073 | 1072 |
| 1074 if (!MicrophoneIsInitialized()) { | 1073 if (!MicrophoneIsInitialized()) { |
| 1075 // Make this call to check if we are using | 1074 // Make this call to check if we are using |
| 1076 // one or two devices (_twoDevices) | 1075 // one or two devices (_twoDevices) |
| 1077 bool available = false; | 1076 bool available = false; |
| 1078 if (MicrophoneIsAvailable(available) == -1) { | 1077 if (MicrophoneIsAvailable(available) == -1) { |
| 1079 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1078 LOG(LS_WARNING) << "MicrophoneIsAvailable() failed"; |
| 1080 " MicrophoneIsAvailable() failed"); | |
| 1081 } | 1079 } |
| 1082 } | 1080 } |
| 1083 | 1081 |
| 1084 PaUtil_FlushRingBuffer(_paRenderBuffer); | 1082 PaUtil_FlushRingBuffer(_paRenderBuffer); |
| 1085 | 1083 |
| 1086 OSStatus err = noErr; | 1084 OSStatus err = noErr; |
| 1087 UInt32 size = 0; | 1085 UInt32 size = 0; |
| 1088 _renderDelayOffsetSamples = 0; | 1086 _renderDelayOffsetSamples = 0; |
| 1089 _renderDelayUs = 0; | 1087 _renderDelayUs = 0; |
| 1090 _renderLatencyUs = 0; | 1088 _renderLatencyUs = 0; |
| (...skipping 10 matching lines...) Expand all Loading... |
| 1101 Boolean hasProperty = | 1099 Boolean hasProperty = |
| 1102 AudioObjectHasProperty(_outputDeviceID, &propertyAddress); | 1100 AudioObjectHasProperty(_outputDeviceID, &propertyAddress); |
| 1103 if (hasProperty) { | 1101 if (hasProperty) { |
| 1104 UInt32 dataSource = 0; | 1102 UInt32 dataSource = 0; |
| 1105 size = sizeof(dataSource); | 1103 size = sizeof(dataSource); |
| 1106 WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData( | 1104 WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData( |
| 1107 _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource)); | 1105 _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource)); |
| 1108 | 1106 |
| 1109 if (dataSource == 'ispk') { | 1107 if (dataSource == 'ispk') { |
| 1110 _macBookProPanRight = true; | 1108 _macBookProPanRight = true; |
| 1111 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1109 LOG(LS_VERBOSE) |
| 1112 "MacBook Pro using internal speakers; stereo" | 1110 << "MacBook Pro using internal speakers; stereo panning right"; |
| 1113 " panning right"); | |
| 1114 } else { | 1111 } else { |
| 1115 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1112 LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers"; |
| 1116 "MacBook Pro not using internal speakers"); | |
| 1117 } | 1113 } |
| 1118 | 1114 |
| 1119 // Add a listener to determine if the status changes. | 1115 // Add a listener to determine if the status changes. |
| 1120 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener( | 1116 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener( |
| 1121 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); | 1117 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); |
| 1122 } | 1118 } |
| 1123 } | 1119 } |
| 1124 | 1120 |
| 1125 // Get current stream description | 1121 // Get current stream description |
| 1126 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; | 1122 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; |
| 1127 memset(&_outStreamFormat, 0, sizeof(_outStreamFormat)); | 1123 memset(&_outStreamFormat, 0, sizeof(_outStreamFormat)); |
| 1128 size = sizeof(_outStreamFormat); | 1124 size = sizeof(_outStreamFormat); |
| 1129 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 1125 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 1130 _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat)); | 1126 _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat)); |
| 1131 | 1127 |
| 1132 if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) { | 1128 if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) { |
| 1133 logCAMsg(kTraceError, kTraceAudioDevice, _id, | 1129 logCAMsg(rtc::LS_ERROR, |
| 1134 "Unacceptable output stream format -> mFormatID", | 1130 "Unacceptable output stream format -> mFormatID", |
| 1135 (const char*)&_outStreamFormat.mFormatID); | 1131 (const char*)&_outStreamFormat.mFormatID); |
| 1136 return -1; | 1132 return -1; |
| 1137 } | 1133 } |
| 1138 | 1134 |
| 1139 if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { | 1135 if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { |
| 1140 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1136 LOG(LS_ERROR) << "Too many channels on output device (mChannelsPerFrame = " |
| 1141 "Too many channels on output device (mChannelsPerFrame = %d)", | 1137 << _outStreamFormat.mChannelsPerFrame << ")"; |
| 1142 _outStreamFormat.mChannelsPerFrame); | |
| 1143 return -1; | 1138 return -1; |
| 1144 } | 1139 } |
| 1145 | 1140 |
| 1146 if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) { | 1141 if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) { |
| 1147 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1142 LOG(LS_ERROR) << "Non-interleaved audio data is not supported." |
| 1148 "Non-interleaved audio data is not supported.", | 1143 << " AudioHardware streams should not have this format."; |
| 1149 "AudioHardware streams should not have this format."); | |
| 1150 return -1; | 1144 return -1; |
| 1151 } | 1145 } |
| 1152 | 1146 |
| 1153 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Ouput stream format:"); | 1147 LOG(LS_VERBOSE) << "Output stream format:"; |
| 1154 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1148 LOG(LS_VERBOSE) << "mSampleRate = " << _outStreamFormat.mSampleRate |
| 1155 "mSampleRate = %f, mChannelsPerFrame = %u", | 1149 << ", mChannelsPerFrame = " |
| 1156 _outStreamFormat.mSampleRate, | 1150 << _outStreamFormat.mChannelsPerFrame; |
| 1157 _outStreamFormat.mChannelsPerFrame); | 1151 LOG(LS_VERBOSE) << "mBytesPerPacket = " << _outStreamFormat.mBytesPerPacket |
| 1158 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1152 << ", mFramesPerPacket = " |
| 1159 "mBytesPerPacket = %u, mFramesPerPacket = %u", | 1153 << _outStreamFormat.mFramesPerPacket; |
| 1160 _outStreamFormat.mBytesPerPacket, | 1154 LOG(LS_VERBOSE) << "mBytesPerFrame = " << _outStreamFormat.mBytesPerFrame |
| 1161 _outStreamFormat.mFramesPerPacket); | 1155 << ", mBitsPerChannel = " << _outStreamFormat.mBitsPerChannel; |
| 1162 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1156 LOG(LS_VERBOSE) << "mFormatFlags = " << _outStreamFormat.mFormatFlags; |
| 1163 "mBytesPerFrame = %u, mBitsPerChannel = %u", | 1157 logCAMsg(rtc::LS_VERBOSE, "mFormatID", |
| 1164 _outStreamFormat.mBytesPerFrame, | |
| 1165 _outStreamFormat.mBitsPerChannel); | |
| 1166 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "mFormatFlags = %u", | |
| 1167 _outStreamFormat.mFormatFlags); | |
| 1168 logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", | |
| 1169 (const char*)&_outStreamFormat.mFormatID); | 1158 (const char*)&_outStreamFormat.mFormatID); |
| 1170 | 1159 |
| 1171 // Our preferred format to work with. | 1160 // Our preferred format to work with. |
| 1172 if (_outStreamFormat.mChannelsPerFrame < 2) { | 1161 if (_outStreamFormat.mChannelsPerFrame < 2) { |
| 1173 // Disable stereo playout when we only have one channel on the device. | 1162 // Disable stereo playout when we only have one channel on the device. |
| 1174 _playChannels = 1; | 1163 _playChannels = 1; |
| 1175 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1164 LOG(LS_VERBOSE) << "Stereo playout unavailable on this device"; |
| 1176 "Stereo playout unavailable on this device"); | |
| 1177 } | 1165 } |
| 1178 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat()); | 1166 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat()); |
| 1179 | 1167 |
| 1180 // Listen for format changes. | 1168 // Listen for format changes. |
| 1181 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; | 1169 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; |
| 1182 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener( | 1170 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener( |
| 1183 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); | 1171 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); |
| 1184 | 1172 |
| 1185 // Listen for processor overloads. | 1173 // Listen for processor overloads. |
| 1186 propertyAddress.mSelector = kAudioDeviceProcessorOverload; | 1174 propertyAddress.mSelector = kAudioDeviceProcessorOverload; |
| (...skipping 20 matching lines...) Expand all Loading... |
| 1207 if (!_inputDeviceIsSpecified) { | 1195 if (!_inputDeviceIsSpecified) { |
| 1208 return -1; | 1196 return -1; |
| 1209 } | 1197 } |
| 1210 | 1198 |
| 1211 if (_recIsInitialized) { | 1199 if (_recIsInitialized) { |
| 1212 return 0; | 1200 return 0; |
| 1213 } | 1201 } |
| 1214 | 1202 |
| 1215 // Initialize the microphone (devices might have been added or removed) | 1203 // Initialize the microphone (devices might have been added or removed) |
| 1216 if (InitMicrophone() == -1) { | 1204 if (InitMicrophone() == -1) { |
| 1217 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1205 LOG(LS_WARNING) << "InitMicrophone() failed"; |
| 1218 " InitMicrophone() failed"); | |
| 1219 } | 1206 } |
| 1220 | 1207 |
| 1221 if (!SpeakerIsInitialized()) { | 1208 if (!SpeakerIsInitialized()) { |
| 1222 // Make this call to check if we are using | 1209 // Make this call to check if we are using |
| 1223 // one or two devices (_twoDevices) | 1210 // one or two devices (_twoDevices) |
| 1224 bool available = false; | 1211 bool available = false; |
| 1225 if (SpeakerIsAvailable(available) == -1) { | 1212 if (SpeakerIsAvailable(available) == -1) { |
| 1226 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1213 LOG(LS_WARNING) << "SpeakerIsAvailable() failed"; |
| 1227 " SpeakerIsAvailable() failed"); | |
| 1228 } | 1214 } |
| 1229 } | 1215 } |
| 1230 | 1216 |
| 1231 OSStatus err = noErr; | 1217 OSStatus err = noErr; |
| 1232 UInt32 size = 0; | 1218 UInt32 size = 0; |
| 1233 | 1219 |
| 1234 PaUtil_FlushRingBuffer(_paCaptureBuffer); | 1220 PaUtil_FlushRingBuffer(_paCaptureBuffer); |
| 1235 | 1221 |
| 1236 _captureDelayUs = 0; | 1222 _captureDelayUs = 0; |
| 1237 _captureLatencyUs = 0; | 1223 _captureLatencyUs = 0; |
| 1238 _captureDeviceIsAlive = 1; | 1224 _captureDeviceIsAlive = 1; |
| 1239 _doStopRec = false; | 1225 _doStopRec = false; |
| 1240 | 1226 |
| 1241 // Get current stream description | 1227 // Get current stream description |
| 1242 AudioObjectPropertyAddress propertyAddress = { | 1228 AudioObjectPropertyAddress propertyAddress = { |
| 1243 kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0}; | 1229 kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0}; |
| 1244 memset(&_inStreamFormat, 0, sizeof(_inStreamFormat)); | 1230 memset(&_inStreamFormat, 0, sizeof(_inStreamFormat)); |
| 1245 size = sizeof(_inStreamFormat); | 1231 size = sizeof(_inStreamFormat); |
| 1246 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 1232 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 1247 _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat)); | 1233 _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat)); |
| 1248 | 1234 |
| 1249 if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) { | 1235 if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) { |
| 1250 logCAMsg(kTraceError, kTraceAudioDevice, _id, | 1236 logCAMsg(rtc::LS_ERROR, |
| 1251 "Unacceptable input stream format -> mFormatID", | 1237 "Unacceptable input stream format -> mFormatID", |
| 1252 (const char*)&_inStreamFormat.mFormatID); | 1238 (const char*)&_inStreamFormat.mFormatID); |
| 1253 return -1; | 1239 return -1; |
| 1254 } | 1240 } |
| 1255 | 1241 |
| 1256 if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { | 1242 if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { |
| 1257 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1243 LOG(LS_ERROR) << "Too many channels on input device (mChannelsPerFrame = " |
| 1258 "Too many channels on input device (mChannelsPerFrame = %d)", | 1244 << _inStreamFormat.mChannelsPerFrame << ")"; |
| 1259 _inStreamFormat.mChannelsPerFrame); | |
| 1260 return -1; | 1245 return -1; |
| 1261 } | 1246 } |
| 1262 | 1247 |
| 1263 const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame * | 1248 const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame * |
| 1264 _inStreamFormat.mSampleRate / 100 * | 1249 _inStreamFormat.mSampleRate / 100 * |
| 1265 N_BLOCKS_IO; | 1250 N_BLOCKS_IO; |
| 1266 if (io_block_size_samples > _captureBufSizeSamples) { | 1251 if (io_block_size_samples > _captureBufSizeSamples) { |
| 1267 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1252 LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples |
| 1268 "Input IO block size (%d) is larger than ring buffer (%u)", | 1253 << ") is larger than ring buffer (" << _captureBufSizeSamples |
| 1269 io_block_size_samples, _captureBufSizeSamples); | 1254 << ")"; |
| 1270 return -1; | 1255 return -1; |
| 1271 } | 1256 } |
| 1272 | 1257 |
| 1273 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Input stream format:"); | 1258 LOG(LS_VERBOSE) << "Input stream format:"; |
| 1274 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1259 LOG(LS_VERBOSE) << "mSampleRate = " << _inStreamFormat.mSampleRate |
| 1275 " mSampleRate = %f, mChannelsPerFrame = %u", | 1260 << ", mChannelsPerFrame = " |
| 1276 _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame); | 1261 << _inStreamFormat.mChannelsPerFrame; |
| 1277 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1262 LOG(LS_VERBOSE) << "mBytesPerPacket = " << _inStreamFormat.mBytesPerPacket |
| 1278 " mBytesPerPacket = %u, mFramesPerPacket = %u", | 1263 << ", mFramesPerPacket = " |
| 1279 _inStreamFormat.mBytesPerPacket, | 1264 << _inStreamFormat.mFramesPerPacket; |
| 1280 _inStreamFormat.mFramesPerPacket); | 1265 LOG(LS_VERBOSE) << "mBytesPerFrame = " << _inStreamFormat.mBytesPerFrame |
| 1281 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1266 << ", mBitsPerChannel = " << _inStreamFormat.mBitsPerChannel; |
| 1282 " mBytesPerFrame = %u, mBitsPerChannel = %u", | 1267 LOG(LS_VERBOSE) << "mFormatFlags = " << _inStreamFormat.mFormatFlags; |
| 1283 _inStreamFormat.mBytesPerFrame, _inStreamFormat.mBitsPerChannel); | 1268 logCAMsg(rtc::LS_VERBOSE, "mFormatID", |
| 1284 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " mFormatFlags = %u", | |
| 1285 _inStreamFormat.mFormatFlags); | |
| 1286 logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", | |
| 1287 (const char*)&_inStreamFormat.mFormatID); | 1269 (const char*)&_inStreamFormat.mFormatID); |
| 1288 | 1270 |
| 1289 // Our preferred format to work with | 1271 // Our preferred format to work with |
| 1290 if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) { | 1272 if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) { |
| 1291 _inDesiredFormat.mChannelsPerFrame = 2; | 1273 _inDesiredFormat.mChannelsPerFrame = 2; |
| 1292 } else { | 1274 } else { |
| 1293 // Disable stereo recording when we only have one channel on the device. | 1275 // Disable stereo recording when we only have one channel on the device. |
| 1294 _inDesiredFormat.mChannelsPerFrame = 1; | 1276 _inDesiredFormat.mChannelsPerFrame = 1; |
| 1295 _recChannels = 1; | 1277 _recChannels = 1; |
| 1296 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 1278 LOG(LS_VERBOSE) << "Stereo recording unavailable on this device"; |
| 1297 "Stereo recording unavailable on this device"); | |
| 1298 } | 1279 } |
| 1299 | 1280 |
| 1300 if (_ptrAudioBuffer) { | 1281 if (_ptrAudioBuffer) { |
| 1301 // Update audio buffer with the selected parameters | 1282 // Update audio buffer with the selected parameters |
| 1302 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); | 1283 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); |
| 1303 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); | 1284 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); |
| 1304 } | 1285 } |
| 1305 | 1286 |
| 1306 _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC; | 1287 _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC; |
| 1307 _inDesiredFormat.mBytesPerPacket = | 1288 _inDesiredFormat.mBytesPerPacket = |
| (...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1404 | 1385 |
| 1405 if (!_recIsInitialized) { | 1386 if (!_recIsInitialized) { |
| 1406 return -1; | 1387 return -1; |
| 1407 } | 1388 } |
| 1408 | 1389 |
| 1409 if (_recording) { | 1390 if (_recording) { |
| 1410 return 0; | 1391 return 0; |
| 1411 } | 1392 } |
| 1412 | 1393 |
| 1413 if (!_initialized) { | 1394 if (!_initialized) { |
| 1414 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1395 LOG(LS_ERROR) << "Recording worker thread has not been started"; |
| 1415 " Recording worker thread has not been started"); | |
| 1416 return -1; | 1396 return -1; |
| 1417 } | 1397 } |
| 1418 | 1398 |
| 1419 RTC_DCHECK(!capture_worker_thread_.get()); | 1399 RTC_DCHECK(!capture_worker_thread_.get()); |
| 1420 capture_worker_thread_.reset( | 1400 capture_worker_thread_.reset( |
| 1421 new rtc::PlatformThread(RunCapture, this, "CaptureWorkerThread")); | 1401 new rtc::PlatformThread(RunCapture, this, "CaptureWorkerThread")); |
| 1422 RTC_DCHECK(capture_worker_thread_.get()); | 1402 RTC_DCHECK(capture_worker_thread_.get()); |
| 1423 capture_worker_thread_->Start(); | 1403 capture_worker_thread_->Start(); |
| 1424 capture_worker_thread_->SetPriority(rtc::kRealtimePriority); | 1404 capture_worker_thread_->SetPriority(rtc::kRealtimePriority); |
| 1425 | 1405 |
| (...skipping 21 matching lines...) Expand all Loading... |
| 1447 | 1427 |
| 1448 // Stop device | 1428 // Stop device |
| 1449 int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive); | 1429 int32_t captureDeviceIsAlive = AtomicGet32(&_captureDeviceIsAlive); |
| 1450 if (_twoDevices) { | 1430 if (_twoDevices) { |
| 1451 if (_recording && captureDeviceIsAlive == 1) { | 1431 if (_recording && captureDeviceIsAlive == 1) { |
| 1452 _recording = false; | 1432 _recording = false; |
| 1453 _doStopRec = true; // Signal to io proc to stop audio device | 1433 _doStopRec = true; // Signal to io proc to stop audio device |
| 1454 _critSect.Leave(); // Cannot be under lock, risk of deadlock | 1434 _critSect.Leave(); // Cannot be under lock, risk of deadlock |
| 1455 if (kEventTimeout == _stopEventRec.Wait(2000)) { | 1435 if (kEventTimeout == _stopEventRec.Wait(2000)) { |
| 1456 rtc::CritScope critScoped(&_critSect); | 1436 rtc::CritScope critScoped(&_critSect); |
| 1457 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1437 LOG(LS_WARNING) |
| 1458 " Timed out stopping the capture IOProc. " | 1438 << "Timed out stopping the capture IOProc." |
| 1459 "We may have failed to detect a device removal."); | 1439 << " We may have failed to detect a device removal."; |
| 1460 | 1440 |
| 1461 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID)); | 1441 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID)); |
| 1462 WEBRTC_CA_LOG_WARN( | 1442 WEBRTC_CA_LOG_WARN( |
| 1463 AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID)); | 1443 AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID)); |
| 1464 } | 1444 } |
| 1465 _critSect.Enter(); | 1445 _critSect.Enter(); |
| 1466 _doStopRec = false; | 1446 _doStopRec = false; |
| 1467 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " Recording stopped"); | 1447 LOG(LS_VERBOSE) << "Recording stopped"; |
| 1468 } | 1448 } |
| 1469 } else { | 1449 } else { |
| 1470 // We signal a stop for a shared device even when rendering has | 1450 // We signal a stop for a shared device even when rendering has |
| 1471 // not yet ended. This is to ensure the IOProc will return early as | 1451 // not yet ended. This is to ensure the IOProc will return early as |
| 1472 // intended (by checking |_recording|) before accessing | 1452 // intended (by checking |_recording|) before accessing |
| 1473 // resources we free below (e.g. the capture converter). | 1453 // resources we free below (e.g. the capture converter). |
| 1474 // | 1454 // |
| 1475 // In the case of a shared devcie, the IOProc will verify | 1455 // In the case of a shared device, the IOProc will verify |
| 1476 // rendering has ended before stopping itself. | 1456 // rendering has ended before stopping itself. |
| 1477 if (_recording && captureDeviceIsAlive == 1) { | 1457 if (_recording && captureDeviceIsAlive == 1) { |
| 1478 _recording = false; | 1458 _recording = false; |
| 1479 _doStop = true; // Signal to io proc to stop audio device | 1459 _doStop = true; // Signal to io proc to stop audio device |
| 1480 _critSect.Leave(); // Cannot be under lock, risk of deadlock | 1460 _critSect.Leave(); // Cannot be under lock, risk of deadlock |
| 1481 if (kEventTimeout == _stopEvent.Wait(2000)) { | 1461 if (kEventTimeout == _stopEvent.Wait(2000)) { |
| 1482 rtc::CritScope critScoped(&_critSect); | 1462 rtc::CritScope critScoped(&_critSect); |
| 1483 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1463 LOG(LS_WARNING) |
| 1484 " Timed out stopping the shared IOProc. " | 1464 << "Timed out stopping the shared IOProc." |
| 1485 "We may have failed to detect a device removal."); | 1465 << " We may have failed to detect a device removal."; |
| 1486 | 1466 |
| 1487 // We assume rendering on a shared device has stopped as well if | 1467 // We assume rendering on a shared device has stopped as well if |
| 1488 // the IOProc times out. | 1468 // the IOProc times out. |
| 1489 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID)); | 1469 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID)); |
| 1490 WEBRTC_CA_LOG_WARN( | 1470 WEBRTC_CA_LOG_WARN( |
| 1491 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID)); | 1471 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID)); |
| 1492 } | 1472 } |
| 1493 _critSect.Enter(); | 1473 _critSect.Enter(); |
| 1494 _doStop = false; | 1474 _doStop = false; |
| 1495 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | 1475 LOG(LS_VERBOSE) << "Recording stopped (shared)"; |
| 1496 " Recording stopped (shared)"); | |
| 1497 } | 1476 } |
| 1498 } | 1477 } |
| 1499 | 1478 |
| 1500 // Setting this signal will allow the worker thread to be stopped. | 1479 // Setting this signal will allow the worker thread to be stopped. |
| 1501 AtomicSet32(&_captureDeviceIsAlive, 0); | 1480 AtomicSet32(&_captureDeviceIsAlive, 0); |
| 1502 | 1481 |
| 1503 if (capture_worker_thread_.get()) { | 1482 if (capture_worker_thread_.get()) { |
| 1504 _critSect.Leave(); | 1483 _critSect.Leave(); |
| 1505 capture_worker_thread_->Stop(); | 1484 capture_worker_thread_->Stop(); |
| 1506 capture_worker_thread_.reset(); | 1485 capture_worker_thread_.reset(); |
| (...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1579 // intended (by checking |_playing|) before accessing resources we | 1558 // intended (by checking |_playing|) before accessing resources we |
| 1580 // free below (e.g. the render converter). | 1559 // free below (e.g. the render converter). |
| 1581 // | 1560 // |
| 1582 // In the case of a shared device, the IOProc will verify capturing | 1561 // In the case of a shared device, the IOProc will verify capturing |
| 1583 // has ended before stopping itself. | 1562 // has ended before stopping itself. |
| 1584 _playing = false; | 1563 _playing = false; |
| 1585 _doStop = true; // Signal to io proc to stop audio device | 1564 _doStop = true; // Signal to io proc to stop audio device |
| 1586 _critSect.Leave(); // Cannot be under lock, risk of deadlock | 1565 _critSect.Leave(); // Cannot be under lock, risk of deadlock |
| 1587 if (kEventTimeout == _stopEvent.Wait(2000)) { | 1566 if (kEventTimeout == _stopEvent.Wait(2000)) { |
| 1588 rtc::CritScope critScoped(&_critSect); | 1567 rtc::CritScope critScoped(&_critSect); |
| 1589 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1568 LOG(LS_WARNING) |
| 1590 " Timed out stopping the render IOProc. " | 1569 << "Timed out stopping the render IOProc." |
| 1591 "We may have failed to detect a device removal."); | 1570 << " We may have failed to detect a device removal."; |
| 1592 | 1571 |
| 1593 // We assume capturing on a shared device has stopped as well if the | 1572 // We assume capturing on a shared device has stopped as well if the |
| 1594 // IOProc times out. | 1573 // IOProc times out. |
| 1595 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID)); | 1574 WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID)); |
| 1596 WEBRTC_CA_LOG_WARN( | 1575 WEBRTC_CA_LOG_WARN( |
| 1597 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID)); | 1576 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID)); |
| 1598 } | 1577 } |
| 1599 _critSect.Enter(); | 1578 _critSect.Enter(); |
| 1600 _doStop = false; | 1579 _doStop = false; |
| 1601 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Playout stopped"); | 1580 LOG(LS_VERBOSE) << "Playout stopped"; |
| 1602 } | 1581 } |
| 1603 | 1582 |
| 1604 // Setting this signal will allow the worker thread to be stopped. | 1583 // Setting this signal will allow the worker thread to be stopped. |
| 1605 AtomicSet32(&_renderDeviceIsAlive, 0); | 1584 AtomicSet32(&_renderDeviceIsAlive, 0); |
| 1606 if (render_worker_thread_.get()) { | 1585 if (render_worker_thread_.get()) { |
| 1607 _critSect.Leave(); | 1586 _critSect.Leave(); |
| 1608 render_worker_thread_->Stop(); | 1587 render_worker_thread_->Stop(); |
| 1609 render_worker_thread_.reset(); | 1588 render_worker_thread_.reset(); |
| 1610 _critSect.Enter(); | 1589 _critSect.Enter(); |
| 1611 } | 1590 } |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1653 } | 1632 } |
| 1654 | 1633 |
| 1655 bool AudioDeviceMac::Playing() const { | 1634 bool AudioDeviceMac::Playing() const { |
| 1656 return (_playing); | 1635 return (_playing); |
| 1657 } | 1636 } |
| 1658 | 1637 |
| 1659 int32_t AudioDeviceMac::SetPlayoutBuffer( | 1638 int32_t AudioDeviceMac::SetPlayoutBuffer( |
| 1660 const AudioDeviceModule::BufferType type, | 1639 const AudioDeviceModule::BufferType type, |
| 1661 uint16_t sizeMS) { | 1640 uint16_t sizeMS) { |
| 1662 if (type != AudioDeviceModule::kFixedBufferSize) { | 1641 if (type != AudioDeviceModule::kFixedBufferSize) { |
| 1663 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1642 LOG(LS_ERROR) << "Adaptive buffer size not supported on this platform"; |
| 1664 " Adaptive buffer size not supported on this platform"); | |
| 1665 return -1; | 1643 return -1; |
| 1666 } | 1644 } |
| 1667 | 1645 |
| 1668 _playBufType = type; | 1646 _playBufType = type; |
| 1669 _playBufDelayFixed = sizeMS; | 1647 _playBufDelayFixed = sizeMS; |
| 1670 return 0; | 1648 return 0; |
| 1671 } | 1649 } |
| 1672 | 1650 |
| 1673 int32_t AudioDeviceMac::PlayoutBuffer(AudioDeviceModule::BufferType& type, | 1651 int32_t AudioDeviceMac::PlayoutBuffer(AudioDeviceModule::BufferType& type, |
| 1674 uint16_t& sizeMS) const { | 1652 uint16_t& sizeMS) const { |
| 1675 type = _playBufType; | 1653 type = _playBufType; |
| 1676 sizeMS = _playBufDelayFixed; | 1654 sizeMS = _playBufDelayFixed; |
| 1677 | 1655 |
| 1678 return 0; | 1656 return 0; |
| 1679 } | 1657 } |
| 1680 | 1658 |
| 1681 // Not implemented for Mac. | 1659 // Not implemented for Mac. |
| 1682 int32_t AudioDeviceMac::CPULoad(uint16_t& /*load*/) const { | 1660 int32_t AudioDeviceMac::CPULoad(uint16_t& /*load*/) const { |
| 1683 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1661 LOG(LS_WARNING) << "API call not supported on this platform"; |
| 1684 " API call not supported on this platform"); | |
| 1685 | 1662 |
| 1686 return -1; | 1663 return -1; |
| 1687 } | 1664 } |
| 1688 | 1665 |
| 1689 bool AudioDeviceMac::PlayoutWarning() const { | 1666 bool AudioDeviceMac::PlayoutWarning() const { |
| 1690 return (_playWarning > 0); | 1667 return (_playWarning > 0); |
| 1691 } | 1668 } |
| 1692 | 1669 |
| 1693 bool AudioDeviceMac::PlayoutError() const { | 1670 bool AudioDeviceMac::PlayoutError() const { |
| 1694 return (_playError > 0); | 1671 return (_playError > 0); |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1727 const uint32_t deviceListLength) { | 1704 const uint32_t deviceListLength) { |
| 1728 OSStatus err = noErr; | 1705 OSStatus err = noErr; |
| 1729 | 1706 |
| 1730 AudioObjectPropertyAddress propertyAddress = { | 1707 AudioObjectPropertyAddress propertyAddress = { |
| 1731 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, | 1708 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, |
| 1732 kAudioObjectPropertyElementMaster}; | 1709 kAudioObjectPropertyElementMaster}; |
| 1733 UInt32 size = 0; | 1710 UInt32 size = 0; |
| 1734 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize( | 1711 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize( |
| 1735 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size)); | 1712 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size)); |
| 1736 if (size == 0) { | 1713 if (size == 0) { |
| 1737 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "No devices"); | 1714 LOG(LS_WARNING) << "No devices"; |
| 1738 return 0; | 1715 return 0; |
| 1739 } | 1716 } |
| 1740 | 1717 |
| 1741 AudioDeviceID* deviceIds = (AudioDeviceID*)malloc(size); | 1718 AudioDeviceID* deviceIds = (AudioDeviceID*)malloc(size); |
| 1742 UInt32 numberDevices = size / sizeof(AudioDeviceID); | 1719 UInt32 numberDevices = size / sizeof(AudioDeviceID); |
| 1743 AudioBufferList* bufferList = NULL; | 1720 AudioBufferList* bufferList = NULL; |
| 1744 UInt32 numberScopedDevices = 0; | 1721 UInt32 numberScopedDevices = 0; |
| 1745 | 1722 |
| 1746 // First check if there is a default device and list it | 1723 // First check if there is a default device and list it |
| 1747 UInt32 hardwareProperty = 0; | 1724 UInt32 hardwareProperty = 0; |
| 1748 if (scope == kAudioDevicePropertyScopeOutput) { | 1725 if (scope == kAudioDevicePropertyScopeOutput) { |
| 1749 hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice; | 1726 hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice; |
| 1750 } else { | 1727 } else { |
| 1751 hardwareProperty = kAudioHardwarePropertyDefaultInputDevice; | 1728 hardwareProperty = kAudioHardwarePropertyDefaultInputDevice; |
| 1752 } | 1729 } |
| 1753 | 1730 |
| 1754 AudioObjectPropertyAddress propertyAddressDefault = { | 1731 AudioObjectPropertyAddress propertyAddressDefault = { |
| 1755 hardwareProperty, kAudioObjectPropertyScopeGlobal, | 1732 hardwareProperty, kAudioObjectPropertyScopeGlobal, |
| 1756 kAudioObjectPropertyElementMaster}; | 1733 kAudioObjectPropertyElementMaster}; |
| 1757 | 1734 |
| 1758 AudioDeviceID usedID; | 1735 AudioDeviceID usedID; |
| 1759 UInt32 uintSize = sizeof(UInt32); | 1736 UInt32 uintSize = sizeof(UInt32); |
| 1760 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, | 1737 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject, |
| 1761 &propertyAddressDefault, 0, | 1738 &propertyAddressDefault, 0, |
| 1762 NULL, &uintSize, &usedID)); | 1739 NULL, &uintSize, &usedID)); |
| 1763 if (usedID != kAudioDeviceUnknown) { | 1740 if (usedID != kAudioDeviceUnknown) { |
| 1764 scopedDeviceIds[numberScopedDevices] = usedID; | 1741 scopedDeviceIds[numberScopedDevices] = usedID; |
| 1765 numberScopedDevices++; | 1742 numberScopedDevices++; |
| 1766 } else { | 1743 } else { |
| 1767 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1744 LOG(LS_WARNING) << "GetNumberDevices(): Default device unknown"; |
| 1768 "GetNumberDevices(): Default device unknown"); | |
| 1769 } | 1745 } |
| 1770 | 1746 |
| 1771 // Then list the rest of the devices | 1747 // Then list the rest of the devices |
| 1772 bool listOK = true; | 1748 bool listOK = true; |
| 1773 | 1749 |
| 1774 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData( | 1750 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData( |
| 1775 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, deviceIds)); | 1751 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, deviceIds)); |
| 1776 if (err != noErr) { | 1752 if (err != noErr) { |
| 1777 listOK = false; | 1753 listOK = false; |
| 1778 } else { | 1754 } else { |
| (...skipping 15 matching lines...) Expand all Loading... |
| 1794 bufferList = (AudioBufferList*)malloc(size); | 1770 bufferList = (AudioBufferList*)malloc(size); |
| 1795 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData( | 1771 WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData( |
| 1796 deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList)); | 1772 deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList)); |
| 1797 if (err != noErr) { | 1773 if (err != noErr) { |
| 1798 listOK = false; | 1774 listOK = false; |
| 1799 break; | 1775 break; |
| 1800 } | 1776 } |
| 1801 | 1777 |
| 1802 if (bufferList->mNumberBuffers > 0) { | 1778 if (bufferList->mNumberBuffers > 0) { |
| 1803 if (numberScopedDevices >= deviceListLength) { | 1779 if (numberScopedDevices >= deviceListLength) { |
| 1804 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1780 LOG(LS_ERROR) << "Device list is not long enough"; |
| 1805 "Device list is not long enough"); | |
| 1806 listOK = false; | 1781 listOK = false; |
| 1807 break; | 1782 break; |
| 1808 } | 1783 } |
| 1809 | 1784 |
| 1810 scopedDeviceIds[numberScopedDevices] = deviceIds[i]; | 1785 scopedDeviceIds[numberScopedDevices] = deviceIds[i]; |
| 1811 numberScopedDevices++; | 1786 numberScopedDevices++; |
| 1812 } | 1787 } |
| 1813 | 1788 |
| 1814 free(bufferList); | 1789 free(bufferList); |
| 1815 bufferList = NULL; | 1790 bufferList = NULL; |
| (...skipping 27 matching lines...) Expand all Loading... |
| 1843 const uint16_t index, | 1818 const uint16_t index, |
| 1844 char* name) { | 1819 char* name) { |
| 1845 OSStatus err = noErr; | 1820 OSStatus err = noErr; |
| 1846 UInt32 len = kAdmMaxDeviceNameSize; | 1821 UInt32 len = kAdmMaxDeviceNameSize; |
| 1847 AudioDeviceID deviceIds[MaxNumberDevices]; | 1822 AudioDeviceID deviceIds[MaxNumberDevices]; |
| 1848 | 1823 |
| 1849 int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices); | 1824 int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices); |
| 1850 if (numberDevices < 0) { | 1825 if (numberDevices < 0) { |
| 1851 return -1; | 1826 return -1; |
| 1852 } else if (numberDevices == 0) { | 1827 } else if (numberDevices == 0) { |
| 1853 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "No devices"); | 1828 LOG(LS_ERROR) << "No devices"; |
| 1854 return -1; | 1829 return -1; |
| 1855 } | 1830 } |
| 1856 | 1831 |
| 1857 // If the number is below the number of devices, assume it's "WEBRTC ID" | 1832 // If the number is below the number of devices, assume it's "WEBRTC ID" |
| 1858 // otherwise assume it's a CoreAudio ID | 1833 // otherwise assume it's a CoreAudio ID |
| 1859 AudioDeviceID usedID; | 1834 AudioDeviceID usedID; |
| 1860 | 1835 |
| 1861 // Check if there is a default device | 1836 // Check if there is a default device |
| 1862 bool isDefaultDevice = false; | 1837 bool isDefaultDevice = false; |
| 1863 if (index == 0) { | 1838 if (index == 0) { |
| 1864 UInt32 hardwareProperty = 0; | 1839 UInt32 hardwareProperty = 0; |
| 1865 if (scope == kAudioDevicePropertyScopeOutput) { | 1840 if (scope == kAudioDevicePropertyScopeOutput) { |
| 1866 hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice; | 1841 hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice; |
| 1867 } else { | 1842 } else { |
| 1868 hardwareProperty = kAudioHardwarePropertyDefaultInputDevice; | 1843 hardwareProperty = kAudioHardwarePropertyDefaultInputDevice; |
| 1869 } | 1844 } |
| 1870 AudioObjectPropertyAddress propertyAddress = { | 1845 AudioObjectPropertyAddress propertyAddress = { |
| 1871 hardwareProperty, kAudioObjectPropertyScopeGlobal, | 1846 hardwareProperty, kAudioObjectPropertyScopeGlobal, |
| 1872 kAudioObjectPropertyElementMaster}; | 1847 kAudioObjectPropertyElementMaster}; |
| 1873 UInt32 size = sizeof(UInt32); | 1848 UInt32 size = sizeof(UInt32); |
| 1874 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 1849 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 1875 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID)); | 1850 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID)); |
| 1876 if (usedID == kAudioDeviceUnknown) { | 1851 if (usedID == kAudioDeviceUnknown) { |
| 1877 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1852 LOG(LS_WARNING) << "GetDeviceName(): Default device unknown"; |
| 1878 "GetDeviceName(): Default device unknown"); | |
| 1879 } else { | 1853 } else { |
| 1880 isDefaultDevice = true; | 1854 isDefaultDevice = true; |
| 1881 } | 1855 } |
| 1882 } | 1856 } |
| 1883 | 1857 |
| 1884 AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName, | 1858 AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName, |
| 1885 scope, 0}; | 1859 scope, 0}; |
| 1886 | 1860 |
| 1887 if (isDefaultDevice) { | 1861 if (isDefaultDevice) { |
| 1888 char devName[len]; | 1862 char devName[len]; |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1925 AudioObjectPropertyAddress propertyAddress = { | 1899 AudioObjectPropertyAddress propertyAddress = { |
| 1926 defaultDeviceSelector, kAudioObjectPropertyScopeGlobal, | 1900 defaultDeviceSelector, kAudioObjectPropertyScopeGlobal, |
| 1927 kAudioObjectPropertyElementMaster}; | 1901 kAudioObjectPropertyElementMaster}; |
| 1928 | 1902 |
| 1929 // Get the actual device IDs | 1903 // Get the actual device IDs |
| 1930 int numberDevices = | 1904 int numberDevices = |
| 1931 GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices); | 1905 GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices); |
| 1932 if (numberDevices < 0) { | 1906 if (numberDevices < 0) { |
| 1933 return -1; | 1907 return -1; |
| 1934 } else if (numberDevices == 0) { | 1908 } else if (numberDevices == 0) { |
| 1935 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1909 LOG(LS_ERROR) << "InitDevice(): No devices"; |
| 1936 "InitDevice(): No devices"); | |
| 1937 return -1; | 1910 return -1; |
| 1938 } | 1911 } |
| 1939 | 1912 |
| 1940 bool isDefaultDevice = false; | 1913 bool isDefaultDevice = false; |
| 1941 deviceId = kAudioDeviceUnknown; | 1914 deviceId = kAudioDeviceUnknown; |
| 1942 if (userDeviceIndex == 0) { | 1915 if (userDeviceIndex == 0) { |
| 1943 // Try to use default system device | 1916 // Try to use default system device |
| 1944 size = sizeof(AudioDeviceID); | 1917 size = sizeof(AudioDeviceID); |
| 1945 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 1918 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 1946 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId)); | 1919 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId)); |
| 1947 if (deviceId == kAudioDeviceUnknown) { | 1920 if (deviceId == kAudioDeviceUnknown) { |
| 1948 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1921 LOG(LS_WARNING) << "No default device exists"; |
| 1949 " No default device exists"); | |
| 1950 } else { | 1922 } else { |
| 1951 isDefaultDevice = true; | 1923 isDefaultDevice = true; |
| 1952 } | 1924 } |
| 1953 } | 1925 } |
| 1954 | 1926 |
| 1955 if (!isDefaultDevice) { | 1927 if (!isDefaultDevice) { |
| 1956 deviceId = deviceIds[userDeviceIndex]; | 1928 deviceId = deviceIds[userDeviceIndex]; |
| 1957 } | 1929 } |
| 1958 | 1930 |
| 1959 // Obtain device name and manufacturer for logging. | 1931 // Obtain device name and manufacturer for logging. |
| 1960 // Also use this as a test to ensure a user-set device ID is valid. | 1932 // Also use this as a test to ensure a user-set device ID is valid. |
| 1961 char devName[128]; | 1933 char devName[128]; |
| 1962 char devManf[128]; | 1934 char devManf[128]; |
| 1963 memset(devName, 0, sizeof(devName)); | 1935 memset(devName, 0, sizeof(devName)); |
| 1964 memset(devManf, 0, sizeof(devManf)); | 1936 memset(devManf, 0, sizeof(devManf)); |
| 1965 | 1937 |
| 1966 propertyAddress.mSelector = kAudioDevicePropertyDeviceName; | 1938 propertyAddress.mSelector = kAudioDevicePropertyDeviceName; |
| 1967 propertyAddress.mScope = deviceScope; | 1939 propertyAddress.mScope = deviceScope; |
| 1968 propertyAddress.mElement = 0; | 1940 propertyAddress.mElement = 0; |
| 1969 size = sizeof(devName); | 1941 size = sizeof(devName); |
| 1970 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress, | 1942 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress, |
| 1971 0, NULL, &size, devName)); | 1943 0, NULL, &size, devName)); |
| 1972 | 1944 |
| 1973 propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer; | 1945 propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer; |
| 1974 size = sizeof(devManf); | 1946 size = sizeof(devManf); |
| 1975 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress, | 1947 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress, |
| 1976 0, NULL, &size, devManf)); | 1948 0, NULL, &size, devManf)); |
| 1977 | 1949 |
| 1978 if (isInput) { | 1950 if (isInput) { |
| 1979 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Input device: %s %s", | 1951 LOG(LS_VERBOSE) << "Input device: " << devManf << " " << devName; |
| 1980 devManf, devName); | |
| 1981 } else { | 1952 } else { |
| 1982 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Output device: %s %s", | 1953 LOG(LS_VERBOSE) << "Output device: " << devManf << " " << devName; |
| 1983 devManf, devName); | |
| 1984 } | 1954 } |
| 1985 | 1955 |
| 1986 return 0; | 1956 return 0; |
| 1987 } | 1957 } |
| 1988 | 1958 |
| 1989 OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() { | 1959 OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() { |
| 1990 // Our preferred format to work with. | 1960 // Our preferred format to work with. |
| 1991 _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC; | 1961 _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC; |
| 1992 _outDesiredFormat.mChannelsPerFrame = _playChannels; | 1962 _outDesiredFormat.mChannelsPerFrame = _playChannels; |
| 1993 | 1963 |
| (...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2068 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 2038 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 2069 _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream)); | 2039 _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream)); |
| 2070 propertyAddress.mSelector = kAudioStreamPropertyLatency; | 2040 propertyAddress.mSelector = kAudioStreamPropertyLatency; |
| 2071 size = sizeof(UInt32); | 2041 size = sizeof(UInt32); |
| 2072 latency = 0; | 2042 latency = 0; |
| 2073 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 2043 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 2074 _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency)); | 2044 _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency)); |
| 2075 _renderLatencyUs += | 2045 _renderLatencyUs += |
| 2076 static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate); | 2046 static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate); |
| 2077 | 2047 |
| 2078 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 2048 LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples=" |
| 2079 " initial playout status: _renderDelayOffsetSamples=%d," | 2049 << _renderDelayOffsetSamples << ", _renderDelayUs=" |
| 2080 " _renderDelayUs=%d, _renderLatencyUs=%d", | 2050 << _renderDelayUs << ", _renderLatencyUs=" |
| 2081 _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs); | 2051 << _renderLatencyUs; |
| 2082 return 0; | 2052 return 0; |
| 2083 } | 2053 } |
| 2084 | 2054 |
| 2085 OSStatus AudioDeviceMac::objectListenerProc( | 2055 OSStatus AudioDeviceMac::objectListenerProc( |
| 2086 AudioObjectID objectId, | 2056 AudioObjectID objectId, |
| 2087 UInt32 numberAddresses, | 2057 UInt32 numberAddresses, |
| 2088 const AudioObjectPropertyAddress addresses[], | 2058 const AudioObjectPropertyAddress addresses[], |
| 2089 void* clientData) { | 2059 void* clientData) { |
| 2090 AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData; | 2060 AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData; |
| 2091 RTC_DCHECK(ptrThis != NULL); | 2061 RTC_DCHECK(ptrThis != NULL); |
| 2092 | 2062 |
| 2093 ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses); | 2063 ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses); |
| 2094 | 2064 |
| 2095 // AudioObjectPropertyListenerProc functions are supposed to return 0 | 2065 // AudioObjectPropertyListenerProc functions are supposed to return 0 |
| 2096 return 0; | 2066 return 0; |
| 2097 } | 2067 } |
| 2098 | 2068 |
| 2099 OSStatus AudioDeviceMac::implObjectListenerProc( | 2069 OSStatus AudioDeviceMac::implObjectListenerProc( |
| 2100 const AudioObjectID objectId, | 2070 const AudioObjectID objectId, |
| 2101 const UInt32 numberAddresses, | 2071 const UInt32 numberAddresses, |
| 2102 const AudioObjectPropertyAddress addresses[]) { | 2072 const AudioObjectPropertyAddress addresses[]) { |
| 2103 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | 2073 LOG(LS_VERBOSE) << "AudioDeviceMac::implObjectListenerProc()"; |
| 2104 "AudioDeviceMac::implObjectListenerProc()"); | |
| 2105 | 2074 |
| 2106 for (UInt32 i = 0; i < numberAddresses; i++) { | 2075 for (UInt32 i = 0; i < numberAddresses; i++) { |
| 2107 if (addresses[i].mSelector == kAudioHardwarePropertyDevices) { | 2076 if (addresses[i].mSelector == kAudioHardwarePropertyDevices) { |
| 2108 HandleDeviceChange(); | 2077 HandleDeviceChange(); |
| 2109 } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) { | 2078 } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) { |
| 2110 HandleStreamFormatChange(objectId, addresses[i]); | 2079 HandleStreamFormatChange(objectId, addresses[i]); |
| 2111 } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) { | 2080 } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) { |
| 2112 HandleDataSourceChange(objectId, addresses[i]); | 2081 HandleDataSourceChange(objectId, addresses[i]); |
| 2113 } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) { | 2082 } else if (addresses[i].mSelector == kAudioDeviceProcessorOverload) { |
| 2114 HandleProcessorOverload(addresses[i]); | 2083 HandleProcessorOverload(addresses[i]); |
| 2115 } | 2084 } |
| 2116 } | 2085 } |
| 2117 | 2086 |
| 2118 return 0; | 2087 return 0; |
| 2119 } | 2088 } |
| 2120 | 2089 |
| 2121 int32_t AudioDeviceMac::HandleDeviceChange() { | 2090 int32_t AudioDeviceMac::HandleDeviceChange() { |
| 2122 OSStatus err = noErr; | 2091 OSStatus err = noErr; |
| 2123 | 2092 |
| 2124 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | 2093 LOG(LS_VERBOSE) << "kAudioHardwarePropertyDevices"; |
| 2125 "kAudioHardwarePropertyDevices"); | |
| 2126 | 2094 |
| 2127 // A device has changed. Check if our registered devices have been removed. | 2095 // A device has changed. Check if our registered devices have been removed. |
| 2128 // Ensure the devices have been initialized, meaning the IDs are valid. | 2096 // Ensure the devices have been initialized, meaning the IDs are valid. |
| 2129 if (MicrophoneIsInitialized()) { | 2097 if (MicrophoneIsInitialized()) { |
| 2130 AudioObjectPropertyAddress propertyAddress = { | 2098 AudioObjectPropertyAddress propertyAddress = { |
| 2131 kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0}; | 2099 kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0}; |
| 2132 UInt32 deviceIsAlive = 1; | 2100 UInt32 deviceIsAlive = 1; |
| 2133 UInt32 size = sizeof(UInt32); | 2101 UInt32 size = sizeof(UInt32); |
| 2134 err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL, | 2102 err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL, |
| 2135 &size, &deviceIsAlive); | 2103 &size, &deviceIsAlive); |
| 2136 | 2104 |
| 2137 if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) { | 2105 if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) { |
| 2138 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 2106 LOG(LS_WARNING) << "Capture device is not alive (probably removed)"; |
| 2139 "Capture device is not alive (probably removed)"); | |
| 2140 AtomicSet32(&_captureDeviceIsAlive, 0); | 2107 AtomicSet32(&_captureDeviceIsAlive, 0); |
| 2141 _mixerManager.CloseMicrophone(); | 2108 _mixerManager.CloseMicrophone(); |
| 2142 if (_recError == 1) { | 2109 if (_recError == 1) { |
| 2143 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 2110 LOG(LS_WARNING) << "pending recording error exists"; |
| 2144 " pending recording error exists"); | |
| 2145 } | 2111 } |
| 2146 _recError = 1; // triggers callback from module process thread | 2112 _recError = 1; // triggers callback from module process thread |
| 2147 } else if (err != noErr) { | 2113 } else if (err != noErr) { |
| 2148 logCAMsg(kTraceError, kTraceAudioDevice, _id, | 2114 logCAMsg(rtc::LS_ERROR, |
| 2149 "Error in AudioDeviceGetProperty()", (const char*)&err); | 2115 "Error in AudioDeviceGetProperty()", (const char*)&err); |
| 2150 return -1; | 2116 return -1; |
| 2151 } | 2117 } |
| 2152 } | 2118 } |
| 2153 | 2119 |
| 2154 if (SpeakerIsInitialized()) { | 2120 if (SpeakerIsInitialized()) { |
| 2155 AudioObjectPropertyAddress propertyAddress = { | 2121 AudioObjectPropertyAddress propertyAddress = { |
| 2156 kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0}; | 2122 kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0}; |
| 2157 UInt32 deviceIsAlive = 1; | 2123 UInt32 deviceIsAlive = 1; |
| 2158 UInt32 size = sizeof(UInt32); | 2124 UInt32 size = sizeof(UInt32); |
| 2159 err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL, | 2125 err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL, |
| 2160 &size, &deviceIsAlive); | 2126 &size, &deviceIsAlive); |
| 2161 | 2127 |
| 2162 if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) { | 2128 if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) { |
| 2163 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 2129 LOG(LS_WARNING) << "Render device is not alive (probably removed)"; |
| 2164 "Render device is not alive (probably removed)"); | |
| 2165 AtomicSet32(&_renderDeviceIsAlive, 0); | 2130 AtomicSet32(&_renderDeviceIsAlive, 0); |
| 2166 _mixerManager.CloseSpeaker(); | 2131 _mixerManager.CloseSpeaker(); |
| 2167 if (_playError == 1) { | 2132 if (_playError == 1) { |
| 2168 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 2133 LOG(LS_WARNING) << "pending playout error exists"; |
| 2169 " pending playout error exists"); | |
| 2170 } | 2134 } |
| 2171 _playError = 1; // triggers callback from module process thread | 2135 _playError = 1; // triggers callback from module process thread |
| 2172 } else if (err != noErr) { | 2136 } else if (err != noErr) { |
| 2173 logCAMsg(kTraceError, kTraceAudioDevice, _id, | 2137 logCAMsg(rtc::LS_ERROR, |
| 2174 "Error in AudioDeviceGetProperty()", (const char*)&err); | 2138 "Error in AudioDeviceGetProperty()", (const char*)&err); |
| 2175 return -1; | 2139 return -1; |
| 2176 } | 2140 } |
| 2177 } | 2141 } |
| 2178 | 2142 |
| 2179 return 0; | 2143 return 0; |
| 2180 } | 2144 } |
| 2181 | 2145 |
| 2182 int32_t AudioDeviceMac::HandleStreamFormatChange( | 2146 int32_t AudioDeviceMac::HandleStreamFormatChange( |
| 2183 const AudioObjectID objectId, | 2147 const AudioObjectID objectId, |
| 2184 const AudioObjectPropertyAddress propertyAddress) { | 2148 const AudioObjectPropertyAddress propertyAddress) { |
| 2185 OSStatus err = noErr; | 2149 OSStatus err = noErr; |
| 2186 | 2150 |
| 2187 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Stream format changed"); | 2151 LOG(LS_VERBOSE) << "Stream format changed"; |
| 2188 | 2152 |
| 2189 if (objectId != _inputDeviceID && objectId != _outputDeviceID) { | 2153 if (objectId != _inputDeviceID && objectId != _outputDeviceID) { |
| 2190 return 0; | 2154 return 0; |
| 2191 } | 2155 } |
| 2192 | 2156 |
| 2193 // Get the new device format | 2157 // Get the new device format |
| 2194 AudioStreamBasicDescription streamFormat; | 2158 AudioStreamBasicDescription streamFormat; |
| 2195 UInt32 size = sizeof(streamFormat); | 2159 UInt32 size = sizeof(streamFormat); |
| 2196 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 2160 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 2197 objectId, &propertyAddress, 0, NULL, &size, &streamFormat)); | 2161 objectId, &propertyAddress, 0, NULL, &size, &streamFormat)); |
| 2198 | 2162 |
| 2199 if (streamFormat.mFormatID != kAudioFormatLinearPCM) { | 2163 if (streamFormat.mFormatID != kAudioFormatLinearPCM) { |
| 2200 logCAMsg(kTraceError, kTraceAudioDevice, _id, | 2164 logCAMsg(rtc::LS_ERROR, |
| 2201 "Unacceptable input stream format -> mFormatID", | 2165 "Unacceptable input stream format -> mFormatID", |
| 2202 (const char*)&streamFormat.mFormatID); | 2166 (const char*)&streamFormat.mFormatID); |
| 2203 return -1; | 2167 return -1; |
| 2204 } | 2168 } |
| 2205 | 2169 |
| 2206 if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { | 2170 if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { |
| 2207 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2171 LOG(LS_ERROR) << "Too many channels on device (mChannelsPerFrame = " |
| 2208 "Too many channels on device (mChannelsPerFrame = %d)", | 2172 << streamFormat.mChannelsPerFrame << ")"; |
| 2209 streamFormat.mChannelsPerFrame); | |
| 2210 return -1; | 2173 return -1; |
| 2211 } | 2174 } |
| 2212 | 2175 |
| 2213 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Stream format:"); | 2176 LOG(LS_VERBOSE) << "Stream format:"; |
| 2214 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 2177 LOG(LS_VERBOSE) << "mSampleRate = " << streamFormat.mSampleRate |
| 2215 "mSampleRate = %f, mChannelsPerFrame = %u", | 2178 << ", mChannelsPerFrame = " << streamFormat.mChannelsPerFrame; |
| 2216 streamFormat.mSampleRate, streamFormat.mChannelsPerFrame); | 2179 LOG(LS_VERBOSE) << "mBytesPerPacket = " << streamFormat.mBytesPerPacket |
| 2217 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 2180 << ", mFramesPerPacket = " << streamFormat.mFramesPerPacket; |
| 2218 "mBytesPerPacket = %u, mFramesPerPacket = %u", | 2181 LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame |
| 2219 streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket); | 2182 << ", mBitsPerChannel = " << streamFormat.mBitsPerChannel; |
| 2220 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 2183 LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags; |
| 2221 "mBytesPerFrame = %u, mBitsPerChannel = %u", | 2184 logCAMsg(rtc::LS_VERBOSE, "mFormatID", |
| 2222 streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel); | |
| 2223 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "mFormatFlags = %u", | |
| 2224 streamFormat.mFormatFlags); | |
| 2225 logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID", | |
| 2226 (const char*)&streamFormat.mFormatID); | 2185 (const char*)&streamFormat.mFormatID); |
| 2227 | 2186 |
| 2228 if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) { | 2187 if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) { |
| 2229 const int io_block_size_samples = streamFormat.mChannelsPerFrame * | 2188 const int io_block_size_samples = streamFormat.mChannelsPerFrame * |
| 2230 streamFormat.mSampleRate / 100 * | 2189 streamFormat.mSampleRate / 100 * |
| 2231 N_BLOCKS_IO; | 2190 N_BLOCKS_IO; |
| 2232 if (io_block_size_samples > _captureBufSizeSamples) { | 2191 if (io_block_size_samples > _captureBufSizeSamples) { |
| 2233 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2192 LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples |
| 2234 "Input IO block size (%d) is larger than ring buffer (%u)", | 2193 << ") is larger than ring buffer (" |
| 2235 io_block_size_samples, _captureBufSizeSamples); | 2194 << _captureBufSizeSamples << ")"; |
| 2236 return -1; | 2195 return -1; |
| 2237 } | 2196 } |
| 2238 | 2197 |
| 2239 memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat)); | 2198 memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat)); |
| 2240 | 2199 |
| 2241 if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) { | 2200 if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) { |
| 2242 _inDesiredFormat.mChannelsPerFrame = 2; | 2201 _inDesiredFormat.mChannelsPerFrame = 2; |
| 2243 } else { | 2202 } else { |
| 2244 // Disable stereo recording when we only have one channel on the device. | 2203 // Disable stereo recording when we only have one channel on the device. |
| 2245 _inDesiredFormat.mChannelsPerFrame = 1; | 2204 _inDesiredFormat.mChannelsPerFrame = 1; |
| 2246 _recChannels = 1; | 2205 _recChannels = 1; |
| 2247 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 2206 LOG(LS_VERBOSE) << "Stereo recording unavailable on this device"; |
| 2248 "Stereo recording unavailable on this device"); | |
| 2249 } | 2207 } |
| 2250 | 2208 |
| 2251 if (_ptrAudioBuffer) { | 2209 if (_ptrAudioBuffer) { |
| 2252 // Update audio buffer with the selected parameters | 2210 // Update audio buffer with the selected parameters |
| 2253 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); | 2211 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); |
| 2254 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); | 2212 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); |
| 2255 } | 2213 } |
| 2256 | 2214 |
| 2257 // Recreate the converter with the new format | 2215 // Recreate the converter with the new format |
| 2258 // TODO(xians): make this thread safe | 2216 // TODO(xians): make this thread safe |
| 2259 WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter)); | 2217 WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter)); |
| 2260 | 2218 |
| 2261 WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat, | 2219 WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat, |
| 2262 &_captureConverter)); | 2220 &_captureConverter)); |
| 2263 } else { | 2221 } else { |
| 2264 memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat)); | 2222 memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat)); |
| 2265 | 2223 |
| 2266 // Our preferred format to work with | 2224 // Our preferred format to work with |
| 2267 if (_outStreamFormat.mChannelsPerFrame < 2) { | 2225 if (_outStreamFormat.mChannelsPerFrame < 2) { |
| 2268 _playChannels = 1; | 2226 _playChannels = 1; |
| 2269 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 2227 LOG(LS_VERBOSE) << "Stereo playout unavailable on this device"; |
| 2270 "Stereo playout unavailable on this device"); | |
| 2271 } | 2228 } |
| 2272 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat()); | 2229 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat()); |
| 2273 } | 2230 } |
| 2274 return 0; | 2231 return 0; |
| 2275 } | 2232 } |
| 2276 | 2233 |
| 2277 int32_t AudioDeviceMac::HandleDataSourceChange( | 2234 int32_t AudioDeviceMac::HandleDataSourceChange( |
| 2278 const AudioObjectID objectId, | 2235 const AudioObjectID objectId, |
| 2279 const AudioObjectPropertyAddress propertyAddress) { | 2236 const AudioObjectPropertyAddress propertyAddress) { |
| 2280 OSStatus err = noErr; | 2237 OSStatus err = noErr; |
| 2281 | 2238 |
| 2282 if (_macBookPro && | 2239 if (_macBookPro && |
| 2283 propertyAddress.mScope == kAudioDevicePropertyScopeOutput) { | 2240 propertyAddress.mScope == kAudioDevicePropertyScopeOutput) { |
| 2284 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Data source changed"); | 2241 LOG(LS_VERBOSE) << "Data source changed"; |
| 2285 | 2242 |
| 2286 _macBookProPanRight = false; | 2243 _macBookProPanRight = false; |
| 2287 UInt32 dataSource = 0; | 2244 UInt32 dataSource = 0; |
| 2288 UInt32 size = sizeof(UInt32); | 2245 UInt32 size = sizeof(UInt32); |
| 2289 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 2246 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 2290 objectId, &propertyAddress, 0, NULL, &size, &dataSource)); | 2247 objectId, &propertyAddress, 0, NULL, &size, &dataSource)); |
| 2291 if (dataSource == 'ispk') { | 2248 if (dataSource == 'ispk') { |
| 2292 _macBookProPanRight = true; | 2249 _macBookProPanRight = true; |
| 2293 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 2250 LOG(LS_VERBOSE) |
| 2294 "MacBook Pro using internal speakers; stereo panning right"); | 2251 << "MacBook Pro using internal speakers; stereo panning right"; |
| 2295 } else { | 2252 } else { |
| 2296 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 2253 LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers"; |
| 2297 "MacBook Pro not using internal speakers"); | |
| 2298 } | 2254 } |
| 2299 } | 2255 } |
| 2300 | 2256 |
| 2301 return 0; | 2257 return 0; |
| 2302 } | 2258 } |
| 2303 int32_t AudioDeviceMac::HandleProcessorOverload( | 2259 int32_t AudioDeviceMac::HandleProcessorOverload( |
| 2304 const AudioObjectPropertyAddress propertyAddress) { | 2260 const AudioObjectPropertyAddress propertyAddress) { |
| 2305 // TODO(xians): we probably want to notify the user in some way of the | 2261 // TODO(xians): we probably want to notify the user in some way of the |
| 2306 // overload. However, the Windows interpretations of these errors seem to | 2262 // overload. However, the Windows interpretations of these errors seem to |
| 2307 // be more severe than what ProcessorOverload is thrown for. | 2263 // be more severe than what ProcessorOverload is thrown for. |
| 2308 // | 2264 // |
| 2309 // We don't log the notification, as it's sent from the HAL's IO thread. We | 2265 // We don't log the notification, as it's sent from the HAL's IO thread. We |
| 2310 // don't want to slow it down even further. | 2266 // don't want to slow it down even further. |
| 2311 if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) { | 2267 if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) { |
| 2312 // WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor | 2268 // LOG(LS_WARNING) << "Capture processor // overload"; |
| 2313 // overload"); | |
| 2314 //_callback->ProblemIsReported( | 2269 //_callback->ProblemIsReported( |
| 2315 // SndCardStreamObserver::ERecordingProblem); | 2270 // SndCardStreamObserver::ERecordingProblem); |
| 2316 } else { | 2271 } else { |
| 2317 // WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 2272 // LOG(LS_WARNING) << "Render processor overload"; |
| 2318 // "Render processor overload"); | |
| 2319 //_callback->ProblemIsReported( | 2273 //_callback->ProblemIsReported( |
| 2320 // SndCardStreamObserver::EPlaybackProblem); | 2274 // SndCardStreamObserver::EPlaybackProblem); |
| 2321 } | 2275 } |
| 2322 | 2276 |
| 2323 return 0; | 2277 return 0; |
| 2324 } | 2278 } |
| 2325 | 2279 |
| 2326 // ============================================================================ | 2280 // ============================================================================ |
| 2327 // Thread Methods | 2281 // Thread Methods |
| 2328 // ============================================================================ | 2282 // ============================================================================ |
| (...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2399 if (_doStop) { | 2353 if (_doStop) { |
| 2400 _critSect.Enter(); | 2354 _critSect.Enter(); |
| 2401 if (_doStop) { | 2355 if (_doStop) { |
| 2402 if (_twoDevices || (!_recording && !_playing)) { | 2356 if (_twoDevices || (!_recording && !_playing)) { |
| 2403 // In the case of a shared device, the single driving ioProc | 2357 // In the case of a shared device, the single driving ioProc |
| 2404 // is stopped here | 2358 // is stopped here |
| 2405 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID)); | 2359 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID)); |
| 2406 WEBRTC_CA_LOG_WARN( | 2360 WEBRTC_CA_LOG_WARN( |
| 2407 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID)); | 2361 AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID)); |
| 2408 if (err == noErr) { | 2362 if (err == noErr) { |
| 2409 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | 2363 LOG(LS_VERBOSE) << "Playout or shared device stopped"; |
| 2410 " Playout or shared device stopped"); | |
| 2411 } | 2364 } |
| 2412 } | 2365 } |
| 2413 | 2366 |
| 2414 _doStop = false; | 2367 _doStop = false; |
| 2415 _stopEvent.Set(); | 2368 _stopEvent.Set(); |
| 2416 _critSect.Leave(); | 2369 _critSect.Leave(); |
| 2417 return 0; | 2370 return 0; |
| 2418 } | 2371 } |
| 2419 _critSect.Leave(); | 2372 _critSect.Leave(); |
| 2420 } | 2373 } |
| 2421 | 2374 |
| 2422 if (!_playing) { | 2375 if (!_playing) { |
| 2423 // This can be the case when a shared device is capturing but not | 2376 // This can be the case when a shared device is capturing but not |
| 2424 // rendering. We allow the checks above before returning to avoid a | 2377 // rendering. We allow the checks above before returning to avoid a |
| 2425 // timeout when capturing is stopped. | 2378 // timeout when capturing is stopped. |
| 2426 return 0; | 2379 return 0; |
| 2427 } | 2380 } |
| 2428 | 2381 |
| 2429 RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0); | 2382 RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0); |
| 2430 UInt32 size = | 2383 UInt32 size = |
| 2431 outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame; | 2384 outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame; |
| 2432 | 2385 |
| 2433 // TODO(xians): signal an error somehow? | 2386 // TODO(xians): signal an error somehow? |
| 2434 err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc, | 2387 err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc, |
| 2435 this, &size, outputData, NULL); | 2388 this, &size, outputData, NULL); |
| 2436 if (err != noErr) { | 2389 if (err != noErr) { |
| 2437 if (err == 1) { | 2390 if (err == 1) { |
| 2438 // This is our own error. | 2391 // This is our own error. |
| 2439 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2392 LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()"; |
| 2440 " Error in AudioConverterFillComplexBuffer()"); | |
| 2441 return 1; | 2393 return 1; |
| 2442 } else { | 2394 } else { |
| 2443 logCAMsg(kTraceError, kTraceAudioDevice, _id, | 2395 logCAMsg(rtc::LS_ERROR, |
| 2444 "Error in AudioConverterFillComplexBuffer()", (const char*)&err); | 2396 "Error in AudioConverterFillComplexBuffer()", (const char*)&err); |
| 2445 return 1; | 2397 return 1; |
| 2446 } | 2398 } |
| 2447 } | 2399 } |
| 2448 | 2400 |
| 2449 PaRingBufferSize bufSizeSamples = | 2401 PaRingBufferSize bufSizeSamples = |
| 2450 PaUtil_GetRingBufferReadAvailable(_paRenderBuffer); | 2402 PaUtil_GetRingBufferReadAvailable(_paRenderBuffer); |
| 2451 | 2403 |
| 2452 int32_t renderDelayUs = | 2404 int32_t renderDelayUs = |
| 2453 static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5); | 2405 static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5); |
| (...skipping 17 matching lines...) Expand all Loading... |
| 2471 // Always give the converter as much as it wants, zero padding as required. | 2423 // Always give the converter as much as it wants, zero padding as required. |
| 2472 data->mBuffers->mDataByteSize = | 2424 data->mBuffers->mDataByteSize = |
| 2473 *numberDataPackets * _outDesiredFormat.mBytesPerPacket; | 2425 *numberDataPackets * _outDesiredFormat.mBytesPerPacket; |
| 2474 data->mBuffers->mData = _renderConvertData; | 2426 data->mBuffers->mData = _renderConvertData; |
| 2475 memset(_renderConvertData, 0, sizeof(_renderConvertData)); | 2427 memset(_renderConvertData, 0, sizeof(_renderConvertData)); |
| 2476 | 2428 |
| 2477 PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples); | 2429 PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples); |
| 2478 | 2430 |
| 2479 kern_return_t kernErr = semaphore_signal_all(_renderSemaphore); | 2431 kern_return_t kernErr = semaphore_signal_all(_renderSemaphore); |
| 2480 if (kernErr != KERN_SUCCESS) { | 2432 if (kernErr != KERN_SUCCESS) { |
| 2481 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2433 LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr; |
| 2482 " semaphore_signal_all() error: %d", kernErr); | |
| 2483 return 1; | 2434 return 1; |
| 2484 } | 2435 } |
| 2485 | 2436 |
| 2486 return 0; | 2437 return 0; |
| 2487 } | 2438 } |
| 2488 | 2439 |
| 2489 OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData, | 2440 OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData, |
| 2490 const AudioTimeStamp* inputTime) { | 2441 const AudioTimeStamp* inputTime) { |
| 2491 OSStatus err = noErr; | 2442 OSStatus err = noErr; |
| 2492 UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime); | 2443 UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime); |
| 2493 UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); | 2444 UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime()); |
| 2494 | 2445 |
| 2495 // Check if we should close down audio device | 2446 // Check if we should close down audio device |
| 2496 // Double-checked locking optimization to remove locking overhead | 2447 // Double-checked locking optimization to remove locking overhead |
| 2497 if (_doStopRec) { | 2448 if (_doStopRec) { |
| 2498 _critSect.Enter(); | 2449 _critSect.Enter(); |
| 2499 if (_doStopRec) { | 2450 if (_doStopRec) { |
| 2500 // This will be signalled only when a shared device is not in use. | 2451 // This will be signalled only when a shared device is not in use. |
| 2501 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID)); | 2452 WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID)); |
| 2502 WEBRTC_CA_LOG_WARN( | 2453 WEBRTC_CA_LOG_WARN( |
| 2503 AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID)); | 2454 AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID)); |
| 2504 if (err == noErr) { | 2455 if (err == noErr) { |
| 2505 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | 2456 LOG(LS_VERBOSE) << "Recording device stopped"; |
| 2506 " Recording device stopped"); | |
| 2507 } | 2457 } |
| 2508 | 2458 |
| 2509 _doStopRec = false; | 2459 _doStopRec = false; |
| 2510 _stopEventRec.Set(); | 2460 _stopEventRec.Set(); |
| 2511 _critSect.Leave(); | 2461 _critSect.Leave(); |
| 2512 return 0; | 2462 return 0; |
| 2513 } | 2463 } |
| 2514 _critSect.Leave(); | 2464 _critSect.Leave(); |
| 2515 } | 2465 } |
| 2516 | 2466 |
| (...skipping 16 matching lines...) Expand all Loading... |
| 2533 | 2483 |
| 2534 RTC_DCHECK(inputData->mNumberBuffers == 1); | 2484 RTC_DCHECK(inputData->mNumberBuffers == 1); |
| 2535 PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize * | 2485 PaRingBufferSize numSamples = inputData->mBuffers->mDataByteSize * |
| 2536 _inStreamFormat.mChannelsPerFrame / | 2486 _inStreamFormat.mChannelsPerFrame / |
| 2537 _inStreamFormat.mBytesPerPacket; | 2487 _inStreamFormat.mBytesPerPacket; |
| 2538 PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData, | 2488 PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData, |
| 2539 numSamples); | 2489 numSamples); |
| 2540 | 2490 |
| 2541 kern_return_t kernErr = semaphore_signal_all(_captureSemaphore); | 2491 kern_return_t kernErr = semaphore_signal_all(_captureSemaphore); |
| 2542 if (kernErr != KERN_SUCCESS) { | 2492 if (kernErr != KERN_SUCCESS) { |
| 2543 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2493 LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr; |
| 2544 " semaphore_signal_all() error: %d", kernErr); | |
| 2545 } | 2494 } |
| 2546 | 2495 |
| 2547 return err; | 2496 return err; |
| 2548 } | 2497 } |
| 2549 | 2498 |
| 2550 OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets, | 2499 OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets, |
| 2551 AudioBufferList* data) { | 2500 AudioBufferList* data) { |
| 2552 RTC_DCHECK(data->mNumberBuffers == 1); | 2501 RTC_DCHECK(data->mNumberBuffers == 1); |
| 2553 PaRingBufferSize numSamples = | 2502 PaRingBufferSize numSamples = |
| 2554 *numberDataPackets * _inStreamFormat.mChannelsPerFrame; | 2503 *numberDataPackets * _inStreamFormat.mChannelsPerFrame; |
| 2555 | 2504 |
| 2556 while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) { | 2505 while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) { |
| 2557 mach_timespec_t timeout; | 2506 mach_timespec_t timeout; |
| 2558 timeout.tv_sec = 0; | 2507 timeout.tv_sec = 0; |
| 2559 timeout.tv_nsec = TIMER_PERIOD_MS; | 2508 timeout.tv_nsec = TIMER_PERIOD_MS; |
| 2560 | 2509 |
| 2561 kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout); | 2510 kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout); |
| 2562 if (kernErr == KERN_OPERATION_TIMED_OUT) { | 2511 if (kernErr == KERN_OPERATION_TIMED_OUT) { |
| 2563 int32_t signal = AtomicGet32(&_captureDeviceIsAlive); | 2512 int32_t signal = AtomicGet32(&_captureDeviceIsAlive); |
| 2564 if (signal == 0) { | 2513 if (signal == 0) { |
| 2565 // The capture device is no longer alive; stop the worker thread. | 2514 // The capture device is no longer alive; stop the worker thread. |
| 2566 *numberDataPackets = 0; | 2515 *numberDataPackets = 0; |
| 2567 return 1; | 2516 return 1; |
| 2568 } | 2517 } |
| 2569 } else if (kernErr != KERN_SUCCESS) { | 2518 } else if (kernErr != KERN_SUCCESS) { |
| 2570 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2519 LOG(LS_ERROR) << "semaphore_wait() error: " << kernErr; |
| 2571 " semaphore_wait() error: %d", kernErr); | |
| 2572 } | 2520 } |
| 2573 } | 2521 } |
| 2574 | 2522 |
| 2575 // Pass the read pointer directly to the converter to avoid a memcpy. | 2523 // Pass the read pointer directly to the converter to avoid a memcpy. |
| 2576 void* dummyPtr; | 2524 void* dummyPtr; |
| 2577 PaRingBufferSize dummySize; | 2525 PaRingBufferSize dummySize; |
| 2578 PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples, | 2526 PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples, |
| 2579 &data->mBuffers->mData, &numSamples, | 2527 &data->mBuffers->mData, &numSamples, |
| 2580 &dummyPtr, &dummySize); | 2528 &dummyPtr, &dummySize); |
| 2581 PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples); | 2529 PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples); |
| (...skipping 21 matching lines...) Expand all Loading... |
| 2603 timeout.tv_nsec = TIMER_PERIOD_MS; | 2551 timeout.tv_nsec = TIMER_PERIOD_MS; |
| 2604 | 2552 |
| 2605 kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout); | 2553 kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout); |
| 2606 if (kernErr == KERN_OPERATION_TIMED_OUT) { | 2554 if (kernErr == KERN_OPERATION_TIMED_OUT) { |
| 2607 int32_t signal = AtomicGet32(&_renderDeviceIsAlive); | 2555 int32_t signal = AtomicGet32(&_renderDeviceIsAlive); |
| 2608 if (signal == 0) { | 2556 if (signal == 0) { |
| 2609 // The render device is no longer alive; stop the worker thread. | 2557 // The render device is no longer alive; stop the worker thread. |
| 2610 return false; | 2558 return false; |
| 2611 } | 2559 } |
| 2612 } else if (kernErr != KERN_SUCCESS) { | 2560 } else if (kernErr != KERN_SUCCESS) { |
| 2613 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2561 LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr; |
| 2614 " semaphore_timedwait() error: %d", kernErr); | |
| 2615 } | 2562 } |
| 2616 } | 2563 } |
| 2617 | 2564 |
| 2618 int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES]; | 2565 int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES]; |
| 2619 | 2566 |
| 2620 if (!_ptrAudioBuffer) { | 2567 if (!_ptrAudioBuffer) { |
| 2621 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2568 LOG(LS_ERROR) << "capture AudioBuffer is invalid"; |
| 2622 " capture AudioBuffer is invalid"); | |
| 2623 return false; | 2569 return false; |
| 2624 } | 2570 } |
| 2625 | 2571 |
| 2626 // Ask for new PCM data to be played out using the AudioDeviceBuffer. | 2572 // Ask for new PCM data to be played out using the AudioDeviceBuffer. |
| 2627 uint32_t nSamples = | 2573 uint32_t nSamples = |
| 2628 _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES); | 2574 _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES); |
| 2629 | 2575 |
| 2630 nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer); | 2576 nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer); |
| 2631 if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) { | 2577 if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) { |
| 2632 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2578 LOG(LS_ERROR) << "invalid number of output samples(" << nSamples << ")"; |
| 2633 " invalid number of output samples(%d)", nSamples); | |
| 2634 } | 2579 } |
| 2635 | 2580 |
| 2636 uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame; | 2581 uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame; |
| 2637 | 2582 |
| 2638 SInt16* pPlayBuffer = (SInt16*)&playBuffer; | 2583 SInt16* pPlayBuffer = (SInt16*)&playBuffer; |
| 2639 if (_macBookProPanRight && (_playChannels == 2)) { | 2584 if (_macBookProPanRight && (_playChannels == 2)) { |
| 2640 // Mix entirely into the right channel and zero the left channel. | 2585 // Mix entirely into the right channel and zero the left channel. |
| 2641 SInt32 sampleInt32 = 0; | 2586 SInt32 sampleInt32 = 0; |
| 2642 for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) { | 2587 for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) { |
| 2643 sampleInt32 = pPlayBuffer[sampleIdx]; | 2588 sampleInt32 = pPlayBuffer[sampleIdx]; |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2678 _inDesiredFormat.mBytesPerPacket * noRecSamples; | 2623 _inDesiredFormat.mBytesPerPacket * noRecSamples; |
| 2679 engineBuffer.mBuffers->mData = recordBuffer; | 2624 engineBuffer.mBuffers->mData = recordBuffer; |
| 2680 | 2625 |
| 2681 err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc, | 2626 err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc, |
| 2682 this, &size, &engineBuffer, NULL); | 2627 this, &size, &engineBuffer, NULL); |
| 2683 if (err != noErr) { | 2628 if (err != noErr) { |
| 2684 if (err == 1) { | 2629 if (err == 1) { |
| 2685 // This is our own error. | 2630 // This is our own error. |
| 2686 return false; | 2631 return false; |
| 2687 } else { | 2632 } else { |
| 2688 logCAMsg(kTraceError, kTraceAudioDevice, _id, | 2633 logCAMsg(rtc::LS_ERROR, |
| 2689 "Error in AudioConverterFillComplexBuffer()", (const char*)&err); | 2634 "Error in AudioConverterFillComplexBuffer()", (const char*)&err); |
| 2690 return false; | 2635 return false; |
| 2691 } | 2636 } |
| 2692 } | 2637 } |
| 2693 | 2638 |
| 2694 // TODO(xians): what if the returned size is incorrect? | 2639 // TODO(xians): what if the returned size is incorrect? |
| 2695 if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) { | 2640 if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) { |
| 2696 uint32_t currentMicLevel(0); | 2641 uint32_t currentMicLevel(0); |
| 2697 uint32_t newMicLevel(0); | 2642 uint32_t newMicLevel(0); |
| 2698 int32_t msecOnPlaySide; | 2643 int32_t msecOnPlaySide; |
| 2699 int32_t msecOnRecordSide; | 2644 int32_t msecOnRecordSide; |
| 2700 | 2645 |
| 2701 int32_t captureDelayUs = AtomicGet32(&_captureDelayUs); | 2646 int32_t captureDelayUs = AtomicGet32(&_captureDelayUs); |
| 2702 int32_t renderDelayUs = AtomicGet32(&_renderDelayUs); | 2647 int32_t renderDelayUs = AtomicGet32(&_renderDelayUs); |
| 2703 | 2648 |
| 2704 msecOnPlaySide = | 2649 msecOnPlaySide = |
| 2705 static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5); | 2650 static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5); |
| 2706 msecOnRecordSide = | 2651 msecOnRecordSide = |
| 2707 static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5); | 2652 static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5); |
| 2708 | 2653 |
| 2709 if (!_ptrAudioBuffer) { | 2654 if (!_ptrAudioBuffer) { |
| 2710 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2655 LOG(LS_ERROR) << "capture AudioBuffer is invalid"; |
| 2711 " capture AudioBuffer is invalid"); | |
| 2712 return false; | 2656 return false; |
| 2713 } | 2657 } |
| 2714 | 2658 |
| 2715 // store the recorded buffer (no action will be taken if the | 2659 // store the recorded buffer (no action will be taken if the |
| 2716 // #recorded samples is not a full buffer) | 2660 // #recorded samples is not a full buffer) |
| 2717 _ptrAudioBuffer->SetRecordedBuffer((int8_t*)&recordBuffer, (uint32_t)size); | 2661 _ptrAudioBuffer->SetRecordedBuffer((int8_t*)&recordBuffer, (uint32_t)size); |
| 2718 | 2662 |
| 2719 if (AGC()) { | 2663 if (AGC()) { |
| 2720 // Use mod to ensure we check the volume on the first pass. | 2664 // Use mod to ensure we check the volume on the first pass. |
| 2721 if (get_mic_volume_counter_ms_ % kGetMicVolumeIntervalMs == 0) { | 2665 if (get_mic_volume_counter_ms_ % kGetMicVolumeIntervalMs == 0) { |
| (...skipping 15 matching lines...) Expand all Loading... |
| 2737 // to the observer using callback | 2681 // to the observer using callback |
| 2738 _ptrAudioBuffer->DeliverRecordedData(); | 2682 _ptrAudioBuffer->DeliverRecordedData(); |
| 2739 | 2683 |
| 2740 if (AGC()) { | 2684 if (AGC()) { |
| 2741 newMicLevel = _ptrAudioBuffer->NewMicLevel(); | 2685 newMicLevel = _ptrAudioBuffer->NewMicLevel(); |
| 2742 if (newMicLevel != 0) { | 2686 if (newMicLevel != 0) { |
| 2743 // The VQE will only deliver non-zero microphone levels when | 2687 // The VQE will only deliver non-zero microphone levels when |
| 2744 // a change is needed. | 2688 // a change is needed. |
| 2745 // Set this new mic level (received from the observer as return | 2689 // Set this new mic level (received from the observer as return |
| 2746 // value in the callback). | 2690 // value in the callback). |
| 2747 WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, | 2691 LOG(LS_VERBOSE) << "AGC change of volume: old=" << currentMicLevel |
| 2748 " AGC change of volume: old=%u => new=%u", | 2692 << " => new=" << newMicLevel; |
| 2749 currentMicLevel, newMicLevel); | |
| 2750 if (SetMicrophoneVolume(newMicLevel) == -1) { | 2693 if (SetMicrophoneVolume(newMicLevel) == -1) { |
| 2751 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 2694 LOG(LS_WARNING) |
| 2752 " the required modification of the microphone " | 2695 << "the required modification of the microphone volume failed"; |
| 2753 "volume failed"); | |
| 2754 } | 2696 } |
| 2755 } | 2697 } |
| 2756 } | 2698 } |
| 2757 } | 2699 } |
| 2758 | 2700 |
| 2759 return true; | 2701 return true; |
| 2760 } | 2702 } |
| 2761 | 2703 |
| 2762 bool AudioDeviceMac::KeyPressed() { | 2704 bool AudioDeviceMac::KeyPressed() { |
| 2763 bool key_down = false; | 2705 bool key_down = false; |
| 2764 // Loop through all Mac virtual key constant values. | 2706 // Loop through all Mac virtual key constant values. |
| 2765 for (unsigned int key_index = 0; key_index < arraysize(prev_key_state_); | 2707 for (unsigned int key_index = 0; key_index < arraysize(prev_key_state_); |
| 2766 ++key_index) { | 2708 ++key_index) { |
| 2767 bool keyState = | 2709 bool keyState = |
| 2768 CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index); | 2710 CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index); |
| 2769 // A false -> true change in keymap means a key is pressed. | 2711 // A false -> true change in keymap means a key is pressed. |
| 2770 key_down |= (keyState && !prev_key_state_[key_index]); | 2712 key_down |= (keyState && !prev_key_state_[key_index]); |
| 2771 // Save current state. | 2713 // Save current state. |
| 2772 prev_key_state_[key_index] = keyState; | 2714 prev_key_state_[key_index] = keyState; |
| 2773 } | 2715 } |
| 2774 return key_down; | 2716 return key_down; |
| 2775 } | 2717 } |
| 2776 } // namespace webrtc | 2718 } // namespace webrtc |
| OLD | NEW |