| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 100 matching lines...) |
| 111 pid_t hogPid = -1; | 111 pid_t hogPid = -1; |
| 112 | 112 |
| 113 _outputDeviceID = deviceID; | 113 _outputDeviceID = deviceID; |
| 114 | 114 |
| 115 // Check which process, if any, has hogged the device. | 115 // Check which process, if any, has hogged the device. |
| 116 AudioObjectPropertyAddress propertyAddress = { | 116 AudioObjectPropertyAddress propertyAddress = { |
| 117 kAudioDevicePropertyHogMode, kAudioDevicePropertyScopeOutput, 0}; | 117 kAudioDevicePropertyHogMode, kAudioDevicePropertyScopeOutput, 0}; |
| 118 | 118 |
| 119 size = sizeof(hogPid); | 119 size = sizeof(hogPid); |
| 120 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 120 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 121 _outputDeviceID, &propertyAddress, 0, NULL, &size, &hogPid)); | 121 _outputDeviceID, &propertyAddress, 0, nullptr, &size, &hogPid)); |
| 122 | 122 |
| 123 if (hogPid == -1) { | 123 if (hogPid == -1) { |
| 124 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 124 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 125 " No process has hogged the input device"); | 125 " No process has hogged the input device"); |
| 126 } | 126 } |
| 127 // getpid() is apparently "always successful" | 127 // getpid() is apparently "always successful" |
| 128 else if (hogPid == getpid()) { | 128 else if (hogPid == getpid()) { |
| 129 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 129 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 130 " Our process has hogged the input device"); | 130 " Our process has hogged the input device"); |
| 131 } else { | 131 } else { |
| 132 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 132 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 133 " Another process (pid = %d) has hogged the input device", | 133 " Another process (pid = %d) has hogged the input device", |
| 134 static_cast<int>(hogPid)); | 134 static_cast<int>(hogPid)); |
| 135 | 135 |
| 136 return -1; | 136 return -1; |
| 137 } | 137 } |
| 138 | 138 |
| 139 // get number of channels from stream format | 139 // get number of channels from stream format |
| 140 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; | 140 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; |
| 141 | 141 |
| 142 // Get the stream format, to be able to read the number of channels. | 142 // Get the stream format, to be able to read the number of channels. |
| 143 AudioStreamBasicDescription streamFormat; | 143 AudioStreamBasicDescription streamFormat; |
| 144 size = sizeof(AudioStreamBasicDescription); | 144 size = sizeof(AudioStreamBasicDescription); |
| 145 memset(&streamFormat, 0, size); | 145 memset(&streamFormat, 0, size); |
| 146 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 146 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 147 _outputDeviceID, &propertyAddress, 0, NULL, &size, &streamFormat)); | 147 _outputDeviceID, &propertyAddress, 0, nullptr, &size, &streamFormat)); |
| 148 | 148 |
| 149 _noOutputChannels = streamFormat.mChannelsPerFrame; | 149 _noOutputChannels = streamFormat.mChannelsPerFrame; |
| 150 | 150 |
| 151 return 0; | 151 return 0; |
| 152 } | 152 } |
| 153 | 153 |
| 154 int32_t AudioMixerManagerMac::OpenMicrophone(AudioDeviceID deviceID) { | 154 int32_t AudioMixerManagerMac::OpenMicrophone(AudioDeviceID deviceID) { |
| 155 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 155 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 156 "AudioMixerManagerMac::OpenMicrophone(id=%d)", deviceID); | 156 "AudioMixerManagerMac::OpenMicrophone(id=%d)", deviceID); |
| 157 | 157 |
| 158 CriticalSectionScoped lock(&_critSect); | 158 CriticalSectionScoped lock(&_critSect); |
| 159 | 159 |
| 160 OSStatus err = noErr; | 160 OSStatus err = noErr; |
| 161 UInt32 size = 0; | 161 UInt32 size = 0; |
| 162 pid_t hogPid = -1; | 162 pid_t hogPid = -1; |
| 163 | 163 |
| 164 _inputDeviceID = deviceID; | 164 _inputDeviceID = deviceID; |
| 165 | 165 |
| 166 // Check which process, if any, has hogged the device. | 166 // Check which process, if any, has hogged the device. |
| 167 AudioObjectPropertyAddress propertyAddress = { | 167 AudioObjectPropertyAddress propertyAddress = { |
| 168 kAudioDevicePropertyHogMode, kAudioDevicePropertyScopeInput, 0}; | 168 kAudioDevicePropertyHogMode, kAudioDevicePropertyScopeInput, 0}; |
| 169 size = sizeof(hogPid); | 169 size = sizeof(hogPid); |
| 170 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 170 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 171 _inputDeviceID, &propertyAddress, 0, NULL, &size, &hogPid)); | 171 _inputDeviceID, &propertyAddress, 0, nullptr, &size, &hogPid)); |
| 172 if (hogPid == -1) { | 172 if (hogPid == -1) { |
| 173 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 173 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 174 " No process has hogged the input device"); | 174 " No process has hogged the input device"); |
| 175 } | 175 } |
| 176 // getpid() is apparently "always successful" | 176 // getpid() is apparently "always successful" |
| 177 else if (hogPid == getpid()) { | 177 else if (hogPid == getpid()) { |
| 178 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 178 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 179 " Our process has hogged the input device"); | 179 " Our process has hogged the input device"); |
| 180 } else { | 180 } else { |
| 181 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 181 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 182 " Another process (pid = %d) has hogged the input device", | 182 " Another process (pid = %d) has hogged the input device", |
| 183 static_cast<int>(hogPid)); | 183 static_cast<int>(hogPid)); |
| 184 | 184 |
| 185 return -1; | 185 return -1; |
| 186 } | 186 } |
| 187 | 187 |
| 188 // get number of channels from stream format | 188 // get number of channels from stream format |
| 189 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; | 189 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; |
| 190 | 190 |
| 191 // Get the stream format, to be able to read the number of channels. | 191 // Get the stream format, to be able to read the number of channels. |
| 192 AudioStreamBasicDescription streamFormat; | 192 AudioStreamBasicDescription streamFormat; |
| 193 size = sizeof(AudioStreamBasicDescription); | 193 size = sizeof(AudioStreamBasicDescription); |
| 194 memset(&streamFormat, 0, size); | 194 memset(&streamFormat, 0, size); |
| 195 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 195 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 196 _inputDeviceID, &propertyAddress, 0, NULL, &size, &streamFormat)); | 196 _inputDeviceID, &propertyAddress, 0, nullptr, &size, &streamFormat)); |
| 197 | 197 |
| 198 _noInputChannels = streamFormat.mChannelsPerFrame; | 198 _noInputChannels = streamFormat.mChannelsPerFrame; |
| 199 | 199 |
| 200 return 0; | 200 return 0; |
| 201 } | 201 } |
| 202 | 202 |
| 203 bool AudioMixerManagerMac::SpeakerIsInitialized() const { | 203 bool AudioMixerManagerMac::SpeakerIsInitialized() const { |
| 204 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); | 204 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s", __FUNCTION__); |
| 205 | 205 |
| 206 return (_outputDeviceID != kAudioObjectUnknown); | 206 return (_outputDeviceID != kAudioObjectUnknown); |
| (...skipping 29 matching lines...) |
| 236 // Does the capture device have a master volume control? | 236 // Does the capture device have a master volume control? |
| 237 // If so, use it exclusively. | 237 // If so, use it exclusively. |
| 238 AudioObjectPropertyAddress propertyAddress = { | 238 AudioObjectPropertyAddress propertyAddress = { |
| 239 kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0}; | 239 kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0}; |
| 240 Boolean isSettable = false; | 240 Boolean isSettable = false; |
| 241 err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, | 241 err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, |
| 242 &isSettable); | 242 &isSettable); |
| 243 if (err == noErr && isSettable) { | 243 if (err == noErr && isSettable) { |
| 244 size = sizeof(vol); | 244 size = sizeof(vol); |
| 245 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( | 245 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
| 246 _outputDeviceID, &propertyAddress, 0, NULL, size, &vol)); | 246 _outputDeviceID, &propertyAddress, 0, nullptr, size, &vol)); |
| 247 | 247 |
| 248 return 0; | 248 return 0; |
| 249 } | 249 } |
| 250 | 250 |
| 251 // Otherwise try to set each channel. | 251 // Otherwise try to set each channel. |
| 252 for (UInt32 i = 1; i <= _noOutputChannels; i++) { | 252 for (UInt32 i = 1; i <= _noOutputChannels; i++) { |
| 253 propertyAddress.mElement = i; | 253 propertyAddress.mElement = i; |
| 254 isSettable = false; | 254 isSettable = false; |
| 255 err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, | 255 err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, |
| 256 &isSettable); | 256 &isSettable); |
| 257 if (err == noErr && isSettable) { | 257 if (err == noErr && isSettable) { |
| 258 size = sizeof(vol); | 258 size = sizeof(vol); |
| 259 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( | 259 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
| 260 _outputDeviceID, &propertyAddress, 0, NULL, size, &vol)); | 260 _outputDeviceID, &propertyAddress, 0, nullptr, size, &vol)); |
| 261 } | 261 } |
| 262 success = true; | 262 success = true; |
| 263 } | 263 } |
| 264 | 264 |
| 265 if (!success) { | 265 if (!success) { |
| 266 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 266 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 267 " Unable to set a volume on any output channel"); | 267 " Unable to set a volume on any output channel"); |
| 268 return -1; | 268 return -1; |
| 269 } | 269 } |
| 270 | 270 |
| (...skipping 15 matching lines...) |
| 286 | 286 |
| 287 // Does the device have a master volume control? | 287 // Does the device have a master volume control? |
| 288 // If so, use it exclusively. | 288 // If so, use it exclusively. |
| 289 AudioObjectPropertyAddress propertyAddress = { | 289 AudioObjectPropertyAddress propertyAddress = { |
| 290 kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0}; | 290 kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeOutput, 0}; |
| 291 Boolean hasProperty = | 291 Boolean hasProperty = |
| 292 AudioObjectHasProperty(_outputDeviceID, &propertyAddress); | 292 AudioObjectHasProperty(_outputDeviceID, &propertyAddress); |
| 293 if (hasProperty) { | 293 if (hasProperty) { |
| 294 size = sizeof(vol); | 294 size = sizeof(vol); |
| 295 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 295 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 296 _outputDeviceID, &propertyAddress, 0, NULL, &size, &vol)); | 296 _outputDeviceID, &propertyAddress, 0, nullptr, &size, &vol)); |
| 297 | 297 |
| 298 // vol 0.0 to 1.0 -> convert to 0 - 255 | 298 // vol 0.0 to 1.0 -> convert to 0 - 255 |
| 299 volume = static_cast<uint32_t>(vol * 255 + 0.5); | 299 volume = static_cast<uint32_t>(vol * 255 + 0.5); |
| 300 } else { | 300 } else { |
| 301 // Otherwise get the average volume across channels. | 301 // Otherwise get the average volume across channels. |
| 302 vol = 0; | 302 vol = 0; |
| 303 for (UInt32 i = 1; i <= _noOutputChannels; i++) { | 303 for (UInt32 i = 1; i <= _noOutputChannels; i++) { |
| 304 channelVol = 0; | 304 channelVol = 0; |
| 305 propertyAddress.mElement = i; | 305 propertyAddress.mElement = i; |
| 306 hasProperty = AudioObjectHasProperty(_outputDeviceID, &propertyAddress); | 306 hasProperty = AudioObjectHasProperty(_outputDeviceID, &propertyAddress); |
| 307 if (hasProperty) { | 307 if (hasProperty) { |
| 308 size = sizeof(channelVol); | 308 size = sizeof(channelVol); |
| 309 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 309 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 310 _outputDeviceID, &propertyAddress, 0, NULL, &size, &channelVol)); | 310 _outputDeviceID, &propertyAddress, 0, nullptr, &size, &channelVol)); |
| 311 | 311 |
| 312 vol += channelVol; | 312 vol += channelVol; |
| 313 channels++; | 313 channels++; |
| 314 } | 314 } |
| 315 } | 315 } |
| 316 | 316 |
| 317 if (channels == 0) { | 317 if (channels == 0) { |
| 318 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 318 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 319 " Unable to get a volume on any channel"); | 319 " Unable to get a volume on any channel"); |
| 320 return -1; | 320 return -1; |
| (...skipping 151 matching lines...) |
| 472 // Does the render device have a master mute control? | 472 // Does the render device have a master mute control? |
| 473 // If so, use it exclusively. | 473 // If so, use it exclusively. |
| 474 AudioObjectPropertyAddress propertyAddress = { | 474 AudioObjectPropertyAddress propertyAddress = { |
| 475 kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0}; | 475 kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0}; |
| 476 Boolean isSettable = false; | 476 Boolean isSettable = false; |
| 477 err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, | 477 err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, |
| 478 &isSettable); | 478 &isSettable); |
| 479 if (err == noErr && isSettable) { | 479 if (err == noErr && isSettable) { |
| 480 size = sizeof(mute); | 480 size = sizeof(mute); |
| 481 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( | 481 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
| 482 _outputDeviceID, &propertyAddress, 0, NULL, size, &mute)); | 482 _outputDeviceID, &propertyAddress, 0, nullptr, size, &mute)); |
| 483 | 483 |
| 484 return 0; | 484 return 0; |
| 485 } | 485 } |
| 486 | 486 |
| 487 // Otherwise try to set each channel. | 487 // Otherwise try to set each channel. |
| 488 for (UInt32 i = 1; i <= _noOutputChannels; i++) { | 488 for (UInt32 i = 1; i <= _noOutputChannels; i++) { |
| 489 propertyAddress.mElement = i; | 489 propertyAddress.mElement = i; |
| 490 isSettable = false; | 490 isSettable = false; |
| 491 err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, | 491 err = AudioObjectIsPropertySettable(_outputDeviceID, &propertyAddress, |
| 492 &isSettable); | 492 &isSettable); |
| 493 if (err == noErr && isSettable) { | 493 if (err == noErr && isSettable) { |
| 494 size = sizeof(mute); | 494 size = sizeof(mute); |
| 495 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( | 495 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
| 496 _outputDeviceID, &propertyAddress, 0, NULL, size, &mute)); | 496 _outputDeviceID, &propertyAddress, 0, nullptr, size, &mute)); |
| 497 } | 497 } |
| 498 success = true; | 498 success = true; |
| 499 } | 499 } |
| 500 | 500 |
| 501 if (!success) { | 501 if (!success) { |
| 502 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 502 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 503 " Unable to set mute on any input channel"); | 503 " Unable to set mute on any input channel"); |
| 504 return -1; | 504 return -1; |
| 505 } | 505 } |
| 506 | 506 |
| (...skipping 15 matching lines...) |
| 522 | 522 |
| 523 // Does the device have a master volume control? | 523 // Does the device have a master volume control? |
| 524 // If so, use it exclusively. | 524 // If so, use it exclusively. |
| 525 AudioObjectPropertyAddress propertyAddress = { | 525 AudioObjectPropertyAddress propertyAddress = { |
| 526 kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0}; | 526 kAudioDevicePropertyMute, kAudioDevicePropertyScopeOutput, 0}; |
| 527 Boolean hasProperty = | 527 Boolean hasProperty = |
| 528 AudioObjectHasProperty(_outputDeviceID, &propertyAddress); | 528 AudioObjectHasProperty(_outputDeviceID, &propertyAddress); |
| 529 if (hasProperty) { | 529 if (hasProperty) { |
| 530 size = sizeof(muted); | 530 size = sizeof(muted); |
| 531 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 531 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 532 _outputDeviceID, &propertyAddress, 0, NULL, &size, &muted)); | 532 _outputDeviceID, &propertyAddress, 0, nullptr, &size, &muted)); |
| 533 | 533 |
| 534 // 1 means muted | 534 // 1 means muted |
| 535 enabled = static_cast<bool>(muted); | 535 enabled = static_cast<bool>(muted); |
| 536 } else { | 536 } else { |
| 537 // Otherwise check if all channels are muted. | 537 // Otherwise check if all channels are muted. |
| 538 for (UInt32 i = 1; i <= _noOutputChannels; i++) { | 538 for (UInt32 i = 1; i <= _noOutputChannels; i++) { |
| 539 muted = 0; | 539 muted = 0; |
| 540 propertyAddress.mElement = i; | 540 propertyAddress.mElement = i; |
| 541 hasProperty = AudioObjectHasProperty(_outputDeviceID, &propertyAddress); | 541 hasProperty = AudioObjectHasProperty(_outputDeviceID, &propertyAddress); |
| 542 if (hasProperty) { | 542 if (hasProperty) { |
| 543 size = sizeof(channelMuted); | 543 size = sizeof(channelMuted); |
| 544 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 544 WEBRTC_CA_RETURN_ON_ERR( |
| 545 _outputDeviceID, &propertyAddress, 0, NULL, &size, &channelMuted)); | 545 AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, |
| | 546 nullptr, &size, &channelMuted)); |
| 546 | 547 |
| 547 muted = (muted && channelMuted); | 548 muted = (muted && channelMuted); |
| 548 channels++; | 549 channels++; |
| 549 } | 550 } |
| 550 } | 551 } |
| 551 | 552 |
| 552 if (channels == 0) { | 553 if (channels == 0) { |
| 553 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 554 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 554 " Unable to get mute for any channel"); | 555 " Unable to get mute for any channel"); |
| 555 return -1; | 556 return -1; |
| (...skipping 92 matching lines...) |
| 648 // Does the capture device have a master mute control? | 649 // Does the capture device have a master mute control? |
| 649 // If so, use it exclusively. | 650 // If so, use it exclusively. |
| 650 AudioObjectPropertyAddress propertyAddress = { | 651 AudioObjectPropertyAddress propertyAddress = { |
| 651 kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0}; | 652 kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0}; |
| 652 Boolean isSettable = false; | 653 Boolean isSettable = false; |
| 653 err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, | 654 err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, |
| 654 &isSettable); | 655 &isSettable); |
| 655 if (err == noErr && isSettable) { | 656 if (err == noErr && isSettable) { |
| 656 size = sizeof(mute); | 657 size = sizeof(mute); |
| 657 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( | 658 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
| 658 _inputDeviceID, &propertyAddress, 0, NULL, size, &mute)); | 659 _inputDeviceID, &propertyAddress, 0, nullptr, size, &mute)); |
| 659 | 660 |
| 660 return 0; | 661 return 0; |
| 661 } | 662 } |
| 662 | 663 |
| 663 // Otherwise try to set each channel. | 664 // Otherwise try to set each channel. |
| 664 for (UInt32 i = 1; i <= _noInputChannels; i++) { | 665 for (UInt32 i = 1; i <= _noInputChannels; i++) { |
| 665 propertyAddress.mElement = i; | 666 propertyAddress.mElement = i; |
| 666 isSettable = false; | 667 isSettable = false; |
| 667 err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, | 668 err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, |
| 668 &isSettable); | 669 &isSettable); |
| 669 if (err == noErr && isSettable) { | 670 if (err == noErr && isSettable) { |
| 670 size = sizeof(mute); | 671 size = sizeof(mute); |
| 671 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( | 672 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
| 672 _inputDeviceID, &propertyAddress, 0, NULL, size, &mute)); | 673 _inputDeviceID, &propertyAddress, 0, nullptr, size, &mute)); |
| 673 } | 674 } |
| 674 success = true; | 675 success = true; |
| 675 } | 676 } |
| 676 | 677 |
| 677 if (!success) { | 678 if (!success) { |
| 678 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 679 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 679 " Unable to set mute on any input channel"); | 680 " Unable to set mute on any input channel"); |
| 680 return -1; | 681 return -1; |
| 681 } | 682 } |
| 682 | 683 |
| (...skipping 15 matching lines...) |
| 698 | 699 |
| 699 // Does the device have a master volume control? | 700 // Does the device have a master volume control? |
| 700 // If so, use it exclusively. | 701 // If so, use it exclusively. |
| 701 AudioObjectPropertyAddress propertyAddress = { | 702 AudioObjectPropertyAddress propertyAddress = { |
| 702 kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0}; | 703 kAudioDevicePropertyMute, kAudioDevicePropertyScopeInput, 0}; |
| 703 Boolean hasProperty = | 704 Boolean hasProperty = |
| 704 AudioObjectHasProperty(_inputDeviceID, &propertyAddress); | 705 AudioObjectHasProperty(_inputDeviceID, &propertyAddress); |
| 705 if (hasProperty) { | 706 if (hasProperty) { |
| 706 size = sizeof(muted); | 707 size = sizeof(muted); |
| 707 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 708 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 708 _inputDeviceID, &propertyAddress, 0, NULL, &size, &muted)); | 709 _inputDeviceID, &propertyAddress, 0, nullptr, &size, &muted)); |
| 709 | 710 |
| 710 // 1 means muted | 711 // 1 means muted |
| 711 enabled = static_cast<bool>(muted); | 712 enabled = static_cast<bool>(muted); |
| 712 } else { | 713 } else { |
| 713 // Otherwise check if all channels are muted. | 714 // Otherwise check if all channels are muted. |
| 714 for (UInt32 i = 1; i <= _noInputChannels; i++) { | 715 for (UInt32 i = 1; i <= _noInputChannels; i++) { |
| 715 muted = 0; | 716 muted = 0; |
| 716 propertyAddress.mElement = i; | 717 propertyAddress.mElement = i; |
| 717 hasProperty = AudioObjectHasProperty(_inputDeviceID, &propertyAddress); | 718 hasProperty = AudioObjectHasProperty(_inputDeviceID, &propertyAddress); |
| 718 if (hasProperty) { | 719 if (hasProperty) { |
| 719 size = sizeof(channelMuted); | 720 size = sizeof(channelMuted); |
| 720 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 721 WEBRTC_CA_RETURN_ON_ERR( |
| 721 _inputDeviceID, &propertyAddress, 0, NULL, &size, &channelMuted)); | 722 AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, |
| | 723 nullptr, &size, &channelMuted)); |
| 722 | 724 |
| 723 muted = (muted && channelMuted); | 725 muted = (muted && channelMuted); |
| 724 channels++; | 726 channels++; |
| 725 } | 727 } |
| 726 } | 728 } |
| 727 | 729 |
| 728 if (channels == 0) { | 730 if (channels == 0) { |
| 729 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 731 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 730 " Unable to get mute for any channel"); | 732 " Unable to get mute for any channel"); |
| 731 return -1; | 733 return -1; |
| (...skipping 125 matching lines...) |
| 857 // Does the capture device have a master volume control? | 859 // Does the capture device have a master volume control? |
| 858 // If so, use it exclusively. | 860 // If so, use it exclusively. |
| 859 AudioObjectPropertyAddress propertyAddress = { | 861 AudioObjectPropertyAddress propertyAddress = { |
| 860 kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0}; | 862 kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0}; |
| 861 Boolean isSettable = false; | 863 Boolean isSettable = false; |
| 862 err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, | 864 err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, |
| 863 &isSettable); | 865 &isSettable); |
| 864 if (err == noErr && isSettable) { | 866 if (err == noErr && isSettable) { |
| 865 size = sizeof(vol); | 867 size = sizeof(vol); |
| 866 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( | 868 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
| 867 _inputDeviceID, &propertyAddress, 0, NULL, size, &vol)); | 869 _inputDeviceID, &propertyAddress, 0, nullptr, size, &vol)); |
| 868 | 870 |
| 869 return 0; | 871 return 0; |
| 870 } | 872 } |
| 871 | 873 |
| 872 // Otherwise try to set each channel. | 874 // Otherwise try to set each channel. |
| 873 for (UInt32 i = 1; i <= _noInputChannels; i++) { | 875 for (UInt32 i = 1; i <= _noInputChannels; i++) { |
| 874 propertyAddress.mElement = i; | 876 propertyAddress.mElement = i; |
| 875 isSettable = false; | 877 isSettable = false; |
| 876 err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, | 878 err = AudioObjectIsPropertySettable(_inputDeviceID, &propertyAddress, |
| 877 &isSettable); | 879 &isSettable); |
| 878 if (err == noErr && isSettable) { | 880 if (err == noErr && isSettable) { |
| 879 size = sizeof(vol); | 881 size = sizeof(vol); |
| 880 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( | 882 WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData( |
| 881 _inputDeviceID, &propertyAddress, 0, NULL, size, &vol)); | 883 _inputDeviceID, &propertyAddress, 0, nullptr, size, &vol)); |
| 882 } | 884 } |
| 883 success = true; | 885 success = true; |
| 884 } | 886 } |
| 885 | 887 |
| 886 if (!success) { | 888 if (!success) { |
| 887 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 889 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 888 " Unable to set a level on any input channel"); | 890 " Unable to set a level on any input channel"); |
| 889 return -1; | 891 return -1; |
| 890 } | 892 } |
| 891 | 893 |
| (...skipping 15 matching lines...) |
| 907 | 909 |
| 908 // Does the device have a master volume control? | 910 // Does the device have a master volume control? |
| 909 // If so, use it exclusively. | 911 // If so, use it exclusively. |
| 910 AudioObjectPropertyAddress propertyAddress = { | 912 AudioObjectPropertyAddress propertyAddress = { |
| 911 kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0}; | 913 kAudioDevicePropertyVolumeScalar, kAudioDevicePropertyScopeInput, 0}; |
| 912 Boolean hasProperty = | 914 Boolean hasProperty = |
| 913 AudioObjectHasProperty(_inputDeviceID, &propertyAddress); | 915 AudioObjectHasProperty(_inputDeviceID, &propertyAddress); |
| 914 if (hasProperty) { | 916 if (hasProperty) { |
| 915 size = sizeof(volFloat32); | 917 size = sizeof(volFloat32); |
| 916 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 918 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 917 _inputDeviceID, &propertyAddress, 0, NULL, &size, &volFloat32)); | 919 _inputDeviceID, &propertyAddress, 0, nullptr, &size, &volFloat32)); |
| 918 | 920 |
| 919 // vol 0.0 to 1.0 -> convert to 0 - 255 | 921 // vol 0.0 to 1.0 -> convert to 0 - 255 |
| 920 volume = static_cast<uint32_t>(volFloat32 * 255 + 0.5); | 922 volume = static_cast<uint32_t>(volFloat32 * 255 + 0.5); |
| 921 } else { | 923 } else { |
| 922 // Otherwise get the average volume across channels. | 924 // Otherwise get the average volume across channels. |
| 923 volFloat32 = 0; | 925 volFloat32 = 0; |
| 924 for (UInt32 i = 1; i <= _noInputChannels; i++) { | 926 for (UInt32 i = 1; i <= _noInputChannels; i++) { |
| 925 channelVol = 0; | 927 channelVol = 0; |
| 926 propertyAddress.mElement = i; | 928 propertyAddress.mElement = i; |
| 927 hasProperty = AudioObjectHasProperty(_inputDeviceID, &propertyAddress); | 929 hasProperty = AudioObjectHasProperty(_inputDeviceID, &propertyAddress); |
| 928 if (hasProperty) { | 930 if (hasProperty) { |
| 929 size = sizeof(channelVol); | 931 size = sizeof(channelVol); |
| 930 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( | 932 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( |
| 931 _inputDeviceID, &propertyAddress, 0, NULL, &size, &channelVol)); | 933 _inputDeviceID, &propertyAddress, 0, nullptr, &size, &channelVol)); |
| 932 | 934 |
| 933 volFloat32 += channelVol; | 935 volFloat32 += channelVol; |
| 934 channels++; | 936 channels++; |
| 935 } | 937 } |
| 936 } | 938 } |
| 937 | 939 |
| 938 if (channels == 0) { | 940 if (channels == 0) { |
| 939 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 941 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 940 " Unable to get a level on any channel"); | 942 " Unable to get a level on any channel"); |
| 941 return -1; | 943 return -1; |
| (...skipping 57 matching lines...) |
| 999 // ============================================================================ | 1001 // ============================================================================ |
| 1000 // Private Methods | 1002 // Private Methods |
| 1001 // ============================================================================ | 1003 // ============================================================================ |
| 1002 | 1004 |
| 1003 // CoreAudio errors are best interpreted as four character strings. | 1005 // CoreAudio errors are best interpreted as four character strings. |
| 1004 void AudioMixerManagerMac::logCAMsg(const TraceLevel level, | 1006 void AudioMixerManagerMac::logCAMsg(const TraceLevel level, |
| 1005 const TraceModule module, | 1007 const TraceModule module, |
| 1006 const int32_t id, | 1008 const int32_t id, |
| 1007 const char* msg, | 1009 const char* msg, |
| 1008 const char* err) { | 1010 const char* err) { |
| 1009 assert(msg != NULL); | 1011 assert(msg != nullptr); |
| 1010 assert(err != NULL); | 1012 assert(err != nullptr); |
| 1011 | 1013 |
| 1012 #ifdef WEBRTC_ARCH_BIG_ENDIAN | 1014 #ifdef WEBRTC_ARCH_BIG_ENDIAN |
| 1013 WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err); | 1015 WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err); |
| 1014 #else | 1016 #else |
| 1015 // We need to flip the characters in this case. | 1017 // We need to flip the characters in this case. |
| 1016 WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err + 2, | 1018 WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err + 2, |
| 1017 err + 1, err); | 1019 err + 1, err); |
| 1018 #endif | 1020 #endif |
| 1019 } | 1021 } |
| 1020 | 1022 |
| 1021 } // namespace webrtc | 1023 } // namespace webrtc |
| 1022 // EOF | 1024 // EOF |