| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include <assert.h> | 11 #include <assert.h> |
| 12 | 12 |
| 13 #include "webrtc/base/checks.h" | 13 #include "webrtc/base/checks.h" |
| 14 #include "webrtc/base/logging.h" | 14 #include "webrtc/base/logging.h" |
| 15 #include "webrtc/modules/audio_device/audio_device_config.h" | 15 #include "webrtc/modules/audio_device/audio_device_config.h" |
| 16 #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h" | 16 #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h" |
| 17 #include "webrtc/system_wrappers/include/event_wrapper.h" | 17 #include "webrtc/system_wrappers/include/event_wrapper.h" |
| 18 #include "webrtc/system_wrappers/include/trace.h" | 18 #include "webrtc/system_wrappers/include/trace.h" |
| 19 | 19 |
| 20 webrtc::adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable; | 20 webrtc::adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable; |
| 21 | 21 |
| 22 // Accesses Pulse functions through our late-binding symbol table instead of | 22 // Accesses Pulse functions through our late-binding symbol table instead of |
| 23 // directly. This way we don't have to link to libpulse, which means our binary | 23 // directly. This way we don't have to link to libpulse, which means our binary |
| 24 // will work on systems that don't have it. | 24 // will work on systems that don't have it. |
| 25 #define LATE(sym) \ | 25 #define LATE(sym) \ |
| 26 LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, \ | 26 LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, \ |
| 27 sym) | 27 sym) |
| 28 | 28 |
| 29 namespace webrtc | 29 namespace webrtc { |
| 30 { | |
| 31 | 30 |
| 32 AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) : | 31 AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) |
| 33 _ptrAudioBuffer(NULL), | 32 : _ptrAudioBuffer(NULL), |
| 34 _timeEventRec(*EventWrapper::Create()), | 33 _timeEventRec(*EventWrapper::Create()), |
| 35 _timeEventPlay(*EventWrapper::Create()), | 34 _timeEventPlay(*EventWrapper::Create()), |
| 36 _recStartEvent(*EventWrapper::Create()), | 35 _recStartEvent(*EventWrapper::Create()), |
| 37 _playStartEvent(*EventWrapper::Create()), | 36 _playStartEvent(*EventWrapper::Create()), |
| 38 _id(id), | 37 _id(id), |
| 39 _mixerManager(id), | 38 _mixerManager(id), |
| 40 _inputDeviceIndex(0), | 39 _inputDeviceIndex(0), |
| 41 _outputDeviceIndex(0), | 40 _outputDeviceIndex(0), |
| 42 _inputDeviceIsSpecified(false), | 41 _inputDeviceIsSpecified(false), |
| 43 _outputDeviceIsSpecified(false), | 42 _outputDeviceIsSpecified(false), |
| 44 sample_rate_hz_(0), | 43 sample_rate_hz_(0), |
| 45 _recChannels(1), | 44 _recChannels(1), |
| 46 _playChannels(1), | 45 _playChannels(1), |
| 47 _playBufType(AudioDeviceModule::kFixedBufferSize), | 46 _playBufType(AudioDeviceModule::kFixedBufferSize), |
| 48 _initialized(false), | 47 _initialized(false), |
| 49 _recording(false), | 48 _recording(false), |
| 50 _playing(false), | 49 _playing(false), |
| 51 _recIsInitialized(false), | 50 _recIsInitialized(false), |
| 52 _playIsInitialized(false), | 51 _playIsInitialized(false), |
| 53 _startRec(false), | 52 _startRec(false), |
| 54 _stopRec(false), | 53 _stopRec(false), |
| 55 _startPlay(false), | 54 _startPlay(false), |
| 56 _stopPlay(false), | 55 _stopPlay(false), |
| 57 _AGC(false), | 56 _AGC(false), |
| 58 update_speaker_volume_at_startup_(false), | 57 update_speaker_volume_at_startup_(false), |
| 59 _playBufDelayFixed(20), | 58 _playBufDelayFixed(20), |
| 60 _sndCardPlayDelay(0), | 59 _sndCardPlayDelay(0), |
| 61 _sndCardRecDelay(0), | 60 _sndCardRecDelay(0), |
| 62 _writeErrors(0), | 61 _writeErrors(0), |
| 63 _playWarning(0), | 62 _playWarning(0), |
| 64 _playError(0), | 63 _playError(0), |
| 65 _recWarning(0), | 64 _recWarning(0), |
| 66 _recError(0), | 65 _recError(0), |
| 67 _deviceIndex(-1), | 66 _deviceIndex(-1), |
| 68 _numPlayDevices(0), | 67 _numPlayDevices(0), |
| 69 _numRecDevices(0), | 68 _numRecDevices(0), |
| 70 _playDeviceName(NULL), | 69 _playDeviceName(NULL), |
| 71 _recDeviceName(NULL), | 70 _recDeviceName(NULL), |
| 72 _playDisplayDeviceName(NULL), | 71 _playDisplayDeviceName(NULL), |
| 73 _recDisplayDeviceName(NULL), | 72 _recDisplayDeviceName(NULL), |
| 74 _playBuffer(NULL), | 73 _playBuffer(NULL), |
| 75 _playbackBufferSize(0), | 74 _playbackBufferSize(0), |
| 76 _playbackBufferUnused(0), | 75 _playbackBufferUnused(0), |
| 77 _tempBufferSpace(0), | 76 _tempBufferSpace(0), |
| 78 _recBuffer(NULL), | 77 _recBuffer(NULL), |
| 79 _recordBufferSize(0), | 78 _recordBufferSize(0), |
| 80 _recordBufferUsed(0), | 79 _recordBufferUsed(0), |
| 81 _tempSampleData(NULL), | 80 _tempSampleData(NULL), |
| 82 _tempSampleDataSize(0), | 81 _tempSampleDataSize(0), |
| 83 _configuredLatencyPlay(0), | 82 _configuredLatencyPlay(0), |
| 84 _configuredLatencyRec(0), | 83 _configuredLatencyRec(0), |
| 85 _paDeviceIndex(-1), | 84 _paDeviceIndex(-1), |
| 86 _paStateChanged(false), | 85 _paStateChanged(false), |
| 87 _paMainloop(NULL), | 86 _paMainloop(NULL), |
| 88 _paMainloopApi(NULL), | 87 _paMainloopApi(NULL), |
| 89 _paContext(NULL), | 88 _paContext(NULL), |
| 90 _recStream(NULL), | 89 _recStream(NULL), |
| 91 _playStream(NULL), | 90 _playStream(NULL), |
| 92 _recStreamFlags(0), | 91 _recStreamFlags(0), |
| 93 _playStreamFlags(0) | 92 _playStreamFlags(0) { |
| 94 { | 93 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__); |
| 95 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, | |
| 96 "%s created", __FUNCTION__); | |
| 97 | 94 |
| 98 memset(_paServerVersion, 0, sizeof(_paServerVersion)); | 95 memset(_paServerVersion, 0, sizeof(_paServerVersion)); |
| 99 memset(&_playBufferAttr, 0, sizeof(_playBufferAttr)); | 96 memset(&_playBufferAttr, 0, sizeof(_playBufferAttr)); |
| 100 memset(&_recBufferAttr, 0, sizeof(_recBufferAttr)); | 97 memset(&_recBufferAttr, 0, sizeof(_recBufferAttr)); |
| 101 memset(_oldKeyState, 0, sizeof(_oldKeyState)); | 98 memset(_oldKeyState, 0, sizeof(_oldKeyState)); |
| 102 } | 99 } |
| 103 | 100 |
| 104 AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() | 101 AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() { |
| 105 { | 102 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", |
| 106 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, | 103 __FUNCTION__); |
| 107 "%s destroyed", __FUNCTION__); | 104 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 108 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 105 Terminate(); |
| 109 Terminate(); | |
| 110 | 106 |
| 111 if (_recBuffer) | 107 if (_recBuffer) { |
| 112 { | 108 delete[] _recBuffer; |
| 113 delete [] _recBuffer; | 109 _recBuffer = NULL; |
| 114 _recBuffer = NULL; | 110 } |
| 115 } | 111 if (_playBuffer) { |
| 116 if (_playBuffer) | 112 delete[] _playBuffer; |
| 117 { | 113 _playBuffer = NULL; |
| 118 delete [] _playBuffer; | 114 } |
| 119 _playBuffer = NULL; | 115 if (_playDeviceName) { |
| 120 } | 116 delete[] _playDeviceName; |
| 121 if (_playDeviceName) | 117 _playDeviceName = NULL; |
| 122 { | 118 } |
| 123 delete [] _playDeviceName; | 119 if (_recDeviceName) { |
| 124 _playDeviceName = NULL; | 120 delete[] _recDeviceName; |
| 125 } | 121 _recDeviceName = NULL; |
| 126 if (_recDeviceName) | 122 } |
| 127 { | |
| 128 delete [] _recDeviceName; | |
| 129 _recDeviceName = NULL; | |
| 130 } | |
| 131 | 123 |
| 132 delete &_recStartEvent; | 124 delete &_recStartEvent; |
| 133 delete &_playStartEvent; | 125 delete &_playStartEvent; |
| 134 delete &_timeEventRec; | 126 delete &_timeEventRec; |
| 135 delete &_timeEventPlay; | 127 delete &_timeEventPlay; |
| 136 } | 128 } |
| 137 | 129 |
| 138 void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) | 130 void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { |
| 139 { | 131 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 140 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 141 | 132 |
| 142 _ptrAudioBuffer = audioBuffer; | 133 _ptrAudioBuffer = audioBuffer; |
| 143 | 134 |
| 144 // Inform the AudioBuffer about default settings for this implementation. | 135 // Inform the AudioBuffer about default settings for this implementation. |
| 145 // Set all values to zero here since the actual settings will be done by | 136 // Set all values to zero here since the actual settings will be done by |
| 146 // InitPlayout and InitRecording later. | 137 // InitPlayout and InitRecording later. |
| 147 _ptrAudioBuffer->SetRecordingSampleRate(0); | 138 _ptrAudioBuffer->SetRecordingSampleRate(0); |
| 148 _ptrAudioBuffer->SetPlayoutSampleRate(0); | 139 _ptrAudioBuffer->SetPlayoutSampleRate(0); |
| 149 _ptrAudioBuffer->SetRecordingChannels(0); | 140 _ptrAudioBuffer->SetRecordingChannels(0); |
| 150 _ptrAudioBuffer->SetPlayoutChannels(0); | 141 _ptrAudioBuffer->SetPlayoutChannels(0); |
| 151 } | 142 } |
| 152 | 143 |
| 153 // ---------------------------------------------------------------------------- | 144 // ---------------------------------------------------------------------------- |
| 154 // ActiveAudioLayer | 145 // ActiveAudioLayer |
| 155 // ---------------------------------------------------------------------------- | 146 // ---------------------------------------------------------------------------- |
| 156 | 147 |
| 157 int32_t AudioDeviceLinuxPulse::ActiveAudioLayer( | 148 int32_t AudioDeviceLinuxPulse::ActiveAudioLayer( |
| 158 AudioDeviceModule::AudioLayer& audioLayer) const | 149 AudioDeviceModule::AudioLayer& audioLayer) const { |
| 159 { | 150 audioLayer = AudioDeviceModule::kLinuxPulseAudio; |
| 160 audioLayer = AudioDeviceModule::kLinuxPulseAudio; | 151 return 0; |
| 161 return 0; | |
| 162 } | 152 } |
| 163 | 153 |
| 164 AudioDeviceGeneric::InitStatus AudioDeviceLinuxPulse::Init() { | 154 AudioDeviceGeneric::InitStatus AudioDeviceLinuxPulse::Init() { |
| 165 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 155 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 166 if (_initialized) { | 156 if (_initialized) { |
| 167 return InitStatus::OK; | 157 return InitStatus::OK; |
| 168 } | 158 } |
| 169 | 159 |
| 170 // Initialize PulseAudio | 160 // Initialize PulseAudio |
| 171 if (InitPulseAudio() < 0) { | 161 if (InitPulseAudio() < 0) { |
| (...skipping 27 matching lines...) Expand all Loading... |
| 199 _ptrThreadPlay.reset(new rtc::PlatformThread( | 189 _ptrThreadPlay.reset(new rtc::PlatformThread( |
| 200 PlayThreadFunc, this, "webrtc_audio_module_play_thread")); | 190 PlayThreadFunc, this, "webrtc_audio_module_play_thread")); |
| 201 _ptrThreadPlay->Start(); | 191 _ptrThreadPlay->Start(); |
| 202 _ptrThreadPlay->SetPriority(rtc::kRealtimePriority); | 192 _ptrThreadPlay->SetPriority(rtc::kRealtimePriority); |
| 203 | 193 |
| 204 _initialized = true; | 194 _initialized = true; |
| 205 | 195 |
| 206 return InitStatus::OK; | 196 return InitStatus::OK; |
| 207 } | 197 } |
| 208 | 198 |
| 209 int32_t AudioDeviceLinuxPulse::Terminate() | 199 int32_t AudioDeviceLinuxPulse::Terminate() { |
| 210 { | 200 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 211 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 201 if (!_initialized) { |
| 212 if (!_initialized) | 202 return 0; |
| 213 { | 203 } |
| 214 return 0; | 204 |
| 215 } | 205 _mixerManager.Close(); |
| 216 | 206 |
| 217 _mixerManager.Close(); | 207 // RECORDING |
| 218 | 208 if (_ptrThreadRec) { |
| 219 // RECORDING | 209 rtc::PlatformThread* tmpThread = _ptrThreadRec.release(); |
| 220 if (_ptrThreadRec) | 210 |
| 221 { | 211 _timeEventRec.Set(); |
| 222 rtc::PlatformThread* tmpThread = _ptrThreadRec.release(); | 212 tmpThread->Stop(); |
| 223 | 213 delete tmpThread; |
| 224 _timeEventRec.Set(); | 214 } |
| 225 tmpThread->Stop(); | 215 |
| 226 delete tmpThread; | 216 // PLAYOUT |
| 227 } | 217 if (_ptrThreadPlay) { |
| 228 | 218 rtc::PlatformThread* tmpThread = _ptrThreadPlay.release(); |
| 229 // PLAYOUT | 219 |
| 230 if (_ptrThreadPlay) | 220 _timeEventPlay.Set(); |
| 231 { | 221 tmpThread->Stop(); |
| 232 rtc::PlatformThread* tmpThread = _ptrThreadPlay.release(); | 222 delete tmpThread; |
| 233 | 223 } |
| 234 _timeEventPlay.Set(); | 224 |
| 235 tmpThread->Stop(); | 225 // Terminate PulseAudio |
| 236 delete tmpThread; | 226 if (TerminatePulseAudio() < 0) { |
| 237 } | 227 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 238 | 228 " failed to terminate PulseAudio"); |
| 239 // Terminate PulseAudio | 229 return -1; |
| 240 if (TerminatePulseAudio() < 0) | 230 } |
| 241 { | 231 |
| 242 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 232 if (_XDisplay) { |
| 243 " failed to terminate PulseAudio"); | 233 XCloseDisplay(_XDisplay); |
| 244 return -1; | 234 _XDisplay = NULL; |
| 245 } | 235 } |
| 246 | 236 |
| 247 if (_XDisplay) | 237 _initialized = false; |
| 248 { | 238 _outputDeviceIsSpecified = false; |
| 249 XCloseDisplay(_XDisplay); | 239 _inputDeviceIsSpecified = false; |
| 250 _XDisplay = NULL; | 240 |
| 251 } | 241 return 0; |
| 252 | 242 } |
| 253 _initialized = false; | 243 |
| 254 _outputDeviceIsSpecified = false; | 244 bool AudioDeviceLinuxPulse::Initialized() const { |
| 255 _inputDeviceIsSpecified = false; | 245 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 256 | 246 return (_initialized); |
| 257 return 0; | 247 } |
| 258 } | 248 |
| 259 | 249 int32_t AudioDeviceLinuxPulse::InitSpeaker() { |
| 260 bool AudioDeviceLinuxPulse::Initialized() const | 250 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 261 { | 251 |
| 262 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 252 if (_playing) { |
| 263 return (_initialized); | 253 return -1; |
| 264 } | 254 } |
| 265 | 255 |
| 266 int32_t AudioDeviceLinuxPulse::InitSpeaker() | 256 if (!_outputDeviceIsSpecified) { |
| 267 { | 257 return -1; |
| 268 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 258 } |
| 269 | 259 |
| 270 if (_playing) | 260 // check if default device |
| 271 { | 261 if (_outputDeviceIndex == 0) { |
| 272 return -1; | 262 uint16_t deviceIndex = 0; |
| 273 } | 263 GetDefaultDeviceInfo(false, NULL, deviceIndex); |
| 274 | 264 _paDeviceIndex = deviceIndex; |
| 275 if (!_outputDeviceIsSpecified) | 265 } else { |
| 276 { | 266 // get the PA device index from |
| 277 return -1; | 267 // the callback |
| 278 } | 268 _deviceIndex = _outputDeviceIndex; |
| 279 | 269 |
| 280 // check if default device | 270 // get playout devices |
| 281 if (_outputDeviceIndex == 0) | 271 PlayoutDevices(); |
| 282 { | 272 } |
| 283 uint16_t deviceIndex = 0; | 273 |
| 284 GetDefaultDeviceInfo(false, NULL, deviceIndex); | 274 // the callback has now set the _paDeviceIndex to |
| 285 _paDeviceIndex = deviceIndex; | 275 // the PulseAudio index of the device |
| 286 } else | 276 if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1) { |
| 287 { | 277 return -1; |
| 288 // get the PA device index from | 278 } |
| 289 // the callback | 279 |
| 290 _deviceIndex = _outputDeviceIndex; | 280 // clear _deviceIndex |
| 291 | 281 _deviceIndex = -1; |
| 292 // get playout devices | 282 _paDeviceIndex = -1; |
| 293 PlayoutDevices(); | 283 |
| 294 } | 284 return 0; |
| 295 | 285 } |
| 296 // the callback has now set the _paDeviceIndex to | 286 |
| 297 // the PulseAudio index of the device | 287 int32_t AudioDeviceLinuxPulse::InitMicrophone() { |
| 298 if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1) | 288 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 299 { | 289 if (_recording) { |
| 300 return -1; | 290 return -1; |
| 301 } | 291 } |
| 302 | 292 |
| 303 // clear _deviceIndex | 293 if (!_inputDeviceIsSpecified) { |
| 304 _deviceIndex = -1; | 294 return -1; |
| 305 _paDeviceIndex = -1; | 295 } |
| 306 | 296 |
| 307 return 0; | 297 // Check if default device |
| 308 } | 298 if (_inputDeviceIndex == 0) { |
| 309 | 299 uint16_t deviceIndex = 0; |
| 310 int32_t AudioDeviceLinuxPulse::InitMicrophone() | 300 GetDefaultDeviceInfo(true, NULL, deviceIndex); |
| 311 { | 301 _paDeviceIndex = deviceIndex; |
| 312 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 302 } else { |
| 313 if (_recording) | 303 // Get the PA device index from |
| 314 { | 304 // the callback |
| 315 return -1; | 305 _deviceIndex = _inputDeviceIndex; |
| 316 } | 306 |
| 317 | 307 // get recording devices |
| 318 if (!_inputDeviceIsSpecified) | 308 RecordingDevices(); |
| 319 { | 309 } |
| 320 return -1; | 310 |
| 321 } | 311 // The callback has now set the _paDeviceIndex to |
| 322 | 312 // the PulseAudio index of the device |
| 323 // Check if default device | 313 if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1) { |
| 324 if (_inputDeviceIndex == 0) | 314 return -1; |
| 325 { | 315 } |
| 326 uint16_t deviceIndex = 0; | 316 |
| 327 GetDefaultDeviceInfo(true, NULL, deviceIndex); | 317 // Clear _deviceIndex |
| 328 _paDeviceIndex = deviceIndex; | 318 _deviceIndex = -1; |
| 329 } else | 319 _paDeviceIndex = -1; |
| 330 { | 320 |
| 331 // Get the PA device index from | 321 return 0; |
| 332 // the callback | 322 } |
| 333 _deviceIndex = _inputDeviceIndex; | 323 |
| 334 | 324 bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const { |
| 335 // get recording devices | 325 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 336 RecordingDevices(); | 326 return (_mixerManager.SpeakerIsInitialized()); |
| 337 } | 327 } |
| 338 | 328 |
| 339 // The callback has now set the _paDeviceIndex to | 329 bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const { |
| 340 // the PulseAudio index of the device | 330 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 341 if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1) | 331 return (_mixerManager.MicrophoneIsInitialized()); |
| 342 { | 332 } |
| 343 return -1; | 333 |
| 344 } | 334 int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) { |
| 345 | 335 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 346 // Clear _deviceIndex | 336 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
| 347 _deviceIndex = -1; | 337 |
| 348 _paDeviceIndex = -1; | 338 // Make an attempt to open up the |
| 349 | 339 // output mixer corresponding to the currently selected output device. |
| 350 return 0; | 340 if (!wasInitialized && InitSpeaker() == -1) { |
| 351 } | 341 // If we end up here it means that the selected speaker has no volume |
| 352 | 342 // control. |
| 353 bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const | 343 available = false; |
| 354 { | 344 return 0; |
| 355 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 345 } |
| 356 return (_mixerManager.SpeakerIsInitialized()); | 346 |
| 357 } | 347 // Given that InitSpeaker was successful, we know volume control exists. |
| 358 | 348 available = true; |
| 359 bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const | 349 |
| 360 { | 350 // Close the initialized output mixer |
| 361 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 351 if (!wasInitialized) { |
| 362 return (_mixerManager.MicrophoneIsInitialized()); | 352 _mixerManager.CloseSpeaker(); |
| 363 } | 353 } |
| 364 | 354 |
| 365 int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) | 355 return 0; |
| 366 { | 356 } |
| 367 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 357 |
| 368 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); | 358 int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume) { |
| 369 | 359 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 370 // Make an attempt to open up the | 360 if (!_playing) { |
| 371 // output mixer corresponding to the currently selected output device. | 361 // Only update the volume if it's been set while we weren't playing. |
| 372 if (!wasInitialized && InitSpeaker() == -1) | 362 update_speaker_volume_at_startup_ = true; |
| 373 { | 363 } |
| 374 // If we end up here it means that the selected speaker has no volume | 364 return (_mixerManager.SetSpeakerVolume(volume)); |
| 375 // control. | 365 } |
| 376 available = false; | 366 |
| 377 return 0; | 367 int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const { |
| 378 } | 368 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 379 | 369 uint32_t level(0); |
| 380 // Given that InitSpeaker was successful, we know volume control exists. | 370 |
| 371 if (_mixerManager.SpeakerVolume(level) == -1) { |
| 372 return -1; |
| 373 } |
| 374 |
| 375 volume = level; |
| 376 |
| 377 return 0; |
| 378 } |
| 379 |
| 380 int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(uint16_t volumeLeft, |
| 381 uint16_t volumeRight) { |
| 382 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 383 " API call not supported on this platform"); |
| 384 return -1; |
| 385 } |
| 386 |
| 387 int32_t AudioDeviceLinuxPulse::WaveOutVolume(uint16_t& /*volumeLeft*/, |
| 388 uint16_t& /*volumeRight*/) const { |
| 389 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 390 " API call not supported on this platform"); |
| 391 return -1; |
| 392 } |
| 393 |
| 394 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const { |
| 395 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 396 uint32_t maxVol(0); |
| 397 |
| 398 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { |
| 399 return -1; |
| 400 } |
| 401 |
| 402 maxVolume = maxVol; |
| 403 |
| 404 return 0; |
| 405 } |
| 406 |
| 407 int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(uint32_t& minVolume) const { |
| 408 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 409 uint32_t minVol(0); |
| 410 |
| 411 if (_mixerManager.MinSpeakerVolume(minVol) == -1) { |
| 412 return -1; |
| 413 } |
| 414 |
| 415 minVolume = minVol; |
| 416 |
| 417 return 0; |
| 418 } |
| 419 |
| 420 int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const { |
| 421 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 422 uint16_t delta(0); |
| 423 |
| 424 if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) { |
| 425 return -1; |
| 426 } |
| 427 |
| 428 stepSize = delta; |
| 429 |
| 430 return 0; |
| 431 } |
| 432 |
| 433 int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available) { |
| 434 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 435 bool isAvailable(false); |
| 436 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
| 437 |
| 438 // Make an attempt to open up the |
| 439 // output mixer corresponding to the currently selected output device. |
| 440 // |
| 441 if (!wasInitialized && InitSpeaker() == -1) { |
| 442 // If we end up here it means that the selected speaker has no volume |
| 443 // control, hence it is safe to state that there is no mute control |
| 444 // already at this stage. |
| 445 available = false; |
| 446 return 0; |
| 447 } |
| 448 |
| 449 // Check if the selected speaker has a mute control |
| 450 _mixerManager.SpeakerMuteIsAvailable(isAvailable); |
| 451 |
| 452 available = isAvailable; |
| 453 |
| 454 // Close the initialized output mixer |
| 455 if (!wasInitialized) { |
| 456 _mixerManager.CloseSpeaker(); |
| 457 } |
| 458 |
| 459 return 0; |
| 460 } |
| 461 |
| 462 int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable) { |
| 463 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 464 return (_mixerManager.SetSpeakerMute(enable)); |
| 465 } |
| 466 |
| 467 int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const { |
| 468 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 469 bool muted(0); |
| 470 if (_mixerManager.SpeakerMute(muted) == -1) { |
| 471 return -1; |
| 472 } |
| 473 |
| 474 enabled = muted; |
| 475 return 0; |
| 476 } |
| 477 |
| 478 int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available) { |
| 479 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 480 bool isAvailable(false); |
| 481 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
| 482 |
| 483 // Make an attempt to open up the |
| 484 // input mixer corresponding to the currently selected input device. |
| 485 // |
| 486 if (!wasInitialized && InitMicrophone() == -1) { |
| 487 // If we end up here it means that the selected microphone has no |
| 488 // volume control, hence it is safe to state that there is no |
| 489 // mute control already at this stage. |
| 490 available = false; |
| 491 return 0; |
| 492 } |
| 493 |
| 494 // Check if the selected microphone has a mute control |
| 495 // |
| 496 _mixerManager.MicrophoneMuteIsAvailable(isAvailable); |
| 497 available = isAvailable; |
| 498 |
| 499 // Close the initialized input mixer |
| 500 // |
| 501 if (!wasInitialized) { |
| 502 _mixerManager.CloseMicrophone(); |
| 503 } |
| 504 |
| 505 return 0; |
| 506 } |
| 507 |
| 508 int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable) { |
| 509 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 510 return (_mixerManager.SetMicrophoneMute(enable)); |
| 511 } |
| 512 |
| 513 int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const { |
| 514 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 515 bool muted(0); |
| 516 if (_mixerManager.MicrophoneMute(muted) == -1) { |
| 517 return -1; |
| 518 } |
| 519 |
| 520 enabled = muted; |
| 521 return 0; |
| 522 } |
| 523 |
| 524 int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available) { |
| 525 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 526 bool isAvailable(false); |
| 527 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
| 528 |
| 529 // Enumerate all available microphones and make an attempt to open up the |
| 530 // input mixer corresponding to the currently selected input device. |
| 531 // |
| 532 if (!wasInitialized && InitMicrophone() == -1) { |
| 533 // If we end up here it means that the selected microphone has no |
| 534 // volume control, hence it is safe to state that there is no |
| 535 // boost control already at this stage. |
| 536 available = false; |
| 537 return 0; |
| 538 } |
| 539 |
| 540 // Check if the selected microphone has a boost control |
| 541 _mixerManager.MicrophoneBoostIsAvailable(isAvailable); |
| 542 available = isAvailable; |
| 543 |
| 544 // Close the initialized input mixer |
| 545 if (!wasInitialized) { |
| 546 _mixerManager.CloseMicrophone(); |
| 547 } |
| 548 |
| 549 return 0; |
| 550 } |
| 551 |
| 552 int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable) { |
| 553 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 554 return (_mixerManager.SetMicrophoneBoost(enable)); |
| 555 } |
| 556 |
| 557 int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const { |
| 558 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 559 bool onOff(0); |
| 560 |
| 561 if (_mixerManager.MicrophoneBoost(onOff) == -1) { |
| 562 return -1; |
| 563 } |
| 564 |
| 565 enabled = onOff; |
| 566 |
| 567 return 0; |
| 568 } |
| 569 |
| 570 int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available) { |
| 571 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 572 if (_recChannels == 2 && _recording) { |
| 381 available = true; | 573 available = true; |
| 382 | 574 return 0; |
| 383 // Close the initialized output mixer | 575 } |
| 384 if (!wasInitialized) | 576 |
| 385 { | 577 available = false; |
| 386 _mixerManager.CloseSpeaker(); | 578 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
| 387 } | 579 int error = 0; |
| 388 | 580 |
| 389 return 0; | 581 if (!wasInitialized && InitMicrophone() == -1) { |
| 390 } | 582 // Cannot open the specified device |
| 391 | 583 available = false; |
| 392 int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume) | 584 return 0; |
| 393 { | 585 } |
| 394 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 586 |
| 395 if (!_playing) { | 587 // Check if the selected microphone can record stereo. |
| 396 // Only update the volume if it's been set while we weren't playing. | 588 bool isAvailable(false); |
| 397 update_speaker_volume_at_startup_ = true; | 589 error = _mixerManager.StereoRecordingIsAvailable(isAvailable); |
| 398 } | 590 if (!error) |
| 399 return (_mixerManager.SetSpeakerVolume(volume)); | 591 available = isAvailable; |
| 400 } | 592 |
| 401 | 593 // Close the initialized input mixer |
| 402 int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const | 594 if (!wasInitialized) { |
| 403 { | 595 _mixerManager.CloseMicrophone(); |
| 404 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 596 } |
| 405 uint32_t level(0); | 597 |
| 406 | 598 return error; |
| 407 if (_mixerManager.SpeakerVolume(level) == -1) | 599 } |
| 408 { | 600 |
| 409 return -1; | 601 int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable) { |
| 410 } | 602 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 411 | 603 if (enable) |
| 412 volume = level; | 604 _recChannels = 2; |
| 413 | 605 else |
| 414 return 0; | 606 _recChannels = 1; |
| 415 } | 607 |
| 416 | 608 return 0; |
| 417 int32_t AudioDeviceLinuxPulse::SetWaveOutVolume( | 609 } |
| 418 uint16_t volumeLeft, | 610 |
| 419 uint16_t volumeRight) | 611 int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const { |
| 420 { | 612 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 421 | 613 if (_recChannels == 2) |
| 614 enabled = true; |
| 615 else |
| 616 enabled = false; |
| 617 |
| 618 return 0; |
| 619 } |
| 620 |
| 621 int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available) { |
| 622 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 623 if (_playChannels == 2 && _playing) { |
| 624 available = true; |
| 625 return 0; |
| 626 } |
| 627 |
| 628 available = false; |
| 629 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
| 630 int error = 0; |
| 631 |
| 632 if (!wasInitialized && InitSpeaker() == -1) { |
| 633 // Cannot open the specified device. |
| 634 return -1; |
| 635 } |
| 636 |
| 637 // Check if the selected speaker can play stereo. |
| 638 bool isAvailable(false); |
| 639 error = _mixerManager.StereoPlayoutIsAvailable(isAvailable); |
| 640 if (!error) |
| 641 available = isAvailable; |
| 642 |
| 643 // Close the initialized input mixer |
| 644 if (!wasInitialized) { |
| 645 _mixerManager.CloseSpeaker(); |
| 646 } |
| 647 |
| 648 return error; |
| 649 } |
| 650 |
| 651 int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable) { |
| 652 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 653 if (enable) |
| 654 _playChannels = 2; |
| 655 else |
| 656 _playChannels = 1; |
| 657 |
| 658 return 0; |
| 659 } |
| 660 |
| 661 int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const { |
| 662 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 663 if (_playChannels == 2) |
| 664 enabled = true; |
| 665 else |
| 666 enabled = false; |
| 667 |
| 668 return 0; |
| 669 } |
| 670 |
| 671 int32_t AudioDeviceLinuxPulse::SetAGC(bool enable) { |
| 672 rtc::CritScope lock(&_critSect); |
| 673 _AGC = enable; |
| 674 |
| 675 return 0; |
| 676 } |
| 677 |
| 678 bool AudioDeviceLinuxPulse::AGC() const { |
| 679 rtc::CritScope lock(&_critSect); |
| 680 return _AGC; |
| 681 } |
| 682 |
| 683 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(bool& available) { |
| 684 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 685 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
| 686 |
| 687 // Make an attempt to open up the |
| 688 // input mixer corresponding to the currently selected output device. |
| 689 if (!wasInitialized && InitMicrophone() == -1) { |
| 690 // If we end up here it means that the selected microphone has no |
| 691 // volume control. |
| 692 available = false; |
| 693 return 0; |
| 694 } |
| 695 |
| 696 // Given that InitMicrophone was successful, we know that a volume control |
| 697 // exists. |
| 698 available = true; |
| 699 |
| 700 // Close the initialized input mixer |
| 701 if (!wasInitialized) { |
| 702 _mixerManager.CloseMicrophone(); |
| 703 } |
| 704 |
| 705 return 0; |
| 706 } |
| 707 |
| 708 int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) { |
| 709 return (_mixerManager.SetMicrophoneVolume(volume)); |
| 710 } |
| 711 |
| 712 int32_t AudioDeviceLinuxPulse::MicrophoneVolume(uint32_t& volume) const { |
| 713 uint32_t level(0); |
| 714 |
| 715 if (_mixerManager.MicrophoneVolume(level) == -1) { |
| 422 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 716 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 423 " API call not supported on this platform"); | 717 " failed to retrive current microphone level"); |
| 424 return -1; | 718 return -1; |
| 425 } | 719 } |
| 426 | 720 |
| 427 int32_t AudioDeviceLinuxPulse::WaveOutVolume( | 721 volume = level; |
| 428 uint16_t& /*volumeLeft*/, | 722 |
| 429 uint16_t& /*volumeRight*/) const | 723 return 0; |
| 430 { | 724 } |
| 431 | 725 |
| 432 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 726 int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const { |
| 433 " API call not supported on this platform"); | 727 uint32_t maxVol(0); |
| 434 return -1; | 728 |
| 435 } | 729 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) { |
| 436 | 730 return -1; |
| 437 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume( | 731 } |
| 438 uint32_t& maxVolume) const | 732 |
| 439 { | 733 maxVolume = maxVol; |
| 440 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 734 |
| 441 uint32_t maxVol(0); | 735 return 0; |
| 442 | 736 } |
| 443 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) | 737 |
| 444 { | 738 int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(uint32_t& minVolume) const { |
| 445 return -1; | 739 uint32_t minVol(0); |
| 446 } | 740 |
| 447 | 741 if (_mixerManager.MinMicrophoneVolume(minVol) == -1) { |
| 448 maxVolume = maxVol; | 742 return -1; |
| 449 | 743 } |
| 450 return 0; | 744 |
| 451 } | 745 minVolume = minVol; |
| 452 | 746 |
| 453 int32_t AudioDeviceLinuxPulse::MinSpeakerVolume( | 747 return 0; |
| 454 uint32_t& minVolume) const | |
| 455 { | |
| 456 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 457 uint32_t minVol(0); | |
| 458 | |
| 459 if (_mixerManager.MinSpeakerVolume(minVol) == -1) | |
| 460 { | |
| 461 return -1; | |
| 462 } | |
| 463 | |
| 464 minVolume = minVol; | |
| 465 | |
| 466 return 0; | |
| 467 } | |
| 468 | |
| 469 int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize( | |
| 470 uint16_t& stepSize) const | |
| 471 { | |
| 472 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 473 uint16_t delta(0); | |
| 474 | |
| 475 if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) | |
| 476 { | |
| 477 return -1; | |
| 478 } | |
| 479 | |
| 480 stepSize = delta; | |
| 481 | |
| 482 return 0; | |
| 483 } | |
| 484 | |
| 485 int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available) | |
| 486 { | |
| 487 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 488 bool isAvailable(false); | |
| 489 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); | |
| 490 | |
| 491 // Make an attempt to open up the | |
| 492 // output mixer corresponding to the currently selected output device. | |
| 493 // | |
| 494 if (!wasInitialized && InitSpeaker() == -1) | |
| 495 { | |
| 496 // If we end up here it means that the selected speaker has no volume | |
| 497 // control, hence it is safe to state that there is no mute control | |
| 498 // already at this stage. | |
| 499 available = false; | |
| 500 return 0; | |
| 501 } | |
| 502 | |
| 503 // Check if the selected speaker has a mute control | |
| 504 _mixerManager.SpeakerMuteIsAvailable(isAvailable); | |
| 505 | |
| 506 available = isAvailable; | |
| 507 | |
| 508 // Close the initialized output mixer | |
| 509 if (!wasInitialized) | |
| 510 { | |
| 511 _mixerManager.CloseSpeaker(); | |
| 512 } | |
| 513 | |
| 514 return 0; | |
| 515 } | |
| 516 | |
| 517 int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable) | |
| 518 { | |
| 519 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 520 return (_mixerManager.SetSpeakerMute(enable)); | |
| 521 } | |
| 522 | |
| 523 int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const | |
| 524 { | |
| 525 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 526 bool muted(0); | |
| 527 if (_mixerManager.SpeakerMute(muted) == -1) | |
| 528 { | |
| 529 return -1; | |
| 530 } | |
| 531 | |
| 532 enabled = muted; | |
| 533 return 0; | |
| 534 } | |
| 535 | |
| 536 int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available) | |
| 537 { | |
| 538 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 539 bool isAvailable(false); | |
| 540 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); | |
| 541 | |
| 542 // Make an attempt to open up the | |
| 543 // input mixer corresponding to the currently selected input device. | |
| 544 // | |
| 545 if (!wasInitialized && InitMicrophone() == -1) | |
| 546 { | |
| 547 // If we end up here it means that the selected microphone has no | |
| 548 // volume control, hence it is safe to state that there is no | |
| 549 // boost control already at this stage. | |
| 550 available = false; | |
| 551 return 0; | |
| 552 } | |
| 553 | |
| 554 // Check if the selected microphone has a mute control | |
| 555 // | |
| 556 _mixerManager.MicrophoneMuteIsAvailable(isAvailable); | |
| 557 available = isAvailable; | |
| 558 | |
| 559 // Close the initialized input mixer | |
| 560 // | |
| 561 if (!wasInitialized) | |
| 562 { | |
| 563 _mixerManager.CloseMicrophone(); | |
| 564 } | |
| 565 | |
| 566 return 0; | |
| 567 } | |
| 568 | |
| 569 int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable) | |
| 570 { | |
| 571 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 572 return (_mixerManager.SetMicrophoneMute(enable)); | |
| 573 } | |
| 574 | |
| 575 int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const | |
| 576 { | |
| 577 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 578 bool muted(0); | |
| 579 if (_mixerManager.MicrophoneMute(muted) == -1) | |
| 580 { | |
| 581 return -1; | |
| 582 } | |
| 583 | |
| 584 enabled = muted; | |
| 585 return 0; | |
| 586 } | |
| 587 | |
| 588 int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available) | |
| 589 { | |
| 590 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 591 bool isAvailable(false); | |
| 592 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); | |
| 593 | |
| 594 // Enumerate all avaliable microphone and make an attempt to open up the | |
| 595 // input mixer corresponding to the currently selected input device. | |
| 596 // | |
| 597 if (!wasInitialized && InitMicrophone() == -1) | |
| 598 { | |
| 599 // If we end up here it means that the selected microphone has no | |
| 600 // volume control, hence it is safe to state that there is no | |
| 601 // boost control already at this stage. | |
| 602 available = false; | |
| 603 return 0; | |
| 604 } | |
| 605 | |
| 606 // Check if the selected microphone has a boost control | |
| 607 _mixerManager.MicrophoneBoostIsAvailable(isAvailable); | |
| 608 available = isAvailable; | |
| 609 | |
| 610 // Close the initialized input mixer | |
| 611 if (!wasInitialized) | |
| 612 { | |
| 613 _mixerManager.CloseMicrophone(); | |
| 614 } | |
| 615 | |
| 616 return 0; | |
| 617 } | |
| 618 | |
| 619 int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable) | |
| 620 { | |
| 621 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 622 return (_mixerManager.SetMicrophoneBoost(enable)); | |
| 623 } | |
| 624 | |
| 625 int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const | |
| 626 { | |
| 627 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 628 bool onOff(0); | |
| 629 | |
| 630 if (_mixerManager.MicrophoneBoost(onOff) == -1) | |
| 631 { | |
| 632 return -1; | |
| 633 } | |
| 634 | |
| 635 enabled = onOff; | |
| 636 | |
| 637 return 0; | |
| 638 } | |
| 639 | |
| 640 int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available) | |
| 641 { | |
| 642 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 643 if (_recChannels == 2 && _recording) { | |
| 644 available = true; | |
| 645 return 0; | |
| 646 } | |
| 647 | |
| 648 available = false; | |
| 649 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); | |
| 650 int error = 0; | |
| 651 | |
| 652 if (!wasInitialized && InitMicrophone() == -1) | |
| 653 { | |
| 654 // Cannot open the specified device | |
| 655 available = false; | |
| 656 return 0; | |
| 657 } | |
| 658 | |
| 659 // Check if the selected microphone can record stereo. | |
| 660 bool isAvailable(false); | |
| 661 error = _mixerManager.StereoRecordingIsAvailable(isAvailable); | |
| 662 if (!error) | |
| 663 available = isAvailable; | |
| 664 | |
| 665 // Close the initialized input mixer | |
| 666 if (!wasInitialized) | |
| 667 { | |
| 668 _mixerManager.CloseMicrophone(); | |
| 669 } | |
| 670 | |
| 671 return error; | |
| 672 } | |
| 673 | |
| 674 int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable) | |
| 675 { | |
| 676 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 677 if (enable) | |
| 678 _recChannels = 2; | |
| 679 else | |
| 680 _recChannels = 1; | |
| 681 | |
| 682 return 0; | |
| 683 } | |
| 684 | |
| 685 int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const | |
| 686 { | |
| 687 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 688 if (_recChannels == 2) | |
| 689 enabled = true; | |
| 690 else | |
| 691 enabled = false; | |
| 692 | |
| 693 return 0; | |
| 694 } | |
| 695 | |
| 696 int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available) | |
| 697 { | |
| 698 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 699 if (_playChannels == 2 && _playing) { | |
| 700 available = true; | |
| 701 return 0; | |
| 702 } | |
| 703 | |
| 704 available = false; | |
| 705 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); | |
| 706 int error = 0; | |
| 707 | |
| 708 if (!wasInitialized && InitSpeaker() == -1) | |
| 709 { | |
| 710 // Cannot open the specified device. | |
| 711 return -1; | |
| 712 } | |
| 713 | |
| 714 // Check if the selected speaker can play stereo. | |
| 715 bool isAvailable(false); | |
| 716 error = _mixerManager.StereoPlayoutIsAvailable(isAvailable); | |
| 717 if (!error) | |
| 718 available = isAvailable; | |
| 719 | |
| 720 // Close the initialized input mixer | |
| 721 if (!wasInitialized) | |
| 722 { | |
| 723 _mixerManager.CloseSpeaker(); | |
| 724 } | |
| 725 | |
| 726 return error; | |
| 727 } | |
| 728 | |
| 729 int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable) | |
| 730 { | |
| 731 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 732 if (enable) | |
| 733 _playChannels = 2; | |
| 734 else | |
| 735 _playChannels = 1; | |
| 736 | |
| 737 return 0; | |
| 738 } | |
| 739 | |
| 740 int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const | |
| 741 { | |
| 742 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 743 if (_playChannels == 2) | |
| 744 enabled = true; | |
| 745 else | |
| 746 enabled = false; | |
| 747 | |
| 748 return 0; | |
| 749 } | |
| 750 | |
| 751 int32_t AudioDeviceLinuxPulse::SetAGC(bool enable) | |
| 752 { | |
| 753 rtc::CritScope lock(&_critSect); | |
| 754 _AGC = enable; | |
| 755 | |
| 756 return 0; | |
| 757 } | |
| 758 | |
| 759 bool AudioDeviceLinuxPulse::AGC() const | |
| 760 { | |
| 761 rtc::CritScope lock(&_critSect); | |
| 762 return _AGC; | |
| 763 } | |
| 764 | |
| 765 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable( | |
| 766 bool& available) | |
| 767 { | |
| 768 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 769 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); | |
| 770 | |
| 771 // Make an attempt to open up the | |
| 772 // input mixer corresponding to the currently selected output device. | |
| 773 if (!wasInitialized && InitMicrophone() == -1) | |
| 774 { | |
| 775 // If we end up here it means that the selected microphone has no | |
| 776 // volume control. | |
| 777 available = false; | |
| 778 return 0; | |
| 779 } | |
| 780 | |
| 781 // Given that InitMicrophone was successful, we know that a volume control | |
| 782 // exists. | |
| 783 available = true; | |
| 784 | |
| 785 // Close the initialized input mixer | |
| 786 if (!wasInitialized) | |
| 787 { | |
| 788 _mixerManager.CloseMicrophone(); | |
| 789 } | |
| 790 | |
| 791 return 0; | |
| 792 } | |
| 793 | |
| 794 int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) | |
| 795 { | |
| 796 return (_mixerManager.SetMicrophoneVolume(volume)); | |
| 797 } | |
| 798 | |
| 799 int32_t AudioDeviceLinuxPulse::MicrophoneVolume( | |
| 800 uint32_t& volume) const | |
| 801 { | |
| 802 | |
| 803 uint32_t level(0); | |
| 804 | |
| 805 if (_mixerManager.MicrophoneVolume(level) == -1) | |
| 806 { | |
| 807 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
| 808 " failed to retrive current microphone level"); | |
| 809 return -1; | |
| 810 } | |
| 811 | |
| 812 volume = level; | |
| 813 | |
| 814 return 0; | |
| 815 } | |
| 816 | |
| 817 int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume( | |
| 818 uint32_t& maxVolume) const | |
| 819 { | |
| 820 | |
| 821 uint32_t maxVol(0); | |
| 822 | |
| 823 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) | |
| 824 { | |
| 825 return -1; | |
| 826 } | |
| 827 | |
| 828 maxVolume = maxVol; | |
| 829 | |
| 830 return 0; | |
| 831 } | |
| 832 | |
| 833 int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume( | |
| 834 uint32_t& minVolume) const | |
| 835 { | |
| 836 | |
| 837 uint32_t minVol(0); | |
| 838 | |
| 839 if (_mixerManager.MinMicrophoneVolume(minVol) == -1) | |
| 840 { | |
| 841 return -1; | |
| 842 } | |
| 843 | |
| 844 minVolume = minVol; | |
| 845 | |
| 846 return 0; | |
| 847 } | 748 } |
| 848 | 749 |
| 849 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize( | 750 int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize( |
| 850 uint16_t& stepSize) const | 751 uint16_t& stepSize) const { |
| 851 { | 752 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 852 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 753 uint16_t delta(0); |
| 853 uint16_t delta(0); | 754 |
| 854 | 755 if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) { |
| 855 if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) | 756 return -1; |
| 856 { | 757 } |
| 857 return -1; | 758 |
| 858 } | 759 stepSize = delta; |
| 859 | 760 |
| 860 stepSize = delta; | 761 return 0; |
| 861 | 762 } |
| 862 return 0; | 763 |
| 863 } | 764 int16_t AudioDeviceLinuxPulse::PlayoutDevices() { |
| 864 | 765 PaLock(); |
| 865 int16_t AudioDeviceLinuxPulse::PlayoutDevices() | 766 |
| 866 { | 767 pa_operation* paOperation = NULL; |
| 867 PaLock(); | 768 _numPlayDevices = 1; // init to 1 to account for "default" |
| 868 | 769 |
| 869 pa_operation* paOperation = NULL; | 770 // get the whole list of devices and update _numPlayDevices |
| 870 _numPlayDevices = 1; // init to 1 to account for "default" | 771 paOperation = |
| 871 | 772 LATE(pa_context_get_sink_info_list)(_paContext, PaSinkInfoCallback, this); |
| 872 // get the whole list of devices and update _numPlayDevices | 773 |
| 873 paOperation = LATE(pa_context_get_sink_info_list)(_paContext, | 774 WaitForOperationCompletion(paOperation); |
| 874 PaSinkInfoCallback, | 775 |
| 875 this); | 776 PaUnLock(); |
| 876 | 777 |
| 877 WaitForOperationCompletion(paOperation); | 778 return _numPlayDevices; |
| 878 | 779 } |
| 879 PaUnLock(); | 780 |
| 880 | 781 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) { |
| 881 return _numPlayDevices; | 782 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 882 } | 783 if (_playIsInitialized) { |
| 883 | 784 return -1; |
| 884 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) | 785 } |
| 885 { | 786 |
| 886 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 787 const uint16_t nDevices = PlayoutDevices(); |
| 887 if (_playIsInitialized) | 788 |
| 888 { | 789 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 889 return -1; | 790 " number of availiable output devices is %u", nDevices); |
| 890 } | 791 |
| 891 | 792 if (index > (nDevices - 1)) { |
| 892 const uint16_t nDevices = PlayoutDevices(); | 793 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 893 | 794 " device index is out of range [0,%u]", (nDevices - 1)); |
| 894 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | 795 return -1; |
| 895 " number of availiable output devices is %u", nDevices); | 796 } |
| 896 | 797 |
| 897 if (index > (nDevices - 1)) | 798 _outputDeviceIndex = index; |
| 898 { | 799 _outputDeviceIsSpecified = true; |
| 899 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 800 |
| 900 " device index is out of range [0,%u]", (nDevices - 1)); | 801 return 0; |
| 901 return -1; | |
| 902 } | |
| 903 | |
| 904 _outputDeviceIndex = index; | |
| 905 _outputDeviceIsSpecified = true; | |
| 906 | |
| 907 return 0; | |
| 908 } | 802 } |
| 909 | 803 |
| 910 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice( | 804 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice( |
| 911 AudioDeviceModule::WindowsDeviceType /*device*/) | 805 AudioDeviceModule::WindowsDeviceType /*device*/) { |
| 912 { | 806 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 913 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 807 "WindowsDeviceType not supported"); |
| 914 "WindowsDeviceType not supported"); | 808 return -1; |
| 915 return -1; | |
| 916 } | 809 } |
| 917 | 810 |
| 918 int32_t AudioDeviceLinuxPulse::PlayoutDeviceName( | 811 int32_t AudioDeviceLinuxPulse::PlayoutDeviceName( |
| 919 uint16_t index, | 812 uint16_t index, |
| 920 char name[kAdmMaxDeviceNameSize], | 813 char name[kAdmMaxDeviceNameSize], |
| 921 char guid[kAdmMaxGuidSize]) | 814 char guid[kAdmMaxGuidSize]) { |
| 922 { | 815 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 923 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 816 const uint16_t nDevices = PlayoutDevices(); |
| 924 const uint16_t nDevices = PlayoutDevices(); | 817 |
| 925 | 818 if ((index > (nDevices - 1)) || (name == NULL)) { |
| 926 if ((index > (nDevices - 1)) || (name == NULL)) | 819 return -1; |
| 927 { | 820 } |
| 928 return -1; | 821 |
| 929 } | 822 memset(name, 0, kAdmMaxDeviceNameSize); |
| 930 | 823 |
| 931 memset(name, 0, kAdmMaxDeviceNameSize); | 824 if (guid != NULL) { |
| 932 | 825 memset(guid, 0, kAdmMaxGuidSize); |
| 933 if (guid != NULL) | 826 } |
| 934 { | 827 |
| 935 memset(guid, 0, kAdmMaxGuidSize); | 828 // Check if default device |
| 936 } | 829 if (index == 0) { |
| 937 | 830 uint16_t deviceIndex = 0; |
| 938 // Check if default device | 831 return GetDefaultDeviceInfo(false, name, deviceIndex); |
| 939 if (index == 0) | 832 } |
| 940 { | 833 |
| 941 uint16_t deviceIndex = 0; | 834 // Tell the callback that we want |
| 942 return GetDefaultDeviceInfo(false, name, deviceIndex); | 835 // The name for this device |
| 943 } | 836 _playDisplayDeviceName = name; |
| 944 | 837 _deviceIndex = index; |
| 945 // Tell the callback that we want | 838 |
| 946 // The name for this device | 839 // get playout devices |
| 947 _playDisplayDeviceName = name; | 840 PlayoutDevices(); |
| 948 _deviceIndex = index; | 841 |
| 949 | 842 // clear device name and index |
| 950 // get playout devices | 843 _playDisplayDeviceName = NULL; |
| 951 PlayoutDevices(); | 844 _deviceIndex = -1; |
| 952 | 845 |
| 953 // clear device name and index | 846 return 0; |
| 954 _playDisplayDeviceName = NULL; | |
| 955 _deviceIndex = -1; | |
| 956 | |
| 957 return 0; | |
| 958 } | 847 } |
| 959 | 848 |
| 960 int32_t AudioDeviceLinuxPulse::RecordingDeviceName( | 849 int32_t AudioDeviceLinuxPulse::RecordingDeviceName( |
| 961 uint16_t index, | 850 uint16_t index, |
| 962 char name[kAdmMaxDeviceNameSize], | 851 char name[kAdmMaxDeviceNameSize], |
| 963 char guid[kAdmMaxGuidSize]) | 852 char guid[kAdmMaxGuidSize]) { |
| 964 { | 853 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 965 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 854 const uint16_t nDevices(RecordingDevices()); |
| 966 const uint16_t nDevices(RecordingDevices()); | 855 |
| 967 | 856 if ((index > (nDevices - 1)) || (name == NULL)) { |
| 968 if ((index > (nDevices - 1)) || (name == NULL)) | 857 return -1; |
| 858 } |
| 859 |
| 860 memset(name, 0, kAdmMaxDeviceNameSize); |
| 861 |
| 862 if (guid != NULL) { |
| 863 memset(guid, 0, kAdmMaxGuidSize); |
| 864 } |
| 865 |
| 866 // Check if default device |
| 867 if (index == 0) { |
| 868 uint16_t deviceIndex = 0; |
| 869 return GetDefaultDeviceInfo(true, name, deviceIndex); |
| 870 } |
| 871 |
| 872 // Tell the callback that we want |
| 873 // the name for this device |
| 874 _recDisplayDeviceName = name; |
| 875 _deviceIndex = index; |
| 876 |
| 877 // Get recording devices |
| 878 RecordingDevices(); |
| 879 |
| 880 // Clear device name and index |
| 881 _recDisplayDeviceName = NULL; |
| 882 _deviceIndex = -1; |
| 883 |
| 884 return 0; |
| 885 } |
| 886 |
| 887 int16_t AudioDeviceLinuxPulse::RecordingDevices() { |
| 888 PaLock(); |
| 889 |
| 890 pa_operation* paOperation = NULL; |
| 891 _numRecDevices = 1; // Init to 1 to account for "default" |
| 892 |
| 893 // Get the whole list of devices and update _numRecDevices |
| 894 paOperation = LATE(pa_context_get_source_info_list)( |
| 895 _paContext, PaSourceInfoCallback, this); |
| 896 |
| 897 WaitForOperationCompletion(paOperation); |
| 898 |
| 899 PaUnLock(); |
| 900 |
| 901 return _numRecDevices; |
| 902 } |
| 903 |
| 904 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) { |
| 905 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 906 if (_recIsInitialized) { |
| 907 return -1; |
| 908 } |
| 909 |
| 910 const uint16_t nDevices(RecordingDevices()); |
| 911 |
| 912 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 913 " number of availiable input devices is %u", nDevices); |
| 914 |
| 915 if (index > (nDevices - 1)) { |
| 916 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 917 " device index is out of range [0,%u]", (nDevices - 1)); |
| 918 return -1; |
| 919 } |
| 920 |
| 921 _inputDeviceIndex = index; |
| 922 _inputDeviceIsSpecified = true; |
| 923 |
| 924 return 0; |
| 925 } |
| 926 |
| 927 int32_t AudioDeviceLinuxPulse::SetRecordingDevice( |
| 928 AudioDeviceModule::WindowsDeviceType /*device*/) { |
| 929 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 930 "WindowsDeviceType not supported"); |
| 931 return -1; |
| 932 } |
| 933 |
| 934 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) { |
| 935 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 936 available = false; |
| 937 |
| 938 // Try to initialize the playout side |
| 939 int32_t res = InitPlayout(); |
| 940 |
| 941 // Cancel effect of initialization |
| 942 StopPlayout(); |
| 943 |
| 944 if (res != -1) { |
| 945 available = true; |
| 946 } |
| 947 |
| 948 return res; |
| 949 } |
| 950 |
| 951 int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available) { |
| 952 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 953 available = false; |
| 954 |
| 955 // Try to initialize the playout side |
| 956 int32_t res = InitRecording(); |
| 957 |
| 958 // Cancel effect of initialization |
| 959 StopRecording(); |
| 960 |
| 961 if (res != -1) { |
| 962 available = true; |
| 963 } |
| 964 |
| 965 return res; |
| 966 } |
| 967 |
| 968 int32_t AudioDeviceLinuxPulse::InitPlayout() { |
| 969 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 970 |
| 971 if (_playing) { |
| 972 return -1; |
| 973 } |
| 974 |
| 975 if (!_outputDeviceIsSpecified) { |
| 976 return -1; |
| 977 } |
| 978 |
| 979 if (_playIsInitialized) { |
| 980 return 0; |
| 981 } |
| 982 |
| 983 // Initialize the speaker (devices might have been added or removed) |
| 984 if (InitSpeaker() == -1) { |
| 985 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 986 " InitSpeaker() failed"); |
| 987 } |
| 988 |
| 989 // Set the play sample specification |
| 990 pa_sample_spec playSampleSpec; |
| 991 playSampleSpec.channels = _playChannels; |
| 992 playSampleSpec.format = PA_SAMPLE_S16LE; |
| 993 playSampleSpec.rate = sample_rate_hz_; |
| 994 |
| 995 // Create a new play stream |
| 996 _playStream = |
| 997 LATE(pa_stream_new)(_paContext, "playStream", &playSampleSpec, NULL); |
| 998 |
| 999 if (!_playStream) { |
| 1000 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1001 " failed to create play stream, err=%d", |
| 1002 LATE(pa_context_errno)(_paContext)); |
| 1003 return -1; |
| 1004 } |
| 1005 |
| 1006 // Provide the playStream to the mixer |
| 1007 _mixerManager.SetPlayStream(_playStream); |
| 1008 |
| 1009 if (_ptrAudioBuffer) { |
| 1010 // Update audio buffer with the selected parameters |
| 1011 _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_); |
| 1012 _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels); |
| 1013 } |
| 1014 |
| 1015 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stream state %d\n", |
| 1016 LATE(pa_stream_get_state)(_playStream)); |
| 1017 |
| 1018 // Set stream flags |
| 1019 _playStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE | |
| 1020 PA_STREAM_INTERPOLATE_TIMING); |
| 1021 |
| 1022 if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { |
| 1023 // If configuring a specific latency then we want to specify |
| 1024 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters |
| 1025 // automatically to reach that target latency. However, that flag |
| 1026 // doesn't exist in Ubuntu 8.04 and many people still use that, |
| 1027 // so we have to check the protocol version of libpulse. |
| 1028 if (LATE(pa_context_get_protocol_version)(_paContext) >= |
| 1029 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { |
| 1030 _playStreamFlags |= PA_STREAM_ADJUST_LATENCY; |
| 1031 } |
| 1032 |
| 1033 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); |
| 1034 if (!spec) { |
| 1035 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1036 " pa_stream_get_sample_spec()"); |
| 1037 return -1; |
| 1038 } |
| 1039 |
| 1040 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); |
| 1041 uint32_t latency = bytesPerSec * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / |
| 1042 WEBRTC_PA_MSECS_PER_SEC; |
| 1043 |
| 1044 // Set the play buffer attributes |
| 1045 _playBufferAttr.maxlength = latency; // num bytes stored in the buffer |
| 1046 _playBufferAttr.tlength = latency; // target fill level of play buffer |
| 1047 // minimum free num bytes before server request more data |
| 1048 _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; |
| 1049 // prebuffer tlength before starting playout |
| 1050 _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq; |
| 1051 |
| 1052 _configuredLatencyPlay = latency; |
| 1053 } |
| 1054 |
| 1055 // num samples in bytes * num channels |
| 1056 _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels; |
| 1057 _playbackBufferUnused = _playbackBufferSize; |
| 1058 _playBuffer = new int8_t[_playbackBufferSize]; |
| 1059 |
| 1060 // Enable underflow callback |
| 1061 LATE(pa_stream_set_underflow_callback) |
| 1062 (_playStream, PaStreamUnderflowCallback, this); |
| 1063 |
| 1064 // Set the state callback function for the stream |
| 1065 LATE(pa_stream_set_state_callback)(_playStream, PaStreamStateCallback, this); |
| 1066 |
| 1067 // Mark playout side as initialized |
| 1068 _playIsInitialized = true; |
| 1069 _sndCardPlayDelay = 0; |
| 1070 _sndCardRecDelay = 0; |
| 1071 |
| 1072 return 0; |
| 1073 } |
| 1074 |
| 1075 int32_t AudioDeviceLinuxPulse::InitRecording() { |
| 1076 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1077 |
| 1078 if (_recording) { |
| 1079 return -1; |
| 1080 } |
| 1081 |
| 1082 if (!_inputDeviceIsSpecified) { |
| 1083 return -1; |
| 1084 } |
| 1085 |
| 1086 if (_recIsInitialized) { |
| 1087 return 0; |
| 1088 } |
| 1089 |
| 1090 // Initialize the microphone (devices might have been added or removed) |
| 1091 if (InitMicrophone() == -1) { |
| 1092 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 1093 " InitMicrophone() failed"); |
| 1094 } |
| 1095 |
| 1096 // Set the rec sample specification |
| 1097 pa_sample_spec recSampleSpec; |
| 1098 recSampleSpec.channels = _recChannels; |
| 1099 recSampleSpec.format = PA_SAMPLE_S16LE; |
| 1100 recSampleSpec.rate = sample_rate_hz_; |
| 1101 |
| 1102 // Create a new rec stream |
| 1103 _recStream = |
| 1104 LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, NULL); |
| 1105 if (!_recStream) { |
| 1106 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1107 " failed to create rec stream, err=%d", |
| 1108 LATE(pa_context_errno)(_paContext)); |
| 1109 return -1; |
| 1110 } |
| 1111 |
| 1112 // Provide the recStream to the mixer |
| 1113 _mixerManager.SetRecStream(_recStream); |
| 1114 |
| 1115 if (_ptrAudioBuffer) { |
| 1116 // Update audio buffer with the selected parameters |
| 1117 _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_); |
| 1118 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); |
| 1119 } |
| 1120 |
| 1121 if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { |
| 1122 _recStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE | |
| 1123 PA_STREAM_INTERPOLATE_TIMING); |
| 1124 |
| 1125 // If configuring a specific latency then we want to specify |
| 1126 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters |
| 1127 // automatically to reach that target latency. However, that flag |
| 1128 // doesn't exist in Ubuntu 8.04 and many people still use that, |
| 1129 // so we have to check the protocol version of libpulse. |
| 1130 if (LATE(pa_context_get_protocol_version)(_paContext) >= |
| 1131 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { |
| 1132 _recStreamFlags |= PA_STREAM_ADJUST_LATENCY; |
| 1133 } |
| 1134 |
| 1135 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_recStream); |
| 1136 if (!spec) { |
| 1137 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1138 " pa_stream_get_sample_spec(rec)"); |
| 1139 return -1; |
| 1140 } |
| 1141 |
| 1142 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); |
| 1143 uint32_t latency = bytesPerSec * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / |
| 1144 WEBRTC_PA_MSECS_PER_SEC; |
| 1145 |
| 1146 // Set the rec buffer attributes |
| 1147 // Note: fragsize specifies a maximum transfer size, not a minimum, so |
| 1148 // it is not possible to force a high latency setting, only a low one. |
| 1149 _recBufferAttr.fragsize = latency; // size of fragment |
| 1150 _recBufferAttr.maxlength = |
| 1151 latency + bytesPerSec * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / |
| 1152 WEBRTC_PA_MSECS_PER_SEC; |
| 1153 |
| 1154 _configuredLatencyRec = latency; |
| 1155 } |
| 1156 |
| 1157 _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels; |
| 1158 _recordBufferUsed = 0; |
| 1159 _recBuffer = new int8_t[_recordBufferSize]; |
| 1160 |
| 1161 // Enable overflow callback |
| 1162 LATE(pa_stream_set_overflow_callback) |
| 1163 (_recStream, PaStreamOverflowCallback, this); |
| 1164 |
| 1165 // Set the state callback function for the stream |
| 1166 LATE(pa_stream_set_state_callback)(_recStream, PaStreamStateCallback, this); |
| 1167 |
| 1168 // Mark recording side as initialized |
| 1169 _recIsInitialized = true; |
| 1170 |
| 1171 return 0; |
| 1172 } |
| 1173 |
| 1174 int32_t AudioDeviceLinuxPulse::StartRecording() { |
| 1175 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1176 if (!_recIsInitialized) { |
| 1177 return -1; |
| 1178 } |
| 1179 |
| 1180 if (_recording) { |
| 1181 return 0; |
| 1182 } |
| 1183 |
| 1184 // Set state to ensure that the recording starts from the audio thread. |
| 1185 _startRec = true; |
| 1186 |
| 1187 // The audio thread will signal when recording has started. |
| 1188 _timeEventRec.Set(); |
| 1189 if (kEventTimeout == _recStartEvent.Wait(10000)) { |
| 969 { | 1190 { |
| 970 return -1; | 1191 rtc::CritScope lock(&_critSect); |
| 1192 _startRec = false; |
| 971 } | 1193 } |
| 972 | 1194 StopRecording(); |
| 973 memset(name, 0, kAdmMaxDeviceNameSize); | 1195 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 974 | 1196 " failed to activate recording"); |
| 975 if (guid != NULL) | 1197 return -1; |
| 1198 } |
| 1199 |
| 1200 { |
| 1201 rtc::CritScope lock(&_critSect); |
| 1202 if (_recording) { |
| 1203 // The recording state is set by the audio thread after recording |
| 1204 // has started. |
| 1205 } else { |
| 1206 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1207 " failed to activate recording"); |
| 1208 return -1; |
| 1209 } |
| 1210 } |
| 1211 |
| 1212 return 0; |
| 1213 } |
| 1214 |
| 1215 int32_t AudioDeviceLinuxPulse::StopRecording() { |
| 1216 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1217 rtc::CritScope lock(&_critSect); |
| 1218 |
| 1219 if (!_recIsInitialized) { |
| 1220 return 0; |
| 1221 } |
| 1222 |
| 1223 if (_recStream == NULL) { |
| 1224 return -1; |
| 1225 } |
| 1226 |
| 1227 _recIsInitialized = false; |
| 1228 _recording = false; |
| 1229 |
| 1230 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stopping recording"); |
| 1231 |
| 1232 // Stop Recording |
| 1233 PaLock(); |
| 1234 |
| 1235 DisableReadCallback(); |
| 1236 LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL); |
| 1237 |
| 1238 // Unset this here so that we don't get a TERMINATED callback |
| 1239 LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL); |
| 1240 |
| 1241 if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) { |
| 1242 // Disconnect the stream |
| 1243 if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) { |
| 1244 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1245 " failed to disconnect rec stream, err=%d\n", |
| 1246 LATE(pa_context_errno)(_paContext)); |
| 1247 PaUnLock(); |
| 1248 return -1; |
| 1249 } |
| 1250 |
| 1251 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
| 1252 " disconnected recording"); |
| 1253 } |
| 1254 |
| 1255 LATE(pa_stream_unref)(_recStream); |
| 1256 _recStream = NULL; |
| 1257 |
| 1258 PaUnLock(); |
| 1259 |
| 1260 // Provide the recStream to the mixer |
| 1261 _mixerManager.SetRecStream(_recStream); |
| 1262 |
| 1263 if (_recBuffer) { |
| 1264 delete[] _recBuffer; |
| 1265 _recBuffer = NULL; |
| 1266 } |
| 1267 |
| 1268 return 0; |
| 1269 } |
| 1270 |
| 1271 bool AudioDeviceLinuxPulse::RecordingIsInitialized() const { |
| 1272 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1273 return (_recIsInitialized); |
| 1274 } |
| 1275 |
| 1276 bool AudioDeviceLinuxPulse::Recording() const { |
| 1277 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1278 return (_recording); |
| 1279 } |
| 1280 |
| 1281 bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const { |
| 1282 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1283 return (_playIsInitialized); |
| 1284 } |
| 1285 |
| 1286 int32_t AudioDeviceLinuxPulse::StartPlayout() { |
| 1287 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1288 |
| 1289 if (!_playIsInitialized) { |
| 1290 return -1; |
| 1291 } |
| 1292 |
| 1293 if (_playing) { |
| 1294 return 0; |
| 1295 } |
| 1296 |
| 1297 // Set state to ensure that playout starts from the audio thread. |
| 1298 { |
| 1299 rtc::CritScope lock(&_critSect); |
| 1300 _startPlay = true; |
| 1301 } |
| 1302 |
| 1303 // Both |_startPlay| and |_playing| need protection since they are also |
| 1304 // accessed on the playout thread. |
| 1305 |
| 1306 // The audio thread will signal when playout has started. |
| 1307 _timeEventPlay.Set(); |
| 1308 if (kEventTimeout == _playStartEvent.Wait(10000)) { |
| 976 { | 1309 { |
| 977 memset(guid, 0, kAdmMaxGuidSize); | 1310 rtc::CritScope lock(&_critSect); |
| 1311 _startPlay = false; |
| 978 } | 1312 } |
| 979 | 1313 StopPlayout(); |
| 980 // Check if default device | 1314 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 981 if (index == 0) | 1315 " failed to activate playout"); |
| 982 { | 1316 return -1; |
| 983 uint16_t deviceIndex = 0; | 1317 } |
| 984 return GetDefaultDeviceInfo(true, name, deviceIndex); | 1318 |
| 1319 { |
| 1320 rtc::CritScope lock(&_critSect); |
| 1321 if (_playing) { |
| 1322 // The playing state is set by the audio thread after playout |
| 1323 // has started. |
| 1324 } else { |
| 1325 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1326 " failed to activate playing"); |
| 1327 return -1; |
| 985 } | 1328 } |
| 986 | 1329 } |
| 987 // Tell the callback that we want | 1330 |
| 988 // the name for this device | 1331 return 0; |
| 989 _recDisplayDeviceName = name; | 1332 } |
| 990 _deviceIndex = index; | 1333 |
| 991 | 1334 int32_t AudioDeviceLinuxPulse::StopPlayout() { |
| 992 // Get recording devices | 1335 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 993 RecordingDevices(); | 1336 rtc::CritScope lock(&_critSect); |
| 994 | 1337 |
| 995 // Clear device name and index | 1338 if (!_playIsInitialized) { |
| 996 _recDisplayDeviceName = NULL; | 1339 return 0; |
| 997 _deviceIndex = -1; | 1340 } |
| 998 | 1341 |
| 999 return 0; | 1342 if (_playStream == NULL) { |
| 1000 } | 1343 return -1; |
| 1001 | 1344 } |
| 1002 int16_t AudioDeviceLinuxPulse::RecordingDevices() | 1345 |
| 1003 { | 1346 _playIsInitialized = false; |
| 1004 PaLock(); | 1347 _playing = false; |
| 1005 | 1348 _sndCardPlayDelay = 0; |
| 1006 pa_operation* paOperation = NULL; | 1349 _sndCardRecDelay = 0; |
| 1007 _numRecDevices = 1; // Init to 1 to account for "default" | 1350 |
| 1008 | 1351 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stopping playback"); |
| 1009 // Get the whole list of devices and update _numRecDevices | 1352 |
| 1010 paOperation = LATE(pa_context_get_source_info_list)(_paContext, | 1353 // Stop Playout |
| 1011 PaSourceInfoCallback, | 1354 PaLock(); |
| 1012 this); | 1355 |
| 1013 | 1356 DisableWriteCallback(); |
| 1014 WaitForOperationCompletion(paOperation); | 1357 LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL); |
| 1015 | 1358 |
| 1016 PaUnLock(); | 1359 // Unset this here so that we don't get a TERMINATED callback |
| 1017 | 1360 LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL); |
| 1018 return _numRecDevices; | 1361 |
| 1019 } | 1362 if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) { |
| 1020 | 1363 // Disconnect the stream |
| 1021 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) | 1364 if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) { |
| 1022 { | 1365 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1023 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 1366 " failed to disconnect play stream, err=%d", |
| 1024 if (_recIsInitialized) | 1367 LATE(pa_context_errno)(_paContext)); |
| 1025 { | 1368 PaUnLock(); |
| 1026 return -1; | 1369 return -1; |
| 1027 } | 1370 } |
| 1028 | 1371 |
| 1029 const uint16_t nDevices(RecordingDevices()); | |
| 1030 | |
| 1031 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
| 1032 " number of availiable input devices is %u", nDevices); | |
| 1033 | |
| 1034 if (index > (nDevices - 1)) | |
| 1035 { | |
| 1036 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1037 " device index is out of range [0,%u]", (nDevices - 1)); | |
| 1038 return -1; | |
| 1039 } | |
| 1040 | |
| 1041 _inputDeviceIndex = index; | |
| 1042 _inputDeviceIsSpecified = true; | |
| 1043 | |
| 1044 return 0; | |
| 1045 } | |
| 1046 | |
| 1047 int32_t AudioDeviceLinuxPulse::SetRecordingDevice( | |
| 1048 AudioDeviceModule::WindowsDeviceType /*device*/) | |
| 1049 { | |
| 1050 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1051 "WindowsDeviceType not supported"); | |
| 1052 return -1; | |
| 1053 } | |
| 1054 | |
| 1055 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) | |
| 1056 { | |
| 1057 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1058 available = false; | |
| 1059 | |
| 1060 // Try to initialize the playout side | |
| 1061 int32_t res = InitPlayout(); | |
| 1062 | |
| 1063 // Cancel effect of initialization | |
| 1064 StopPlayout(); | |
| 1065 | |
| 1066 if (res != -1) | |
| 1067 { | |
| 1068 available = true; | |
| 1069 } | |
| 1070 | |
| 1071 return res; | |
| 1072 } | |
| 1073 | |
| 1074 int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available) | |
| 1075 { | |
| 1076 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1077 available = false; | |
| 1078 | |
| 1079 // Try to initialize the recording side |
| 1080 int32_t res = InitRecording(); | |
| 1081 | |
| 1082 // Cancel effect of initialization | |
| 1083 StopRecording(); | |
| 1084 | |
| 1085 if (res != -1) | |
| 1086 { | |
| 1087 available = true; | |
| 1088 } | |
| 1089 | |
| 1090 return res; | |
| 1091 } | |
| 1092 | |
| 1093 int32_t AudioDeviceLinuxPulse::InitPlayout() | |
| 1094 { | |
| 1095 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1096 | |
| 1097 if (_playing) | |
| 1098 { | |
| 1099 return -1; | |
| 1100 } | |
| 1101 | |
| 1102 if (!_outputDeviceIsSpecified) | |
| 1103 { | |
| 1104 return -1; | |
| 1105 } | |
| 1106 | |
| 1107 if (_playIsInitialized) | |
| 1108 { | |
| 1109 return 0; | |
| 1110 } | |
| 1111 | |
| 1112 // Initialize the speaker (devices might have been added or removed) | |
| 1113 if (InitSpeaker() == -1) | |
| 1114 { | |
| 1115 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
| 1116 " InitSpeaker() failed"); | |
| 1117 } | |
| 1118 | |
| 1119 // Set the play sample specification | |
| 1120 pa_sample_spec playSampleSpec; | |
| 1121 playSampleSpec.channels = _playChannels; | |
| 1122 playSampleSpec.format = PA_SAMPLE_S16LE; | |
| 1123 playSampleSpec.rate = sample_rate_hz_; | |
| 1124 | |
| 1125 // Create a new play stream | |
| 1126 _playStream = LATE(pa_stream_new)(_paContext, "playStream", | |
| 1127 &playSampleSpec, NULL); | |
| 1128 | |
| 1129 if (!_playStream) | |
| 1130 { | |
| 1131 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1132 " failed to create play stream, err=%d", | |
| 1133 LATE(pa_context_errno)(_paContext)); | |
| 1134 return -1; | |
| 1135 } | |
| 1136 | |
| 1137 // Provide the playStream to the mixer | |
| 1138 _mixerManager.SetPlayStream(_playStream); | |
| 1139 | |
| 1140 if (_ptrAudioBuffer) | |
| 1141 { | |
| 1142 // Update audio buffer with the selected parameters | |
| 1143 _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_); | |
| 1144 _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels); | |
| 1145 } | |
| 1146 | |
| 1147 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | 1372 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
| 1148 " stream state %d\n", | 1373 " disconnected playback"); |
| 1149 LATE(pa_stream_get_state)(_playStream)); | 1374 } |
| 1150 | 1375 |
| 1151 // Set stream flags | 1376 LATE(pa_stream_unref)(_playStream); |
| 1152 _playStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE | 1377 _playStream = NULL; |
| 1153 | PA_STREAM_INTERPOLATE_TIMING); | 1378 |
| 1154 | 1379 PaUnLock(); |
| 1155 if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) | 1380 |
| 1156 { | 1381 // Provide the playStream to the mixer |
| 1157 // If configuring a specific latency then we want to specify | 1382 _mixerManager.SetPlayStream(_playStream); |
| 1158 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters | 1383 |
| 1159 // automatically to reach that target latency. However, that flag | 1384 if (_playBuffer) { |
| 1160 // doesn't exist in Ubuntu 8.04 and many people still use that, | 1385 delete[] _playBuffer; |
| 1161 // so we have to check the protocol version of libpulse. | 1386 _playBuffer = NULL; |
| 1162 if (LATE(pa_context_get_protocol_version)(_paContext) | 1387 } |
| 1163 >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) | 1388 |
| 1164 { | 1389 return 0; |
| 1165 _playStreamFlags |= PA_STREAM_ADJUST_LATENCY; | 1390 } |
| 1166 } | 1391 |
| 1167 | 1392 int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const { |
| 1168 const pa_sample_spec *spec = | 1393 rtc::CritScope lock(&_critSect); |
| 1169 LATE(pa_stream_get_sample_spec)(_playStream); | 1394 delayMS = (uint16_t)_sndCardPlayDelay; |
| 1170 if (!spec) | 1395 return 0; |
| 1171 { | 1396 } |
| 1172 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1397 |
| 1173 " pa_stream_get_sample_spec()"); | 1398 int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const { |
| 1174 return -1; | 1399 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1175 } | 1400 delayMS = (uint16_t)_sndCardRecDelay; |
| 1176 | 1401 return 0; |
| 1177 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); | 1402 } |
| 1178 uint32_t latency = bytesPerSec * | 1403 |
| 1179 WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / | 1404 bool AudioDeviceLinuxPulse::Playing() const { |
| 1180 WEBRTC_PA_MSECS_PER_SEC; | 1405 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1181 | 1406 return (_playing); |
| 1182 // Set the play buffer attributes | |
| 1183 _playBufferAttr.maxlength = latency; // num bytes stored in the buffer | |
| 1184 _playBufferAttr.tlength = latency; // target fill level of play buffer | |
| 1185 // minimum free num bytes before server request more data | |
| 1186 _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; | |
| 1187 // prebuffer tlength before starting playout | |
| 1188 _playBufferAttr.prebuf = _playBufferAttr.tlength - | |
| 1189 _playBufferAttr.minreq; | |
| 1190 | |
| 1191 _configuredLatencyPlay = latency; | |
| 1192 } | |
| 1193 | |
| 1194 // num samples in bytes * num channels | |
| 1195 _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels; | |
| 1196 _playbackBufferUnused = _playbackBufferSize; | |
| 1197 _playBuffer = new int8_t[_playbackBufferSize]; | |
| 1198 | |
| 1199 // Enable underflow callback | |
| 1200 LATE(pa_stream_set_underflow_callback)(_playStream, | |
| 1201 PaStreamUnderflowCallback, this); | |
| 1202 | |
| 1203 // Set the state callback function for the stream | |
| 1204 LATE(pa_stream_set_state_callback)(_playStream, | |
| 1205 PaStreamStateCallback, this); | |
| 1206 | |
| 1207 // Mark playout side as initialized | |
| 1208 _playIsInitialized = true; | |
| 1209 _sndCardPlayDelay = 0; | |
| 1210 _sndCardRecDelay = 0; | |
| 1211 | |
| 1212 return 0; | |
| 1213 } | |
| 1214 | |
| 1215 int32_t AudioDeviceLinuxPulse::InitRecording() | |
| 1216 { | |
| 1217 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1218 | |
| 1219 if (_recording) | |
| 1220 { | |
| 1221 return -1; | |
| 1222 } | |
| 1223 | |
| 1224 if (!_inputDeviceIsSpecified) | |
| 1225 { | |
| 1226 return -1; | |
| 1227 } | |
| 1228 | |
| 1229 if (_recIsInitialized) | |
| 1230 { | |
| 1231 return 0; | |
| 1232 } | |
| 1233 | |
| 1234 // Initialize the microphone (devices might have been added or removed) | |
| 1235 if (InitMicrophone() == -1) | |
| 1236 { | |
| 1237 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
| 1238 " InitMicrophone() failed"); | |
| 1239 } | |
| 1240 | |
| 1241 // Set the rec sample specification | |
| 1242 pa_sample_spec recSampleSpec; | |
| 1243 recSampleSpec.channels = _recChannels; | |
| 1244 recSampleSpec.format = PA_SAMPLE_S16LE; | |
| 1245 recSampleSpec.rate = sample_rate_hz_; | |
| 1246 | |
| 1247 // Create a new rec stream | |
| 1248 _recStream = LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, | |
| 1249 NULL); | |
| 1250 if (!_recStream) | |
| 1251 { | |
| 1252 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1253 " failed to create rec stream, err=%d", | |
| 1254 LATE(pa_context_errno)(_paContext)); | |
| 1255 return -1; | |
| 1256 } | |
| 1257 | |
| 1258 // Provide the recStream to the mixer | |
| 1259 _mixerManager.SetRecStream(_recStream); | |
| 1260 | |
| 1261 if (_ptrAudioBuffer) | |
| 1262 { | |
| 1263 // Update audio buffer with the selected parameters | |
| 1264 _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_); | |
| 1265 _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels); | |
| 1266 } | |
| 1267 | |
| 1268 if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) | |
| 1269 { | |
| 1270 _recStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE | |
| 1271 | PA_STREAM_INTERPOLATE_TIMING); | |
| 1272 | |
| 1273 // If configuring a specific latency then we want to specify | |
| 1274 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters | |
| 1275 // automatically to reach that target latency. However, that flag | |
| 1276 // doesn't exist in Ubuntu 8.04 and many people still use that, | |
| 1277 // so we have to check the protocol version of libpulse. | |
| 1278 if (LATE(pa_context_get_protocol_version)(_paContext) | |
| 1279 >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) | |
| 1280 { | |
| 1281 _recStreamFlags |= PA_STREAM_ADJUST_LATENCY; | |
| 1282 } | |
| 1283 | |
| 1284 const pa_sample_spec *spec = | |
| 1285 LATE(pa_stream_get_sample_spec)(_recStream); | |
| 1286 if (!spec) | |
| 1287 { | |
| 1288 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1289 " pa_stream_get_sample_spec(rec)"); | |
| 1290 return -1; | |
| 1291 } | |
| 1292 | |
| 1293 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); | |
| 1294 uint32_t latency = bytesPerSec | |
| 1295 * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / WEBRTC_PA_MSECS_PER_SEC; | |
| 1296 | |
| 1297 // Set the rec buffer attributes | |
| 1298 // Note: fragsize specifies a maximum transfer size, not a minimum, so | |
| 1299 // it is not possible to force a high latency setting, only a low one. | |
| 1300 _recBufferAttr.fragsize = latency; // size of fragment | |
| 1301 _recBufferAttr.maxlength = latency + bytesPerSec | |
| 1302 * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / WEBRTC_PA_MSECS_PER_SEC; | |
| 1303 | |
| 1304 _configuredLatencyRec = latency; | |
| 1305 } | |
| 1306 | |
| 1307 _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels; | |
| 1308 _recordBufferUsed = 0; | |
| 1309 _recBuffer = new int8_t[_recordBufferSize]; | |
| 1310 | |
| 1311 // Enable overflow callback | |
| 1312 LATE(pa_stream_set_overflow_callback)(_recStream, | |
| 1313 PaStreamOverflowCallback, | |
| 1314 this); | |
| 1315 | |
| 1316 // Set the state callback function for the stream | |
| 1317 LATE(pa_stream_set_state_callback)(_recStream, | |
| 1318 PaStreamStateCallback, | |
| 1319 this); | |
| 1320 | |
| 1321 // Mark recording side as initialized | |
| 1322 _recIsInitialized = true; | |
| 1323 | |
| 1324 return 0; | |
| 1325 } | |
| 1326 | |
| 1327 int32_t AudioDeviceLinuxPulse::StartRecording() | |
| 1328 { | |
| 1329 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1330 if (!_recIsInitialized) | |
| 1331 { | |
| 1332 return -1; | |
| 1333 } | |
| 1334 | |
| 1335 if (_recording) | |
| 1336 { | |
| 1337 return 0; | |
| 1338 } | |
| 1339 | |
| 1340 // Set state to ensure that the recording starts from the audio thread. | |
| 1341 _startRec = true; | |
| 1342 | |
| 1343 // The audio thread will signal when recording has started. | |
| 1344 _timeEventRec.Set(); | |
| 1345 if (kEventTimeout == _recStartEvent.Wait(10000)) | |
| 1346 { | |
| 1347 { | |
| 1348 rtc::CritScope lock(&_critSect); | |
| 1349 _startRec = false; | |
| 1350 } | |
| 1351 StopRecording(); | |
| 1352 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1353 " failed to activate recording"); | |
| 1354 return -1; | |
| 1355 } | |
| 1356 | |
| 1357 { | |
| 1358 rtc::CritScope lock(&_critSect); | |
| 1359 if (_recording) | |
| 1360 { | |
| 1361 // The recording state is set by the audio thread after recording | |
| 1362 // has started. | |
| 1363 } else | |
| 1364 { | |
| 1365 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1366 " failed to activate recording"); | |
| 1367 return -1; | |
| 1368 } | |
| 1369 } | |
| 1370 | |
| 1371 return 0; | |
| 1372 } | |
| 1373 | |
| 1374 int32_t AudioDeviceLinuxPulse::StopRecording() | |
| 1375 { | |
| 1376 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1377 rtc::CritScope lock(&_critSect); | |
| 1378 | |
| 1379 if (!_recIsInitialized) | |
| 1380 { | |
| 1381 return 0; | |
| 1382 } | |
| 1383 | |
| 1384 if (_recStream == NULL) | |
| 1385 { | |
| 1386 return -1; | |
| 1387 } | |
| 1388 | |
| 1389 _recIsInitialized = false; | |
| 1390 _recording = false; | |
| 1391 | |
| 1392 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
| 1393 " stopping recording"); | |
| 1394 | |
| 1395 // Stop Recording | |
| 1396 PaLock(); | |
| 1397 | |
| 1398 DisableReadCallback(); | |
| 1399 LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL); | |
| 1400 | |
| 1401 // Unset this here so that we don't get a TERMINATED callback | |
| 1402 LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL); | |
| 1403 | |
| 1404 if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) | |
| 1405 { | |
| 1406 // Disconnect the stream | |
| 1407 if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) | |
| 1408 { | |
| 1409 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1410 " failed to disconnect rec stream, err=%d\n", | |
| 1411 LATE(pa_context_errno)(_paContext)); | |
| 1412 PaUnLock(); | |
| 1413 return -1; | |
| 1414 } | |
| 1415 | |
| 1416 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
| 1417 " disconnected recording"); | |
| 1418 } | |
| 1419 | |
| 1420 LATE(pa_stream_unref)(_recStream); | |
| 1421 _recStream = NULL; | |
| 1422 | |
| 1423 PaUnLock(); | |
| 1424 | |
| 1425 // Provide the recStream to the mixer | |
| 1426 _mixerManager.SetRecStream(_recStream); | |
| 1427 | |
| 1428 if (_recBuffer) | |
| 1429 { | |
| 1430 delete [] _recBuffer; | |
| 1431 _recBuffer = NULL; | |
| 1432 } | |
| 1433 | |
| 1434 return 0; | |
| 1435 } | |
| 1436 | |
| 1437 bool AudioDeviceLinuxPulse::RecordingIsInitialized() const | |
| 1438 { | |
| 1439 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1440 return (_recIsInitialized); | |
| 1441 } | |
| 1442 | |
| 1443 bool AudioDeviceLinuxPulse::Recording() const | |
| 1444 { | |
| 1445 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1446 return (_recording); | |
| 1447 } | |
| 1448 | |
| 1449 bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const | |
| 1450 { | |
| 1451 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1452 return (_playIsInitialized); | |
| 1453 } | |
| 1454 | |
| 1455 int32_t AudioDeviceLinuxPulse::StartPlayout() | |
| 1456 { | |
| 1457 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1458 | |
| 1459 if (!_playIsInitialized) | |
| 1460 { | |
| 1461 return -1; | |
| 1462 } | |
| 1463 | |
| 1464 if (_playing) | |
| 1465 { | |
| 1466 return 0; | |
| 1467 } | |
| 1468 | |
| 1469 // Set state to ensure that playout starts from the audio thread. | |
| 1470 { | |
| 1471 rtc::CritScope lock(&_critSect); | |
| 1472 _startPlay = true; | |
| 1473 } | |
| 1474 | |
| 1475 // Both |_startPlay| and |_playing| need protection since they are also |
| 1476 // accessed on the playout thread. | |
| 1477 | |
| 1478 // The audio thread will signal when playout has started. | |
| 1479 _timeEventPlay.Set(); | |
| 1480 if (kEventTimeout == _playStartEvent.Wait(10000)) | |
| 1481 { | |
| 1482 { | |
| 1483 rtc::CritScope lock(&_critSect); | |
| 1484 _startPlay = false; | |
| 1485 } | |
| 1486 StopPlayout(); | |
| 1487 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1488 " failed to activate playout"); | |
| 1489 return -1; | |
| 1490 } | |
| 1491 | |
| 1492 { | |
| 1493 rtc::CritScope lock(&_critSect); | |
| 1494 if (_playing) | |
| 1495 { | |
| 1496 // The playing state is set by the audio thread after playout | |
| 1497 // has started. | |
| 1498 } else | |
| 1499 { | |
| 1500 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1501 " failed to activate playing"); | |
| 1502 return -1; | |
| 1503 } | |
| 1504 } | |
| 1505 | |
| 1506 return 0; | |
| 1507 } | |
| 1508 | |
| 1509 int32_t AudioDeviceLinuxPulse::StopPlayout() | |
| 1510 { | |
| 1511 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1512 rtc::CritScope lock(&_critSect); | |
| 1513 | |
| 1514 if (!_playIsInitialized) | |
| 1515 { | |
| 1516 return 0; | |
| 1517 } | |
| 1518 | |
| 1519 if (_playStream == NULL) | |
| 1520 { | |
| 1521 return -1; | |
| 1522 } | |
| 1523 | |
| 1524 _playIsInitialized = false; | |
| 1525 _playing = false; | |
| 1526 _sndCardPlayDelay = 0; | |
| 1527 _sndCardRecDelay = 0; | |
| 1528 | |
| 1529 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
| 1530 " stopping playback"); | |
| 1531 | |
| 1532 // Stop Playout | |
| 1533 PaLock(); | |
| 1534 | |
| 1535 DisableWriteCallback(); | |
| 1536 LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL); | |
| 1537 | |
| 1538 // Unset this here so that we don't get a TERMINATED callback | |
| 1539 LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL); | |
| 1540 | |
| 1541 if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) | |
| 1542 { | |
| 1543 // Disconnect the stream | |
| 1544 if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) | |
| 1545 { | |
| 1546 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 1547 " failed to disconnect play stream, err=%d", | |
| 1548 LATE(pa_context_errno)(_paContext)); | |
| 1549 PaUnLock(); | |
| 1550 return -1; | |
| 1551 } | |
| 1552 | |
| 1553 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
| 1554 " disconnected playback"); | |
| 1555 } | |
| 1556 | |
| 1557 LATE(pa_stream_unref)(_playStream); | |
| 1558 _playStream = NULL; | |
| 1559 | |
| 1560 PaUnLock(); | |
| 1561 | |
| 1562 // Provide the playStream to the mixer | |
| 1563 _mixerManager.SetPlayStream(_playStream); | |
| 1564 | |
| 1565 if (_playBuffer) | |
| 1566 { | |
| 1567 delete [] _playBuffer; | |
| 1568 _playBuffer = NULL; | |
| 1569 } | |
| 1570 | |
| 1571 return 0; | |
| 1572 } | |
| 1573 | |
| 1574 int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const | |
| 1575 { | |
| 1576 rtc::CritScope lock(&_critSect); | |
| 1577 delayMS = (uint16_t) _sndCardPlayDelay; | |
| 1578 return 0; | |
| 1579 } | |
| 1580 | |
| 1581 int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const | |
| 1582 { | |
| 1583 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1584 delayMS = (uint16_t) _sndCardRecDelay; | |
| 1585 return 0; | |
| 1586 } | |
| 1587 | |
| 1588 bool AudioDeviceLinuxPulse::Playing() const | |
| 1589 { | |
| 1590 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | |
| 1591 return (_playing); | |
| 1592 } | 1407 } |
| 1593 | 1408 |
| 1594 int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer( | 1409 int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer( |
| 1595 const AudioDeviceModule::BufferType type, | 1410 const AudioDeviceModule::BufferType type, |
| 1596 uint16_t sizeMS) | 1411 uint16_t sizeMS) { |
| 1597 { | 1412 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1598 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 1413 if (type != AudioDeviceModule::kFixedBufferSize) { |
| 1599 if (type != AudioDeviceModule::kFixedBufferSize) | 1414 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 1600 { | 1415 " Adaptive buffer size not supported on this platform"); |
| 1601 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 1416 return -1; |
| 1602 " Adaptive buffer size not supported on this platform"); | 1417 } |
| 1603 return -1; | 1418 |
| 1604 } | 1419 _playBufType = type; |
| 1605 | 1420 _playBufDelayFixed = sizeMS; |
| 1606 _playBufType = type; | 1421 |
| 1607 _playBufDelayFixed = sizeMS; | 1422 return 0; |
| 1608 | |
| 1609 return 0; | |
| 1610 } | 1423 } |
| 1611 | 1424 |
| 1612 int32_t AudioDeviceLinuxPulse::PlayoutBuffer( | 1425 int32_t AudioDeviceLinuxPulse::PlayoutBuffer( |
| 1613 AudioDeviceModule::BufferType& type, | 1426 AudioDeviceModule::BufferType& type, |
| 1614 uint16_t& sizeMS) const | 1427 uint16_t& sizeMS) const { |
| 1615 { | 1428 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 1616 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 1429 type = _playBufType; |
| 1617 type = _playBufType; | 1430 sizeMS = _playBufDelayFixed; |
| 1618 sizeMS = _playBufDelayFixed; | 1431 |
| 1619 | 1432 return 0; |
| 1620 return 0; | 1433 } |
| 1621 } | 1434 |
| 1622 | 1435 int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const { |
| 1623 int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const | 1436 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 1624 { | 1437 " API call not supported on this platform"); |
| 1625 | 1438 return -1; |
| 1626 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | 1439 } |
| 1627 " API call not supported on this platform"); | 1440 |
| 1628 return -1; | 1441 bool AudioDeviceLinuxPulse::PlayoutWarning() const { |
| 1629 } | |
| 1630 | |
| 1631 bool AudioDeviceLinuxPulse::PlayoutWarning() const | |
| 1632 { | |
| 1633 rtc::CritScope lock(&_critSect); | 1442 rtc::CritScope lock(&_critSect); |
| 1634 return (_playWarning > 0); | 1443 return (_playWarning > 0); |
| 1635 } | 1444 } |
| 1636 | 1445 |
| 1637 bool AudioDeviceLinuxPulse::PlayoutError() const | 1446 bool AudioDeviceLinuxPulse::PlayoutError() const { |
| 1638 { | |
| 1639 rtc::CritScope lock(&_critSect); | 1447 rtc::CritScope lock(&_critSect); |
| 1640 return (_playError > 0); | 1448 return (_playError > 0); |
| 1641 } | 1449 } |
| 1642 | 1450 |
| 1643 bool AudioDeviceLinuxPulse::RecordingWarning() const | 1451 bool AudioDeviceLinuxPulse::RecordingWarning() const { |
| 1644 { | |
| 1645 rtc::CritScope lock(&_critSect); | 1452 rtc::CritScope lock(&_critSect); |
| 1646 return (_recWarning > 0); | 1453 return (_recWarning > 0); |
| 1647 } | 1454 } |
| 1648 | 1455 |
| 1649 bool AudioDeviceLinuxPulse::RecordingError() const | 1456 bool AudioDeviceLinuxPulse::RecordingError() const { |
| 1650 { | |
| 1651 rtc::CritScope lock(&_critSect); | 1457 rtc::CritScope lock(&_critSect); |
| 1652 return (_recError > 0); | 1458 return (_recError > 0); |
| 1653 } | 1459 } |
| 1654 | 1460 |
| 1655 void AudioDeviceLinuxPulse::ClearPlayoutWarning() | 1461 void AudioDeviceLinuxPulse::ClearPlayoutWarning() { |
| 1656 { | |
| 1657 rtc::CritScope lock(&_critSect); | 1462 rtc::CritScope lock(&_critSect); |
| 1658 _playWarning = 0; | 1463 _playWarning = 0; |
| 1659 } | 1464 } |
| 1660 | 1465 |
| 1661 void AudioDeviceLinuxPulse::ClearPlayoutError() | 1466 void AudioDeviceLinuxPulse::ClearPlayoutError() { |
| 1662 { | |
| 1663 rtc::CritScope lock(&_critSect); | 1467 rtc::CritScope lock(&_critSect); |
| 1664 _playError = 0; | 1468 _playError = 0; |
| 1665 } | 1469 } |
| 1666 | 1470 |
| 1667 void AudioDeviceLinuxPulse::ClearRecordingWarning() | 1471 void AudioDeviceLinuxPulse::ClearRecordingWarning() { |
| 1668 { | |
| 1669 rtc::CritScope lock(&_critSect); | 1472 rtc::CritScope lock(&_critSect); |
| 1670 _recWarning = 0; | 1473 _recWarning = 0; |
| 1671 } | 1474 } |
| 1672 | 1475 |
| 1673 void AudioDeviceLinuxPulse::ClearRecordingError() | 1476 void AudioDeviceLinuxPulse::ClearRecordingError() { |
| 1674 { | |
| 1675 rtc::CritScope lock(&_critSect); | 1477 rtc::CritScope lock(&_critSect); |
| 1676 _recError = 0; | 1478 _recError = 0; |
| 1677 } | 1479 } |
| 1678 | 1480 |
| 1679 // ============================================================================ | 1481 // ============================================================================ |
| 1680 // Private Methods | 1482 // Private Methods |
| 1681 // ============================================================================ | 1483 // ============================================================================ |
| 1682 | 1484 |
| 1683 void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context *c, void *pThis) | 1485 void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context* c, void* pThis) { |
| 1684 { | 1486 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaContextStateCallbackHandler(c); |
| 1685 static_cast<AudioDeviceLinuxPulse*> (pThis)-> | |
| 1686 PaContextStateCallbackHandler(c); | |
| 1687 } | 1487 } |
| 1688 | 1488 |
| 1689 // ---------------------------------------------------------------------------- | 1489 // ---------------------------------------------------------------------------- |
| 1690 // PaSinkInfoCallback | 1490 // PaSinkInfoCallback |
| 1691 // ---------------------------------------------------------------------------- | 1491 // ---------------------------------------------------------------------------- |
| 1692 | 1492 |
| 1693 void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context */*c*/, | 1493 void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/, |
| 1694 const pa_sink_info *i, int eol, | 1494 const pa_sink_info* i, |
| 1695 void *pThis) | 1495 int eol, |
| 1696 { | 1496 void* pThis) { |
| 1697 static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSinkInfoCallbackHandler( | 1497 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(i, eol); |
| 1698 i, eol); | 1498 } |
| 1699 } | 1499 |
| 1700 | 1500 void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/, |
| 1701 void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context */*c*/, | 1501 const pa_source_info* i, |
| 1702 const pa_source_info *i, | 1502 int eol, |
| 1703 int eol, void *pThis) | 1503 void* pThis) { |
| 1704 { | 1504 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(i, |
| 1705 static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSourceInfoCallbackHandler( | 1505 eol); |
| 1706 i, eol); | 1506 } |
| 1707 } | 1507 |
| 1708 | 1508 void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context* /*c*/, |
| 1709 void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context */*c*/, | 1509 const pa_server_info* i, |
| 1710 const pa_server_info *i, | 1510 void* pThis) { |
| 1711 void *pThis) | 1511 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaServerInfoCallbackHandler(i); |
| 1712 { | 1512 } |
| 1713 static_cast<AudioDeviceLinuxPulse*> (pThis)-> | 1513 |
| 1714 PaServerInfoCallbackHandler(i); | 1514 void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream* p, void* pThis) { |
| 1715 } | 1515 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamStateCallbackHandler(p); |
| 1716 | 1516 } |
| 1717 void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream *p, void *pThis) | 1517 |
| 1718 { | 1518 void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context* c) { |
| 1719 static_cast<AudioDeviceLinuxPulse*> (pThis)-> | 1519 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " context state cb"); |
| 1720 PaStreamStateCallbackHandler(p); | 1520 |
| 1721 } | 1521 pa_context_state_t state = LATE(pa_context_get_state)(c); |
| 1722 | 1522 switch (state) { |
void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context* c) {
  // Runs on the PulseAudio mainloop thread on every context state change.
  WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " context state cb");

  pa_context_state_t state = LATE(pa_context_get_state)(c);
  switch (state) {
    case PA_CONTEXT_UNCONNECTED:
      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " unconnected");
      break;
    case PA_CONTEXT_CONNECTING:
    case PA_CONTEXT_AUTHORIZING:
    case PA_CONTEXT_SETTING_NAME:
      // Transitional states: nobody is waiting on these, so no signal.
      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " no state");
      break;
    case PA_CONTEXT_FAILED:
    case PA_CONTEXT_TERMINATED:
      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " failed");
      // Terminal state: wake InitPulseAudio(), which blocks in
      // pa_threaded_mainloop_wait() until _paStateChanged becomes true and
      // then inspects the final context state itself.
      _paStateChanged = true;
      LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
      break;
    case PA_CONTEXT_READY:
      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " ready");
      // Same wake-up as above, but for a successful connection.
      _paStateChanged = true;
      LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
      break;
  }
}
| 1756 | 1556 |
void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info* i,
                                                      int eol) {
  // Invoked once per sink during enumeration and a final time with eol set.
  // When the running sink counter matches the requested _deviceIndex, the
  // sink's PA index and (optionally) its names are captured into members.
  if (eol) {
    // Signal that we are done
    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
    return;
  }

  if (_numPlayDevices == _deviceIndex) {
    // Convert the device index to the one of the sink
    _paDeviceIndex = i->index;

    if (_playDeviceName) {
      // Copy the sink name (truncate and always NUL-terminate).
      strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize);
      _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
    }
    if (_playDisplayDeviceName) {
      // Copy the sink display name
      strncpy(_playDisplayDeviceName, i->description, kAdmMaxDeviceNameSize);
      _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
    }
  }

  _numPlayDevices++;
}
| 1789 | 1589 _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(const pa_source_info* i,
                                                        int eol) {
  // Invoked once per source during enumeration and a final time with eol set.
  // Monitor sources (loopbacks of sinks) are skipped so only real capture
  // devices are counted against _deviceIndex.
  if (eol) {
    // Signal that we are done
    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
    return;
  }

  // We don't want to list output devices
  if (i->monitor_of_sink == PA_INVALID_INDEX) {
    if (_numRecDevices == _deviceIndex) {
      // Convert the device index to the one of the source
      _paDeviceIndex = i->index;

      if (_recDeviceName) {
        // copy the source name (truncate and always NUL-terminate).
        strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize);
        _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
      }
      if (_recDisplayDeviceName) {
        // Copy the source display name
        strncpy(_recDisplayDeviceName, i->description, kAdmMaxDeviceNameSize);
        _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
      }
    }

    _numRecDevices++;
  }
}
| 1827 | 1601 |
void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(
    const pa_server_info* i) {
  // Captures server-wide defaults: native sample rate, server version string,
  // and the names of the default source/sink (when the corresponding member
  // buffers were armed by the caller, e.g. GetDefaultDeviceInfo()).
  // Use PA native sampling rate
  sample_rate_hz_ = i->sample_spec.rate;

  // Copy the PA server version.
  // NOTE(review): the hard-coded 31/32 assumes _paServerVersion holds at
  // least 32 bytes — confirm against the member declaration.
  strncpy(_paServerVersion, i->server_version, 31);
  _paServerVersion[31] = '\0';

  if (_recDisplayDeviceName) {
    // Copy the source name
    strncpy(_recDisplayDeviceName, i->default_source_name,
            kAdmMaxDeviceNameSize);
    _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
  }

  if (_playDisplayDeviceName) {
    // Copy the sink name
    strncpy(_playDisplayDeviceName, i->default_sink_name,
            kAdmMaxDeviceNameSize);
    _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
  }

  // Wake the thread blocked in WaitForOperationCompletion().
  LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}
| 1856 | 1630 |
void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream* p) {
  // Runs on the PulseAudio mainloop thread on every stream state change;
  // logs the state and signals any thread waiting on the mainloop.
  WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stream state cb");

  pa_stream_state_t state = LATE(pa_stream_get_state)(p);
  switch (state) {
    case PA_STREAM_UNCONNECTED:
      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " unconnected");
      break;
    case PA_STREAM_CREATING:
      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " creating");
      break;
    case PA_STREAM_FAILED:
    case PA_STREAM_TERMINATED:
      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " failed");
      break;
    case PA_STREAM_READY:
      WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " ready");
      break;
  }

  // Unlike the context callback, signal on every state so waiters can
  // re-inspect the stream state themselves.
  LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}
| 1886 | 1660 WaitForOperationCompletion(paOperation); |
| 1887 int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() | 1661 |
| 1888 { | 1662 PaUnLock(); |
| 1889 PaLock(); | 1663 |
| 1890 | 1664 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, |
| 1891 pa_operation* paOperation = NULL; | 1665 " checking PulseAudio version: %s", _paServerVersion); |
| 1892 | 1666 |
| 1893 // get the server info and update deviceName | 1667 return 0; |
| 1894 paOperation = LATE(pa_context_get_server_info)(_paContext, | 1668 } |
| 1895 PaServerInfoCallback, | 1669 |
| 1896 this); | 1670 int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() { |
| 1897 | 1671 PaLock(); |
| 1898 WaitForOperationCompletion(paOperation); | 1672 |
| 1899 | 1673 pa_operation* paOperation = NULL; |
| 1900 PaUnLock(); | 1674 |
| 1901 | 1675 // Get the server info and update sample_rate_hz_ |
| 1902 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, | 1676 paOperation = |
| 1903 " checking PulseAudio version: %s", _paServerVersion); | 1677 LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this); |
| 1904 | 1678 |
| 1905 return 0; | 1679 WaitForOperationCompletion(paOperation); |
| 1906 } | 1680 |
| 1907 | 1681 PaUnLock(); |
| 1908 int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() | 1682 |
| 1909 { | 1683 return 0; |
| 1910 PaLock(); | |
| 1911 | |
| 1912 pa_operation* paOperation = NULL; | |
| 1913 | |
| 1914 // Get the server info and update sample_rate_hz_ | |
| 1915 paOperation = LATE(pa_context_get_server_info)(_paContext, | |
| 1916 PaServerInfoCallback, | |
| 1917 this); | |
| 1918 | |
| 1919 WaitForOperationCompletion(paOperation); | |
| 1920 | |
| 1921 PaUnLock(); | |
| 1922 | |
| 1923 return 0; | |
| 1924 } | 1684 } |
| 1925 | 1685 |
| 1926 int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice, | 1686 int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice, |
| 1927 char* name, | 1687 char* name, |
| 1928 uint16_t& index) | 1688 uint16_t& index) { |
| 1929 { | 1689 char tmpName[kAdmMaxDeviceNameSize] = {0}; |
| 1930 char tmpName[kAdmMaxDeviceNameSize] = {0}; | 1690 // subtract length of "default: " |
| 1931 // subtract length of "default: " | 1691 uint16_t nameLen = kAdmMaxDeviceNameSize - 9; |
| 1932 uint16_t nameLen = kAdmMaxDeviceNameSize - 9; | 1692 char* pName = NULL; |
| 1933 char* pName = NULL; | 1693 |
| 1934 | 1694 if (name) { |
| 1935 if (name) | 1695 // Add "default: " |
| 1936 { | 1696 strcpy(name, "default: "); |
| 1937 // Add "default: " | 1697 pName = &name[9]; |
| 1938 strcpy(name, "default: "); | 1698 } |
| 1939 pName = &name[9]; | 1699 |
| 1940 } | 1700 // Tell the callback that we want |
| 1941 | 1701 // the name for this device |
| 1942 // Tell the callback that we want | 1702 if (recDevice) { |
| 1943 // the name for this device | 1703 _recDisplayDeviceName = tmpName; |
| 1944 if (recDevice) | 1704 } else { |
| 1945 { | 1705 _playDisplayDeviceName = tmpName; |
| 1946 _recDisplayDeviceName = tmpName; | 1706 } |
| 1947 } else | 1707 |
| 1948 { | 1708 // Set members |
| 1949 _playDisplayDeviceName = tmpName; | 1709 _paDeviceIndex = -1; |
| 1950 } | 1710 _deviceIndex = 0; |
| 1951 | 1711 _numPlayDevices = 0; |
| 1952 // Set members | 1712 _numRecDevices = 0; |
| 1953 _paDeviceIndex = -1; | 1713 |
| 1954 _deviceIndex = 0; | 1714 PaLock(); |
| 1955 _numPlayDevices = 0; | 1715 |
| 1956 _numRecDevices = 0; | 1716 pa_operation* paOperation = NULL; |
| 1957 | 1717 |
| 1958 PaLock(); | 1718 // Get the server info and update deviceName |
| 1959 | 1719 paOperation = |
| 1960 pa_operation* paOperation = NULL; | 1720 LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this); |
| 1961 | 1721 |
| 1962 // Get the server info and update deviceName | 1722 WaitForOperationCompletion(paOperation); |
| 1963 paOperation = LATE(pa_context_get_server_info)(_paContext, | 1723 |
| 1964 PaServerInfoCallback, | 1724 // Get the device index |
| 1965 this); | 1725 if (recDevice) { |
| 1966 | 1726 paOperation = LATE(pa_context_get_source_info_by_name)( |
| 1967 WaitForOperationCompletion(paOperation); | 1727 _paContext, (char*)tmpName, PaSourceInfoCallback, this); |
| 1968 | 1728 } else { |
| 1969 // Get the device index | 1729 paOperation = LATE(pa_context_get_sink_info_by_name)( |
| 1970 if (recDevice) | 1730 _paContext, (char*)tmpName, PaSinkInfoCallback, this); |
| 1971 { | 1731 } |
| 1972 paOperation | 1732 |
| 1973 = LATE(pa_context_get_source_info_by_name)(_paContext, | 1733 WaitForOperationCompletion(paOperation); |
| 1974 (char *) tmpName, | 1734 |
| 1975 PaSourceInfoCallback, | 1735 PaUnLock(); |
| 1976 this); | 1736 |
| 1977 } else | 1737 // Set the index |
| 1978 { | 1738 index = _paDeviceIndex; |
| 1979 paOperation | 1739 |
| 1980 = LATE(pa_context_get_sink_info_by_name)(_paContext, | 1740 if (name) { |
| 1981 (char *) tmpName, | 1741 // Copy to name string |
| 1982 PaSinkInfoCallback, | 1742 strncpy(pName, tmpName, nameLen); |
| 1983 this); | 1743 } |
| 1984 } | 1744 |
| 1985 | 1745 // Clear members |
| 1986 WaitForOperationCompletion(paOperation); | 1746 _playDisplayDeviceName = NULL; |
| 1987 | 1747 _recDisplayDeviceName = NULL; |
| 1748 _paDeviceIndex = -1; |
| 1749 _deviceIndex = -1; |
| 1750 _numPlayDevices = 0; |
| 1751 _numRecDevices = 0; |
| 1752 |
| 1753 return 0; |
| 1754 } |
| 1755 |
int32_t AudioDeviceLinuxPulse::InitPulseAudio() {
  // Loads libpulse, starts the threaded mainloop, creates and connects a
  // context, and hands both to the mixer manager. Every step is order-
  // dependent; on any failure the function returns -1 with the mainloop
  // still running (TerminatePulseAudio() performs cleanup).
  int retVal = 0;

  // Load libpulse
  if (!PaSymbolTable.Load()) {
    // Most likely the Pulse library and sound server are not installed on
    // this system
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " failed to load symbol table");
    return -1;
  }

  // Create a mainloop API and connection to the default server
  // the mainloop is the internal asynchronous API event loop
  if (_paMainloop) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " PA mainloop has already existed");
    return -1;
  }
  _paMainloop = LATE(pa_threaded_mainloop_new)();
  if (!_paMainloop) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " could not create mainloop");
    return -1;
  }

  // Start the threaded main loop
  retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
  if (retVal != PA_OK) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " failed to start main loop, error=%d", retVal);
    return -1;
  }

  WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " mainloop running!");

  PaLock();

  _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
  if (!_paMainloopApi) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " could not create mainloop API");
    PaUnLock();
    return -1;
  }

  // Create a new PulseAudio context
  if (_paContext) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " PA context has already existed");
    PaUnLock();
    return -1;
  }
  _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");

  if (!_paContext) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " could not create context");
    PaUnLock();
    return -1;
  }

  // Set state callback function
  LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback, this);

  // Connect the context to a server (default)
  _paStateChanged = false;
  retVal =
      LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL);

  if (retVal != PA_OK) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " failed to connect context, error=%d", retVal);
    PaUnLock();
    return -1;
  }

  // Wait for state change. PaContextStateCallbackHandler() sets
  // _paStateChanged and signals the mainloop on a terminal state.
  while (!_paStateChanged) {
    LATE(pa_threaded_mainloop_wait)(_paMainloop);
  }

  // Now check to see what final state we reached.
  pa_context_state_t state = LATE(pa_context_get_state)(_paContext);

  if (state != PA_CONTEXT_READY) {
    if (state == PA_CONTEXT_FAILED) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   " failed to connect to PulseAudio sound server");
    } else if (state == PA_CONTEXT_TERMINATED) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   " PulseAudio connection terminated early");
    } else {
      // Shouldn't happen, because we only signal on one of those three
      // states
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                   " unknown problem connecting to PulseAudio");
    }
    PaUnLock();
    return -1;
  }

  PaUnLock();

  // Give the objects to the mixer manager
  _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);

  // Check the version
  if (CheckPulseAudioVersion() < 0) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " PulseAudio version %s not supported", _paServerVersion);
    return -1;
  }

  // Initialize sampling frequency
  if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " failed to initialize sampling frequency,"
                 " set to %d Hz",
                 sample_rate_hz_);
    return -1;
  }

  return 0;
}
| 1881 |
int32_t AudioDeviceLinuxPulse::TerminatePulseAudio() {
  // Tears down everything InitPulseAudio() built, in reverse order:
  // disconnect/unref the context, then stop and free the threaded mainloop.
  // Do nothing if the instance doesn't exist
  // likely PaSymbolTable.Load() fails
  if (!_paMainloop) {
    return 0;
  }

  PaLock();

  // Disconnect the context
  if (_paContext) {
    LATE(pa_context_disconnect)(_paContext);
  }

  // Unreference the context
  if (_paContext) {
    LATE(pa_context_unref)(_paContext);
  }

  PaUnLock();
  _paContext = NULL;

  // Stop the threaded main loop (must happen outside the mainloop lock).
  if (_paMainloop) {
    LATE(pa_threaded_mainloop_stop)(_paMainloop);
  }

  // Free the mainloop
  if (_paMainloop) {
    LATE(pa_threaded_mainloop_free)(_paMainloop);
  }

  _paMainloop = NULL;

  WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " PulseAudio terminated");

  return 0;
}
void AudioDeviceLinuxPulse::PaLock() {
  // Acquire the PulseAudio threaded-mainloop lock; paired with PaUnLock().
  LATE(pa_threaded_mainloop_lock)(_paMainloop);
}
| 2045 return -1; | 1924 |
void AudioDeviceLinuxPulse::PaUnLock() {
  // Release the PulseAudio threaded-mainloop lock taken by PaLock().
  LATE(pa_threaded_mainloop_unlock)(_paMainloop);
}
| 2209 | 1928 |
void AudioDeviceLinuxPulse::WaitForOperationCompletion(
    pa_operation* paOperation) const {
  // Blocks on the mainloop until |paOperation| leaves PA_OPERATION_RUNNING,
  // then drops our reference. The caller must hold the mainloop lock
  // (PaLock()), which pa_threaded_mainloop_wait() releases while waiting.
  if (!paOperation) {
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 "paOperation NULL in WaitForOperationCompletion");
    return;
  }

  while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) {
    LATE(pa_threaded_mainloop_wait)(_paMainloop);
  }

  LATE(pa_operation_unref)(paOperation);
}
| 2227 | 1943 |
| 2228 // ============================================================================ | 1944 // ============================================================================ |
| 2229 // Thread Methods | 1945 // Thread Methods |
| 2230 // ============================================================================ | 1946 // ============================================================================ |
| 2231 | 1947 |
void AudioDeviceLinuxPulse::EnableWriteCallback() {
  // Re-arm the playout write callback, or dispatch a write event directly if
  // the stream already has writable space (a freshly registered callback
  // would otherwise never fire for space that is already available).
  if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY) {
    // May already have available space. Must check.
    _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream);
    if (_tempBufferSpace > 0) {
      // Yup, there is already space available, so if we register a
      // write callback then it will not receive any event. So dispatch
      // one ourself instead.
      _timeEventPlay.Set();
      return;
    }
  }

  LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback, this);
}
| 1963 |
| 1964 void AudioDeviceLinuxPulse::DisableWriteCallback() { |
| 1965 LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL); |
| 1966 } |
| 1967 |
| 1968 void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream* /*unused*/, |
| 1969 size_t buffer_space, |
| 1970 void* pThis) { |
| 1971 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamWriteCallbackHandler( |
| 1972 buffer_space); |
| 1973 } |
| 1974 |
| 1975 void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace) { |
| 1976 _tempBufferSpace = bufferSpace; |
| 1977 |
| 1978 // Since we write the data asynchronously on a different thread, we have |
| 1979 // to temporarily disable the write callback or else Pulse will call it |
| 1980 // continuously until we write the data. We re-enable it below. |
| 1981 DisableWriteCallback(); |
| 1982 _timeEventPlay.Set(); |
| 1983 } |
| 1984 |
| 1985 void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream* /*unused*/, |
| 1986 void* pThis) { |
| 1987 static_cast<AudioDeviceLinuxPulse*>(pThis) |
| 1988 ->PaStreamUnderflowCallbackHandler(); |
| 1989 } |
| 1990 |
| 1991 void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() { |
| 1992 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " Playout underflow"); |
| 1993 |
| 1994 if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { |
| 1995 // We didn't configure a pa_buffer_attr before, so switching to |
| 1996 // one now would be questionable. |
| 1997 return; |
| 1998 } |
| 1999 |
| 2000 // Otherwise reconfigure the stream with a higher target latency. |
| 2001 |
| 2002 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); |
| 2003 if (!spec) { |
| 2004 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2005 " pa_stream_get_sample_spec()"); |
| 2006 return; |
| 2007 } |
| 2008 |
| 2009 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); |
| 2010 uint32_t newLatency = |
| 2011 _configuredLatencyPlay + bytesPerSec * |
| 2012 WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / |
| 2013 WEBRTC_PA_MSECS_PER_SEC; |
| 2014 |
| 2015 // Set the play buffer attributes |
| 2016 _playBufferAttr.maxlength = newLatency; |
| 2017 _playBufferAttr.tlength = newLatency; |
| 2018 _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; |
| 2019 _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq; |
| 2020 |
| 2021 pa_operation* op = LATE(pa_stream_set_buffer_attr)( |
| 2022 _playStream, &_playBufferAttr, NULL, NULL); |
| 2023 if (!op) { |
| 2024 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2025 " pa_stream_set_buffer_attr()"); |
| 2026 return; |
| 2027 } |
| 2028 |
| 2029 // Don't need to wait for this to complete. |
| 2030 LATE(pa_operation_unref)(op); |
| 2031 |
| 2032 // Save the new latency in case we underflow again. |
| 2033 _configuredLatencyPlay = newLatency; |
| 2034 } |
| 2035 |
| 2036 void AudioDeviceLinuxPulse::EnableReadCallback() { |
| 2037 LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this); |
| 2038 } |
| 2039 |
| 2040 void AudioDeviceLinuxPulse::DisableReadCallback() { |
| 2041 LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL); |
| 2042 } |
| 2043 |
| 2044 void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream* /*unused1*/, |
| 2045 size_t /*unused2*/, |
| 2046 void* pThis) { |
| 2047 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamReadCallbackHandler(); |
| 2048 } |
| 2049 |
| 2050 void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() { |
| 2051 // We get the data pointer and size now in order to save one Lock/Unlock |
| 2052 // in the worker thread. |
| 2053 if (LATE(pa_stream_peek)(_recStream, &_tempSampleData, |
| 2054 &_tempSampleDataSize) != 0) { |
| 2055 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Can't read data!"); |
| 2056 return; |
| 2057 } |
| 2058 |
| 2059 // Since we consume the data asynchronously on a different thread, we have |
| 2060 // to temporarily disable the read callback or else Pulse will call it |
| 2061 // continuously until we consume the data. We re-enable it below. |
| 2062 DisableReadCallback(); |
| 2063 _timeEventRec.Set(); |
| 2064 } |
| 2065 |
| 2066 void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream* /*unused*/, |
| 2067 void* pThis) { |
| 2068 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamOverflowCallbackHandler(); |
| 2069 } |
| 2070 |
| 2071 void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() { |
| 2072 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " Recording overflow"); |
| 2073 } |
| 2074 |
| 2075 int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream* stream) { |
| 2076 if (!WEBRTC_PA_REPORT_LATENCY) { |
| 2077 return 0; |
| 2078 } |
| 2079 |
| 2080 if (!stream) { |
| 2081 return 0; |
| 2082 } |
| 2083 |
| 2084 pa_usec_t latency; |
| 2085 int negative; |
| 2086 if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) { |
| 2087 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Can't query latency"); |
| 2088 // We'd rather continue playout/capture with an incorrect delay than |
| 2089 // stop it altogether, so return a valid value. |
| 2090 return 0; |
| 2091 } |
| 2092 |
| 2093 if (negative) { |
| 2094 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
| 2095 " warning: pa_stream_get_latency reported negative " |
| 2096 "delay"); |
| 2097 |
| 2098 // The delay can be negative for monitoring streams if the captured |
| 2099 // samples haven't been played yet. In such a case, "latency" |
| 2100 // contains the magnitude, so we must negate it to get the real value. |
| 2101 int32_t tmpLatency = (int32_t)-latency; |
| 2102 if (tmpLatency < 0) { |
| 2103 // Make sure that we don't use a negative delay. |
| 2104 tmpLatency = 0; |
| 2105 } |
| 2106 |
| 2107 return tmpLatency; |
| 2108 } else { |
| 2109 return (int32_t)latency; |
| 2110 } |
| 2111 } |
| 2112 |
| 2113 int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData, |
| 2114 size_t bufferSize) |
| 2115 EXCLUSIVE_LOCKS_REQUIRED(_critSect) { |
| 2116 size_t size = bufferSize; |
| 2117 uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels); |
| 2118 |
| 2119 // Account for the peeked data and the used data. |
| 2120 uint32_t recDelay = |
| 2121 (uint32_t)((LatencyUsecs(_recStream) / 1000) + |
| 2122 10 * ((size + _recordBufferUsed) / _recordBufferSize)); |
| 2123 |
| 2124 _sndCardRecDelay = recDelay; |
| 2125 |
| 2126 if (_playStream) { |
| 2127 // Get the playout delay. |
| 2128 _sndCardPlayDelay = (uint32_t)(LatencyUsecs(_playStream) / 1000); |
| 2129 } |
| 2130 |
| 2131 if (_recordBufferUsed > 0) { |
| 2132 // Have to copy to the buffer until it is full. |
| 2133 size_t copy = _recordBufferSize - _recordBufferUsed; |
| 2134 if (size < copy) { |
| 2135 copy = size; |
| 2136 } |
| 2137 |
| 2138 memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy); |
| 2139 _recordBufferUsed += copy; |
| 2140 bufferData = static_cast<const char*>(bufferData) + copy; |
| 2141 size -= copy; |
| 2142 |
| 2143 if (_recordBufferUsed != _recordBufferSize) { |
| 2144 // Not enough data yet to pass to VoE. |
| 2145 return 0; |
| 2146 } |
| 2147 |
| 2148 // Provide data to VoiceEngine. |
| 2149 if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1) { |
| 2150 // We have stopped recording. |
| 2151 return -1; |
| 2152 } |
| 2153 |
| 2154 _recordBufferUsed = 0; |
| 2155 } |
| 2156 |
| 2157 // Now process full 10ms sample sets directly from the input. |
| 2158 while (size >= _recordBufferSize) { |
| 2159 // Provide data to VoiceEngine. |
| 2160 if (ProcessRecordedData(static_cast<int8_t*>(const_cast<void*>(bufferData)), |
| 2161 numRecSamples, recDelay) == -1) { |
| 2162 // We have stopped recording. |
| 2163 return -1; |
| 2164 } |
| 2165 |
| 2166 bufferData = static_cast<const char*>(bufferData) + _recordBufferSize; |
| 2167 size -= _recordBufferSize; |
| 2168 |
| 2169 // We have consumed 10ms of data. |
| 2170 recDelay -= 10; |
| 2171 } |
| 2172 |
| 2173 // Now save any leftovers for later. |
| 2174 if (size > 0) { |
| 2175 memcpy(_recBuffer, bufferData, size); |
| 2176 _recordBufferUsed = size; |
| 2177 } |
| 2178 |
| 2179 return 0; |
| 2180 } |
| 2181 |
| 2182 int32_t AudioDeviceLinuxPulse::ProcessRecordedData(int8_t* bufferData, |
| 2183 uint32_t bufferSizeInSamples, |
| 2184 uint32_t recDelay) |
| 2185 EXCLUSIVE_LOCKS_REQUIRED(_critSect) { |
| 2186 uint32_t currentMicLevel(0); |
| 2187 uint32_t newMicLevel(0); |
| 2188 |
| 2189 _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples); |
| 2190 |
| 2191 if (AGC()) { |
| 2192 // Store current mic level in the audio buffer if AGC is enabled |
| 2193 if (MicrophoneVolume(currentMicLevel) == 0) { |
| 2194 // This call does not affect the actual microphone volume |
| 2195 _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel); |
| 2196 } |
| 2197 } |
| 2198 |
| 2199 const uint32_t clockDrift(0); |
| 2200 // TODO(andrew): this is a temporary hack, to avoid non-causal far- and |
| 2201 // near-end signals at the AEC for PulseAudio. I think the system delay is |
| 2202 // being correctly calculated here, but for legacy reasons we add +10 ms |
| 2203 // to the value in the AEC. The real fix will be part of a larger |
| 2204 // investigation into managing system delay in the AEC. |
| 2205 if (recDelay > 10) |
| 2206 recDelay -= 10; |
| 2207 else |
| 2208 recDelay = 0; |
| 2209 _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift); |
| 2210 _ptrAudioBuffer->SetTypingStatus(KeyPressed()); |
| 2211 // Deliver recorded samples at specified sample rate, |
| 2212 // mic level etc. to the observer using callback. |
| 2213 UnLock(); |
| 2214 _ptrAudioBuffer->DeliverRecordedData(); |
| 2215 Lock(); |
| 2216 |
| 2217 // We have been unlocked - check the flag again. |
| 2218 if (!_recording) { |
| 2219 return -1; |
| 2220 } |
| 2221 |
| 2222 if (AGC()) { |
| 2223 newMicLevel = _ptrAudioBuffer->NewMicLevel(); |
| 2224 if (newMicLevel != 0) { |
| 2225 // The VQE will only deliver non-zero microphone levels when a |
| 2226 // change is needed. |
| 2227 // Set this new mic level (received from the observer as return |
| 2228 // value in the callback). |
| 2229 WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, |
| 2230 " AGC change of volume: old=%u => new=%u", currentMicLevel, |
| 2231 newMicLevel); |
| 2232 if (SetMicrophoneVolume(newMicLevel) == -1) { |
| 2233 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 2234 " the required modification of the microphone " |
| 2235 "volume failed"); |
| 2236 } |
| 2237 } |
| 2238 } |
| 2239 |
| 2240 return 0; |
| 2241 } |
| 2242 |
| 2243 bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis) { |
| 2244 return (static_cast<AudioDeviceLinuxPulse*>(pThis)->PlayThreadProcess()); |
| 2245 } |
| 2246 |
| 2247 bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis) { |
| 2248 return (static_cast<AudioDeviceLinuxPulse*>(pThis)->RecThreadProcess()); |
| 2249 } |
| 2250 |
| 2251 bool AudioDeviceLinuxPulse::PlayThreadProcess() { |
| 2252 switch (_timeEventPlay.Wait(1000)) { |
| 2253 case kEventSignaled: |
| 2254 break; |
| 2255 case kEventError: |
| 2256 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 2257 "EventWrapper::Wait() failed"); |
| 2258 return true; |
| 2259 case kEventTimeout: |
| 2260 return true; |
| 2261 } |
| 2262 |
| 2263 rtc::CritScope lock(&_critSect); |
| 2264 |
| 2265 if (_startPlay) { |
| 2266 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 2267 "_startPlay true, performing initial actions"); |
| 2268 |
| 2269 _startPlay = false; |
| 2270 _playDeviceName = NULL; |
| 2271 |
| 2272 // Set if not default device |
| 2273 if (_outputDeviceIndex > 0) { |
| 2274 // Get the playout device name |
| 2275 _playDeviceName = new char[kAdmMaxDeviceNameSize]; |
| 2276 _deviceIndex = _outputDeviceIndex; |
| 2277 PlayoutDevices(); |
| 2278 } |
| 2279 |
| 2280 // Start muted only supported on 0.9.11 and up |
| 2281 if (LATE(pa_context_get_protocol_version)(_paContext) >= |
| 2282 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { |
| 2283 // Get the currently saved speaker mute status |
| 2284 // and set the initial mute status accordingly |
| 2285 bool enabled(false); |
| 2286 _mixerManager.SpeakerMute(enabled); |
| 2287 if (enabled) { |
| 2288 _playStreamFlags |= PA_STREAM_START_MUTED; |
| 2289 } |
| 2290 } |
| 2291 |
| 2292 // Get the currently saved speaker volume |
| 2293 uint32_t volume = 0; |
| 2294 if (update_speaker_volume_at_startup_) |
| 2295 _mixerManager.SpeakerVolume(volume); |
| 2296 |
| 2297 PaLock(); |
| 2298 |
| 2299 // NULL gives PA the choice of startup volume. |
| 2300 pa_cvolume* ptr_cvolume = NULL; |
| 2301 if (update_speaker_volume_at_startup_) { |
| 2302 pa_cvolume cVolumes; |
| 2303 ptr_cvolume = &cVolumes; |
| 2304 |
| 2305 // Set the same volume for all channels |
| 2306 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); |
| 2307 LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume); |
| 2308 update_speaker_volume_at_startup_ = false; |
| 2309 } |
| 2310 |
| 2311 // Connect the stream to a sink |
| 2312 if (LATE(pa_stream_connect_playback)( |
| 2313 _playStream, _playDeviceName, &_playBufferAttr, |
| 2314 (pa_stream_flags_t)_playStreamFlags, ptr_cvolume, NULL) != PA_OK) { |
| 2315 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2316 " failed to connect play stream, err=%d", |
| 2317 LATE(pa_context_errno)(_paContext)); |
| 2318 } |
| 2319 |
| 2320 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
| 2321 " play stream connected"); |
| 2322 |
| 2323 // Wait for state change |
| 2324 while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) { |
| 2325 LATE(pa_threaded_mainloop_wait)(_paMainloop); |
| 2326 } |
| 2327 |
| 2328 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " play stream ready"); |
| 2329 |
| 2330 // We can now handle write callbacks |
| 2331 EnableWriteCallback(); |
| 2332 |
| 2333 PaUnLock(); |
| 2334 |
| 2335 // Clear device name |
| 2336 if (_playDeviceName) { |
| 2337 delete[] _playDeviceName; |
| 2338 _playDeviceName = NULL; |
| 2339 } |
| 2340 |
| 2341 _playing = true; |
| 2342 _playStartEvent.Set(); |
| 2343 |
| 2344 return true; |
| 2345 } |
| 2346 |
| 2347 if (_playing) { |
| 2348 if (!_recording) { |
| 2349 // Update the playout delay |
| 2350 _sndCardPlayDelay = (uint32_t)(LatencyUsecs(_playStream) / 1000); |
| 2351 } |
| 2352 |
| 2353 if (_playbackBufferUnused < _playbackBufferSize) { |
| 2354 size_t write = _playbackBufferSize - _playbackBufferUnused; |
| 2355 if (_tempBufferSpace < write) { |
| 2356 write = _tempBufferSpace; |
| 2357 } |
| 2358 |
| 2359 PaLock(); |
| 2360 if (LATE(pa_stream_write)( |
| 2361 _playStream, (void*)&_playBuffer[_playbackBufferUnused], write, |
| 2362 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) { |
| 2363 _writeErrors++; |
| 2364 if (_writeErrors > 10) { |
| 2365 if (_playError == 1) { |
| 2366 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, |
| 2367 " pending playout error exists"); |
| 2368 } |
| 2369 // Triggers callback from module process thread. |
| 2370 _playError = 1; |
| 2371 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, |
| 2372 " kPlayoutError message posted: " |
| 2373 "_writeErrors=%u, error=%d", |
| 2374 _writeErrors, LATE(pa_context_errno)(_paContext)); |
| 2375 _writeErrors = 0; |
| 2245 } | 2376 } |
| 2246 } | 2377 } |
| 2247 | 2378 PaUnLock(); |
| 2248 LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback, | 2379 |
| 2249 this); | 2380 _playbackBufferUnused += write; |
| 2250 } | 2381 _tempBufferSpace -= write; |
| 2251 | 2382 } |
| 2252 void AudioDeviceLinuxPulse::DisableWriteCallback() | 2383 |
| 2253 { | 2384 uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels); |
| 2254 LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL); | 2385 // Might have been reduced to zero by the above. |
| 2255 } | 2386 if (_tempBufferSpace > 0) { |
| 2256 | 2387 // Ask for new PCM data to be played out using the |
| 2257 void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream */*unused*/, | 2388 // AudioDeviceBuffer ensure that this callback is executed |
| 2258 size_t buffer_space, | 2389 // without taking the audio-thread lock. |
| 2259 void *pThis) | 2390 UnLock(); |
| 2260 { | 2391 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " requesting data"); |
| 2261 static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamWriteCallbackHandler( | 2392 uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(numPlaySamples); |
| 2262 buffer_space); | 2393 Lock(); |
| 2263 } | 2394 |
| 2264 | 2395 // We have been unlocked - check the flag again. |
| 2265 void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace) | 2396 if (!_playing) { |
| 2266 { | 2397 return true; |
| 2267 _tempBufferSpace = bufferSpace; | 2398 } |
| 2268 | 2399 |
| 2269 // Since we write the data asynchronously on a different thread, we have | 2400 nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer); |
| 2270 // to temporarily disable the write callback or else Pulse will call it | 2401 if (nSamples != numPlaySamples) { |
| 2271 // continuously until we write the data. We re-enable it below. | |
| 2272 DisableWriteCallback(); | |
| 2273 _timeEventPlay.Set(); | |
| 2274 } | |
| 2275 | |
| 2276 void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream */*unused*/, | |
| 2277 void *pThis) | |
| 2278 { | |
| 2279 static_cast<AudioDeviceLinuxPulse*> (pThis)-> | |
| 2280 PaStreamUnderflowCallbackHandler(); | |
| 2281 } | |
| 2282 | |
| 2283 void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() | |
| 2284 { | |
| 2285 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
| 2286 " Playout underflow"); | |
| 2287 | |
| 2288 if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) | |
| 2289 { | |
| 2290 // We didn't configure a pa_buffer_attr before, so switching to | |
| 2291 // one now would be questionable. | |
| 2292 return; | |
| 2293 } | |
| 2294 | |
| 2295 // Otherwise reconfigure the stream with a higher target latency. | |
| 2296 | |
| 2297 const pa_sample_spec *spec = LATE(pa_stream_get_sample_spec)(_playStream); | |
| 2298 if (!spec) | |
| 2299 { | |
| 2300 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2402 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2301 " pa_stream_get_sample_spec()"); | 2403 " invalid number of output samples(%d)", nSamples); |
| 2302 return; | 2404 } |
| 2303 } | 2405 |
| 2304 | 2406 size_t write = _playbackBufferSize; |
| 2305 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); | 2407 if (_tempBufferSpace < write) { |
| 2306 uint32_t newLatency = _configuredLatencyPlay + bytesPerSec * | 2408 write = _tempBufferSpace; |
| 2307 WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / | 2409 } |
| 2308 WEBRTC_PA_MSECS_PER_SEC; | 2410 |
| 2309 | 2411 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " will write"); |
| 2310 // Set the play buffer attributes | 2412 PaLock(); |
| 2311 _playBufferAttr.maxlength = newLatency; | 2413 if (LATE(pa_stream_write)(_playStream, (void*)&_playBuffer[0], write, |
| 2312 _playBufferAttr.tlength = newLatency; | 2414 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) { |
| 2313 _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; | 2415 _writeErrors++; |
| 2314 _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq; | 2416 if (_writeErrors > 10) { |
| 2315 | 2417 if (_playError == 1) { |
| 2316 pa_operation *op = LATE(pa_stream_set_buffer_attr)(_playStream, | 2418 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, |
| 2317 &_playBufferAttr, NULL, | 2419 " pending playout error exists"); |
| 2318 NULL); | 2420 } |
| 2319 if (!op) | 2421 // Triggers callback from module process thread. |
| 2320 { | 2422 _playError = 1; |
| 2423 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, |
| 2424 " kPlayoutError message posted: " |
| 2425 "_writeErrors=%u, error=%d", |
| 2426 _writeErrors, LATE(pa_context_errno)(_paContext)); |
| 2427 _writeErrors = 0; |
| 2428 } |
| 2429 } |
| 2430 PaUnLock(); |
| 2431 |
| 2432 _playbackBufferUnused = write; |
| 2433 } |
| 2434 |
| 2435 _tempBufferSpace = 0; |
| 2436 PaLock(); |
| 2437 EnableWriteCallback(); |
| 2438 PaUnLock(); |
| 2439 |
| 2440 } // _playing |
| 2441 |
| 2442 return true; |
| 2443 } |
| 2444 |
| 2445 bool AudioDeviceLinuxPulse::RecThreadProcess() { |
| 2446 switch (_timeEventRec.Wait(1000)) { |
| 2447 case kEventSignaled: |
| 2448 break; |
| 2449 case kEventError: |
| 2450 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 2451 "EventWrapper::Wait() failed"); |
| 2452 return true; |
| 2453 case kEventTimeout: |
| 2454 return true; |
| 2455 } |
| 2456 |
| 2457 rtc::CritScope lock(&_critSect); |
| 2458 |
| 2459 if (_startRec) { |
| 2460 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
| 2461 "_startRec true, performing initial actions"); |
| 2462 |
| 2463 _recDeviceName = NULL; |
| 2464 |
| 2465 // Set if not default device |
| 2466 if (_inputDeviceIndex > 0) { |
| 2467 // Get the recording device name |
| 2468 _recDeviceName = new char[kAdmMaxDeviceNameSize]; |
| 2469 _deviceIndex = _inputDeviceIndex; |
| 2470 RecordingDevices(); |
| 2471 } |
| 2472 |
| 2473 PaLock(); |
| 2474 |
| 2475 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " connecting stream"); |
| 2476 |
| 2477 // Connect the stream to a source |
| 2478 if (LATE(pa_stream_connect_record)( |
| 2479 _recStream, _recDeviceName, &_recBufferAttr, |
| 2480 (pa_stream_flags_t)_recStreamFlags) != PA_OK) { |
| 2481 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2482 " failed to connect rec stream, err=%d", |
| 2483 LATE(pa_context_errno)(_paContext)); |
| 2484 } |
| 2485 |
| 2486 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " connected"); |
| 2487 |
| 2488 // Wait for state change |
| 2489 while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) { |
| 2490 LATE(pa_threaded_mainloop_wait)(_paMainloop); |
| 2491 } |
| 2492 |
| 2493 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " done"); |
| 2494 |
| 2495 // We can now handle read callbacks |
| 2496 EnableReadCallback(); |
| 2497 |
| 2498 PaUnLock(); |
| 2499 |
| 2500 // Clear device name |
| 2501 if (_recDeviceName) { |
| 2502 delete[] _recDeviceName; |
| 2503 _recDeviceName = NULL; |
| 2504 } |
| 2505 |
| 2506 _startRec = false; |
| 2507 _recording = true; |
| 2508 _recStartEvent.Set(); |
| 2509 |
| 2510 return true; |
| 2511 } |
| 2512 |
| 2513 if (_recording) { |
| 2514 // Read data and provide it to VoiceEngine |
| 2515 if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1) { |
| 2516 return true; |
| 2517 } |
| 2518 |
| 2519 _tempSampleData = NULL; |
| 2520 _tempSampleDataSize = 0; |
| 2521 |
| 2522 PaLock(); |
| 2523 while (true) { |
| 2524 // Ack the last thing we read |
| 2525 if (LATE(pa_stream_drop)(_recStream) != 0) { |
| 2526 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
| 2527 " failed to drop, err=%d\n", |
| 2528 LATE(pa_context_errno)(_paContext)); |
| 2529 } |
| 2530 |
| 2531 if (LATE(pa_stream_readable_size)(_recStream) <= 0) { |
| 2532 // Then that was all the data |
| 2533 break; |
| 2534 } |
| 2535 |
| 2536 // Else more data. |
| 2537 const void* sampleData; |
| 2538 size_t sampleDataSize; |
| 2539 |
| 2540 if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) != 0) { |
| 2541 _recError = 1; // triggers callback from module process thread |
| 2321 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | 2542 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
| 2322 " pa_stream_set_buffer_attr()"); | 2543 " RECORD_ERROR message posted, error = %d", |
| 2323 return; | 2544 LATE(pa_context_errno)(_paContext)); |
| 2324 } | 2545 break; |
| 2325 | 2546 } |
| 2326 // Don't need to wait for this to complete. | 2547 |
| 2327 LATE(pa_operation_unref)(op); | 2548 _sndCardRecDelay = (uint32_t)(LatencyUsecs(_recStream) / 1000); |
| 2328 | 2549 |
| 2329 // Save the new latency in case we underflow again. | 2550 // Drop lock for sigslot dispatch, which could take a while. |
| 2330 _configuredLatencyPlay = newLatency; | 2551 PaUnLock(); |
| 2331 } | 2552 // Read data and provide it to VoiceEngine |
| 2332 | 2553 if (ReadRecordedData(sampleData, sampleDataSize) == -1) { |
| 2333 void AudioDeviceLinuxPulse::EnableReadCallback() | |
| 2334 { | |
| 2335 LATE(pa_stream_set_read_callback)(_recStream, | |
| 2336 &PaStreamReadCallback, | |
| 2337 this); | |
| 2338 } | |
| 2339 | |
| 2340 void AudioDeviceLinuxPulse::DisableReadCallback() | |
| 2341 { | |
| 2342 LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL); | |
| 2343 } | |
| 2344 | |
| 2345 void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream */*unused1*/, | |
| 2346 size_t /*unused2*/, | |
| 2347 void *pThis) | |
| 2348 { | |
| 2349 static_cast<AudioDeviceLinuxPulse*> (pThis)-> | |
| 2350 PaStreamReadCallbackHandler(); | |
| 2351 } | |
| 2352 | |
| 2353 void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() | |
| 2354 { | |
| 2355 // We get the data pointer and size now in order to save one Lock/Unlock | |
| 2356 // in the worker thread. | |
| 2357 if (LATE(pa_stream_peek)(_recStream, | |
| 2358 &_tempSampleData, | |
| 2359 &_tempSampleDataSize) != 0) | |
| 2360 { | |
| 2361 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 2362 " Can't read data!"); | |
| 2363 return; | |
| 2364 } | |
| 2365 | |
| 2366 // Since we consume the data asynchronously on a different thread, we have | |
| 2367 // to temporarily disable the read callback or else Pulse will call it | |
| 2368 // continuously until we consume the data. We re-enable it below. | |
| 2369 DisableReadCallback(); | |
| 2370 _timeEventRec.Set(); | |
| 2371 } | |
| 2372 | |
| 2373 void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/, | |
| 2374 void *pThis) | |
| 2375 { | |
| 2376 static_cast<AudioDeviceLinuxPulse*> (pThis)-> | |
| 2377 PaStreamOverflowCallbackHandler(); | |
| 2378 } | |
| 2379 | |
| 2380 void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() | |
| 2381 { | |
| 2382 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
| 2383 " Recording overflow"); | |
| 2384 } | |
| 2385 | |
| 2386 int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream) | |
| 2387 { | |
| 2388 if (!WEBRTC_PA_REPORT_LATENCY) | |
| 2389 { | |
| 2390 return 0; | |
| 2391 } | |
| 2392 | |
| 2393 if (!stream) | |
| 2394 { | |
| 2395 return 0; | |
| 2396 } | |
| 2397 | |
| 2398 pa_usec_t latency; | |
| 2399 int negative; | |
| 2400 if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) | |
| 2401 { | |
| 2402 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
| 2403 " Can't query latency"); | |
| 2404 // We'd rather continue playout/capture with an incorrect delay than | |
| 2405 // stop it altogether, so return a valid value. | |
| 2406 return 0; | |
| 2407 } | |
| 2408 | |
| 2409 if (negative) | |
| 2410 { | |
| 2411 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, | |
| 2412 " warning: pa_stream_get_latency reported negative " | |
| 2413 "delay"); | |
| 2414 | |
| 2415 // The delay can be negative for monitoring streams if the captured | |
| 2416 // samples haven't been played yet. In such a case, "latency" | |
| 2417 // contains the magnitude, so we must negate it to get the real value. | |
| 2418 int32_t tmpLatency = (int32_t) -latency; | |
| 2419 if (tmpLatency < 0) | |
| 2420 { | |
| 2421 // Make sure that we don't use a negative delay. | |
| 2422 tmpLatency = 0; | |
| 2423 } | |
| 2424 | |
| 2425 return tmpLatency; | |
| 2426 } else | |
| 2427 { | |
| 2428 return (int32_t) latency; | |
| 2429 } | |
| 2430 } | |
| 2431 | |
| 2432 int32_t AudioDeviceLinuxPulse::ReadRecordedData( | |
| 2433 const void* bufferData, | |
| 2434 size_t bufferSize) EXCLUSIVE_LOCKS_REQUIRED(_critSect) | |
| 2435 { | |
| 2436 size_t size = bufferSize; | |
| 2437 uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels); | |
| 2438 | |
| 2439 // Account for the peeked data and the used data. | |
| 2440 uint32_t recDelay = (uint32_t) ((LatencyUsecs(_recStream) | |
| 2441 / 1000) + 10 * ((size + _recordBufferUsed) / _recordBufferSize)); | |
| 2442 | |
| 2443 _sndCardRecDelay = recDelay; | |
| 2444 | |
| 2445 if (_playStream) | |
| 2446 { | |
| 2447 // Get the playout delay. | |
| 2448 _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream) / 1000); | |
| 2449 } | |
| 2450 | |
| 2451 if (_recordBufferUsed > 0) | |
| 2452 { | |
| 2453 // Have to copy to the buffer until it is full. | |
| 2454 size_t copy = _recordBufferSize - _recordBufferUsed; | |
| 2455 if (size < copy) | |
| 2456 { | |
| 2457 copy = size; | |
| 2458 } | |
| 2459 | |
| 2460 memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy); | |
| 2461 _recordBufferUsed += copy; | |
| 2462 bufferData = static_cast<const char *> (bufferData) + copy; | |
| 2463 size -= copy; | |
| 2464 | |
| 2465 if (_recordBufferUsed != _recordBufferSize) | |
| 2466 { | |
| 2467 // Not enough data yet to pass to VoE. | |
| 2468 return 0; | |
| 2469 } | |
| 2470 | |
| 2471 // Provide data to VoiceEngine. | |
| 2472 if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1) | |
| 2473 { | |
| 2474 // We have stopped recording. | |
| 2475 return -1; | |
| 2476 } | |
| 2477 | |
| 2478 _recordBufferUsed = 0; | |
| 2479 } | |
| 2480 | |
| 2481 // Now process full 10ms sample sets directly from the input. | |
| 2482 while (size >= _recordBufferSize) | |
| 2483 { | |
| 2484 // Provide data to VoiceEngine. | |
| 2485 if (ProcessRecordedData( | |
| 2486 static_cast<int8_t *> (const_cast<void *> (bufferData)), | |
| 2487 numRecSamples, recDelay) == -1) | |
| 2488 { | |
| 2489 // We have stopped recording. | |
| 2490 return -1; | |
| 2491 } | |
| 2492 | |
| 2493 bufferData = static_cast<const char *> (bufferData) + | |
| 2494 _recordBufferSize; | |
| 2495 size -= _recordBufferSize; | |
| 2496 | |
| 2497 // We have consumed 10ms of data. | |
| 2498 recDelay -= 10; | |
| 2499 } | |
| 2500 | |
| 2501 // Now save any leftovers for later. | |
| 2502 if (size > 0) | |
| 2503 { | |
| 2504 memcpy(_recBuffer, bufferData, size); | |
| 2505 _recordBufferUsed = size; | |
| 2506 } | |
| 2507 | |
| 2508 return 0; | |
| 2509 } | |
| 2510 | |
// Delivers one block of recorded samples to the AudioDeviceBuffer and runs
// the AGC microphone-level feedback loop.
//
// bufferData          - pointer to raw PCM samples handed to the buffer.
// bufferSizeInSamples - number of samples in bufferData.
// recDelay            - capture-side delay estimate in ms (adjusted below).
//
// Returns 0 on success, or -1 if recording was stopped while the lock was
// temporarily released for the DeliverRecordedData() callback.
// Caller must hold _critSect (enforced by the thread annotation); the lock
// is dropped and re-acquired around the delivery callback to avoid holding
// the audio-thread lock during observer dispatch.
int32_t AudioDeviceLinuxPulse::ProcessRecordedData(
    int8_t *bufferData,
    uint32_t bufferSizeInSamples,
    uint32_t recDelay) EXCLUSIVE_LOCKS_REQUIRED(_critSect)
{
    uint32_t currentMicLevel(0);
    uint32_t newMicLevel(0);

    _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);

    if (AGC())
    {
        // Store current mic level in the audio buffer if AGC is enabled
        if (MicrophoneVolume(currentMicLevel) == 0)
        {
            // This call does not affect the actual microphone volume
            _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
        }
    }

    const uint32_t clockDrift(0);
    // TODO(andrew): this is a temporary hack, to avoid non-causal far- and
    // near-end signals at the AEC for PulseAudio. I think the system delay is
    // being correctly calculated here, but for legacy reasons we add +10 ms
    // to the value in the AEC. The real fix will be part of a larger
    // investigation into managing system delay in the AEC.
    if (recDelay > 10)
        recDelay -= 10;
    else
        recDelay = 0;
    _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift);
    _ptrAudioBuffer->SetTypingStatus(KeyPressed());
    // Deliver recorded samples at specified sample rate,
    // mic level etc. to the observer using callback.
    UnLock();
    _ptrAudioBuffer->DeliverRecordedData();
    Lock();

    // We have been unlocked - check the flag again.
    if (!_recording)
    {
        return -1;
    }

    if (AGC())
    {
        newMicLevel = _ptrAudioBuffer->NewMicLevel();
        if (newMicLevel != 0)
        {
            // The VQE will only deliver non-zero microphone levels when a
            // change is needed.
            // Set this new mic level (received from the observer as return
            // value in the callback).
            WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
                         " AGC change of volume: old=%u => new=%u",
                         currentMicLevel, newMicLevel);
            if (SetMicrophoneVolume(newMicLevel) == -1)
            {
                // Volume update failed; log only — capture continues with
                // the previous level.
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id,
                             " the required modification of the microphone "
                             "volume failed");
            }
        }
    }

    return 0;
}
| 2579 | |
| 2580 bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis) | |
| 2581 { | |
| 2582 return (static_cast<AudioDeviceLinuxPulse*> (pThis)->PlayThreadProcess()); | |
| 2583 } | |
| 2584 | |
| 2585 bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis) | |
| 2586 { | |
| 2587 return (static_cast<AudioDeviceLinuxPulse*> (pThis)->RecThreadProcess()); | |
| 2588 } | |
| 2589 | |
// One iteration of the playout worker thread. Waits for the periodic play
// timer event, then either (a) performs one-time stream start-up when
// _startPlay is set, or (b) writes pending/requested PCM data to the
// PulseAudio playout stream. Always returns true so the thread keeps
// running; errors are reported via _playError / traces rather than by
// stopping the thread.
//
// Locking: takes _critSect for the whole iteration; additionally takes the
// PulseAudio mainloop lock (PaLock/PaUnLock) around every LATE(pa_*) call,
// and temporarily drops _critSect (UnLock/Lock) around the
// RequestPlayoutData() callback.
bool AudioDeviceLinuxPulse::PlayThreadProcess()
{
    switch (_timeEventPlay.Wait(1000))
    {
        case kEventSignaled:
            break;
        case kEventError:
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "EventWrapper::Wait() failed");
            return true;
        case kEventTimeout:
            return true;
    }

    rtc::CritScope lock(&_critSect);

    if (_startPlay)
    {
        // One-time start-up path: resolve the device name, apply initial
        // mute/volume, connect the stream and wait until it is ready.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "_startPlay true, performing initial actions");

        _startPlay = false;
        _playDeviceName = NULL;

        // Set if not default device
        if (_outputDeviceIndex > 0)
        {
            // Get the playout device name
            _playDeviceName = new char[kAdmMaxDeviceNameSize];
            _deviceIndex = _outputDeviceIndex;
            PlayoutDevices();
        }

        // Start muted only supported on 0.9.11 and up
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            // Get the currently saved speaker mute status
            // and set the initial mute status accordingly
            bool enabled(false);
            _mixerManager.SpeakerMute(enabled);
            if (enabled)
            {
                _playStreamFlags |= PA_STREAM_START_MUTED;
            }
        }

        // Get the currently saved speaker volume
        uint32_t volume = 0;
        if (update_speaker_volume_at_startup_)
            _mixerManager.SpeakerVolume(volume);

        PaLock();

        // NULL gives PA the choice of startup volume.
        pa_cvolume* ptr_cvolume = NULL;
        if (update_speaker_volume_at_startup_) {
            pa_cvolume cVolumes;
            ptr_cvolume = &cVolumes;

            // Set the same volume for all channels
            const pa_sample_spec *spec =
                LATE(pa_stream_get_sample_spec)(_playStream);
            LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
            update_speaker_volume_at_startup_ = false;
        }

        // Connect the stream to a sink
        // NOTE(review): a connect failure is only logged; execution still
        // falls through to the PA_STREAM_READY wait below, which presumably
        // relies on the state-change callback to signal the mainloop —
        // confirm a failed connect cannot hang this wait.
        if (LATE(pa_stream_connect_playback)(
            _playStream,
            _playDeviceName,
            &_playBufferAttr,
            (pa_stream_flags_t) _playStreamFlags,
            ptr_cvolume, NULL) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to connect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " play stream connected");

        // Wait for state change
        while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY)
        {
            LATE(pa_threaded_mainloop_wait)(_paMainloop);
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " play stream ready");

        // We can now handle write callbacks
        EnableWriteCallback();

        PaUnLock();

        // Clear device name
        if (_playDeviceName)
        {
            delete [] _playDeviceName;
            _playDeviceName = NULL;
        }

        _playing = true;
        _playStartEvent.Set();

        return true;
    }

    if (_playing)
    {
        // Steady-state path: first flush any leftover samples from the
        // previous iteration, then (if PA still wants data) request fresh
        // PCM from the AudioDeviceBuffer and write it.
        if (!_recording)
        {
            // Update the playout delay
            // (when recording is active, the rec thread owns this update)
            _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream)
                / 1000);
        }

        if (_playbackBufferUnused < _playbackBufferSize)
        {
            // There are leftover samples in _playBuffer from a previous
            // partial write; push as much as PA currently accepts.
            size_t write = _playbackBufferSize - _playbackBufferUnused;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            PaLock();
            if (LATE(pa_stream_write)(
                _playStream,
                (void *) &_playBuffer[_playbackBufferUnused],
                write, NULL, (int64_t) 0,
                PA_SEEK_RELATIVE) != PA_OK)
            {
                // Only report after 10 consecutive failures to avoid
                // flooding the error callback with transient glitches.
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning,
                                     kTraceUtility, _id,
                                     " pending playout error exists");
                    }
                    // Triggers callback from module process thread.
                    _playError = 1;
                    WEBRTC_TRACE(
                        kTraceError,
                        kTraceUtility,
                        _id,
                        " kPlayoutError message posted: "
                        "_writeErrors=%u, error=%d",
                        _writeErrors,
                        LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            _playbackBufferUnused += write;
            _tempBufferSpace -= write;
        }

        // Samples per 10 ms block: buffer bytes / (2 bytes per sample *
        // channel count).
        uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
        // Might have been reduced to zero by the above.
        if (_tempBufferSpace > 0)
        {
            // Ask for new PCM data to be played out using the
            // AudioDeviceBuffer ensure that this callback is executed
            // without taking the audio-thread lock.
            UnLock();
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " requesting data");
            uint32_t nSamples =
                _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
            Lock();

            // We have been unlocked - check the flag again.
            if (!_playing)
            {
                return true;
            }

            nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
            if (nSamples != numPlaySamples)
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
                             _id, " invalid number of output samples(%d)",
                             nSamples);
            }

            size_t write = _playbackBufferSize;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " will write");
            PaLock();
            if (LATE(pa_stream_write)(_playStream, (void *) &_playBuffer[0],
                                      write, NULL, (int64_t) 0,
                                      PA_SEEK_RELATIVE) != PA_OK)
            {
                // Same deferred error reporting as the leftover-flush path.
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning,
                                     kTraceUtility, _id,
                                     " pending playout error exists");
                    }
                    // Triggers callback from module process thread.
                    _playError = 1;
                    WEBRTC_TRACE(
                        kTraceError,
                        kTraceUtility,
                        _id,
                        " kPlayoutError message posted: "
                        "_writeErrors=%u, error=%d",
                        _writeErrors,
                        LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            // Whatever did not fit stays in _playBuffer for the next pass.
            _playbackBufferUnused = write;
        }

        // Reset the write-callback budget and re-arm the callback so PA
        // signals us next time it wants data.
        _tempBufferSpace = 0;
        PaLock();
        EnableWriteCallback();
        PaUnLock();

    } // _playing

    return true;
}
| 2830 | |
// One iteration of the recording worker thread. Waits for the periodic rec
// timer event, then either (a) performs one-time stream start-up when
// _startRec is set, or (b) drains all currently readable capture data from
// the PulseAudio stream via ReadRecordedData(). Always returns true so the
// thread keeps running.
//
// Locking: takes _critSect for the whole iteration; the PulseAudio mainloop
// lock (PaLock/PaUnLock) wraps every LATE(pa_*) call and is dropped around
// ReadRecordedData(), whose delivery callback can take a while.
bool AudioDeviceLinuxPulse::RecThreadProcess()
{
    switch (_timeEventRec.Wait(1000))
    {
        case kEventSignaled:
            break;
        case kEventError:
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "EventWrapper::Wait() failed");
            return true;
        case kEventTimeout:
            return true;
    }

    rtc::CritScope lock(&_critSect);

    if (_startRec)
    {
        // One-time start-up path: resolve the device name, connect the
        // record stream and wait until it is ready.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "_startRec true, performing initial actions");

        _recDeviceName = NULL;

        // Set if not default device
        if (_inputDeviceIndex > 0)
        {
            // Get the recording device name
            _recDeviceName = new char[kAdmMaxDeviceNameSize];
            _deviceIndex = _inputDeviceIndex;
            RecordingDevices();
        }

        PaLock();

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " connecting stream");

        // Connect the stream to a source
        // NOTE(review): a connect failure is only logged; execution still
        // falls through to the PA_STREAM_READY wait below — confirm a
        // failed connect cannot hang this wait.
        if (LATE(pa_stream_connect_record)(_recStream,
                                           _recDeviceName,
                                           &_recBufferAttr,
                                           (pa_stream_flags_t) _recStreamFlags) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to connect rec stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " connected");

        // Wait for state change
        while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY)
        {
            LATE(pa_threaded_mainloop_wait)(_paMainloop);
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " done");

        // We can now handle read callbacks
        EnableReadCallback();

        PaUnLock();

        // Clear device name
        if (_recDeviceName)
        {
            delete [] _recDeviceName;
            _recDeviceName = NULL;
        }

        _startRec = false;
        _recording = true;
        _recStartEvent.Set();

        return true;
    }

    if (_recording)
    {
        // Steady-state path: deliver the fragment peeked by the read
        // callback (_tempSampleData), then drain any further readable data
        // with a drop/peek loop.
        // Read data and provide it to VoiceEngine
        if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1)
        {
            // Recording was stopped while delivering; bail out of this
            // iteration.
            return true;
        }

        _tempSampleData = NULL;
        _tempSampleDataSize = 0;

        PaLock();
        while (true)
        {
            // Ack the last thing we read
            if (LATE(pa_stream_drop)(_recStream) != 0)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id, " failed to drop, err=%d\n",
                             LATE(pa_context_errno)(_paContext));
            }

            if (LATE(pa_stream_readable_size)(_recStream) <= 0)
            {
                // Then that was all the data
                break;
            }

            // Else more data.
            const void *sampleData;
            size_t sampleDataSize;

            if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize)
                != 0)
            {
                _recError = 1; // triggers callback from module process thread
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
                             _id, " RECORD_ERROR message posted, error = %d",
                             LATE(pa_context_errno)(_paContext));
                break;
            }

            // Capture-side delay estimate in ms, consumed by the VQE path.
            _sndCardRecDelay = (uint32_t) (LatencyUsecs(_recStream)
                / 1000);

            // Drop lock for sigslot dispatch, which could take a while.
            PaUnLock();
            // Read data and provide it to VoiceEngine
            if (ReadRecordedData(sampleData, sampleDataSize) == -1)
            {
                // Recording stopped; PA lock already released, safe to exit.
                return true;
            }
            PaLock();

            // Return to top of loop for the ack and the check for more data.
        }

        // Re-arm the read callback so PA signals the next batch of data.
        EnableReadCallback();
        PaUnLock();

    } // _recording

    return true;
}
| 2974 | |
| 2975 bool AudioDeviceLinuxPulse::KeyPressed() const{ | |
| 2976 | |
| 2977 char szKey[32]; | 2570 char szKey[32]; |
| 2978 unsigned int i = 0; | 2571 unsigned int i = 0; |
| 2979 char state = 0; | 2572 char state = 0; |
| 2980 | 2573 |
| 2981 if (!_XDisplay) | 2574 if (!_XDisplay) |
| 2982 return false; | 2575 return false; |
| 2983 | 2576 |
| 2984 // Check key map status | 2577 // Check key map status |
| 2985 XQueryKeymap(_XDisplay, szKey); | 2578 XQueryKeymap(_XDisplay, szKey); |
| 2986 | 2579 |
| 2987 // A bit change in keymap means a key is pressed | 2580 // A bit change in keymap means a key is pressed |
| 2988 for (i = 0; i < sizeof(szKey); i++) | 2581 for (i = 0; i < sizeof(szKey); i++) |
| 2989 state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i]; | 2582 state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i]; |
| 2990 | 2583 |
| 2991 // Save old state | 2584 // Save old state |
| 2992 memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState)); | 2585 memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState)); |
| 2993 return (state != 0); | 2586 return (state != 0); |
| 2994 } | 2587 } |
| 2995 } | 2588 } // namespace webrtc |
| OLD | NEW |