Index: webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc |
diff --git a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc |
index bbc6fadbbf2e88fa3cf376f912db3d5ed5c2b85c..54fabb56edcb2d281e4c69ed6cee350e0d6ed4fd 100644 |
--- a/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc |
+++ b/webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc |
@@ -26,128 +26,119 @@ webrtc::adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable; |
LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, \ |
sym) |
-namespace webrtc |
-{ |
- |
-AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) : |
- _ptrAudioBuffer(NULL), |
- _timeEventRec(*EventWrapper::Create()), |
- _timeEventPlay(*EventWrapper::Create()), |
- _recStartEvent(*EventWrapper::Create()), |
- _playStartEvent(*EventWrapper::Create()), |
- _id(id), |
- _mixerManager(id), |
- _inputDeviceIndex(0), |
- _outputDeviceIndex(0), |
- _inputDeviceIsSpecified(false), |
- _outputDeviceIsSpecified(false), |
- sample_rate_hz_(0), |
- _recChannels(1), |
- _playChannels(1), |
- _playBufType(AudioDeviceModule::kFixedBufferSize), |
- _initialized(false), |
- _recording(false), |
- _playing(false), |
- _recIsInitialized(false), |
- _playIsInitialized(false), |
- _startRec(false), |
- _stopRec(false), |
- _startPlay(false), |
- _stopPlay(false), |
- _AGC(false), |
- update_speaker_volume_at_startup_(false), |
- _playBufDelayFixed(20), |
- _sndCardPlayDelay(0), |
- _sndCardRecDelay(0), |
- _writeErrors(0), |
- _playWarning(0), |
- _playError(0), |
- _recWarning(0), |
- _recError(0), |
- _deviceIndex(-1), |
- _numPlayDevices(0), |
- _numRecDevices(0), |
- _playDeviceName(NULL), |
- _recDeviceName(NULL), |
- _playDisplayDeviceName(NULL), |
- _recDisplayDeviceName(NULL), |
- _playBuffer(NULL), |
- _playbackBufferSize(0), |
- _playbackBufferUnused(0), |
- _tempBufferSpace(0), |
- _recBuffer(NULL), |
- _recordBufferSize(0), |
- _recordBufferUsed(0), |
- _tempSampleData(NULL), |
- _tempSampleDataSize(0), |
- _configuredLatencyPlay(0), |
- _configuredLatencyRec(0), |
- _paDeviceIndex(-1), |
- _paStateChanged(false), |
- _paMainloop(NULL), |
- _paMainloopApi(NULL), |
- _paContext(NULL), |
- _recStream(NULL), |
- _playStream(NULL), |
- _recStreamFlags(0), |
- _playStreamFlags(0) |
-{ |
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, |
- "%s created", __FUNCTION__); |
- |
- memset(_paServerVersion, 0, sizeof(_paServerVersion)); |
- memset(&_playBufferAttr, 0, sizeof(_playBufferAttr)); |
- memset(&_recBufferAttr, 0, sizeof(_recBufferAttr)); |
- memset(_oldKeyState, 0, sizeof(_oldKeyState)); |
-} |
- |
-AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() |
-{ |
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, |
- "%s destroyed", __FUNCTION__); |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- Terminate(); |
- |
- if (_recBuffer) |
- { |
- delete [] _recBuffer; |
- _recBuffer = NULL; |
- } |
- if (_playBuffer) |
- { |
- delete [] _playBuffer; |
- _playBuffer = NULL; |
- } |
- if (_playDeviceName) |
- { |
- delete [] _playDeviceName; |
- _playDeviceName = NULL; |
- } |
- if (_recDeviceName) |
- { |
- delete [] _recDeviceName; |
- _recDeviceName = NULL; |
- } |
+namespace webrtc { |
+ |
+AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) |
+ : _ptrAudioBuffer(NULL), |
+ _timeEventRec(*EventWrapper::Create()), |
+ _timeEventPlay(*EventWrapper::Create()), |
+ _recStartEvent(*EventWrapper::Create()), |
+ _playStartEvent(*EventWrapper::Create()), |
+ _id(id), |
+ _mixerManager(id), |
+ _inputDeviceIndex(0), |
+ _outputDeviceIndex(0), |
+ _inputDeviceIsSpecified(false), |
+ _outputDeviceIsSpecified(false), |
+ sample_rate_hz_(0), |
+ _recChannels(1), |
+ _playChannels(1), |
+ _playBufType(AudioDeviceModule::kFixedBufferSize), |
+ _initialized(false), |
+ _recording(false), |
+ _playing(false), |
+ _recIsInitialized(false), |
+ _playIsInitialized(false), |
+ _startRec(false), |
+ _stopRec(false), |
+ _startPlay(false), |
+ _stopPlay(false), |
+ _AGC(false), |
+ update_speaker_volume_at_startup_(false), |
+ _playBufDelayFixed(20), |
+ _sndCardPlayDelay(0), |
+ _sndCardRecDelay(0), |
+ _writeErrors(0), |
+ _playWarning(0), |
+ _playError(0), |
+ _recWarning(0), |
+ _recError(0), |
+ _deviceIndex(-1), |
+ _numPlayDevices(0), |
+ _numRecDevices(0), |
+ _playDeviceName(NULL), |
+ _recDeviceName(NULL), |
+ _playDisplayDeviceName(NULL), |
+ _recDisplayDeviceName(NULL), |
+ _playBuffer(NULL), |
+ _playbackBufferSize(0), |
+ _playbackBufferUnused(0), |
+ _tempBufferSpace(0), |
+ _recBuffer(NULL), |
+ _recordBufferSize(0), |
+ _recordBufferUsed(0), |
+ _tempSampleData(NULL), |
+ _tempSampleDataSize(0), |
+ _configuredLatencyPlay(0), |
+ _configuredLatencyRec(0), |
+ _paDeviceIndex(-1), |
+ _paStateChanged(false), |
+ _paMainloop(NULL), |
+ _paMainloopApi(NULL), |
+ _paContext(NULL), |
+ _recStream(NULL), |
+ _playStream(NULL), |
+ _recStreamFlags(0), |
+ _playStreamFlags(0) { |
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__); |
+ |
+ memset(_paServerVersion, 0, sizeof(_paServerVersion)); |
+ memset(&_playBufferAttr, 0, sizeof(_playBufferAttr)); |
+ memset(&_recBufferAttr, 0, sizeof(_recBufferAttr)); |
+ memset(_oldKeyState, 0, sizeof(_oldKeyState)); |
+} |
+ |
+AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() { |
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", |
+ __FUNCTION__); |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ Terminate(); |
+ |
+ if (_recBuffer) { |
+ delete[] _recBuffer; |
+ _recBuffer = NULL; |
+ } |
+ if (_playBuffer) { |
+ delete[] _playBuffer; |
+ _playBuffer = NULL; |
+ } |
+ if (_playDeviceName) { |
+ delete[] _playDeviceName; |
+ _playDeviceName = NULL; |
+ } |
+ if (_recDeviceName) { |
+ delete[] _recDeviceName; |
+ _recDeviceName = NULL; |
+ } |
- delete &_recStartEvent; |
- delete &_playStartEvent; |
- delete &_timeEventRec; |
- delete &_timeEventPlay; |
+ delete &_recStartEvent; |
+ delete &_playStartEvent; |
+ delete &_timeEventRec; |
+ delete &_timeEventPlay; |
} |
-void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- _ptrAudioBuffer = audioBuffer; |
+ _ptrAudioBuffer = audioBuffer; |
- // Inform the AudioBuffer about default settings for this implementation. |
- // Set all values to zero here since the actual settings will be done by |
- // InitPlayout and InitRecording later. |
- _ptrAudioBuffer->SetRecordingSampleRate(0); |
- _ptrAudioBuffer->SetPlayoutSampleRate(0); |
- _ptrAudioBuffer->SetRecordingChannels(0); |
- _ptrAudioBuffer->SetPlayoutChannels(0); |
+ // Inform the AudioBuffer about default settings for this implementation. |
+ // Set all values to zero here since the actual settings will be done by |
+ // InitPlayout and InitRecording later. |
+ _ptrAudioBuffer->SetRecordingSampleRate(0); |
+ _ptrAudioBuffer->SetPlayoutSampleRate(0); |
+ _ptrAudioBuffer->SetRecordingChannels(0); |
+ _ptrAudioBuffer->SetPlayoutChannels(0); |
} |
// ---------------------------------------------------------------------------- |
@@ -155,10 +146,9 @@ void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) |
// ---------------------------------------------------------------------------- |
int32_t AudioDeviceLinuxPulse::ActiveAudioLayer( |
- AudioDeviceModule::AudioLayer& audioLayer) const |
-{ |
- audioLayer = AudioDeviceModule::kLinuxPulseAudio; |
- return 0; |
+ AudioDeviceModule::AudioLayer& audioLayer) const { |
+ audioLayer = AudioDeviceModule::kLinuxPulseAudio; |
+ return 0; |
} |
AudioDeviceGeneric::InitStatus AudioDeviceLinuxPulse::Init() { |
@@ -206,1472 +196,1284 @@ AudioDeviceGeneric::InitStatus AudioDeviceLinuxPulse::Init() { |
return InitStatus::OK; |
} |
-int32_t AudioDeviceLinuxPulse::Terminate() |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (!_initialized) |
- { |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::Terminate() { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (!_initialized) { |
+ return 0; |
+ } |
- _mixerManager.Close(); |
+ _mixerManager.Close(); |
- // RECORDING |
- if (_ptrThreadRec) |
- { |
- rtc::PlatformThread* tmpThread = _ptrThreadRec.release(); |
+ // RECORDING |
+ if (_ptrThreadRec) { |
+ rtc::PlatformThread* tmpThread = _ptrThreadRec.release(); |
- _timeEventRec.Set(); |
- tmpThread->Stop(); |
- delete tmpThread; |
- } |
+ _timeEventRec.Set(); |
+ tmpThread->Stop(); |
+ delete tmpThread; |
+ } |
- // PLAYOUT |
- if (_ptrThreadPlay) |
- { |
- rtc::PlatformThread* tmpThread = _ptrThreadPlay.release(); |
+ // PLAYOUT |
+ if (_ptrThreadPlay) { |
+ rtc::PlatformThread* tmpThread = _ptrThreadPlay.release(); |
- _timeEventPlay.Set(); |
- tmpThread->Stop(); |
- delete tmpThread; |
- } |
+ _timeEventPlay.Set(); |
+ tmpThread->Stop(); |
+ delete tmpThread; |
+ } |
- // Terminate PulseAudio |
- if (TerminatePulseAudio() < 0) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to terminate PulseAudio"); |
- return -1; |
- } |
+ // Terminate PulseAudio |
+ if (TerminatePulseAudio() < 0) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to terminate PulseAudio"); |
+ return -1; |
+ } |
- if (_XDisplay) |
- { |
- XCloseDisplay(_XDisplay); |
- _XDisplay = NULL; |
- } |
+ if (_XDisplay) { |
+ XCloseDisplay(_XDisplay); |
+ _XDisplay = NULL; |
+ } |
- _initialized = false; |
- _outputDeviceIsSpecified = false; |
- _inputDeviceIsSpecified = false; |
+ _initialized = false; |
+ _outputDeviceIsSpecified = false; |
+ _inputDeviceIsSpecified = false; |
- return 0; |
+ return 0; |
} |
-bool AudioDeviceLinuxPulse::Initialized() const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- return (_initialized); |
+bool AudioDeviceLinuxPulse::Initialized() const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ return (_initialized); |
} |
-int32_t AudioDeviceLinuxPulse::InitSpeaker() |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+int32_t AudioDeviceLinuxPulse::InitSpeaker() { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (_playing) |
- { |
- return -1; |
- } |
+ if (_playing) { |
+ return -1; |
+ } |
- if (!_outputDeviceIsSpecified) |
- { |
- return -1; |
- } |
+ if (!_outputDeviceIsSpecified) { |
+ return -1; |
+ } |
- // check if default device |
- if (_outputDeviceIndex == 0) |
- { |
- uint16_t deviceIndex = 0; |
- GetDefaultDeviceInfo(false, NULL, deviceIndex); |
- _paDeviceIndex = deviceIndex; |
- } else |
- { |
- // get the PA device index from |
- // the callback |
- _deviceIndex = _outputDeviceIndex; |
+ // check if default device |
+ if (_outputDeviceIndex == 0) { |
+ uint16_t deviceIndex = 0; |
+ GetDefaultDeviceInfo(false, NULL, deviceIndex); |
+ _paDeviceIndex = deviceIndex; |
+ } else { |
+ // get the PA device index from |
+ // the callback |
+ _deviceIndex = _outputDeviceIndex; |
- // get playout devices |
- PlayoutDevices(); |
- } |
+ // get playout devices |
+ PlayoutDevices(); |
+ } |
- // the callback has now set the _paDeviceIndex to |
- // the PulseAudio index of the device |
- if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1) |
- { |
- return -1; |
- } |
+ // the callback has now set the _paDeviceIndex to |
+ // the PulseAudio index of the device |
+ if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1) { |
+ return -1; |
+ } |
- // clear _deviceIndex |
- _deviceIndex = -1; |
- _paDeviceIndex = -1; |
+ // clear _deviceIndex |
+ _deviceIndex = -1; |
+ _paDeviceIndex = -1; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::InitMicrophone() |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (_recording) |
- { |
- return -1; |
- } |
- |
- if (!_inputDeviceIsSpecified) |
- { |
- return -1; |
- } |
+int32_t AudioDeviceLinuxPulse::InitMicrophone() { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (_recording) { |
+ return -1; |
+ } |
- // Check if default device |
- if (_inputDeviceIndex == 0) |
- { |
- uint16_t deviceIndex = 0; |
- GetDefaultDeviceInfo(true, NULL, deviceIndex); |
- _paDeviceIndex = deviceIndex; |
- } else |
- { |
- // Get the PA device index from |
- // the callback |
- _deviceIndex = _inputDeviceIndex; |
+ if (!_inputDeviceIsSpecified) { |
+ return -1; |
+ } |
- // get recording devices |
- RecordingDevices(); |
- } |
+ // Check if default device |
+ if (_inputDeviceIndex == 0) { |
+ uint16_t deviceIndex = 0; |
+ GetDefaultDeviceInfo(true, NULL, deviceIndex); |
+ _paDeviceIndex = deviceIndex; |
+ } else { |
+ // Get the PA device index from |
+ // the callback |
+ _deviceIndex = _inputDeviceIndex; |
+ |
+ // get recording devices |
+ RecordingDevices(); |
+ } |
- // The callback has now set the _paDeviceIndex to |
- // the PulseAudio index of the device |
- if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1) |
- { |
- return -1; |
- } |
+ // The callback has now set the _paDeviceIndex to |
+ // the PulseAudio index of the device |
+ if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1) { |
+ return -1; |
+ } |
- // Clear _deviceIndex |
- _deviceIndex = -1; |
- _paDeviceIndex = -1; |
+ // Clear _deviceIndex |
+ _deviceIndex = -1; |
+ _paDeviceIndex = -1; |
- return 0; |
+ return 0; |
} |
-bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- return (_mixerManager.SpeakerIsInitialized()); |
+bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ return (_mixerManager.SpeakerIsInitialized()); |
} |
-bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- return (_mixerManager.MicrophoneIsInitialized()); |
+bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ return (_mixerManager.MicrophoneIsInitialized()); |
} |
-int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
+int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
- // Make an attempt to open up the |
- // output mixer corresponding to the currently selected output device. |
- if (!wasInitialized && InitSpeaker() == -1) |
- { |
- // If we end up here it means that the selected speaker has no volume |
- // control. |
- available = false; |
- return 0; |
- } |
+ // Make an attempt to open up the |
+ // output mixer corresponding to the currently selected output device. |
+ if (!wasInitialized && InitSpeaker() == -1) { |
+ // If we end up here it means that the selected speaker has no volume |
+ // control. |
+ available = false; |
+ return 0; |
+ } |
- // Given that InitSpeaker was successful, we know volume control exists. |
- available = true; |
+ // Given that InitSpeaker was successful, we know volume control exists. |
+ available = true; |
- // Close the initialized output mixer |
- if (!wasInitialized) |
- { |
- _mixerManager.CloseSpeaker(); |
- } |
+ // Close the initialized output mixer |
+ if (!wasInitialized) { |
+ _mixerManager.CloseSpeaker(); |
+ } |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (!_playing) { |
- // Only update the volume if it's been set while we weren't playing. |
- update_speaker_volume_at_startup_ = true; |
- } |
- return (_mixerManager.SetSpeakerVolume(volume)); |
+int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (!_playing) { |
+ // Only update the volume if it's been set while we weren't playing. |
+ update_speaker_volume_at_startup_ = true; |
+ } |
+ return (_mixerManager.SetSpeakerVolume(volume)); |
} |
-int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- uint32_t level(0); |
+int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ uint32_t level(0); |
- if (_mixerManager.SpeakerVolume(level) == -1) |
- { |
- return -1; |
- } |
+ if (_mixerManager.SpeakerVolume(level) == -1) { |
+ return -1; |
+ } |
- volume = level; |
+ volume = level; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::SetWaveOutVolume( |
- uint16_t volumeLeft, |
- uint16_t volumeRight) |
-{ |
- |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
- " API call not supported on this platform"); |
- return -1; |
+int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(uint16_t volumeLeft, |
+ uint16_t volumeRight) { |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
+ " API call not supported on this platform"); |
+ return -1; |
} |
-int32_t AudioDeviceLinuxPulse::WaveOutVolume( |
- uint16_t& /*volumeLeft*/, |
- uint16_t& /*volumeRight*/) const |
-{ |
- |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
- " API call not supported on this platform"); |
- return -1; |
+int32_t AudioDeviceLinuxPulse::WaveOutVolume(uint16_t& /*volumeLeft*/, |
+ uint16_t& /*volumeRight*/) const { |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
+ " API call not supported on this platform"); |
+ return -1; |
} |
-int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume( |
- uint32_t& maxVolume) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- uint32_t maxVol(0); |
+int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ uint32_t maxVol(0); |
- if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) |
- { |
- return -1; |
- } |
+ if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { |
+ return -1; |
+ } |
- maxVolume = maxVol; |
+ maxVolume = maxVol; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::MinSpeakerVolume( |
- uint32_t& minVolume) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- uint32_t minVol(0); |
+int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(uint32_t& minVolume) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ uint32_t minVol(0); |
- if (_mixerManager.MinSpeakerVolume(minVol) == -1) |
- { |
- return -1; |
- } |
+ if (_mixerManager.MinSpeakerVolume(minVol) == -1) { |
+ return -1; |
+ } |
- minVolume = minVol; |
+ minVolume = minVol; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize( |
- uint16_t& stepSize) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- uint16_t delta(0); |
+int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(uint16_t& stepSize) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ uint16_t delta(0); |
- if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) |
- { |
- return -1; |
- } |
+ if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) { |
+ return -1; |
+ } |
- stepSize = delta; |
+ stepSize = delta; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- bool isAvailable(false); |
- bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
- |
- // Make an attempt to open up the |
- // output mixer corresponding to the currently selected output device. |
- // |
- if (!wasInitialized && InitSpeaker() == -1) |
- { |
- // If we end up here it means that the selected speaker has no volume |
- // control, hence it is safe to state that there is no mute control |
- // already at this stage. |
- available = false; |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ bool isAvailable(false); |
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
+ |
+ // Make an attempt to open up the |
+ // output mixer corresponding to the currently selected output device. |
+ // |
+ if (!wasInitialized && InitSpeaker() == -1) { |
+ // If we end up here it means that the selected speaker has no volume |
+ // control, hence it is safe to state that there is no mute control |
+ // already at this stage. |
+ available = false; |
+ return 0; |
+ } |
- // Check if the selected speaker has a mute control |
- _mixerManager.SpeakerMuteIsAvailable(isAvailable); |
+ // Check if the selected speaker has a mute control |
+ _mixerManager.SpeakerMuteIsAvailable(isAvailable); |
- available = isAvailable; |
+ available = isAvailable; |
- // Close the initialized output mixer |
- if (!wasInitialized) |
- { |
- _mixerManager.CloseSpeaker(); |
- } |
+ // Close the initialized output mixer |
+ if (!wasInitialized) { |
+ _mixerManager.CloseSpeaker(); |
+ } |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- return (_mixerManager.SetSpeakerMute(enable)); |
+int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ return (_mixerManager.SetSpeakerMute(enable)); |
} |
-int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- bool muted(0); |
- if (_mixerManager.SpeakerMute(muted) == -1) |
- { |
- return -1; |
- } |
+int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ bool muted(0); |
+ if (_mixerManager.SpeakerMute(muted) == -1) { |
+ return -1; |
+ } |
- enabled = muted; |
- return 0; |
+ enabled = muted; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- bool isAvailable(false); |
- bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
- |
- // Make an attempt to open up the |
- // input mixer corresponding to the currently selected input device. |
- // |
- if (!wasInitialized && InitMicrophone() == -1) |
- { |
- // If we end up here it means that the selected microphone has no |
- // volume control, hence it is safe to state that there is no |
- // boost control already at this stage. |
- available = false; |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ bool isAvailable(false); |
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
+ |
+ // Make an attempt to open up the |
+ // input mixer corresponding to the currently selected input device. |
+ // |
+ if (!wasInitialized && InitMicrophone() == -1) { |
+ // If we end up here it means that the selected microphone has no |
+ // volume control, hence it is safe to state that there is no |
+ // boost control already at this stage. |
+ available = false; |
+ return 0; |
+ } |
- // Check if the selected microphone has a mute control |
- // |
- _mixerManager.MicrophoneMuteIsAvailable(isAvailable); |
- available = isAvailable; |
+ // Check if the selected microphone has a mute control |
+ // |
+ _mixerManager.MicrophoneMuteIsAvailable(isAvailable); |
+ available = isAvailable; |
- // Close the initialized input mixer |
- // |
- if (!wasInitialized) |
- { |
- _mixerManager.CloseMicrophone(); |
- } |
+ // Close the initialized input mixer |
+ // |
+ if (!wasInitialized) { |
+ _mixerManager.CloseMicrophone(); |
+ } |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- return (_mixerManager.SetMicrophoneMute(enable)); |
+int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ return (_mixerManager.SetMicrophoneMute(enable)); |
} |
-int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- bool muted(0); |
- if (_mixerManager.MicrophoneMute(muted) == -1) |
- { |
- return -1; |
- } |
+int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ bool muted(0); |
+ if (_mixerManager.MicrophoneMute(muted) == -1) { |
+ return -1; |
+ } |
- enabled = muted; |
- return 0; |
+ enabled = muted; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- bool isAvailable(false); |
- bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
- |
- // Enumerate all avaliable microphone and make an attempt to open up the |
- // input mixer corresponding to the currently selected input device. |
- // |
- if (!wasInitialized && InitMicrophone() == -1) |
- { |
- // If we end up here it means that the selected microphone has no |
- // volume control, hence it is safe to state that there is no |
- // boost control already at this stage. |
- available = false; |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ bool isAvailable(false); |
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
+ |
+  // Enumerate all available microphones and make an attempt to open up the |
+ // input mixer corresponding to the currently selected input device. |
+ // |
+ if (!wasInitialized && InitMicrophone() == -1) { |
+ // If we end up here it means that the selected microphone has no |
+ // volume control, hence it is safe to state that there is no |
+ // boost control already at this stage. |
+ available = false; |
+ return 0; |
+ } |
- // Check if the selected microphone has a boost control |
- _mixerManager.MicrophoneBoostIsAvailable(isAvailable); |
- available = isAvailable; |
+ // Check if the selected microphone has a boost control |
+ _mixerManager.MicrophoneBoostIsAvailable(isAvailable); |
+ available = isAvailable; |
- // Close the initialized input mixer |
- if (!wasInitialized) |
- { |
- _mixerManager.CloseMicrophone(); |
- } |
+ // Close the initialized input mixer |
+ if (!wasInitialized) { |
+ _mixerManager.CloseMicrophone(); |
+ } |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- return (_mixerManager.SetMicrophoneBoost(enable)); |
+int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ return (_mixerManager.SetMicrophoneBoost(enable)); |
} |
-int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- bool onOff(0); |
+int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ bool onOff(0); |
- if (_mixerManager.MicrophoneBoost(onOff) == -1) |
- { |
- return -1; |
- } |
+ if (_mixerManager.MicrophoneBoost(onOff) == -1) { |
+ return -1; |
+ } |
- enabled = onOff; |
+ enabled = onOff; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (_recChannels == 2 && _recording) { |
- available = true; |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (_recChannels == 2 && _recording) { |
+ available = true; |
+ return 0; |
+ } |
- available = false; |
- bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
- int error = 0; |
+ available = false; |
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
+ int error = 0; |
- if (!wasInitialized && InitMicrophone() == -1) |
- { |
- // Cannot open the specified device |
- available = false; |
- return 0; |
- } |
+ if (!wasInitialized && InitMicrophone() == -1) { |
+ // Cannot open the specified device |
+ available = false; |
+ return 0; |
+ } |
- // Check if the selected microphone can record stereo. |
- bool isAvailable(false); |
- error = _mixerManager.StereoRecordingIsAvailable(isAvailable); |
- if (!error) |
- available = isAvailable; |
+ // Check if the selected microphone can record stereo. |
+ bool isAvailable(false); |
+ error = _mixerManager.StereoRecordingIsAvailable(isAvailable); |
+ if (!error) |
+ available = isAvailable; |
- // Close the initialized input mixer |
- if (!wasInitialized) |
- { |
- _mixerManager.CloseMicrophone(); |
- } |
+ // Close the initialized input mixer |
+ if (!wasInitialized) { |
+ _mixerManager.CloseMicrophone(); |
+ } |
- return error; |
+ return error; |
} |
-int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (enable) |
- _recChannels = 2; |
- else |
- _recChannels = 1; |
+int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (enable) |
+ _recChannels = 2; |
+ else |
+ _recChannels = 1; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (_recChannels == 2) |
- enabled = true; |
- else |
- enabled = false; |
+int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (_recChannels == 2) |
+ enabled = true; |
+ else |
+ enabled = false; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (_playChannels == 2 && _playing) { |
- available = true; |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (_playChannels == 2 && _playing) { |
+ available = true; |
+ return 0; |
+ } |
- available = false; |
- bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
- int error = 0; |
+ available = false; |
+ bool wasInitialized = _mixerManager.SpeakerIsInitialized(); |
+ int error = 0; |
- if (!wasInitialized && InitSpeaker() == -1) |
- { |
- // Cannot open the specified device. |
- return -1; |
- } |
+ if (!wasInitialized && InitSpeaker() == -1) { |
+ // Cannot open the specified device. |
+ return -1; |
+ } |
- // Check if the selected speaker can play stereo. |
- bool isAvailable(false); |
- error = _mixerManager.StereoPlayoutIsAvailable(isAvailable); |
- if (!error) |
- available = isAvailable; |
+ // Check if the selected speaker can play stereo. |
+ bool isAvailable(false); |
+ error = _mixerManager.StereoPlayoutIsAvailable(isAvailable); |
+ if (!error) |
+ available = isAvailable; |
- // Close the initialized input mixer |
- if (!wasInitialized) |
- { |
- _mixerManager.CloseSpeaker(); |
- } |
+  // Close the initialized output mixer |
+ if (!wasInitialized) { |
+ _mixerManager.CloseSpeaker(); |
+ } |
- return error; |
+ return error; |
} |
-int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (enable) |
- _playChannels = 2; |
- else |
- _playChannels = 1; |
+int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (enable) |
+ _playChannels = 2; |
+ else |
+ _playChannels = 1; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (_playChannels == 2) |
- enabled = true; |
- else |
- enabled = false; |
+int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (_playChannels == 2) |
+ enabled = true; |
+ else |
+ enabled = false; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::SetAGC(bool enable) |
-{ |
- rtc::CritScope lock(&_critSect); |
- _AGC = enable; |
+int32_t AudioDeviceLinuxPulse::SetAGC(bool enable) { |
+ rtc::CritScope lock(&_critSect); |
+ _AGC = enable; |
- return 0; |
+ return 0; |
} |
-bool AudioDeviceLinuxPulse::AGC() const |
-{ |
- rtc::CritScope lock(&_critSect); |
- return _AGC; |
+bool AudioDeviceLinuxPulse::AGC() const { |
+ rtc::CritScope lock(&_critSect); |
+ return _AGC; |
} |
-int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable( |
- bool& available) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
+int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(bool& available) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); |
- // Make an attempt to open up the |
- // input mixer corresponding to the currently selected output device. |
- if (!wasInitialized && InitMicrophone() == -1) |
- { |
- // If we end up here it means that the selected microphone has no |
- // volume control. |
- available = false; |
- return 0; |
- } |
+ // Make an attempt to open up the |
+ // input mixer corresponding to the currently selected output device. |
+ if (!wasInitialized && InitMicrophone() == -1) { |
+ // If we end up here it means that the selected microphone has no |
+ // volume control. |
+ available = false; |
+ return 0; |
+ } |
- // Given that InitMicrophone was successful, we know that a volume control |
- // exists. |
- available = true; |
+ // Given that InitMicrophone was successful, we know that a volume control |
+ // exists. |
+ available = true; |
- // Close the initialized input mixer |
- if (!wasInitialized) |
- { |
- _mixerManager.CloseMicrophone(); |
- } |
+ // Close the initialized input mixer |
+ if (!wasInitialized) { |
+ _mixerManager.CloseMicrophone(); |
+ } |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) |
-{ |
- return (_mixerManager.SetMicrophoneVolume(volume)); |
+int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) { |
+ return (_mixerManager.SetMicrophoneVolume(volume)); |
} |
-int32_t AudioDeviceLinuxPulse::MicrophoneVolume( |
- uint32_t& volume) const |
-{ |
+int32_t AudioDeviceLinuxPulse::MicrophoneVolume(uint32_t& volume) const { |
+ uint32_t level(0); |
- uint32_t level(0); |
- |
- if (_mixerManager.MicrophoneVolume(level) == -1) |
- { |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
- " failed to retrive current microphone level"); |
- return -1; |
- } |
+ if (_mixerManager.MicrophoneVolume(level) == -1) { |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
+                " failed to retrieve current microphone level");
+ return -1; |
+ } |
- volume = level; |
+ volume = level; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume( |
- uint32_t& maxVolume) const |
-{ |
+int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const { |
+ uint32_t maxVol(0); |
- uint32_t maxVol(0); |
- |
- if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) |
- { |
- return -1; |
- } |
+ if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) { |
+ return -1; |
+ } |
- maxVolume = maxVol; |
+ maxVolume = maxVol; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume( |
- uint32_t& minVolume) const |
-{ |
+int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(uint32_t& minVolume) const { |
+ uint32_t minVol(0); |
- uint32_t minVol(0); |
+ if (_mixerManager.MinMicrophoneVolume(minVol) == -1) { |
+ return -1; |
+ } |
- if (_mixerManager.MinMicrophoneVolume(minVol) == -1) |
- { |
- return -1; |
- } |
- |
- minVolume = minVol; |
+ minVolume = minVol; |
- return 0; |
+ return 0; |
} |
int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize( |
- uint16_t& stepSize) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- uint16_t delta(0); |
+ uint16_t& stepSize) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ uint16_t delta(0); |
- if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) |
- { |
- return -1; |
- } |
+ if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) { |
+ return -1; |
+ } |
- stepSize = delta; |
+ stepSize = delta; |
- return 0; |
+ return 0; |
} |
-int16_t AudioDeviceLinuxPulse::PlayoutDevices() |
-{ |
- PaLock(); |
+int16_t AudioDeviceLinuxPulse::PlayoutDevices() { |
+ PaLock(); |
- pa_operation* paOperation = NULL; |
- _numPlayDevices = 1; // init to 1 to account for "default" |
+ pa_operation* paOperation = NULL; |
+ _numPlayDevices = 1; // init to 1 to account for "default" |
- // get the whole list of devices and update _numPlayDevices |
- paOperation = LATE(pa_context_get_sink_info_list)(_paContext, |
- PaSinkInfoCallback, |
- this); |
+ // get the whole list of devices and update _numPlayDevices |
+ paOperation = |
+ LATE(pa_context_get_sink_info_list)(_paContext, PaSinkInfoCallback, this); |
- WaitForOperationCompletion(paOperation); |
+ WaitForOperationCompletion(paOperation); |
- PaUnLock(); |
+ PaUnLock(); |
- return _numPlayDevices; |
+ return _numPlayDevices; |
} |
-int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (_playIsInitialized) |
- { |
- return -1; |
- } |
+int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (_playIsInitialized) { |
+ return -1; |
+ } |
- const uint16_t nDevices = PlayoutDevices(); |
+ const uint16_t nDevices = PlayoutDevices(); |
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
- " number of availiable output devices is %u", nDevices); |
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
+               " number of available output devices is %u", nDevices);
- if (index > (nDevices - 1)) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " device index is out of range [0,%u]", (nDevices - 1)); |
- return -1; |
- } |
+ if (index > (nDevices - 1)) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " device index is out of range [0,%u]", (nDevices - 1)); |
+ return -1; |
+ } |
- _outputDeviceIndex = index; |
- _outputDeviceIsSpecified = true; |
+ _outputDeviceIndex = index; |
+ _outputDeviceIsSpecified = true; |
- return 0; |
+ return 0; |
} |
int32_t AudioDeviceLinuxPulse::SetPlayoutDevice( |
- AudioDeviceModule::WindowsDeviceType /*device*/) |
-{ |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- "WindowsDeviceType not supported"); |
- return -1; |
+ AudioDeviceModule::WindowsDeviceType /*device*/) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ "WindowsDeviceType not supported"); |
+ return -1; |
} |
int32_t AudioDeviceLinuxPulse::PlayoutDeviceName( |
uint16_t index, |
char name[kAdmMaxDeviceNameSize], |
- char guid[kAdmMaxGuidSize]) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- const uint16_t nDevices = PlayoutDevices(); |
+ char guid[kAdmMaxGuidSize]) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ const uint16_t nDevices = PlayoutDevices(); |
- if ((index > (nDevices - 1)) || (name == NULL)) |
- { |
- return -1; |
- } |
+ if ((index > (nDevices - 1)) || (name == NULL)) { |
+ return -1; |
+ } |
- memset(name, 0, kAdmMaxDeviceNameSize); |
+ memset(name, 0, kAdmMaxDeviceNameSize); |
- if (guid != NULL) |
- { |
- memset(guid, 0, kAdmMaxGuidSize); |
- } |
+ if (guid != NULL) { |
+ memset(guid, 0, kAdmMaxGuidSize); |
+ } |
- // Check if default device |
- if (index == 0) |
- { |
- uint16_t deviceIndex = 0; |
- return GetDefaultDeviceInfo(false, name, deviceIndex); |
- } |
+ // Check if default device |
+ if (index == 0) { |
+ uint16_t deviceIndex = 0; |
+ return GetDefaultDeviceInfo(false, name, deviceIndex); |
+ } |
- // Tell the callback that we want |
- // The name for this device |
- _playDisplayDeviceName = name; |
- _deviceIndex = index; |
+ // Tell the callback that we want |
+ // The name for this device |
+ _playDisplayDeviceName = name; |
+ _deviceIndex = index; |
- // get playout devices |
- PlayoutDevices(); |
+ // get playout devices |
+ PlayoutDevices(); |
- // clear device name and index |
- _playDisplayDeviceName = NULL; |
- _deviceIndex = -1; |
+ // clear device name and index |
+ _playDisplayDeviceName = NULL; |
+ _deviceIndex = -1; |
- return 0; |
+ return 0; |
} |
int32_t AudioDeviceLinuxPulse::RecordingDeviceName( |
uint16_t index, |
char name[kAdmMaxDeviceNameSize], |
- char guid[kAdmMaxGuidSize]) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- const uint16_t nDevices(RecordingDevices()); |
+ char guid[kAdmMaxGuidSize]) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ const uint16_t nDevices(RecordingDevices()); |
- if ((index > (nDevices - 1)) || (name == NULL)) |
- { |
- return -1; |
- } |
+ if ((index > (nDevices - 1)) || (name == NULL)) { |
+ return -1; |
+ } |
- memset(name, 0, kAdmMaxDeviceNameSize); |
+ memset(name, 0, kAdmMaxDeviceNameSize); |
- if (guid != NULL) |
- { |
- memset(guid, 0, kAdmMaxGuidSize); |
- } |
+ if (guid != NULL) { |
+ memset(guid, 0, kAdmMaxGuidSize); |
+ } |
- // Check if default device |
- if (index == 0) |
- { |
- uint16_t deviceIndex = 0; |
- return GetDefaultDeviceInfo(true, name, deviceIndex); |
- } |
+ // Check if default device |
+ if (index == 0) { |
+ uint16_t deviceIndex = 0; |
+ return GetDefaultDeviceInfo(true, name, deviceIndex); |
+ } |
- // Tell the callback that we want |
- // the name for this device |
- _recDisplayDeviceName = name; |
- _deviceIndex = index; |
+ // Tell the callback that we want |
+ // the name for this device |
+ _recDisplayDeviceName = name; |
+ _deviceIndex = index; |
- // Get recording devices |
- RecordingDevices(); |
+ // Get recording devices |
+ RecordingDevices(); |
- // Clear device name and index |
- _recDisplayDeviceName = NULL; |
- _deviceIndex = -1; |
+ // Clear device name and index |
+ _recDisplayDeviceName = NULL; |
+ _deviceIndex = -1; |
- return 0; |
+ return 0; |
} |
-int16_t AudioDeviceLinuxPulse::RecordingDevices() |
-{ |
- PaLock(); |
+int16_t AudioDeviceLinuxPulse::RecordingDevices() { |
+ PaLock(); |
- pa_operation* paOperation = NULL; |
- _numRecDevices = 1; // Init to 1 to account for "default" |
+ pa_operation* paOperation = NULL; |
+ _numRecDevices = 1; // Init to 1 to account for "default" |
- // Get the whole list of devices and update _numRecDevices |
- paOperation = LATE(pa_context_get_source_info_list)(_paContext, |
- PaSourceInfoCallback, |
- this); |
+ // Get the whole list of devices and update _numRecDevices |
+ paOperation = LATE(pa_context_get_source_info_list)( |
+ _paContext, PaSourceInfoCallback, this); |
- WaitForOperationCompletion(paOperation); |
+ WaitForOperationCompletion(paOperation); |
- PaUnLock(); |
+ PaUnLock(); |
- return _numRecDevices; |
+ return _numRecDevices; |
} |
-int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (_recIsInitialized) |
- { |
- return -1; |
- } |
+int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (_recIsInitialized) { |
+ return -1; |
+ } |
- const uint16_t nDevices(RecordingDevices()); |
+ const uint16_t nDevices(RecordingDevices()); |
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
- " number of availiable input devices is %u", nDevices); |
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
+               " number of available input devices is %u", nDevices);
- if (index > (nDevices - 1)) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " device index is out of range [0,%u]", (nDevices - 1)); |
- return -1; |
- } |
+ if (index > (nDevices - 1)) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " device index is out of range [0,%u]", (nDevices - 1)); |
+ return -1; |
+ } |
- _inputDeviceIndex = index; |
- _inputDeviceIsSpecified = true; |
+ _inputDeviceIndex = index; |
+ _inputDeviceIsSpecified = true; |
- return 0; |
+ return 0; |
} |
int32_t AudioDeviceLinuxPulse::SetRecordingDevice( |
- AudioDeviceModule::WindowsDeviceType /*device*/) |
-{ |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- "WindowsDeviceType not supported"); |
- return -1; |
+ AudioDeviceModule::WindowsDeviceType /*device*/) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ "WindowsDeviceType not supported"); |
+ return -1; |
} |
-int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- available = false; |
+int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ available = false; |
- // Try to initialize the playout side |
- int32_t res = InitPlayout(); |
+ // Try to initialize the playout side |
+ int32_t res = InitPlayout(); |
- // Cancel effect of initialization |
- StopPlayout(); |
+ // Cancel effect of initialization |
+ StopPlayout(); |
- if (res != -1) |
- { |
- available = true; |
- } |
+ if (res != -1) { |
+ available = true; |
+ } |
- return res; |
+ return res; |
} |
-int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- available = false; |
+int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ available = false; |
- // Try to initialize the playout side |
- int32_t res = InitRecording(); |
+ // Try to initialize the playout side |
+ int32_t res = InitRecording(); |
- // Cancel effect of initialization |
- StopRecording(); |
+ // Cancel effect of initialization |
+ StopRecording(); |
- if (res != -1) |
- { |
- available = true; |
- } |
+ if (res != -1) { |
+ available = true; |
+ } |
- return res; |
+ return res; |
} |
-int32_t AudioDeviceLinuxPulse::InitPlayout() |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- |
- if (_playing) |
- { |
- return -1; |
- } |
+int32_t AudioDeviceLinuxPulse::InitPlayout() { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (!_outputDeviceIsSpecified) |
- { |
- return -1; |
- } |
+ if (_playing) { |
+ return -1; |
+ } |
- if (_playIsInitialized) |
- { |
- return 0; |
- } |
+ if (!_outputDeviceIsSpecified) { |
+ return -1; |
+ } |
- // Initialize the speaker (devices might have been added or removed) |
- if (InitSpeaker() == -1) |
- { |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
- " InitSpeaker() failed"); |
- } |
+ if (_playIsInitialized) { |
+ return 0; |
+ } |
- // Set the play sample specification |
- pa_sample_spec playSampleSpec; |
- playSampleSpec.channels = _playChannels; |
- playSampleSpec.format = PA_SAMPLE_S16LE; |
- playSampleSpec.rate = sample_rate_hz_; |
+ // Initialize the speaker (devices might have been added or removed) |
+ if (InitSpeaker() == -1) { |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
+ " InitSpeaker() failed"); |
+ } |
- // Create a new play stream |
- _playStream = LATE(pa_stream_new)(_paContext, "playStream", |
- &playSampleSpec, NULL); |
+ // Set the play sample specification |
+ pa_sample_spec playSampleSpec; |
+ playSampleSpec.channels = _playChannels; |
+ playSampleSpec.format = PA_SAMPLE_S16LE; |
+ playSampleSpec.rate = sample_rate_hz_; |
- if (!_playStream) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to create play stream, err=%d", |
- LATE(pa_context_errno)(_paContext)); |
- return -1; |
- } |
+ // Create a new play stream |
+ _playStream = |
+ LATE(pa_stream_new)(_paContext, "playStream", &playSampleSpec, NULL); |
- // Provide the playStream to the mixer |
- _mixerManager.SetPlayStream(_playStream); |
+ if (!_playStream) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to create play stream, err=%d", |
+ LATE(pa_context_errno)(_paContext)); |
+ return -1; |
+ } |
- if (_ptrAudioBuffer) |
- { |
- // Update audio buffer with the selected parameters |
- _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_); |
- _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels); |
- } |
+ // Provide the playStream to the mixer |
+ _mixerManager.SetPlayStream(_playStream); |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " stream state %d\n", |
- LATE(pa_stream_get_state)(_playStream)); |
+ if (_ptrAudioBuffer) { |
+ // Update audio buffer with the selected parameters |
+ _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_); |
+ _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels); |
+ } |
- // Set stream flags |
- _playStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE |
- | PA_STREAM_INTERPOLATE_TIMING); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stream state %d\n", |
+ LATE(pa_stream_get_state)(_playStream)); |
- if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) |
- { |
- // If configuring a specific latency then we want to specify |
- // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters |
- // automatically to reach that target latency. However, that flag |
- // doesn't exist in Ubuntu 8.04 and many people still use that, |
- // so we have to check the protocol version of libpulse. |
- if (LATE(pa_context_get_protocol_version)(_paContext) |
- >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) |
- { |
- _playStreamFlags |= PA_STREAM_ADJUST_LATENCY; |
- } |
+ // Set stream flags |
+ _playStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE | |
+ PA_STREAM_INTERPOLATE_TIMING); |
- const pa_sample_spec *spec = |
- LATE(pa_stream_get_sample_spec)(_playStream); |
- if (!spec) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " pa_stream_get_sample_spec()"); |
- return -1; |
- } |
+ if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { |
+ // If configuring a specific latency then we want to specify |
+ // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters |
+ // automatically to reach that target latency. However, that flag |
+ // doesn't exist in Ubuntu 8.04 and many people still use that, |
+ // so we have to check the protocol version of libpulse. |
+ if (LATE(pa_context_get_protocol_version)(_paContext) >= |
+ WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { |
+ _playStreamFlags |= PA_STREAM_ADJUST_LATENCY; |
+ } |
- size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); |
- uint32_t latency = bytesPerSec * |
- WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / |
- WEBRTC_PA_MSECS_PER_SEC; |
- |
- // Set the play buffer attributes |
- _playBufferAttr.maxlength = latency; // num bytes stored in the buffer |
- _playBufferAttr.tlength = latency; // target fill level of play buffer |
- // minimum free num bytes before server request more data |
- _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; |
- // prebuffer tlength before starting playout |
- _playBufferAttr.prebuf = _playBufferAttr.tlength - |
- _playBufferAttr.minreq; |
- |
- _configuredLatencyPlay = latency; |
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); |
+ if (!spec) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " pa_stream_get_sample_spec()"); |
+ return -1; |
} |
- // num samples in bytes * num channels |
- _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels; |
- _playbackBufferUnused = _playbackBufferSize; |
- _playBuffer = new int8_t[_playbackBufferSize]; |
+ size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); |
+ uint32_t latency = bytesPerSec * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / |
+ WEBRTC_PA_MSECS_PER_SEC; |
- // Enable underflow callback |
- LATE(pa_stream_set_underflow_callback)(_playStream, |
- PaStreamUnderflowCallback, this); |
+ // Set the play buffer attributes |
+ _playBufferAttr.maxlength = latency; // num bytes stored in the buffer |
+ _playBufferAttr.tlength = latency; // target fill level of play buffer |
+ // minimum free num bytes before server request more data |
+ _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; |
+ // prebuffer tlength before starting playout |
+ _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq; |
- // Set the state callback function for the stream |
- LATE(pa_stream_set_state_callback)(_playStream, |
- PaStreamStateCallback, this); |
+ _configuredLatencyPlay = latency; |
+ } |
- // Mark playout side as initialized |
- _playIsInitialized = true; |
- _sndCardPlayDelay = 0; |
- _sndCardRecDelay = 0; |
+ // num samples in bytes * num channels |
+ _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels; |
+ _playbackBufferUnused = _playbackBufferSize; |
+ _playBuffer = new int8_t[_playbackBufferSize]; |
- return 0; |
-} |
+ // Enable underflow callback |
+ LATE(pa_stream_set_underflow_callback) |
+ (_playStream, PaStreamUnderflowCallback, this); |
-int32_t AudioDeviceLinuxPulse::InitRecording() |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ // Set the state callback function for the stream |
+ LATE(pa_stream_set_state_callback)(_playStream, PaStreamStateCallback, this); |
- if (_recording) |
- { |
- return -1; |
- } |
+ // Mark playout side as initialized |
+ _playIsInitialized = true; |
+ _sndCardPlayDelay = 0; |
+ _sndCardRecDelay = 0; |
- if (!_inputDeviceIsSpecified) |
- { |
- return -1; |
- } |
+ return 0; |
+} |
- if (_recIsInitialized) |
- { |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::InitRecording() { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- // Initialize the microphone (devices might have been added or removed) |
- if (InitMicrophone() == -1) |
- { |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
- " InitMicrophone() failed"); |
- } |
+ if (_recording) { |
+ return -1; |
+ } |
- // Set the rec sample specification |
- pa_sample_spec recSampleSpec; |
- recSampleSpec.channels = _recChannels; |
- recSampleSpec.format = PA_SAMPLE_S16LE; |
- recSampleSpec.rate = sample_rate_hz_; |
+ if (!_inputDeviceIsSpecified) { |
+ return -1; |
+ } |
- // Create a new rec stream |
- _recStream = LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, |
- NULL); |
- if (!_recStream) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to create rec stream, err=%d", |
- LATE(pa_context_errno)(_paContext)); |
- return -1; |
- } |
+ if (_recIsInitialized) { |
+ return 0; |
+ } |
- // Provide the recStream to the mixer |
- _mixerManager.SetRecStream(_recStream); |
+ // Initialize the microphone (devices might have been added or removed) |
+ if (InitMicrophone() == -1) { |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
+ " InitMicrophone() failed"); |
+ } |
- if (_ptrAudioBuffer) |
- { |
- // Update audio buffer with the selected parameters |
- _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_); |
- _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels); |
- } |
+ // Set the rec sample specification |
+ pa_sample_spec recSampleSpec; |
+ recSampleSpec.channels = _recChannels; |
+ recSampleSpec.format = PA_SAMPLE_S16LE; |
+ recSampleSpec.rate = sample_rate_hz_; |
- if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) |
- { |
- _recStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE |
- | PA_STREAM_INTERPOLATE_TIMING); |
- |
- // If configuring a specific latency then we want to specify |
- // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters |
- // automatically to reach that target latency. However, that flag |
- // doesn't exist in Ubuntu 8.04 and many people still use that, |
- // so we have to check the protocol version of libpulse. |
- if (LATE(pa_context_get_protocol_version)(_paContext) |
- >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) |
- { |
- _recStreamFlags |= PA_STREAM_ADJUST_LATENCY; |
- } |
+ // Create a new rec stream |
+ _recStream = |
+ LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, NULL); |
+ if (!_recStream) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to create rec stream, err=%d", |
+ LATE(pa_context_errno)(_paContext)); |
+ return -1; |
+ } |
- const pa_sample_spec *spec = |
- LATE(pa_stream_get_sample_spec)(_recStream); |
- if (!spec) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " pa_stream_get_sample_spec(rec)"); |
- return -1; |
- } |
+ // Provide the recStream to the mixer |
+ _mixerManager.SetRecStream(_recStream); |
- size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); |
- uint32_t latency = bytesPerSec |
- * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / WEBRTC_PA_MSECS_PER_SEC; |
+ if (_ptrAudioBuffer) { |
+ // Update audio buffer with the selected parameters |
+ _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_); |
+ _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); |
+ } |
- // Set the rec buffer attributes |
- // Note: fragsize specifies a maximum transfer size, not a minimum, so |
- // it is not possible to force a high latency setting, only a low one. |
- _recBufferAttr.fragsize = latency; // size of fragment |
- _recBufferAttr.maxlength = latency + bytesPerSec |
- * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / WEBRTC_PA_MSECS_PER_SEC; |
+ if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { |
+ _recStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE | |
+ PA_STREAM_INTERPOLATE_TIMING); |
- _configuredLatencyRec = latency; |
+ // If configuring a specific latency then we want to specify |
+ // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters |
+ // automatically to reach that target latency. However, that flag |
+ // doesn't exist in Ubuntu 8.04 and many people still use that, |
+ // so we have to check the protocol version of libpulse. |
+ if (LATE(pa_context_get_protocol_version)(_paContext) >= |
+ WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { |
+ _recStreamFlags |= PA_STREAM_ADJUST_LATENCY; |
} |
- _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels; |
- _recordBufferUsed = 0; |
- _recBuffer = new int8_t[_recordBufferSize]; |
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_recStream); |
+ if (!spec) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " pa_stream_get_sample_spec(rec)"); |
+ return -1; |
+ } |
+ |
+ size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); |
+ uint32_t latency = bytesPerSec * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / |
+ WEBRTC_PA_MSECS_PER_SEC; |
+ |
+ // Set the rec buffer attributes |
+ // Note: fragsize specifies a maximum transfer size, not a minimum, so |
+ // it is not possible to force a high latency setting, only a low one. |
+ _recBufferAttr.fragsize = latency; // size of fragment |
+ _recBufferAttr.maxlength = |
+ latency + bytesPerSec * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / |
+ WEBRTC_PA_MSECS_PER_SEC; |
+ |
+ _configuredLatencyRec = latency; |
+ } |
- // Enable overflow callback |
- LATE(pa_stream_set_overflow_callback)(_recStream, |
- PaStreamOverflowCallback, |
- this); |
+ _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels; |
+ _recordBufferUsed = 0; |
+ _recBuffer = new int8_t[_recordBufferSize]; |
- // Set the state callback function for the stream |
- LATE(pa_stream_set_state_callback)(_recStream, |
- PaStreamStateCallback, |
- this); |
+ // Enable overflow callback |
+ LATE(pa_stream_set_overflow_callback) |
+ (_recStream, PaStreamOverflowCallback, this); |
- // Mark recording side as initialized |
- _recIsInitialized = true; |
+ // Set the state callback function for the stream |
+ LATE(pa_stream_set_state_callback)(_recStream, PaStreamStateCallback, this); |
- return 0; |
+ // Mark recording side as initialized |
+ _recIsInitialized = true; |
+ |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::StartRecording() |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (!_recIsInitialized) |
- { |
- return -1; |
- } |
+int32_t AudioDeviceLinuxPulse::StartRecording() { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (!_recIsInitialized) { |
+ return -1; |
+ } |
- if (_recording) |
- { |
- return 0; |
- } |
+ if (_recording) { |
+ return 0; |
+ } |
- // Set state to ensure that the recording starts from the audio thread. |
- _startRec = true; |
+ // Set state to ensure that the recording starts from the audio thread. |
+ _startRec = true; |
- // The audio thread will signal when recording has started. |
- _timeEventRec.Set(); |
- if (kEventTimeout == _recStartEvent.Wait(10000)) |
+ // The audio thread will signal when recording has started. |
+ _timeEventRec.Set(); |
+ if (kEventTimeout == _recStartEvent.Wait(10000)) { |
{ |
- { |
- rtc::CritScope lock(&_critSect); |
- _startRec = false; |
- } |
- StopRecording(); |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to activate recording"); |
- return -1; |
+ rtc::CritScope lock(&_critSect); |
+ _startRec = false; |
} |
+ StopRecording(); |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to activate recording"); |
+ return -1; |
+ } |
- { |
- rtc::CritScope lock(&_critSect); |
- if (_recording) |
- { |
- // The recording state is set by the audio thread after recording |
- // has started. |
- } else |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to activate recording"); |
- return -1; |
- } |
+ { |
+ rtc::CritScope lock(&_critSect); |
+ if (_recording) { |
+ // The recording state is set by the audio thread after recording |
+ // has started. |
+ } else { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to activate recording"); |
+ return -1; |
} |
+ } |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::StopRecording() |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- rtc::CritScope lock(&_critSect); |
- |
- if (!_recIsInitialized) |
- { |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::StopRecording() { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ rtc::CritScope lock(&_critSect); |
- if (_recStream == NULL) |
- { |
- return -1; |
- } |
+ if (!_recIsInitialized) { |
+ return 0; |
+ } |
- _recIsInitialized = false; |
- _recording = false; |
+ if (_recStream == NULL) { |
+ return -1; |
+ } |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " stopping recording"); |
+ _recIsInitialized = false; |
+ _recording = false; |
- // Stop Recording |
- PaLock(); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stopping recording"); |
- DisableReadCallback(); |
- LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL); |
+ // Stop Recording |
+ PaLock(); |
- // Unset this here so that we don't get a TERMINATED callback |
- LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL); |
+ DisableReadCallback(); |
+ LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL); |
- if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) |
- { |
- // Disconnect the stream |
- if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to disconnect rec stream, err=%d\n", |
- LATE(pa_context_errno)(_paContext)); |
- PaUnLock(); |
- return -1; |
- } |
+ // Unset this here so that we don't get a TERMINATED callback |
+ LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL); |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " disconnected recording"); |
+ if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) { |
+ // Disconnect the stream |
+ if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to disconnect rec stream, err=%d\n", |
+ LATE(pa_context_errno)(_paContext)); |
+ PaUnLock(); |
+ return -1; |
} |
- LATE(pa_stream_unref)(_recStream); |
- _recStream = NULL; |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
+ " disconnected recording"); |
+ } |
- PaUnLock(); |
+ LATE(pa_stream_unref)(_recStream); |
+ _recStream = NULL; |
- // Provide the recStream to the mixer |
- _mixerManager.SetRecStream(_recStream); |
+ PaUnLock(); |
- if (_recBuffer) |
- { |
- delete [] _recBuffer; |
- _recBuffer = NULL; |
- } |
+ // Provide the recStream to the mixer |
+ _mixerManager.SetRecStream(_recStream); |
- return 0; |
+ if (_recBuffer) { |
+ delete[] _recBuffer; |
+ _recBuffer = NULL; |
+ } |
+ |
+ return 0; |
} |
-bool AudioDeviceLinuxPulse::RecordingIsInitialized() const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- return (_recIsInitialized); |
+bool AudioDeviceLinuxPulse::RecordingIsInitialized() const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ return (_recIsInitialized); |
} |
-bool AudioDeviceLinuxPulse::Recording() const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- return (_recording); |
+bool AudioDeviceLinuxPulse::Recording() const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ return (_recording); |
} |
-bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- return (_playIsInitialized); |
+bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ return (_playIsInitialized); |
} |
-int32_t AudioDeviceLinuxPulse::StartPlayout() |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+int32_t AudioDeviceLinuxPulse::StartPlayout() { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (!_playIsInitialized) |
- { |
- return -1; |
- } |
+ if (!_playIsInitialized) { |
+ return -1; |
+ } |
- if (_playing) |
- { |
- return 0; |
- } |
+ if (_playing) { |
+ return 0; |
+ } |
- // Set state to ensure that playout starts from the audio thread. |
- { |
- rtc::CritScope lock(&_critSect); |
- _startPlay = true; |
- } |
+ // Set state to ensure that playout starts from the audio thread. |
+ { |
+ rtc::CritScope lock(&_critSect); |
+ _startPlay = true; |
+ } |
- // Both |_startPlay| and |_playing| needs protction since they are also |
- // accessed on the playout thread. |
+  // Both |_startPlay| and |_playing| need protection since they are also |
+ // accessed on the playout thread. |
- // The audio thread will signal when playout has started. |
- _timeEventPlay.Set(); |
- if (kEventTimeout == _playStartEvent.Wait(10000)) |
+ // The audio thread will signal when playout has started. |
+ _timeEventPlay.Set(); |
+ if (kEventTimeout == _playStartEvent.Wait(10000)) { |
{ |
- { |
- rtc::CritScope lock(&_critSect); |
- _startPlay = false; |
- } |
- StopPlayout(); |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to activate playout"); |
- return -1; |
+ rtc::CritScope lock(&_critSect); |
+ _startPlay = false; |
} |
+ StopPlayout(); |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to activate playout"); |
+ return -1; |
+ } |
- { |
- rtc::CritScope lock(&_critSect); |
- if (_playing) |
- { |
- // The playing state is set by the audio thread after playout |
- // has started. |
- } else |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to activate playing"); |
- return -1; |
- } |
+ { |
+ rtc::CritScope lock(&_critSect); |
+ if (_playing) { |
+ // The playing state is set by the audio thread after playout |
+ // has started. |
+ } else { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to activate playing"); |
+ return -1; |
} |
+ } |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::StopPlayout() |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- rtc::CritScope lock(&_critSect); |
- |
- if (!_playIsInitialized) |
- { |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::StopPlayout() { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ rtc::CritScope lock(&_critSect); |
- if (_playStream == NULL) |
- { |
- return -1; |
- } |
+ if (!_playIsInitialized) { |
+ return 0; |
+ } |
- _playIsInitialized = false; |
- _playing = false; |
- _sndCardPlayDelay = 0; |
- _sndCardRecDelay = 0; |
+ if (_playStream == NULL) { |
+ return -1; |
+ } |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " stopping playback"); |
+ _playIsInitialized = false; |
+ _playing = false; |
+ _sndCardPlayDelay = 0; |
+ _sndCardRecDelay = 0; |
- // Stop Playout |
- PaLock(); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stopping playback"); |
- DisableWriteCallback(); |
- LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL); |
+ // Stop Playout |
+ PaLock(); |
- // Unset this here so that we don't get a TERMINATED callback |
- LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL); |
+ DisableWriteCallback(); |
+ LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL); |
- if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) |
- { |
- // Disconnect the stream |
- if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to disconnect play stream, err=%d", |
- LATE(pa_context_errno)(_paContext)); |
- PaUnLock(); |
- return -1; |
- } |
+ // Unset this here so that we don't get a TERMINATED callback |
+ LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL); |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " disconnected playback"); |
+ if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) { |
+ // Disconnect the stream |
+ if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to disconnect play stream, err=%d", |
+ LATE(pa_context_errno)(_paContext)); |
+ PaUnLock(); |
+ return -1; |
} |
- LATE(pa_stream_unref)(_playStream); |
- _playStream = NULL; |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
+ " disconnected playback"); |
+ } |
- PaUnLock(); |
+ LATE(pa_stream_unref)(_playStream); |
+ _playStream = NULL; |
- // Provide the playStream to the mixer |
- _mixerManager.SetPlayStream(_playStream); |
+ PaUnLock(); |
- if (_playBuffer) |
- { |
- delete [] _playBuffer; |
- _playBuffer = NULL; |
- } |
+ // Provide the playStream to the mixer |
+ _mixerManager.SetPlayStream(_playStream); |
- return 0; |
+ if (_playBuffer) { |
+ delete[] _playBuffer; |
+ _playBuffer = NULL; |
+ } |
+ |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const |
-{ |
- rtc::CritScope lock(&_critSect); |
- delayMS = (uint16_t) _sndCardPlayDelay; |
- return 0; |
+int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const { |
+ rtc::CritScope lock(&_critSect); |
+ delayMS = (uint16_t)_sndCardPlayDelay; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- delayMS = (uint16_t) _sndCardRecDelay; |
- return 0; |
+int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ delayMS = (uint16_t)_sndCardRecDelay; |
+ return 0; |
} |
-bool AudioDeviceLinuxPulse::Playing() const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- return (_playing); |
+bool AudioDeviceLinuxPulse::Playing() const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ return (_playing); |
} |
int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer( |
const AudioDeviceModule::BufferType type, |
- uint16_t sizeMS) |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- if (type != AudioDeviceModule::kFixedBufferSize) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " Adaptive buffer size not supported on this platform"); |
- return -1; |
- } |
+ uint16_t sizeMS) { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ if (type != AudioDeviceModule::kFixedBufferSize) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " Adaptive buffer size not supported on this platform"); |
+ return -1; |
+ } |
- _playBufType = type; |
- _playBufDelayFixed = sizeMS; |
+ _playBufType = type; |
+ _playBufDelayFixed = sizeMS; |
- return 0; |
+ return 0; |
} |
int32_t AudioDeviceLinuxPulse::PlayoutBuffer( |
AudioDeviceModule::BufferType& type, |
- uint16_t& sizeMS) const |
-{ |
- RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- type = _playBufType; |
- sizeMS = _playBufDelayFixed; |
+ uint16_t& sizeMS) const { |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
+ type = _playBufType; |
+ sizeMS = _playBufDelayFixed; |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const |
-{ |
- |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
- " API call not supported on this platform"); |
- return -1; |
+int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const { |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
+ " API call not supported on this platform"); |
+ return -1; |
} |
-bool AudioDeviceLinuxPulse::PlayoutWarning() const |
-{ |
+bool AudioDeviceLinuxPulse::PlayoutWarning() const { |
rtc::CritScope lock(&_critSect); |
return (_playWarning > 0); |
} |
-bool AudioDeviceLinuxPulse::PlayoutError() const |
-{ |
+bool AudioDeviceLinuxPulse::PlayoutError() const { |
rtc::CritScope lock(&_critSect); |
return (_playError > 0); |
} |
-bool AudioDeviceLinuxPulse::RecordingWarning() const |
-{ |
+bool AudioDeviceLinuxPulse::RecordingWarning() const { |
rtc::CritScope lock(&_critSect); |
return (_recWarning > 0); |
} |
-bool AudioDeviceLinuxPulse::RecordingError() const |
-{ |
+bool AudioDeviceLinuxPulse::RecordingError() const { |
rtc::CritScope lock(&_critSect); |
return (_recError > 0); |
} |
-void AudioDeviceLinuxPulse::ClearPlayoutWarning() |
-{ |
+void AudioDeviceLinuxPulse::ClearPlayoutWarning() { |
rtc::CritScope lock(&_critSect); |
_playWarning = 0; |
} |
-void AudioDeviceLinuxPulse::ClearPlayoutError() |
-{ |
+void AudioDeviceLinuxPulse::ClearPlayoutError() { |
rtc::CritScope lock(&_critSect); |
_playError = 0; |
} |
-void AudioDeviceLinuxPulse::ClearRecordingWarning() |
-{ |
+void AudioDeviceLinuxPulse::ClearRecordingWarning() { |
rtc::CritScope lock(&_critSect); |
_recWarning = 0; |
} |
-void AudioDeviceLinuxPulse::ClearRecordingError() |
-{ |
+void AudioDeviceLinuxPulse::ClearRecordingError() { |
rtc::CritScope lock(&_critSect); |
_recError = 0; |
} |
@@ -1680,1300 +1482,1091 @@ void AudioDeviceLinuxPulse::ClearRecordingError() |
// Private Methods |
// ============================================================================ |
-void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context *c, void *pThis) |
-{ |
- static_cast<AudioDeviceLinuxPulse*> (pThis)-> |
- PaContextStateCallbackHandler(c); |
+void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context* c, void* pThis) { |
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaContextStateCallbackHandler(c); |
} |
// ---------------------------------------------------------------------------- |
// PaSinkInfoCallback |
// ---------------------------------------------------------------------------- |
-void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context */*c*/, |
- const pa_sink_info *i, int eol, |
- void *pThis) |
-{ |
- static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSinkInfoCallbackHandler( |
- i, eol); |
-} |
- |
-void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context */*c*/, |
- const pa_source_info *i, |
- int eol, void *pThis) |
-{ |
- static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSourceInfoCallbackHandler( |
- i, eol); |
-} |
- |
-void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context */*c*/, |
- const pa_server_info *i, |
- void *pThis) |
-{ |
- static_cast<AudioDeviceLinuxPulse*> (pThis)-> |
- PaServerInfoCallbackHandler(i); |
-} |
- |
-void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream *p, void *pThis) |
-{ |
- static_cast<AudioDeviceLinuxPulse*> (pThis)-> |
- PaStreamStateCallbackHandler(p); |
+void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context* /*c*/, |
+ const pa_sink_info* i, |
+ int eol, |
+ void* pThis) { |
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaSinkInfoCallbackHandler(i, eol); |
+} |
+ |
+void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context* /*c*/, |
+ const pa_source_info* i, |
+ int eol, |
+ void* pThis) { |
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaSourceInfoCallbackHandler(i, |
+ eol); |
+} |
+ |
+void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context* /*c*/, |
+ const pa_server_info* i, |
+ void* pThis) { |
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaServerInfoCallbackHandler(i); |
+} |
+ |
+void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream* p, void* pThis) { |
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamStateCallbackHandler(p); |
+} |
+ |
+void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context* c) { |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " context state cb"); |
+ |
+ pa_context_state_t state = LATE(pa_context_get_state)(c); |
+ switch (state) { |
+ case PA_CONTEXT_UNCONNECTED: |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " unconnected"); |
+ break; |
+ case PA_CONTEXT_CONNECTING: |
+ case PA_CONTEXT_AUTHORIZING: |
+ case PA_CONTEXT_SETTING_NAME: |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " no state"); |
+ break; |
+ case PA_CONTEXT_FAILED: |
+ case PA_CONTEXT_TERMINATED: |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " failed"); |
+ _paStateChanged = true; |
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
+ break; |
+ case PA_CONTEXT_READY: |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " ready"); |
+ _paStateChanged = true; |
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
+ break; |
+ } |
} |
-void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context *c) |
-{ |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " context state cb"); |
+void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info* i, |
+ int eol) { |
+ if (eol) { |
+ // Signal that we are done |
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
+ return; |
+ } |
- pa_context_state_t state = LATE(pa_context_get_state)(c); |
- switch (state) |
- { |
- case PA_CONTEXT_UNCONNECTED: |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " unconnected"); |
- break; |
- case PA_CONTEXT_CONNECTING: |
- case PA_CONTEXT_AUTHORIZING: |
- case PA_CONTEXT_SETTING_NAME: |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " no state"); |
- break; |
- case PA_CONTEXT_FAILED: |
- case PA_CONTEXT_TERMINATED: |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " failed"); |
- _paStateChanged = true; |
- LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
- break; |
- case PA_CONTEXT_READY: |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " ready"); |
- _paStateChanged = true; |
- LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
- break; |
- } |
-} |
+ if (_numPlayDevices == _deviceIndex) { |
+ // Convert the device index to the one of the sink |
+ _paDeviceIndex = i->index; |
-void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info *i, |
- int eol) |
-{ |
- if (eol) |
- { |
- // Signal that we are done |
- LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
- return; |
- } |
- |
- if (_numPlayDevices == _deviceIndex) |
- { |
- // Convert the device index to the one of the sink |
- _paDeviceIndex = i->index; |
- |
- if (_playDeviceName) |
- { |
- // Copy the sink name |
- strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize); |
- _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
- } |
- if (_playDisplayDeviceName) |
- { |
- // Copy the sink display name |
- strncpy(_playDisplayDeviceName, i->description, |
- kAdmMaxDeviceNameSize); |
- _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
- } |
+ if (_playDeviceName) { |
+ // Copy the sink name |
+ strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize); |
+ _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
} |
- |
- _numPlayDevices++; |
-} |
- |
-void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler( |
- const pa_source_info *i, |
- int eol) |
-{ |
- if (eol) |
- { |
- // Signal that we are done |
- LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
- return; |
+ if (_playDisplayDeviceName) { |
+ // Copy the sink display name |
+ strncpy(_playDisplayDeviceName, i->description, kAdmMaxDeviceNameSize); |
+ _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
} |
+ } |
- // We don't want to list output devices |
- if (i->monitor_of_sink == PA_INVALID_INDEX) |
- { |
- if (_numRecDevices == _deviceIndex) |
- { |
- // Convert the device index to the one of the source |
- _paDeviceIndex = i->index; |
- |
- if (_recDeviceName) |
- { |
- // copy the source name |
- strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize); |
- _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
- } |
- if (_recDisplayDeviceName) |
- { |
- // Copy the source display name |
- strncpy(_recDisplayDeviceName, i->description, |
- kAdmMaxDeviceNameSize); |
- _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
- } |
- } |
- |
- _numRecDevices++; |
- } |
+ _numPlayDevices++; |
} |
-void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler( |
- const pa_server_info *i) |
-{ |
- // Use PA native sampling rate |
- sample_rate_hz_ = i->sample_spec.rate; |
- |
- // Copy the PA server version |
- strncpy(_paServerVersion, i->server_version, 31); |
- _paServerVersion[31] = '\0'; |
+void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(const pa_source_info* i, |
+ int eol) { |
+ if (eol) { |
+ // Signal that we are done |
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
+ return; |
+ } |
- if (_recDisplayDeviceName) |
- { |
- // Copy the source name |
- strncpy(_recDisplayDeviceName, i->default_source_name, |
- kAdmMaxDeviceNameSize); |
+ // We don't want to list output devices |
+ if (i->monitor_of_sink == PA_INVALID_INDEX) { |
+ if (_numRecDevices == _deviceIndex) { |
+ // Convert the device index to the one of the source |
+ _paDeviceIndex = i->index; |
+ |
+ if (_recDeviceName) { |
+ // copy the source name |
+ strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize); |
+ _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
+ } |
+ if (_recDisplayDeviceName) { |
+ // Copy the source display name |
+ strncpy(_recDisplayDeviceName, i->description, kAdmMaxDeviceNameSize); |
_recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
+ } |
} |
- if (_playDisplayDeviceName) |
- { |
- // Copy the sink name |
- strncpy(_playDisplayDeviceName, i->default_sink_name, |
- kAdmMaxDeviceNameSize); |
- _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
- } |
- |
- LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
+ _numRecDevices++; |
+ } |
} |
-void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream *p) |
-{ |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " stream state cb"); |
+void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler( |
+ const pa_server_info* i) { |
+ // Use PA native sampling rate |
+ sample_rate_hz_ = i->sample_spec.rate; |
+ |
+ // Copy the PA server version |
+ strncpy(_paServerVersion, i->server_version, 31); |
+ _paServerVersion[31] = '\0'; |
+ |
+ if (_recDisplayDeviceName) { |
+ // Copy the source name |
+ strncpy(_recDisplayDeviceName, i->default_source_name, |
+ kAdmMaxDeviceNameSize); |
+ _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
+ } |
- pa_stream_state_t state = LATE(pa_stream_get_state)(p); |
- switch (state) |
- { |
- case PA_STREAM_UNCONNECTED: |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " unconnected"); |
- break; |
- case PA_STREAM_CREATING: |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " creating"); |
- break; |
- case PA_STREAM_FAILED: |
- case PA_STREAM_TERMINATED: |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " failed"); |
- break; |
- case PA_STREAM_READY: |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " ready"); |
- break; |
- } |
+ if (_playDisplayDeviceName) { |
+ // Copy the sink name |
+ strncpy(_playDisplayDeviceName, i->default_sink_name, |
+ kAdmMaxDeviceNameSize); |
+ _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; |
+ } |
- LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
+} |
+ |
+void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream* p) { |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stream state cb"); |
+ |
+ pa_stream_state_t state = LATE(pa_stream_get_state)(p); |
+ switch (state) { |
+ case PA_STREAM_UNCONNECTED: |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " unconnected"); |
+ break; |
+ case PA_STREAM_CREATING: |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " creating"); |
+ break; |
+ case PA_STREAM_FAILED: |
+ case PA_STREAM_TERMINATED: |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " failed"); |
+ break; |
+ case PA_STREAM_READY: |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " ready"); |
+ break; |
+ } |
+ |
+ LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); |
} |
-int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() |
-{ |
- PaLock(); |
+int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() { |
+ PaLock(); |
- pa_operation* paOperation = NULL; |
+ pa_operation* paOperation = NULL; |
- // get the server info and update deviceName |
- paOperation = LATE(pa_context_get_server_info)(_paContext, |
- PaServerInfoCallback, |
- this); |
+ // get the server info and update deviceName |
+ paOperation = |
+ LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this); |
- WaitForOperationCompletion(paOperation); |
+ WaitForOperationCompletion(paOperation); |
- PaUnLock(); |
+ PaUnLock(); |
- WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, |
- " checking PulseAudio version: %s", _paServerVersion); |
+ WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, |
+ " checking PulseAudio version: %s", _paServerVersion); |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() |
-{ |
- PaLock(); |
+int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() { |
+ PaLock(); |
- pa_operation* paOperation = NULL; |
+ pa_operation* paOperation = NULL; |
- // Get the server info and update sample_rate_hz_ |
- paOperation = LATE(pa_context_get_server_info)(_paContext, |
- PaServerInfoCallback, |
- this); |
+ // Get the server info and update sample_rate_hz_ |
+ paOperation = |
+ LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this); |
- WaitForOperationCompletion(paOperation); |
+ WaitForOperationCompletion(paOperation); |
- PaUnLock(); |
+ PaUnLock(); |
- return 0; |
+ return 0; |
} |
int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice, |
char* name, |
- uint16_t& index) |
-{ |
- char tmpName[kAdmMaxDeviceNameSize] = {0}; |
- // subtract length of "default: " |
- uint16_t nameLen = kAdmMaxDeviceNameSize - 9; |
- char* pName = NULL; |
- |
- if (name) |
- { |
- // Add "default: " |
- strcpy(name, "default: "); |
- pName = &name[9]; |
- } |
- |
- // Tell the callback that we want |
- // the name for this device |
- if (recDevice) |
- { |
- _recDisplayDeviceName = tmpName; |
- } else |
- { |
- _playDisplayDeviceName = tmpName; |
- } |
- |
- // Set members |
- _paDeviceIndex = -1; |
- _deviceIndex = 0; |
- _numPlayDevices = 0; |
- _numRecDevices = 0; |
- |
- PaLock(); |
- |
- pa_operation* paOperation = NULL; |
- |
- // Get the server info and update deviceName |
- paOperation = LATE(pa_context_get_server_info)(_paContext, |
- PaServerInfoCallback, |
- this); |
+ uint16_t& index) { |
+ char tmpName[kAdmMaxDeviceNameSize] = {0}; |
+ // subtract length of "default: " |
+ uint16_t nameLen = kAdmMaxDeviceNameSize - 9; |
+ char* pName = NULL; |
+ |
+ if (name) { |
+ // Add "default: " |
+ strcpy(name, "default: "); |
+ pName = &name[9]; |
+ } |
- WaitForOperationCompletion(paOperation); |
+ // Tell the callback that we want |
+ // the name for this device |
+ if (recDevice) { |
+ _recDisplayDeviceName = tmpName; |
+ } else { |
+ _playDisplayDeviceName = tmpName; |
+ } |
- // Get the device index |
- if (recDevice) |
- { |
- paOperation |
- = LATE(pa_context_get_source_info_by_name)(_paContext, |
- (char *) tmpName, |
- PaSourceInfoCallback, |
- this); |
- } else |
- { |
- paOperation |
- = LATE(pa_context_get_sink_info_by_name)(_paContext, |
- (char *) tmpName, |
- PaSinkInfoCallback, |
- this); |
- } |
+ // Set members |
+ _paDeviceIndex = -1; |
+ _deviceIndex = 0; |
+ _numPlayDevices = 0; |
+ _numRecDevices = 0; |
- WaitForOperationCompletion(paOperation); |
+ PaLock(); |
- PaUnLock(); |
+ pa_operation* paOperation = NULL; |
- // Set the index |
- index = _paDeviceIndex; |
+ // Get the server info and update deviceName |
+ paOperation = |
+ LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this); |
- if (name) |
- { |
- // Copy to name string |
- strncpy(pName, tmpName, nameLen); |
- } |
+ WaitForOperationCompletion(paOperation); |
- // Clear members |
- _playDisplayDeviceName = NULL; |
- _recDisplayDeviceName = NULL; |
- _paDeviceIndex = -1; |
- _deviceIndex = -1; |
- _numPlayDevices = 0; |
- _numRecDevices = 0; |
+ // Get the device index |
+ if (recDevice) { |
+ paOperation = LATE(pa_context_get_source_info_by_name)( |
+ _paContext, (char*)tmpName, PaSourceInfoCallback, this); |
+ } else { |
+ paOperation = LATE(pa_context_get_sink_info_by_name)( |
+ _paContext, (char*)tmpName, PaSinkInfoCallback, this); |
+ } |
- return 0; |
-} |
+ WaitForOperationCompletion(paOperation); |
-int32_t AudioDeviceLinuxPulse::InitPulseAudio() |
-{ |
- int retVal = 0; |
+ PaUnLock(); |
- // Load libpulse |
- if (!PaSymbolTable.Load()) |
- { |
- // Most likely the Pulse library and sound server are not installed on |
- // this system |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to load symbol table"); |
- return -1; |
- } |
+ // Set the index |
+ index = _paDeviceIndex; |
- // Create a mainloop API and connection to the default server |
- // the mainloop is the internal asynchronous API event loop |
- if (_paMainloop) { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " PA mainloop has already existed"); |
- return -1; |
- } |
- _paMainloop = LATE(pa_threaded_mainloop_new)(); |
- if (!_paMainloop) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " could not create mainloop"); |
- return -1; |
- } |
+ if (name) { |
+ // Copy to name string |
+ strncpy(pName, tmpName, nameLen); |
+ } |
- // Start the threaded main loop |
- retVal = LATE(pa_threaded_mainloop_start)(_paMainloop); |
- if (retVal != PA_OK) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to start main loop, error=%d", retVal); |
- return -1; |
- } |
+ // Clear members |
+ _playDisplayDeviceName = NULL; |
+ _recDisplayDeviceName = NULL; |
+ _paDeviceIndex = -1; |
+ _deviceIndex = -1; |
+ _numPlayDevices = 0; |
+ _numRecDevices = 0; |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " mainloop running!"); |
+ return 0; |
+} |
- PaLock(); |
+int32_t AudioDeviceLinuxPulse::InitPulseAudio() { |
+ int retVal = 0; |
- _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop); |
- if (!_paMainloopApi) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " could not create mainloop API"); |
- PaUnLock(); |
- return -1; |
- } |
+ // Load libpulse |
+ if (!PaSymbolTable.Load()) { |
+ // Most likely the Pulse library and sound server are not installed on |
+ // this system |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to load symbol table"); |
+ return -1; |
+ } |
- // Create a new PulseAudio context |
- if (_paContext){ |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " PA context has already existed"); |
- PaUnLock(); |
- return -1; |
- } |
- _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine"); |
+ // Create a mainloop API and connection to the default server |
+ // the mainloop is the internal asynchronous API event loop |
+ if (_paMainloop) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " PA mainloop has already existed"); |
+ return -1; |
+ } |
+ _paMainloop = LATE(pa_threaded_mainloop_new)(); |
+ if (!_paMainloop) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " could not create mainloop"); |
+ return -1; |
+ } |
- if (!_paContext) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " could not create context"); |
- PaUnLock(); |
- return -1; |
- } |
+ // Start the threaded main loop |
+ retVal = LATE(pa_threaded_mainloop_start)(_paMainloop); |
+ if (retVal != PA_OK) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to start main loop, error=%d", retVal); |
+ return -1; |
+ } |
- // Set state callback function |
- LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback, |
- this); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " mainloop running!"); |
- // Connect the context to a server (default) |
- _paStateChanged = false; |
- retVal = LATE(pa_context_connect)(_paContext, |
- NULL, |
- PA_CONTEXT_NOAUTOSPAWN, |
- NULL); |
+ PaLock(); |
- if (retVal != PA_OK) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to connect context, error=%d", retVal); |
- PaUnLock(); |
- return -1; |
- } |
+ _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop); |
+ if (!_paMainloopApi) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " could not create mainloop API"); |
+ PaUnLock(); |
+ return -1; |
+ } |
- // Wait for state change |
- while (!_paStateChanged) |
- { |
- LATE(pa_threaded_mainloop_wait)(_paMainloop); |
- } |
+ // Create a new PulseAudio context |
+ if (_paContext) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " PA context has already existed"); |
+ PaUnLock(); |
+ return -1; |
+ } |
+ _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine"); |
+ |
+ if (!_paContext) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " could not create context"); |
+ PaUnLock(); |
+ return -1; |
+ } |
- // Now check to see what final state we reached. |
- pa_context_state_t state = LATE(pa_context_get_state)(_paContext); |
+ // Set state callback function |
+ LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback, this); |
- if (state != PA_CONTEXT_READY) |
- { |
- if (state == PA_CONTEXT_FAILED) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to connect to PulseAudio sound server"); |
- } else if (state == PA_CONTEXT_TERMINATED) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " PulseAudio connection terminated early"); |
- } else |
- { |
- // Shouldn't happen, because we only signal on one of those three |
- // states |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " unknown problem connecting to PulseAudio"); |
- } |
- PaUnLock(); |
- return -1; |
- } |
+ // Connect the context to a server (default) |
+ _paStateChanged = false; |
+ retVal = |
+ LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL); |
+ if (retVal != PA_OK) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to connect context, error=%d", retVal); |
PaUnLock(); |
+ return -1; |
+ } |
- // Give the objects to the mixer manager |
- _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext); |
+ // Wait for state change |
+ while (!_paStateChanged) { |
+ LATE(pa_threaded_mainloop_wait)(_paMainloop); |
+ } |
- // Check the version |
- if (CheckPulseAudioVersion() < 0) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " PulseAudio version %s not supported", |
- _paServerVersion); |
- return -1; |
+ // Now check to see what final state we reached. |
+ pa_context_state_t state = LATE(pa_context_get_state)(_paContext); |
+ |
+ if (state != PA_CONTEXT_READY) { |
+ if (state == PA_CONTEXT_FAILED) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to connect to PulseAudio sound server"); |
+ } else if (state == PA_CONTEXT_TERMINATED) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " PulseAudio connection terminated early"); |
+ } else { |
+ // Shouldn't happen, because we only signal on one of those three |
+ // states |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " unknown problem connecting to PulseAudio"); |
} |
+ PaUnLock(); |
+ return -1; |
+ } |
- // Initialize sampling frequency |
- if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to initialize sampling frequency," |
- " set to %d Hz", |
- sample_rate_hz_); |
- return -1; |
- } |
+ PaUnLock(); |
- return 0; |
+ // Give the objects to the mixer manager |
+ _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext); |
+ |
+ // Check the version |
+ if (CheckPulseAudioVersion() < 0) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " PulseAudio version %s not supported", _paServerVersion); |
+ return -1; |
+ } |
+ |
+ // Initialize sampling frequency |
+ if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to initialize sampling frequency," |
+ " set to %d Hz", |
+ sample_rate_hz_); |
+ return -1; |
+ } |
+ |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::TerminatePulseAudio() |
-{ |
- // Do nothing if the instance doesn't exist |
- // likely PaSymbolTable.Load() fails |
- if (!_paMainloop) { |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::TerminatePulseAudio() { |
+ // Do nothing if the instance doesn't exist |
+ // likely PaSymbolTable.Load() fails |
+ if (!_paMainloop) { |
+ return 0; |
+ } |
- PaLock(); |
+ PaLock(); |
- // Disconnect the context |
- if (_paContext) |
- { |
- LATE(pa_context_disconnect)(_paContext); |
- } |
+ // Disconnect the context |
+ if (_paContext) { |
+ LATE(pa_context_disconnect)(_paContext); |
+ } |
- // Unreference the context |
- if (_paContext) |
- { |
- LATE(pa_context_unref)(_paContext); |
- } |
+ // Unreference the context |
+ if (_paContext) { |
+ LATE(pa_context_unref)(_paContext); |
+ } |
- PaUnLock(); |
- _paContext = NULL; |
+ PaUnLock(); |
+ _paContext = NULL; |
- // Stop the threaded main loop |
- if (_paMainloop) |
- { |
- LATE(pa_threaded_mainloop_stop)(_paMainloop); |
- } |
+ // Stop the threaded main loop |
+ if (_paMainloop) { |
+ LATE(pa_threaded_mainloop_stop)(_paMainloop); |
+ } |
- // Free the mainloop |
- if (_paMainloop) |
- { |
- LATE(pa_threaded_mainloop_free)(_paMainloop); |
- } |
+ // Free the mainloop |
+ if (_paMainloop) { |
+ LATE(pa_threaded_mainloop_free)(_paMainloop); |
+ } |
- _paMainloop = NULL; |
+ _paMainloop = NULL; |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " PulseAudio terminated"); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " PulseAudio terminated"); |
- return 0; |
+ return 0; |
} |
-void AudioDeviceLinuxPulse::PaLock() |
-{ |
- LATE(pa_threaded_mainloop_lock)(_paMainloop); |
+void AudioDeviceLinuxPulse::PaLock() { |
+ LATE(pa_threaded_mainloop_lock)(_paMainloop); |
} |
-void AudioDeviceLinuxPulse::PaUnLock() |
-{ |
- LATE(pa_threaded_mainloop_unlock)(_paMainloop); |
+void AudioDeviceLinuxPulse::PaUnLock() { |
+ LATE(pa_threaded_mainloop_unlock)(_paMainloop); |
} |
void AudioDeviceLinuxPulse::WaitForOperationCompletion( |
- pa_operation* paOperation) const |
-{ |
- if (!paOperation) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- "paOperation NULL in WaitForOperationCompletion"); |
- return; |
- } |
+ pa_operation* paOperation) const { |
+ if (!paOperation) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ "paOperation NULL in WaitForOperationCompletion"); |
+ return; |
+ } |
- while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) |
- { |
- LATE(pa_threaded_mainloop_wait)(_paMainloop); |
- } |
+ while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) { |
+ LATE(pa_threaded_mainloop_wait)(_paMainloop); |
+ } |
- LATE(pa_operation_unref)(paOperation); |
+ LATE(pa_operation_unref)(paOperation); |
} |
// ============================================================================ |
// Thread Methods |
// ============================================================================ |
-void AudioDeviceLinuxPulse::EnableWriteCallback() |
-{ |
- if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY) |
- { |
- // May already have available space. Must check. |
- _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream); |
- if (_tempBufferSpace > 0) |
- { |
- // Yup, there is already space available, so if we register a |
- // write callback then it will not receive any event. So dispatch |
- // one ourself instead. |
- _timeEventPlay.Set(); |
- return; |
- } |
+void AudioDeviceLinuxPulse::EnableWriteCallback() { |
+ if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY) { |
+ // May already have available space. Must check. |
+ _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream); |
+ if (_tempBufferSpace > 0) { |
+ // Yup, there is already space available, so if we register a |
+ // write callback then it will not receive any event. So dispatch |
+ // one ourself instead. |
+ _timeEventPlay.Set(); |
+ return; |
} |
+ } |
- LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback, |
- this); |
+ LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback, this); |
} |
-void AudioDeviceLinuxPulse::DisableWriteCallback() |
-{ |
- LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL); |
+void AudioDeviceLinuxPulse::DisableWriteCallback() { |
+ LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL); |
} |
-void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream */*unused*/, |
+void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream* /*unused*/, |
size_t buffer_space, |
- void *pThis) |
-{ |
- static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamWriteCallbackHandler( |
- buffer_space); |
+ void* pThis) { |
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamWriteCallbackHandler( |
+ buffer_space); |
} |
-void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace) |
-{ |
- _tempBufferSpace = bufferSpace; |
+void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace) { |
+ _tempBufferSpace = bufferSpace; |
- // Since we write the data asynchronously on a different thread, we have |
- // to temporarily disable the write callback or else Pulse will call it |
- // continuously until we write the data. We re-enable it below. |
- DisableWriteCallback(); |
- _timeEventPlay.Set(); |
+ // Since we write the data asynchronously on a different thread, we have |
+ // to temporarily disable the write callback or else Pulse will call it |
+ // continuously until we write the data. We re-enable it below. |
+ DisableWriteCallback(); |
+ _timeEventPlay.Set(); |
} |
-void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream */*unused*/, |
- void *pThis) |
-{ |
- static_cast<AudioDeviceLinuxPulse*> (pThis)-> |
- PaStreamUnderflowCallbackHandler(); |
+void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream* /*unused*/, |
+ void* pThis) { |
+ static_cast<AudioDeviceLinuxPulse*>(pThis) |
+ ->PaStreamUnderflowCallbackHandler(); |
} |
-void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() |
-{ |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
- " Playout underflow"); |
- |
- if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) |
- { |
- // We didn't configure a pa_buffer_attr before, so switching to |
- // one now would be questionable. |
- return; |
- } |
+void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() { |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " Playout underflow"); |
- // Otherwise reconfigure the stream with a higher target latency. |
- |
- const pa_sample_spec *spec = LATE(pa_stream_get_sample_spec)(_playStream); |
- if (!spec) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " pa_stream_get_sample_spec()"); |
- return; |
- } |
+ if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { |
+ // We didn't configure a pa_buffer_attr before, so switching to |
+ // one now would be questionable. |
+ return; |
+ } |
- size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); |
- uint32_t newLatency = _configuredLatencyPlay + bytesPerSec * |
- WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / |
- WEBRTC_PA_MSECS_PER_SEC; |
+ // Otherwise reconfigure the stream with a higher target latency. |
- // Set the play buffer attributes |
- _playBufferAttr.maxlength = newLatency; |
- _playBufferAttr.tlength = newLatency; |
- _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; |
- _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq; |
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); |
+ if (!spec) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " pa_stream_get_sample_spec()"); |
+ return; |
+ } |
- pa_operation *op = LATE(pa_stream_set_buffer_attr)(_playStream, |
- &_playBufferAttr, NULL, |
- NULL); |
- if (!op) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " pa_stream_set_buffer_attr()"); |
- return; |
- } |
+ size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); |
+ uint32_t newLatency = |
+ _configuredLatencyPlay + bytesPerSec * |
+ WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / |
+ WEBRTC_PA_MSECS_PER_SEC; |
+ |
+ // Set the play buffer attributes |
+ _playBufferAttr.maxlength = newLatency; |
+ _playBufferAttr.tlength = newLatency; |
+ _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; |
+ _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq; |
+ |
+ pa_operation* op = LATE(pa_stream_set_buffer_attr)( |
+ _playStream, &_playBufferAttr, NULL, NULL); |
+ if (!op) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " pa_stream_set_buffer_attr()"); |
+ return; |
+ } |
- // Don't need to wait for this to complete. |
- LATE(pa_operation_unref)(op); |
+ // Don't need to wait for this to complete. |
+ LATE(pa_operation_unref)(op); |
- // Save the new latency in case we underflow again. |
- _configuredLatencyPlay = newLatency; |
+ // Save the new latency in case we underflow again. |
+ _configuredLatencyPlay = newLatency; |
} |
-void AudioDeviceLinuxPulse::EnableReadCallback() |
-{ |
- LATE(pa_stream_set_read_callback)(_recStream, |
- &PaStreamReadCallback, |
- this); |
+void AudioDeviceLinuxPulse::EnableReadCallback() { |
+ LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this); |
} |
-void AudioDeviceLinuxPulse::DisableReadCallback() |
-{ |
- LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL); |
+void AudioDeviceLinuxPulse::DisableReadCallback() { |
+ LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL); |
} |
-void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream */*unused1*/, |
+void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream* /*unused1*/, |
size_t /*unused2*/, |
- void *pThis) |
-{ |
- static_cast<AudioDeviceLinuxPulse*> (pThis)-> |
- PaStreamReadCallbackHandler(); |
-} |
- |
-void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() |
-{ |
- // We get the data pointer and size now in order to save one Lock/Unlock |
- // in the worker thread. |
- if (LATE(pa_stream_peek)(_recStream, |
- &_tempSampleData, |
- &_tempSampleDataSize) != 0) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " Can't read data!"); |
- return; |
- } |
+ void* pThis) { |
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamReadCallbackHandler(); |
+} |
- // Since we consume the data asynchronously on a different thread, we have |
- // to temporarily disable the read callback or else Pulse will call it |
- // continuously until we consume the data. We re-enable it below. |
- DisableReadCallback(); |
- _timeEventRec.Set(); |
+void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() { |
+ // We get the data pointer and size now in order to save one Lock/Unlock |
+ // in the worker thread. |
+ if (LATE(pa_stream_peek)(_recStream, &_tempSampleData, |
+ &_tempSampleDataSize) != 0) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Can't read data!"); |
+ return; |
+ } |
+ |
+ // Since we consume the data asynchronously on a different thread, we have |
+ // to temporarily disable the read callback or else Pulse will call it |
+ // continuously until we consume the data. We re-enable it below. |
+ DisableReadCallback(); |
+ _timeEventRec.Set(); |
} |
-void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/, |
- void *pThis) |
-{ |
- static_cast<AudioDeviceLinuxPulse*> (pThis)-> |
- PaStreamOverflowCallbackHandler(); |
+void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream* /*unused*/, |
+ void* pThis) { |
+ static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamOverflowCallbackHandler(); |
} |
-void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() |
-{ |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
- " Recording overflow"); |
+void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() { |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " Recording overflow"); |
} |
-int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream) |
-{ |
- if (!WEBRTC_PA_REPORT_LATENCY) |
- { |
- return 0; |
- } |
+int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream* stream) { |
+ if (!WEBRTC_PA_REPORT_LATENCY) { |
+ return 0; |
+ } |
- if (!stream) |
- { |
- return 0; |
- } |
+ if (!stream) { |
+ return 0; |
+ } |
- pa_usec_t latency; |
- int negative; |
- if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " Can't query latency"); |
- // We'd rather continue playout/capture with an incorrect delay than |
- // stop it altogether, so return a valid value. |
- return 0; |
- } |
+ pa_usec_t latency; |
+ int negative; |
+ if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Can't query latency"); |
+ // We'd rather continue playout/capture with an incorrect delay than |
+ // stop it altogether, so return a valid value. |
+ return 0; |
+ } |
- if (negative) |
- { |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " warning: pa_stream_get_latency reported negative " |
- "delay"); |
- |
- // The delay can be negative for monitoring streams if the captured |
- // samples haven't been played yet. In such a case, "latency" |
- // contains the magnitude, so we must negate it to get the real value. |
- int32_t tmpLatency = (int32_t) -latency; |
- if (tmpLatency < 0) |
- { |
- // Make sure that we don't use a negative delay. |
- tmpLatency = 0; |
- } |
+ if (negative) { |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
+ " warning: pa_stream_get_latency reported negative " |
+ "delay"); |
- return tmpLatency; |
- } else |
- { |
- return (int32_t) latency; |
+ // The delay can be negative for monitoring streams if the captured |
+ // samples haven't been played yet. In such a case, "latency" |
+ // contains the magnitude, so we must negate it to get the real value. |
+ int32_t tmpLatency = (int32_t)-latency; |
+ if (tmpLatency < 0) { |
+ // Make sure that we don't use a negative delay. |
+ tmpLatency = 0; |
} |
+ |
+ return tmpLatency; |
+ } else { |
+ return (int32_t)latency; |
+ } |
} |
-int32_t AudioDeviceLinuxPulse::ReadRecordedData( |
- const void* bufferData, |
- size_t bufferSize) EXCLUSIVE_LOCKS_REQUIRED(_critSect) |
-{ |
- size_t size = bufferSize; |
- uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels); |
+int32_t AudioDeviceLinuxPulse::ReadRecordedData(const void* bufferData, |
+ size_t bufferSize) |
+ EXCLUSIVE_LOCKS_REQUIRED(_critSect) { |
+ size_t size = bufferSize; |
+ uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels); |
- // Account for the peeked data and the used data. |
- uint32_t recDelay = (uint32_t) ((LatencyUsecs(_recStream) |
- / 1000) + 10 * ((size + _recordBufferUsed) / _recordBufferSize)); |
+ // Account for the peeked data and the used data. |
+ uint32_t recDelay = |
+ (uint32_t)((LatencyUsecs(_recStream) / 1000) + |
+ 10 * ((size + _recordBufferUsed) / _recordBufferSize)); |
- _sndCardRecDelay = recDelay; |
+ _sndCardRecDelay = recDelay; |
- if (_playStream) |
- { |
- // Get the playout delay. |
- _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream) / 1000); |
+ if (_playStream) { |
+ // Get the playout delay. |
+ _sndCardPlayDelay = (uint32_t)(LatencyUsecs(_playStream) / 1000); |
+ } |
+ |
+ if (_recordBufferUsed > 0) { |
+ // Have to copy to the buffer until it is full. |
+ size_t copy = _recordBufferSize - _recordBufferUsed; |
+ if (size < copy) { |
+ copy = size; |
} |
- if (_recordBufferUsed > 0) |
- { |
- // Have to copy to the buffer until it is full. |
- size_t copy = _recordBufferSize - _recordBufferUsed; |
- if (size < copy) |
- { |
- copy = size; |
- } |
+ memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy); |
+ _recordBufferUsed += copy; |
+ bufferData = static_cast<const char*>(bufferData) + copy; |
+ size -= copy; |
- memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy); |
- _recordBufferUsed += copy; |
- bufferData = static_cast<const char *> (bufferData) + copy; |
- size -= copy; |
+ if (_recordBufferUsed != _recordBufferSize) { |
+ // Not enough data yet to pass to VoE. |
+ return 0; |
+ } |
- if (_recordBufferUsed != _recordBufferSize) |
- { |
- // Not enough data yet to pass to VoE. |
- return 0; |
- } |
+ // Provide data to VoiceEngine. |
+ if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1) { |
+ // We have stopped recording. |
+ return -1; |
+ } |
- // Provide data to VoiceEngine. |
- if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1) |
- { |
- // We have stopped recording. |
- return -1; |
- } |
+ _recordBufferUsed = 0; |
+ } |
- _recordBufferUsed = 0; |
+ // Now process full 10ms sample sets directly from the input. |
+ while (size >= _recordBufferSize) { |
+ // Provide data to VoiceEngine. |
+ if (ProcessRecordedData(static_cast<int8_t*>(const_cast<void*>(bufferData)), |
+ numRecSamples, recDelay) == -1) { |
+ // We have stopped recording. |
+ return -1; |
} |
- // Now process full 10ms sample sets directly from the input. |
- while (size >= _recordBufferSize) |
- { |
- // Provide data to VoiceEngine. |
- if (ProcessRecordedData( |
- static_cast<int8_t *> (const_cast<void *> (bufferData)), |
- numRecSamples, recDelay) == -1) |
- { |
- // We have stopped recording. |
- return -1; |
- } |
+ bufferData = static_cast<const char*>(bufferData) + _recordBufferSize; |
+ size -= _recordBufferSize; |
- bufferData = static_cast<const char *> (bufferData) + |
- _recordBufferSize; |
- size -= _recordBufferSize; |
- |
- // We have consumed 10ms of data. |
- recDelay -= 10; |
- } |
+ // We have consumed 10ms of data. |
+ recDelay -= 10; |
+ } |
- // Now save any leftovers for later. |
- if (size > 0) |
- { |
- memcpy(_recBuffer, bufferData, size); |
- _recordBufferUsed = size; |
- } |
+ // Now save any leftovers for later. |
+ if (size > 0) { |
+ memcpy(_recBuffer, bufferData, size); |
+ _recordBufferUsed = size; |
+ } |
- return 0; |
+ return 0; |
} |
-int32_t AudioDeviceLinuxPulse::ProcessRecordedData( |
- int8_t *bufferData, |
- uint32_t bufferSizeInSamples, |
- uint32_t recDelay) EXCLUSIVE_LOCKS_REQUIRED(_critSect) |
-{ |
- uint32_t currentMicLevel(0); |
- uint32_t newMicLevel(0); |
+int32_t AudioDeviceLinuxPulse::ProcessRecordedData(int8_t* bufferData, |
+ uint32_t bufferSizeInSamples, |
+ uint32_t recDelay) |
+ EXCLUSIVE_LOCKS_REQUIRED(_critSect) { |
+ uint32_t currentMicLevel(0); |
+ uint32_t newMicLevel(0); |
- _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples); |
+ _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples); |
- if (AGC()) |
- { |
- // Store current mic level in the audio buffer if AGC is enabled |
- if (MicrophoneVolume(currentMicLevel) == 0) |
- { |
- // This call does not affect the actual microphone volume |
- _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel); |
- } |
+ if (AGC()) { |
+ // Store current mic level in the audio buffer if AGC is enabled |
+ if (MicrophoneVolume(currentMicLevel) == 0) { |
+ // This call does not affect the actual microphone volume |
+ _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel); |
} |
+ } |
- const uint32_t clockDrift(0); |
- // TODO(andrew): this is a temporary hack, to avoid non-causal far- and |
- // near-end signals at the AEC for PulseAudio. I think the system delay is |
- // being correctly calculated here, but for legacy reasons we add +10 ms |
- // to the value in the AEC. The real fix will be part of a larger |
- // investigation into managing system delay in the AEC. |
- if (recDelay > 10) |
- recDelay -= 10; |
- else |
- recDelay = 0; |
- _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift); |
- _ptrAudioBuffer->SetTypingStatus(KeyPressed()); |
- // Deliver recorded samples at specified sample rate, |
- // mic level etc. to the observer using callback. |
- UnLock(); |
- _ptrAudioBuffer->DeliverRecordedData(); |
- Lock(); |
- |
- // We have been unlocked - check the flag again. |
- if (!_recording) |
- { |
- return -1; |
- } |
+ const uint32_t clockDrift(0); |
+ // TODO(andrew): this is a temporary hack, to avoid non-causal far- and |
+ // near-end signals at the AEC for PulseAudio. I think the system delay is |
+ // being correctly calculated here, but for legacy reasons we add +10 ms |
+ // to the value in the AEC. The real fix will be part of a larger |
+ // investigation into managing system delay in the AEC. |
+ if (recDelay > 10) |
+ recDelay -= 10; |
+ else |
+ recDelay = 0; |
+ _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift); |
+ _ptrAudioBuffer->SetTypingStatus(KeyPressed()); |
+ // Deliver recorded samples at specified sample rate, |
+ // mic level etc. to the observer using callback. |
+ UnLock(); |
+ _ptrAudioBuffer->DeliverRecordedData(); |
+ Lock(); |
+ |
+ // We have been unlocked - check the flag again. |
+ if (!_recording) { |
+ return -1; |
+ } |
- if (AGC()) |
- { |
- newMicLevel = _ptrAudioBuffer->NewMicLevel(); |
- if (newMicLevel != 0) |
- { |
- // The VQE will only deliver non-zero microphone levels when a |
- // change is needed. |
- // Set this new mic level (received from the observer as return |
- // value in the callback). |
- WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, |
- " AGC change of volume: old=%u => new=%u", |
- currentMicLevel, newMicLevel); |
- if (SetMicrophoneVolume(newMicLevel) == -1) |
- { |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, |
- _id, |
- " the required modification of the microphone " |
- "volume failed"); |
- } |
- } |
+ if (AGC()) { |
+ newMicLevel = _ptrAudioBuffer->NewMicLevel(); |
+ if (newMicLevel != 0) { |
+ // The VQE will only deliver non-zero microphone levels when a |
+ // change is needed. |
+ // Set this new mic level (received from the observer as return |
+ // value in the callback). |
+ WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, |
+ " AGC change of volume: old=%u => new=%u", currentMicLevel, |
+ newMicLevel); |
+ if (SetMicrophoneVolume(newMicLevel) == -1) { |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
+ " the required modification of the microphone " |
+ "volume failed"); |
+ } |
} |
+ } |
- return 0; |
+ return 0; |
} |
-bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis) |
-{ |
- return (static_cast<AudioDeviceLinuxPulse*> (pThis)->PlayThreadProcess()); |
+bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis) { |
+ return (static_cast<AudioDeviceLinuxPulse*>(pThis)->PlayThreadProcess()); |
} |
-bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis) |
-{ |
- return (static_cast<AudioDeviceLinuxPulse*> (pThis)->RecThreadProcess()); |
+bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis) { |
+ return (static_cast<AudioDeviceLinuxPulse*>(pThis)->RecThreadProcess()); |
} |
-bool AudioDeviceLinuxPulse::PlayThreadProcess() |
-{ |
- switch (_timeEventPlay.Wait(1000)) |
- { |
- case kEventSignaled: |
- break; |
- case kEventError: |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
- "EventWrapper::Wait() failed"); |
- return true; |
- case kEventTimeout: |
- return true; |
- } |
+bool AudioDeviceLinuxPulse::PlayThreadProcess() { |
+ switch (_timeEventPlay.Wait(1000)) { |
+ case kEventSignaled: |
+ break; |
+ case kEventError: |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
+ "EventWrapper::Wait() failed"); |
+ return true; |
+ case kEventTimeout: |
+ return true; |
+ } |
- rtc::CritScope lock(&_critSect); |
+ rtc::CritScope lock(&_critSect); |
- if (_startPlay) |
- { |
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
- "_startPlay true, performing initial actions"); |
- |
- _startPlay = false; |
- _playDeviceName = NULL; |
- |
- // Set if not default device |
- if (_outputDeviceIndex > 0) |
- { |
- // Get the playout device name |
- _playDeviceName = new char[kAdmMaxDeviceNameSize]; |
- _deviceIndex = _outputDeviceIndex; |
- PlayoutDevices(); |
- } |
+ if (_startPlay) { |
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
+ "_startPlay true, performing initial actions"); |
- // Start muted only supported on 0.9.11 and up |
- if (LATE(pa_context_get_protocol_version)(_paContext) |
- >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) |
- { |
- // Get the currently saved speaker mute status |
- // and set the initial mute status accordingly |
- bool enabled(false); |
- _mixerManager.SpeakerMute(enabled); |
- if (enabled) |
- { |
- _playStreamFlags |= PA_STREAM_START_MUTED; |
- } |
- } |
+ _startPlay = false; |
+ _playDeviceName = NULL; |
- // Get the currently saved speaker volume |
- uint32_t volume = 0; |
- if (update_speaker_volume_at_startup_) |
- _mixerManager.SpeakerVolume(volume); |
+ // Set if not default device |
+ if (_outputDeviceIndex > 0) { |
+ // Get the playout device name |
+ _playDeviceName = new char[kAdmMaxDeviceNameSize]; |
+ _deviceIndex = _outputDeviceIndex; |
+ PlayoutDevices(); |
+ } |
- PaLock(); |
+ // Start muted only supported on 0.9.11 and up |
+ if (LATE(pa_context_get_protocol_version)(_paContext) >= |
+ WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { |
+ // Get the currently saved speaker mute status |
+ // and set the initial mute status accordingly |
+ bool enabled(false); |
+ _mixerManager.SpeakerMute(enabled); |
+ if (enabled) { |
+ _playStreamFlags |= PA_STREAM_START_MUTED; |
+ } |
+ } |
- // NULL gives PA the choice of startup volume. |
- pa_cvolume* ptr_cvolume = NULL; |
- if (update_speaker_volume_at_startup_) { |
- pa_cvolume cVolumes; |
- ptr_cvolume = &cVolumes; |
+ // Get the currently saved speaker volume |
+ uint32_t volume = 0; |
+ if (update_speaker_volume_at_startup_) |
+ _mixerManager.SpeakerVolume(volume); |
- // Set the same volume for all channels |
- const pa_sample_spec *spec = |
- LATE(pa_stream_get_sample_spec)(_playStream); |
- LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume); |
- update_speaker_volume_at_startup_ = false; |
- } |
+ PaLock(); |
- // Connect the stream to a sink |
- if (LATE(pa_stream_connect_playback)( |
- _playStream, |
- _playDeviceName, |
- &_playBufferAttr, |
- (pa_stream_flags_t) _playStreamFlags, |
- ptr_cvolume, NULL) != PA_OK) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to connect play stream, err=%d", |
- LATE(pa_context_errno)(_paContext)); |
- } |
+ // NULL gives PA the choice of startup volume. |
+ pa_cvolume* ptr_cvolume = NULL; |
+ if (update_speaker_volume_at_startup_) { |
+ pa_cvolume cVolumes; |
+ ptr_cvolume = &cVolumes; |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " play stream connected"); |
+ // Set the same volume for all channels |
+ const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); |
+ LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume); |
+ update_speaker_volume_at_startup_ = false; |
+ } |
- // Wait for state change |
- while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) |
- { |
- LATE(pa_threaded_mainloop_wait)(_paMainloop); |
- } |
+ // Connect the stream to a sink |
+ if (LATE(pa_stream_connect_playback)( |
+ _playStream, _playDeviceName, &_playBufferAttr, |
+ (pa_stream_flags_t)_playStreamFlags, ptr_cvolume, NULL) != PA_OK) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to connect play stream, err=%d", |
+ LATE(pa_context_errno)(_paContext)); |
+ } |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " play stream ready"); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
+ " play stream connected"); |
- // We can now handle write callbacks |
- EnableWriteCallback(); |
+ // Wait for state change |
+ while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) { |
+ LATE(pa_threaded_mainloop_wait)(_paMainloop); |
+ } |
- PaUnLock(); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " play stream ready"); |
- // Clear device name |
- if (_playDeviceName) |
- { |
- delete [] _playDeviceName; |
- _playDeviceName = NULL; |
- } |
+ // We can now handle write callbacks |
+ EnableWriteCallback(); |
- _playing = true; |
- _playStartEvent.Set(); |
+ PaUnLock(); |
- return true; |
+ // Clear device name |
+ if (_playDeviceName) { |
+ delete[] _playDeviceName; |
+ _playDeviceName = NULL; |
} |
- if (_playing) |
- { |
- if (!_recording) |
- { |
- // Update the playout delay |
- _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream) |
- / 1000); |
- } |
+ _playing = true; |
+ _playStartEvent.Set(); |
+ |
+ return true; |
+ } |
- if (_playbackBufferUnused < _playbackBufferSize) |
- { |
- |
- size_t write = _playbackBufferSize - _playbackBufferUnused; |
- if (_tempBufferSpace < write) |
- { |
- write = _tempBufferSpace; |
- } |
- |
- PaLock(); |
- if (LATE(pa_stream_write)( |
- _playStream, |
- (void *) &_playBuffer[_playbackBufferUnused], |
- write, NULL, (int64_t) 0, |
- PA_SEEK_RELATIVE) != PA_OK) |
- { |
- _writeErrors++; |
- if (_writeErrors > 10) |
- { |
- if (_playError == 1) |
- { |
- WEBRTC_TRACE(kTraceWarning, |
- kTraceUtility, _id, |
- " pending playout error exists"); |
- } |
- // Triggers callback from module process thread. |
- _playError = 1; |
- WEBRTC_TRACE( |
- kTraceError, |
- kTraceUtility, |
- _id, |
- " kPlayoutError message posted: " |
- "_writeErrors=%u, error=%d", |
- _writeErrors, |
- LATE(pa_context_errno)(_paContext)); |
- _writeErrors = 0; |
- } |
- } |
- PaUnLock(); |
- |
- _playbackBufferUnused += write; |
- _tempBufferSpace -= write; |
+ if (_playing) { |
+ if (!_recording) { |
+ // Update the playout delay |
+ _sndCardPlayDelay = (uint32_t)(LatencyUsecs(_playStream) / 1000); |
+ } |
+ |
+ if (_playbackBufferUnused < _playbackBufferSize) { |
+ size_t write = _playbackBufferSize - _playbackBufferUnused; |
+ if (_tempBufferSpace < write) { |
+ write = _tempBufferSpace; |
+ } |
+ |
+ PaLock(); |
+ if (LATE(pa_stream_write)( |
+ _playStream, (void*)&_playBuffer[_playbackBufferUnused], write, |
+ NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) { |
+ _writeErrors++; |
+ if (_writeErrors > 10) { |
+ if (_playError == 1) { |
+ WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, |
+ " pending playout error exists"); |
+ } |
+ // Triggers callback from module process thread. |
+ _playError = 1; |
+ WEBRTC_TRACE(kTraceError, kTraceUtility, _id, |
+ " kPlayoutError message posted: " |
+ "_writeErrors=%u, error=%d", |
+ _writeErrors, LATE(pa_context_errno)(_paContext)); |
+ _writeErrors = 0; |
} |
+ } |
+ PaUnLock(); |
+ |
+ _playbackBufferUnused += write; |
+ _tempBufferSpace -= write; |
+ } |
+ |
+ uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels); |
+ // Might have been reduced to zero by the above. |
+ if (_tempBufferSpace > 0) { |
+ // Ask for new PCM data to be played out using the |
+ // AudioDeviceBuffer ensure that this callback is executed |
+ // without taking the audio-thread lock. |
+ UnLock(); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " requesting data"); |
+ uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(numPlaySamples); |
+ Lock(); |
+ |
+ // We have been unlocked - check the flag again. |
+ if (!_playing) { |
+ return true; |
+ } |
- uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels); |
- // Might have been reduced to zero by the above. |
- if (_tempBufferSpace > 0) |
- { |
- // Ask for new PCM data to be played out using the |
- // AudioDeviceBuffer ensure that this callback is executed |
- // without taking the audio-thread lock. |
- UnLock(); |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " requesting data"); |
- uint32_t nSamples = |
- _ptrAudioBuffer->RequestPlayoutData(numPlaySamples); |
- Lock(); |
- |
- // We have been unlocked - check the flag again. |
- if (!_playing) |
- { |
- return true; |
- } |
- |
- nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer); |
- if (nSamples != numPlaySamples) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, |
- _id, " invalid number of output samples(%d)", |
- nSamples); |
- } |
- |
- size_t write = _playbackBufferSize; |
- if (_tempBufferSpace < write) |
- { |
- write = _tempBufferSpace; |
- } |
- |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " will write"); |
- PaLock(); |
- if (LATE(pa_stream_write)(_playStream, (void *) &_playBuffer[0], |
- write, NULL, (int64_t) 0, |
- PA_SEEK_RELATIVE) != PA_OK) |
- { |
- _writeErrors++; |
- if (_writeErrors > 10) |
- { |
- if (_playError == 1) |
- { |
- WEBRTC_TRACE(kTraceWarning, |
- kTraceUtility, _id, |
- " pending playout error exists"); |
- } |
- // Triggers callback from module process thread. |
- _playError = 1; |
- WEBRTC_TRACE( |
- kTraceError, |
- kTraceUtility, |
- _id, |
- " kPlayoutError message posted: " |
- "_writeErrors=%u, error=%d", |
- _writeErrors, |
- LATE(pa_context_errno)(_paContext)); |
- _writeErrors = 0; |
- } |
- } |
- PaUnLock(); |
- |
- _playbackBufferUnused = write; |
+ nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer); |
+ if (nSamples != numPlaySamples) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " invalid number of output samples(%d)", nSamples); |
+ } |
+ |
+ size_t write = _playbackBufferSize; |
+ if (_tempBufferSpace < write) { |
+ write = _tempBufferSpace; |
+ } |
+ |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " will write"); |
+ PaLock(); |
+ if (LATE(pa_stream_write)(_playStream, (void*)&_playBuffer[0], write, |
+ NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) { |
+ _writeErrors++; |
+ if (_writeErrors > 10) { |
+ if (_playError == 1) { |
+ WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, |
+ " pending playout error exists"); |
+ } |
+ // Triggers callback from module process thread. |
+ _playError = 1; |
+ WEBRTC_TRACE(kTraceError, kTraceUtility, _id, |
+ " kPlayoutError message posted: " |
+ "_writeErrors=%u, error=%d", |
+ _writeErrors, LATE(pa_context_errno)(_paContext)); |
+ _writeErrors = 0; |
} |
+ } |
+ PaUnLock(); |
+ |
+ _playbackBufferUnused = write; |
+ } |
- _tempBufferSpace = 0; |
- PaLock(); |
- EnableWriteCallback(); |
- PaUnLock(); |
+ _tempBufferSpace = 0; |
+ PaLock(); |
+ EnableWriteCallback(); |
+ PaUnLock(); |
- } // _playing |
+ } // _playing |
- return true; |
+ return true; |
} |
-bool AudioDeviceLinuxPulse::RecThreadProcess() |
-{ |
- switch (_timeEventRec.Wait(1000)) |
- { |
- case kEventSignaled: |
- break; |
- case kEventError: |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
- "EventWrapper::Wait() failed"); |
- return true; |
- case kEventTimeout: |
- return true; |
- } |
+bool AudioDeviceLinuxPulse::RecThreadProcess() { |
+ switch (_timeEventRec.Wait(1000)) { |
+ case kEventSignaled: |
+ break; |
+ case kEventError: |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
+ "EventWrapper::Wait() failed"); |
+ return true; |
+ case kEventTimeout: |
+ return true; |
+ } |
- rtc::CritScope lock(&_critSect); |
+ rtc::CritScope lock(&_critSect); |
- if (_startRec) |
- { |
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
- "_startRec true, performing initial actions"); |
- |
- _recDeviceName = NULL; |
- |
- // Set if not default device |
- if (_inputDeviceIndex > 0) |
- { |
- // Get the recording device name |
- _recDeviceName = new char[kAdmMaxDeviceNameSize]; |
- _deviceIndex = _inputDeviceIndex; |
- RecordingDevices(); |
- } |
+ if (_startRec) { |
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, |
+ "_startRec true, performing initial actions"); |
- PaLock(); |
+ _recDeviceName = NULL; |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " connecting stream"); |
+ // Set if not default device |
+ if (_inputDeviceIndex > 0) { |
+ // Get the recording device name |
+ _recDeviceName = new char[kAdmMaxDeviceNameSize]; |
+ _deviceIndex = _inputDeviceIndex; |
+ RecordingDevices(); |
+ } |
- // Connect the stream to a source |
- if (LATE(pa_stream_connect_record)(_recStream, |
- _recDeviceName, |
- &_recBufferAttr, |
- (pa_stream_flags_t) _recStreamFlags) != PA_OK) |
- { |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
- " failed to connect rec stream, err=%d", |
- LATE(pa_context_errno)(_paContext)); |
- } |
+ PaLock(); |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " connected"); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " connecting stream"); |
- // Wait for state change |
- while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) |
- { |
- LATE(pa_threaded_mainloop_wait)(_paMainloop); |
- } |
+ // Connect the stream to a source |
+ if (LATE(pa_stream_connect_record)( |
+ _recStream, _recDeviceName, &_recBufferAttr, |
+ (pa_stream_flags_t)_recStreamFlags) != PA_OK) { |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " failed to connect rec stream, err=%d", |
+ LATE(pa_context_errno)(_paContext)); |
+ } |
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, |
- " done"); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " connected"); |
- // We can now handle read callbacks |
- EnableReadCallback(); |
+ // Wait for state change |
+ while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) { |
+ LATE(pa_threaded_mainloop_wait)(_paMainloop); |
+ } |
- PaUnLock(); |
+ WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " done"); |
- // Clear device name |
- if (_recDeviceName) |
- { |
- delete [] _recDeviceName; |
- _recDeviceName = NULL; |
- } |
+ // We can now handle read callbacks |
+ EnableReadCallback(); |
- _startRec = false; |
- _recording = true; |
- _recStartEvent.Set(); |
+ PaUnLock(); |
- return true; |
+ // Clear device name |
+ if (_recDeviceName) { |
+ delete[] _recDeviceName; |
+ _recDeviceName = NULL; |
} |
- if (_recording) |
- { |
- // Read data and provide it to VoiceEngine |
- if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1) |
- { |
- return true; |
- } |
+ _startRec = false; |
+ _recording = true; |
+ _recStartEvent.Set(); |
- _tempSampleData = NULL; |
- _tempSampleDataSize = 0; |
- |
- PaLock(); |
- while (true) |
- { |
- // Ack the last thing we read |
- if (LATE(pa_stream_drop)(_recStream) != 0) |
- { |
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, |
- _id, " failed to drop, err=%d\n", |
- LATE(pa_context_errno)(_paContext)); |
- } |
- |
- if (LATE(pa_stream_readable_size)(_recStream) <= 0) |
- { |
- // Then that was all the data |
- break; |
- } |
- |
- // Else more data. |
- const void *sampleData; |
- size_t sampleDataSize; |
- |
- if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) |
- != 0) |
- { |
- _recError = 1; // triggers callback from module process thread |
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, |
- _id, " RECORD_ERROR message posted, error = %d", |
- LATE(pa_context_errno)(_paContext)); |
- break; |
- } |
- |
- _sndCardRecDelay = (uint32_t) (LatencyUsecs(_recStream) |
- / 1000); |
- |
- // Drop lock for sigslot dispatch, which could take a while. |
- PaUnLock(); |
- // Read data and provide it to VoiceEngine |
- if (ReadRecordedData(sampleData, sampleDataSize) == -1) |
- { |
- return true; |
- } |
- PaLock(); |
- |
- // Return to top of loop for the ack and the check for more data. |
- } |
+ return true; |
+ } |
- EnableReadCallback(); |
- PaUnLock(); |
+ if (_recording) { |
+ // Read data and provide it to VoiceEngine |
+ if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1) { |
+ return true; |
+ } |
- } // _recording |
+ _tempSampleData = NULL; |
+ _tempSampleDataSize = 0; |
- return true; |
-} |
+ PaLock(); |
+ while (true) { |
+ // Ack the last thing we read |
+ if (LATE(pa_stream_drop)(_recStream) != 0) { |
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, |
+ " failed to drop, err=%d\n", |
+ LATE(pa_context_errno)(_paContext)); |
+ } |
+ |
+ if (LATE(pa_stream_readable_size)(_recStream) <= 0) { |
+ // Then that was all the data |
+ break; |
+ } |
+ |
+ // Else more data. |
+ const void* sampleData; |
+ size_t sampleDataSize; |
+ |
+ if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) != 0) { |
+ _recError = 1; // triggers callback from module process thread |
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, |
+ " RECORD_ERROR message posted, error = %d", |
+ LATE(pa_context_errno)(_paContext)); |
+ break; |
+ } |
+ |
+ _sndCardRecDelay = (uint32_t)(LatencyUsecs(_recStream) / 1000); |
+ |
+ // Drop lock for sigslot dispatch, which could take a while. |
+ PaUnLock(); |
+ // Read data and provide it to VoiceEngine |
+ if (ReadRecordedData(sampleData, sampleDataSize) == -1) { |
+ return true; |
+ } |
+ PaLock(); |
+ |
+ // Return to top of loop for the ack and the check for more data. |
+ } |
+ |
+ EnableReadCallback(); |
+ PaUnLock(); |
+ |
+ } // _recording |
-bool AudioDeviceLinuxPulse::KeyPressed() const{ |
+ return true; |
+} |
+bool AudioDeviceLinuxPulse::KeyPressed() const { |
char szKey[32]; |
unsigned int i = 0; |
char state = 0; |
@@ -2992,4 +2585,4 @@ bool AudioDeviceLinuxPulse::KeyPressed() const{ |
memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState)); |
return (state != 0); |
} |
-} |
+} // namespace webrtc |