Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc

Issue 2958273002: Remove some occurrences of WEBRTC_TRACE in webrtc/modules/audio_device/linux/ (Closed)
Patch Set: Remove superfluous spaces. Created 3 years, 5 months ago
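This patch replaces the printf-style WEBRTC_TRACE macro (and the trace.h include) with the streaming LOG macro from webrtc/rtc_base/logging.h. Every hunk follows the same before/after shape; the example below is lifted from the InitPlayout() hunk further down in this diff:

    // Old: printf-style trace carrying module, id and severity arguments.
    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                 " failed to create play stream, err=%d",
                 LATE(pa_context_errno)(_paContext));

    // New: severity-tagged stream insertion; the module and id arguments go away.
    LOG(LS_ERROR) << "failed to create play stream, err="
                  << LATE(pa_context_errno)(_paContext);

As applied in this patch, the severities map as kTraceError -> LS_ERROR, kTraceWarning -> LS_WARNING, kTraceMemory -> LS_INFO, and kTraceInfo/kTraceDebug/kTraceStateInfo/kTraceStream -> LS_VERBOSE.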
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include <assert.h> 11 #include <assert.h>
12 12
13 #include "webrtc/modules/audio_device/audio_device_config.h" 13 #include "webrtc/modules/audio_device/audio_device_config.h"
14 #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h" 14 #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h"
15 #include "webrtc/rtc_base/checks.h" 15 #include "webrtc/rtc_base/checks.h"
16 #include "webrtc/rtc_base/logging.h" 16 #include "webrtc/rtc_base/logging.h"
17 #include "webrtc/system_wrappers/include/event_wrapper.h" 17 #include "webrtc/system_wrappers/include/event_wrapper.h"
18 #include "webrtc/system_wrappers/include/trace.h"
19 18
20 webrtc::adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable; 19 webrtc::adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable;
21 20
22 // Accesses Pulse functions through our late-binding symbol table instead of 21 // Accesses Pulse functions through our late-binding symbol table instead of
23 // directly. This way we don't have to link to libpulse, which means our binary 22 // directly. This way we don't have to link to libpulse, which means our binary
24 // will work on systems that don't have it. 23 // will work on systems that don't have it.
25 #define LATE(sym) \ 24 #define LATE(sym) \
26 LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, \ 25 LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, \
27 sym) 26 sym)
28 27
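The LATE() wrapper above exists so that libpulse symbols are resolved at run time rather than link time; the real lookup goes through WebRTC's LATESYM_GET symbol-table machinery, which is defined outside this file. As an illustrative sketch only (assuming a plain dlopen/dlsym lookup, not the actual WebRTC implementation), late binding of a single Pulse function looks roughly like this:

    #include <dlfcn.h>
    #include <pulse/pulseaudio.h>

    // Hypothetical helper, for illustration only: resolve pa_context_errno
    // from the installed libpulse at run time. Returns NULL if libpulse (or
    // the symbol) is missing, which is exactly the case the comment above
    // wants the binary to survive.
    typedef int (*pa_context_errno_fn)(pa_context*);

    pa_context_errno_fn LoadPaContextErrno() {
      void* handle = dlopen("libpulse.so.0", RTLD_LAZY);
      if (!handle)
        return nullptr;
      return reinterpret_cast<pa_context_errno_fn>(
          dlsym(handle, "pa_context_errno"));
    }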
(...skipping 54 matching lines...)
83 _configuredLatencyRec(0), 82 _configuredLatencyRec(0),
84 _paDeviceIndex(-1), 83 _paDeviceIndex(-1),
85 _paStateChanged(false), 84 _paStateChanged(false),
86 _paMainloop(NULL), 85 _paMainloop(NULL),
87 _paMainloopApi(NULL), 86 _paMainloopApi(NULL),
88 _paContext(NULL), 87 _paContext(NULL),
89 _recStream(NULL), 88 _recStream(NULL),
90 _playStream(NULL), 89 _playStream(NULL),
91 _recStreamFlags(0), 90 _recStreamFlags(0),
92 _playStreamFlags(0) { 91 _playStreamFlags(0) {
93 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__); 92 LOG(LS_INFO) << __FUNCTION__ << " created";
94 93
95 memset(_paServerVersion, 0, sizeof(_paServerVersion)); 94 memset(_paServerVersion, 0, sizeof(_paServerVersion));
96 memset(&_playBufferAttr, 0, sizeof(_playBufferAttr)); 95 memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
97 memset(&_recBufferAttr, 0, sizeof(_recBufferAttr)); 96 memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
98 memset(_oldKeyState, 0, sizeof(_oldKeyState)); 97 memset(_oldKeyState, 0, sizeof(_oldKeyState));
99 } 98 }
100 99
101 AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() { 100 AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() {
102 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", 101 LOG(LS_INFO) << __FUNCTION__ << " destroyed";
103 __FUNCTION__);
104 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 102 RTC_DCHECK(thread_checker_.CalledOnValidThread());
105 Terminate(); 103 Terminate();
106 104
107 if (_recBuffer) { 105 if (_recBuffer) {
108 delete[] _recBuffer; 106 delete[] _recBuffer;
109 _recBuffer = NULL; 107 _recBuffer = NULL;
110 } 108 }
111 if (_playBuffer) { 109 if (_playBuffer) {
112 delete[] _playBuffer; 110 delete[] _playBuffer;
113 _playBuffer = NULL; 111 _playBuffer = NULL;
(...skipping 103 matching lines...)
217 if (_ptrThreadPlay) { 215 if (_ptrThreadPlay) {
218 rtc::PlatformThread* tmpThread = _ptrThreadPlay.release(); 216 rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();
219 217
220 _timeEventPlay.Set(); 218 _timeEventPlay.Set();
221 tmpThread->Stop(); 219 tmpThread->Stop();
222 delete tmpThread; 220 delete tmpThread;
223 } 221 }
224 222
225 // Terminate PulseAudio 223 // Terminate PulseAudio
226 if (TerminatePulseAudio() < 0) { 224 if (TerminatePulseAudio() < 0) {
227 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 225 LOG(LS_ERROR) << "failed to terminate PulseAudio";
228 " failed to terminate PulseAudio");
229 return -1; 226 return -1;
230 } 227 }
231 228
232 if (_XDisplay) { 229 if (_XDisplay) {
233 XCloseDisplay(_XDisplay); 230 XCloseDisplay(_XDisplay);
234 _XDisplay = NULL; 231 _XDisplay = NULL;
235 } 232 }
236 233
237 _initialized = false; 234 _initialized = false;
238 _outputDeviceIsSpecified = false; 235 _outputDeviceIsSpecified = false;
(...skipping 133 matching lines...)
372 return -1; 369 return -1;
373 } 370 }
374 371
375 volume = level; 372 volume = level;
376 373
377 return 0; 374 return 0;
378 } 375 }
379 376
380 int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(uint16_t volumeLeft, 377 int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(uint16_t volumeLeft,
381 uint16_t volumeRight) { 378 uint16_t volumeRight) {
382 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 379 LOG(LS_WARNING) << "API call not supported on this platform";
383 " API call not supported on this platform");
384 return -1; 380 return -1;
385 } 381 }
386 382
387 int32_t AudioDeviceLinuxPulse::WaveOutVolume(uint16_t& /*volumeLeft*/, 383 int32_t AudioDeviceLinuxPulse::WaveOutVolume(uint16_t& /*volumeLeft*/,
388 uint16_t& /*volumeRight*/) const { 384 uint16_t& /*volumeRight*/) const {
389 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 385 LOG(LS_WARNING) << "API call not supported on this platform";
390 " API call not supported on this platform");
391 return -1; 386 return -1;
392 } 387 }
393 388
394 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const { 389 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const {
395 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 390 RTC_DCHECK(thread_checker_.CalledOnValidThread());
396 uint32_t maxVol(0); 391 uint32_t maxVol(0);
397 392
398 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { 393 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
399 return -1; 394 return -1;
400 } 395 }
(...skipping 305 matching lines...)
706 } 701 }
707 702
708 int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) { 703 int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
709 return (_mixerManager.SetMicrophoneVolume(volume)); 704 return (_mixerManager.SetMicrophoneVolume(volume));
710 } 705 }
711 706
712 int32_t AudioDeviceLinuxPulse::MicrophoneVolume(uint32_t& volume) const { 707 int32_t AudioDeviceLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
713 uint32_t level(0); 708 uint32_t level(0);
714 709
715 if (_mixerManager.MicrophoneVolume(level) == -1) { 710 if (_mixerManager.MicrophoneVolume(level) == -1) {
716 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 711 LOG(LS_WARNING) << "failed to retrieve current microphone level";
717 " failed to retrive current microphone level");
718 return -1; 712 return -1;
719 } 713 }
720 714
721 volume = level; 715 volume = level;
722 716
723 return 0; 717 return 0;
724 } 718 }
725 719
726 int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const { 720 int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const {
727 uint32_t maxVol(0); 721 uint32_t maxVol(0);
(...skipping 51 matching lines...)
779 } 773 }
780 774
781 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) { 775 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) {
782 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 776 RTC_DCHECK(thread_checker_.CalledOnValidThread());
783 if (_playIsInitialized) { 777 if (_playIsInitialized) {
784 return -1; 778 return -1;
785 } 779 }
786 780
787 const uint16_t nDevices = PlayoutDevices(); 781 const uint16_t nDevices = PlayoutDevices();
788 782
789 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, 783 LOG(LS_VERBOSE) << "number of availiable output devices is " << nDevices;
790 " number of availiable output devices is %u", nDevices);
791 784
792 if (index > (nDevices - 1)) { 785 if (index > (nDevices - 1)) {
793 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 786 LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
794 " device index is out of range [0,%u]", (nDevices - 1)); 787 << "]";
795 return -1; 788 return -1;
796 } 789 }
797 790
798 _outputDeviceIndex = index; 791 _outputDeviceIndex = index;
799 _outputDeviceIsSpecified = true; 792 _outputDeviceIsSpecified = true;
800 793
801 return 0; 794 return 0;
802 } 795 }
803 796
804 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice( 797 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
805 AudioDeviceModule::WindowsDeviceType /*device*/) { 798 AudioDeviceModule::WindowsDeviceType /*device*/) {
806 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 799 LOG(LS_ERROR) << "WindowsDeviceType not supported";
807 "WindowsDeviceType not supported");
808 return -1; 800 return -1;
809 } 801 }
810 802
811 int32_t AudioDeviceLinuxPulse::PlayoutDeviceName( 803 int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
812 uint16_t index, 804 uint16_t index,
813 char name[kAdmMaxDeviceNameSize], 805 char name[kAdmMaxDeviceNameSize],
814 char guid[kAdmMaxGuidSize]) { 806 char guid[kAdmMaxGuidSize]) {
815 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 807 RTC_DCHECK(thread_checker_.CalledOnValidThread());
816 const uint16_t nDevices = PlayoutDevices(); 808 const uint16_t nDevices = PlayoutDevices();
817 809
(...skipping 84 matching lines...)
902 } 894 }
903 895
904 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) { 896 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) {
905 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 897 RTC_DCHECK(thread_checker_.CalledOnValidThread());
906 if (_recIsInitialized) { 898 if (_recIsInitialized) {
907 return -1; 899 return -1;
908 } 900 }
909 901
910 const uint16_t nDevices(RecordingDevices()); 902 const uint16_t nDevices(RecordingDevices());
911 903
912 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, 904 LOG(LS_VERBOSE) << "number of availiable input devices is " << nDevices;
913 " number of availiable input devices is %u", nDevices);
914 905
915 if (index > (nDevices - 1)) { 906 if (index > (nDevices - 1)) {
916 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 907 LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
917 " device index is out of range [0,%u]", (nDevices - 1)); 908 << "]";
918 return -1; 909 return -1;
919 } 910 }
920 911
921 _inputDeviceIndex = index; 912 _inputDeviceIndex = index;
922 _inputDeviceIsSpecified = true; 913 _inputDeviceIsSpecified = true;
923 914
924 return 0; 915 return 0;
925 } 916 }
926 917
927 int32_t AudioDeviceLinuxPulse::SetRecordingDevice( 918 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
928 AudioDeviceModule::WindowsDeviceType /*device*/) { 919 AudioDeviceModule::WindowsDeviceType /*device*/) {
929 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 920 LOG(LS_ERROR) << "WindowsDeviceType not supported";
930 "WindowsDeviceType not supported");
931 return -1; 921 return -1;
932 } 922 }
933 923
934 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) { 924 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) {
935 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 925 RTC_DCHECK(thread_checker_.CalledOnValidThread());
936 available = false; 926 available = false;
937 927
938 // Try to initialize the playout side 928 // Try to initialize the playout side
939 int32_t res = InitPlayout(); 929 int32_t res = InitPlayout();
940 930
(...skipping 34 matching lines...)
975 if (!_outputDeviceIsSpecified) { 965 if (!_outputDeviceIsSpecified) {
976 return -1; 966 return -1;
977 } 967 }
978 968
979 if (_playIsInitialized) { 969 if (_playIsInitialized) {
980 return 0; 970 return 0;
981 } 971 }
982 972
983 // Initialize the speaker (devices might have been added or removed) 973 // Initialize the speaker (devices might have been added or removed)
984 if (InitSpeaker() == -1) { 974 if (InitSpeaker() == -1) {
985 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 975 LOG(LS_WARNING) << "InitSpeaker() failed";
986 " InitSpeaker() failed");
987 } 976 }
988 977
989 // Set the play sample specification 978 // Set the play sample specification
990 pa_sample_spec playSampleSpec; 979 pa_sample_spec playSampleSpec;
991 playSampleSpec.channels = _playChannels; 980 playSampleSpec.channels = _playChannels;
992 playSampleSpec.format = PA_SAMPLE_S16LE; 981 playSampleSpec.format = PA_SAMPLE_S16LE;
993 playSampleSpec.rate = sample_rate_hz_; 982 playSampleSpec.rate = sample_rate_hz_;
994 983
995 // Create a new play stream 984 // Create a new play stream
996 _playStream = 985 _playStream =
997 LATE(pa_stream_new)(_paContext, "playStream", &playSampleSpec, NULL); 986 LATE(pa_stream_new)(_paContext, "playStream", &playSampleSpec, NULL);
998 987
999 if (!_playStream) { 988 if (!_playStream) {
1000 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 989 LOG(LS_ERROR) << "failed to create play stream, err="
1001 " failed to create play stream, err=%d", 990 << LATE(pa_context_errno)(_paContext);
1002 LATE(pa_context_errno)(_paContext));
1003 return -1; 991 return -1;
1004 } 992 }
1005 993
1006 // Provide the playStream to the mixer 994 // Provide the playStream to the mixer
1007 _mixerManager.SetPlayStream(_playStream); 995 _mixerManager.SetPlayStream(_playStream);
1008 996
1009 if (_ptrAudioBuffer) { 997 if (_ptrAudioBuffer) {
1010 // Update audio buffer with the selected parameters 998 // Update audio buffer with the selected parameters
1011 _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_); 999 _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
1012 _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels); 1000 _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
1013 } 1001 }
1014 1002
1015 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stream state %d\n", 1003 LOG(LS_VERBOSE) << "stream state " << LATE(pa_stream_get_state)(_playStream);
1016 LATE(pa_stream_get_state)(_playStream));
1017 1004
1018 // Set stream flags 1005 // Set stream flags
1019 _playStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE | 1006 _playStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE |
1020 PA_STREAM_INTERPOLATE_TIMING); 1007 PA_STREAM_INTERPOLATE_TIMING);
1021 1008
1022 if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { 1009 if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
1023 // If configuring a specific latency then we want to specify 1010 // If configuring a specific latency then we want to specify
1024 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters 1011 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
1025 // automatically to reach that target latency. However, that flag 1012 // automatically to reach that target latency. However, that flag
1026 // doesn't exist in Ubuntu 8.04 and many people still use that, 1013 // doesn't exist in Ubuntu 8.04 and many people still use that,
1027 // so we have to check the protocol version of libpulse. 1014 // so we have to check the protocol version of libpulse.
1028 if (LATE(pa_context_get_protocol_version)(_paContext) >= 1015 if (LATE(pa_context_get_protocol_version)(_paContext) >=
1029 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { 1016 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
1030 _playStreamFlags |= PA_STREAM_ADJUST_LATENCY; 1017 _playStreamFlags |= PA_STREAM_ADJUST_LATENCY;
1031 } 1018 }
1032 1019
1033 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); 1020 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
1034 if (!spec) { 1021 if (!spec) {
1035 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1022 LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
1036 " pa_stream_get_sample_spec()");
1037 return -1; 1023 return -1;
1038 } 1024 }
1039 1025
1040 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); 1026 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
1041 uint32_t latency = bytesPerSec * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / 1027 uint32_t latency = bytesPerSec * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS /
1042 WEBRTC_PA_MSECS_PER_SEC; 1028 WEBRTC_PA_MSECS_PER_SEC;
1043 1029
1044 // Set the play buffer attributes 1030 // Set the play buffer attributes
1045 _playBufferAttr.maxlength = latency; // num bytes stored in the buffer 1031 _playBufferAttr.maxlength = latency; // num bytes stored in the buffer
1046 _playBufferAttr.tlength = latency; // target fill level of play buffer 1032 _playBufferAttr.tlength = latency; // target fill level of play buffer
(...skipping 35 matching lines...)
1082 if (!_inputDeviceIsSpecified) { 1068 if (!_inputDeviceIsSpecified) {
1083 return -1; 1069 return -1;
1084 } 1070 }
1085 1071
1086 if (_recIsInitialized) { 1072 if (_recIsInitialized) {
1087 return 0; 1073 return 0;
1088 } 1074 }
1089 1075
1090 // Initialize the microphone (devices might have been added or removed) 1076 // Initialize the microphone (devices might have been added or removed)
1091 if (InitMicrophone() == -1) { 1077 if (InitMicrophone() == -1) {
1092 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 1078 LOG(LS_WARNING) << "InitMicrophone() failed";
1093 " InitMicrophone() failed");
1094 } 1079 }
1095 1080
1096 // Set the rec sample specification 1081 // Set the rec sample specification
1097 pa_sample_spec recSampleSpec; 1082 pa_sample_spec recSampleSpec;
1098 recSampleSpec.channels = _recChannels; 1083 recSampleSpec.channels = _recChannels;
1099 recSampleSpec.format = PA_SAMPLE_S16LE; 1084 recSampleSpec.format = PA_SAMPLE_S16LE;
1100 recSampleSpec.rate = sample_rate_hz_; 1085 recSampleSpec.rate = sample_rate_hz_;
1101 1086
1102 // Create a new rec stream 1087 // Create a new rec stream
1103 _recStream = 1088 _recStream =
1104 LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, NULL); 1089 LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, NULL);
1105 if (!_recStream) { 1090 if (!_recStream) {
1106 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1091 LOG(LS_ERROR) << "failed to create rec stream, err="
1107 " failed to create rec stream, err=%d", 1092 << LATE(pa_context_errno)(_paContext);
1108 LATE(pa_context_errno)(_paContext));
1109 return -1; 1093 return -1;
1110 } 1094 }
1111 1095
1112 // Provide the recStream to the mixer 1096 // Provide the recStream to the mixer
1113 _mixerManager.SetRecStream(_recStream); 1097 _mixerManager.SetRecStream(_recStream);
1114 1098
1115 if (_ptrAudioBuffer) { 1099 if (_ptrAudioBuffer) {
1116 // Update audio buffer with the selected parameters 1100 // Update audio buffer with the selected parameters
1117 _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_); 1101 _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
1118 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); 1102 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
1119 } 1103 }
1120 1104
1121 if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { 1105 if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
1122 _recStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE | 1106 _recStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE |
1123 PA_STREAM_INTERPOLATE_TIMING); 1107 PA_STREAM_INTERPOLATE_TIMING);
1124 1108
1125 // If configuring a specific latency then we want to specify 1109 // If configuring a specific latency then we want to specify
1126 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters 1110 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
1127 // automatically to reach that target latency. However, that flag 1111 // automatically to reach that target latency. However, that flag
1128 // doesn't exist in Ubuntu 8.04 and many people still use that, 1112 // doesn't exist in Ubuntu 8.04 and many people still use that,
1129 // so we have to check the protocol version of libpulse. 1113 // so we have to check the protocol version of libpulse.
1130 if (LATE(pa_context_get_protocol_version)(_paContext) >= 1114 if (LATE(pa_context_get_protocol_version)(_paContext) >=
1131 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { 1115 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
1132 _recStreamFlags |= PA_STREAM_ADJUST_LATENCY; 1116 _recStreamFlags |= PA_STREAM_ADJUST_LATENCY;
1133 } 1117 }
1134 1118
1135 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_recStream); 1119 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_recStream);
1136 if (!spec) { 1120 if (!spec) {
1137 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1121 LOG(LS_ERROR) << "pa_stream_get_sample_spec(rec)";
1138 " pa_stream_get_sample_spec(rec)");
1139 return -1; 1122 return -1;
1140 } 1123 }
1141 1124
1142 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); 1125 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
1143 uint32_t latency = bytesPerSec * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / 1126 uint32_t latency = bytesPerSec * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS /
1144 WEBRTC_PA_MSECS_PER_SEC; 1127 WEBRTC_PA_MSECS_PER_SEC;
1145 1128
1146 // Set the rec buffer attributes 1129 // Set the rec buffer attributes
1147 // Note: fragsize specifies a maximum transfer size, not a minimum, so 1130 // Note: fragsize specifies a maximum transfer size, not a minimum, so
1148 // it is not possible to force a high latency setting, only a low one. 1131 // it is not possible to force a high latency setting, only a low one.
(...skipping 36 matching lines...)
1185 _startRec = true; 1168 _startRec = true;
1186 1169
1187 // The audio thread will signal when recording has started. 1170 // The audio thread will signal when recording has started.
1188 _timeEventRec.Set(); 1171 _timeEventRec.Set();
1189 if (kEventTimeout == _recStartEvent.Wait(10000)) { 1172 if (kEventTimeout == _recStartEvent.Wait(10000)) {
1190 { 1173 {
1191 rtc::CritScope lock(&_critSect); 1174 rtc::CritScope lock(&_critSect);
1192 _startRec = false; 1175 _startRec = false;
1193 } 1176 }
1194 StopRecording(); 1177 StopRecording();
1195 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1178 LOG(LS_ERROR) << "failed to activate recording";
1196 " failed to activate recording");
1197 return -1; 1179 return -1;
1198 } 1180 }
1199 1181
1200 { 1182 {
1201 rtc::CritScope lock(&_critSect); 1183 rtc::CritScope lock(&_critSect);
1202 if (_recording) { 1184 if (_recording) {
1203 // The recording state is set by the audio thread after recording 1185 // The recording state is set by the audio thread after recording
1204 // has started. 1186 // has started.
1205 } else { 1187 } else {
1206 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1188 LOG(LS_ERROR) << "failed to activate recording";
1207 " failed to activate recording");
1208 return -1; 1189 return -1;
1209 } 1190 }
1210 } 1191 }
1211 1192
1212 return 0; 1193 return 0;
1213 } 1194 }
1214 1195
1215 int32_t AudioDeviceLinuxPulse::StopRecording() { 1196 int32_t AudioDeviceLinuxPulse::StopRecording() {
1216 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 1197 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1217 rtc::CritScope lock(&_critSect); 1198 rtc::CritScope lock(&_critSect);
1218 1199
1219 if (!_recIsInitialized) { 1200 if (!_recIsInitialized) {
1220 return 0; 1201 return 0;
1221 } 1202 }
1222 1203
1223 if (_recStream == NULL) { 1204 if (_recStream == NULL) {
1224 return -1; 1205 return -1;
1225 } 1206 }
1226 1207
1227 _recIsInitialized = false; 1208 _recIsInitialized = false;
1228 _recording = false; 1209 _recording = false;
1229 1210
1230 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stopping recording"); 1211 LOG(LS_VERBOSE) << "stopping recording";
1231 1212
1232 // Stop Recording 1213 // Stop Recording
1233 PaLock(); 1214 PaLock();
1234 1215
1235 DisableReadCallback(); 1216 DisableReadCallback();
1236 LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL); 1217 LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);
1237 1218
1238 // Unset this here so that we don't get a TERMINATED callback 1219 // Unset this here so that we don't get a TERMINATED callback
1239 LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL); 1220 LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);
1240 1221
1241 if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) { 1222 if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) {
1242 // Disconnect the stream 1223 // Disconnect the stream
1243 if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) { 1224 if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) {
1244 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1225 LOG(LS_ERROR) << "failed to disconnect rec stream, err="
1245 " failed to disconnect rec stream, err=%d\n", 1226 << LATE(pa_context_errno)(_paContext);
1246 LATE(pa_context_errno)(_paContext));
1247 PaUnLock(); 1227 PaUnLock();
1248 return -1; 1228 return -1;
1249 } 1229 }
1250 1230
1251 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, 1231 LOG(LS_VERBOSE) << "disconnected recording";
1252 " disconnected recording");
1253 } 1232 }
1254 1233
1255 LATE(pa_stream_unref)(_recStream); 1234 LATE(pa_stream_unref)(_recStream);
1256 _recStream = NULL; 1235 _recStream = NULL;
1257 1236
1258 PaUnLock(); 1237 PaUnLock();
1259 1238
1260 // Provide the recStream to the mixer 1239 // Provide the recStream to the mixer
1261 _mixerManager.SetRecStream(_recStream); 1240 _mixerManager.SetRecStream(_recStream);
1262 1241
(...skipping 41 matching lines...)
1304 // accessed on the playout thread. 1283 // accessed on the playout thread.
1305 1284
1306 // The audio thread will signal when playout has started. 1285 // The audio thread will signal when playout has started.
1307 _timeEventPlay.Set(); 1286 _timeEventPlay.Set();
1308 if (kEventTimeout == _playStartEvent.Wait(10000)) { 1287 if (kEventTimeout == _playStartEvent.Wait(10000)) {
1309 { 1288 {
1310 rtc::CritScope lock(&_critSect); 1289 rtc::CritScope lock(&_critSect);
1311 _startPlay = false; 1290 _startPlay = false;
1312 } 1291 }
1313 StopPlayout(); 1292 StopPlayout();
1314 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1293 LOG(LS_ERROR) << "failed to activate playout";
1315 " failed to activate playout");
1316 return -1; 1294 return -1;
1317 } 1295 }
1318 1296
1319 { 1297 {
1320 rtc::CritScope lock(&_critSect); 1298 rtc::CritScope lock(&_critSect);
1321 if (_playing) { 1299 if (_playing) {
1322 // The playing state is set by the audio thread after playout 1300 // The playing state is set by the audio thread after playout
1323 // has started. 1301 // has started.
1324 } else { 1302 } else {
1325 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1303 LOG(LS_ERROR) << "failed to activate playing";
1326 " failed to activate playing");
1327 return -1; 1304 return -1;
1328 } 1305 }
1329 } 1306 }
1330 1307
1331 return 0; 1308 return 0;
1332 } 1309 }
1333 1310
1334 int32_t AudioDeviceLinuxPulse::StopPlayout() { 1311 int32_t AudioDeviceLinuxPulse::StopPlayout() {
1335 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 1312 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1336 rtc::CritScope lock(&_critSect); 1313 rtc::CritScope lock(&_critSect);
1337 1314
1338 if (!_playIsInitialized) { 1315 if (!_playIsInitialized) {
1339 return 0; 1316 return 0;
1340 } 1317 }
1341 1318
1342 if (_playStream == NULL) { 1319 if (_playStream == NULL) {
1343 return -1; 1320 return -1;
1344 } 1321 }
1345 1322
1346 _playIsInitialized = false; 1323 _playIsInitialized = false;
1347 _playing = false; 1324 _playing = false;
1348 _sndCardPlayDelay = 0; 1325 _sndCardPlayDelay = 0;
1349 _sndCardRecDelay = 0; 1326 _sndCardRecDelay = 0;
1350 1327
1351 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stopping playback"); 1328 LOG(LS_VERBOSE) << "stopping playback";
1352 1329
1353 // Stop Playout 1330 // Stop Playout
1354 PaLock(); 1331 PaLock();
1355 1332
1356 DisableWriteCallback(); 1333 DisableWriteCallback();
1357 LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL); 1334 LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);
1358 1335
1359 // Unset this here so that we don't get a TERMINATED callback 1336 // Unset this here so that we don't get a TERMINATED callback
1360 LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL); 1337 LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);
1361 1338
1362 if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) { 1339 if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) {
1363 // Disconnect the stream 1340 // Disconnect the stream
1364 if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) { 1341 if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) {
1365 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1342 LOG(LS_ERROR) << "failed to disconnect play stream, err="
1366 " failed to disconnect play stream, err=%d", 1343 << LATE(pa_context_errno)(_paContext);
1367 LATE(pa_context_errno)(_paContext));
1368 PaUnLock(); 1344 PaUnLock();
1369 return -1; 1345 return -1;
1370 } 1346 }
1371 1347
1372 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, 1348 LOG(LS_VERBOSE) << "disconnected playback";
1373 " disconnected playback");
1374 } 1349 }
1375 1350
1376 LATE(pa_stream_unref)(_playStream); 1351 LATE(pa_stream_unref)(_playStream);
1377 _playStream = NULL; 1352 _playStream = NULL;
1378 1353
1379 PaUnLock(); 1354 PaUnLock();
1380 1355
1381 // Provide the playStream to the mixer 1356 // Provide the playStream to the mixer
1382 _mixerManager.SetPlayStream(_playStream); 1357 _mixerManager.SetPlayStream(_playStream);
1383 1358
(...skipping 20 matching lines...)
1404 bool AudioDeviceLinuxPulse::Playing() const { 1379 bool AudioDeviceLinuxPulse::Playing() const {
1405 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 1380 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1406 return (_playing); 1381 return (_playing);
1407 } 1382 }
1408 1383
1409 int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer( 1384 int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer(
1410 const AudioDeviceModule::BufferType type, 1385 const AudioDeviceModule::BufferType type,
1411 uint16_t sizeMS) { 1386 uint16_t sizeMS) {
1412 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 1387 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1413 if (type != AudioDeviceModule::kFixedBufferSize) { 1388 if (type != AudioDeviceModule::kFixedBufferSize) {
1414 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1389 LOG(LS_ERROR) << "Adaptive buffer size not supported on this platform";
1415 " Adaptive buffer size not supported on this platform");
1416 return -1; 1390 return -1;
1417 } 1391 }
1418 1392
1419 _playBufType = type; 1393 _playBufType = type;
1420 _playBufDelayFixed = sizeMS; 1394 _playBufDelayFixed = sizeMS;
1421 1395
1422 return 0; 1396 return 0;
1423 } 1397 }
1424 1398
1425 int32_t AudioDeviceLinuxPulse::PlayoutBuffer( 1399 int32_t AudioDeviceLinuxPulse::PlayoutBuffer(
1426 AudioDeviceModule::BufferType& type, 1400 AudioDeviceModule::BufferType& type,
1427 uint16_t& sizeMS) const { 1401 uint16_t& sizeMS) const {
1428 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 1402 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1429 type = _playBufType; 1403 type = _playBufType;
1430 sizeMS = _playBufDelayFixed; 1404 sizeMS = _playBufDelayFixed;
1431 1405
1432 return 0; 1406 return 0;
1433 } 1407 }
1434 1408
1435 int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const { 1409 int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const {
1436 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 1410 LOG(LS_WARNING) << "API call not supported on this platform";
1437 " API call not supported on this platform");
1438 return -1; 1411 return -1;
1439 } 1412 }
1440 1413
1441 bool AudioDeviceLinuxPulse::PlayoutWarning() const { 1414 bool AudioDeviceLinuxPulse::PlayoutWarning() const {
1442 rtc::CritScope lock(&_critSect); 1415 rtc::CritScope lock(&_critSect);
1443 return (_playWarning > 0); 1416 return (_playWarning > 0);
1444 } 1417 }
1445 1418
1446 bool AudioDeviceLinuxPulse::PlayoutError() const { 1419 bool AudioDeviceLinuxPulse::PlayoutError() const {
1447 rtc::CritScope lock(&_critSect); 1420 rtc::CritScope lock(&_critSect);
(...skipping 61 matching lines...)
1509 const pa_server_info* i, 1482 const pa_server_info* i,
1510 void* pThis) { 1483 void* pThis) {
1511 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaServerInfoCallbackHandler(i); 1484 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaServerInfoCallbackHandler(i);
1512 } 1485 }
1513 1486
1514 void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream* p, void* pThis) { 1487 void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream* p, void* pThis) {
1515 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamStateCallbackHandler(p); 1488 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamStateCallbackHandler(p);
1516 } 1489 }
1517 1490
1518 void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context* c) { 1491 void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context* c) {
1519 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " context state cb"); 1492 LOG(LS_VERBOSE) << "context state cb";
1520 1493
1521 pa_context_state_t state = LATE(pa_context_get_state)(c); 1494 pa_context_state_t state = LATE(pa_context_get_state)(c);
1522 switch (state) { 1495 switch (state) {
1523 case PA_CONTEXT_UNCONNECTED: 1496 case PA_CONTEXT_UNCONNECTED:
1524 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " unconnected"); 1497 LOG(LS_VERBOSE) << "unconnected";
1525 break; 1498 break;
1526 case PA_CONTEXT_CONNECTING: 1499 case PA_CONTEXT_CONNECTING:
1527 case PA_CONTEXT_AUTHORIZING: 1500 case PA_CONTEXT_AUTHORIZING:
1528 case PA_CONTEXT_SETTING_NAME: 1501 case PA_CONTEXT_SETTING_NAME:
1529 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " no state"); 1502 LOG(LS_VERBOSE) << "no state";
1530 break; 1503 break;
1531 case PA_CONTEXT_FAILED: 1504 case PA_CONTEXT_FAILED:
1532 case PA_CONTEXT_TERMINATED: 1505 case PA_CONTEXT_TERMINATED:
1533 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " failed"); 1506 LOG(LS_VERBOSE) << "failed";
1534 _paStateChanged = true; 1507 _paStateChanged = true;
1535 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); 1508 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1536 break; 1509 break;
1537 case PA_CONTEXT_READY: 1510 case PA_CONTEXT_READY:
1538 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " ready"); 1511 LOG(LS_VERBOSE) << "ready";
1539 _paStateChanged = true; 1512 _paStateChanged = true;
1540 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); 1513 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1541 break; 1514 break;
1542 } 1515 }
1543 } 1516 }
1544 1517
1545 void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info* i, 1518 void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info* i,
1546 int eol) { 1519 int eol) {
1547 if (eol) { 1520 if (eol) {
1548 // Signal that we are done 1521 // Signal that we are done
(...skipping 70 matching lines...)
1619 // Copy the sink name 1592 // Copy the sink name
1620 strncpy(_playDisplayDeviceName, i->default_sink_name, 1593 strncpy(_playDisplayDeviceName, i->default_sink_name,
1621 kAdmMaxDeviceNameSize); 1594 kAdmMaxDeviceNameSize);
1622 _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; 1595 _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1623 } 1596 }
1624 1597
1625 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); 1598 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1626 } 1599 }
1627 1600
1628 void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream* p) { 1601 void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream* p) {
1629 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stream state cb"); 1602 LOG(LS_VERBOSE) << "stream state cb";
1630 1603
1631 pa_stream_state_t state = LATE(pa_stream_get_state)(p); 1604 pa_stream_state_t state = LATE(pa_stream_get_state)(p);
1632 switch (state) { 1605 switch (state) {
1633 case PA_STREAM_UNCONNECTED: 1606 case PA_STREAM_UNCONNECTED:
1634 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " unconnected"); 1607 LOG(LS_VERBOSE) << "unconnected";
1635 break; 1608 break;
1636 case PA_STREAM_CREATING: 1609 case PA_STREAM_CREATING:
1637 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " creating"); 1610 LOG(LS_VERBOSE) << "creating";
1638 break; 1611 break;
1639 case PA_STREAM_FAILED: 1612 case PA_STREAM_FAILED:
1640 case PA_STREAM_TERMINATED: 1613 case PA_STREAM_TERMINATED:
1641 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " failed"); 1614 LOG(LS_VERBOSE) << "failed";
1642 break; 1615 break;
1643 case PA_STREAM_READY: 1616 case PA_STREAM_READY:
1644 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " ready"); 1617 LOG(LS_VERBOSE) << "ready";
1645 break; 1618 break;
1646 } 1619 }
1647 1620
1648 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); 1621 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1649 } 1622 }
1650 1623
1651 int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() { 1624 int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() {
1652 PaLock(); 1625 PaLock();
1653 1626
1654 pa_operation* paOperation = NULL; 1627 pa_operation* paOperation = NULL;
1655 1628
1656 // get the server info and update deviceName 1629 // get the server info and update deviceName
1657 paOperation = 1630 paOperation =
1658 LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this); 1631 LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this);
1659 1632
1660 WaitForOperationCompletion(paOperation); 1633 WaitForOperationCompletion(paOperation);
1661 1634
1662 PaUnLock(); 1635 PaUnLock();
1663 1636
1664 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, 1637 LOG(LS_VERBOSE) << "checking PulseAudio version: " << _paServerVersion;
1665 " checking PulseAudio version: %s", _paServerVersion);
1666 1638
1667 return 0; 1639 return 0;
1668 } 1640 }
1669 1641
1670 int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() { 1642 int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() {
1671 PaLock(); 1643 PaLock();
1672 1644
1673 pa_operation* paOperation = NULL; 1645 pa_operation* paOperation = NULL;
1674 1646
1675 // Get the server info and update sample_rate_hz_ 1647 // Get the server info and update sample_rate_hz_
(...skipping 77 matching lines...)
1753 return 0; 1725 return 0;
1754 } 1726 }
1755 1727
1756 int32_t AudioDeviceLinuxPulse::InitPulseAudio() { 1728 int32_t AudioDeviceLinuxPulse::InitPulseAudio() {
1757 int retVal = 0; 1729 int retVal = 0;
1758 1730
1759 // Load libpulse 1731 // Load libpulse
1760 if (!PaSymbolTable.Load()) { 1732 if (!PaSymbolTable.Load()) {
1761 // Most likely the Pulse library and sound server are not installed on 1733 // Most likely the Pulse library and sound server are not installed on
1762 // this system 1734 // this system
1763 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1735 LOG(LS_ERROR) << "failed to load symbol table";
1764 " failed to load symbol table");
1765 return -1; 1736 return -1;
1766 } 1737 }
1767 1738
1768 // Create a mainloop API and connection to the default server 1739 // Create a mainloop API and connection to the default server
1769 // the mainloop is the internal asynchronous API event loop 1740 // the mainloop is the internal asynchronous API event loop
1770 if (_paMainloop) { 1741 if (_paMainloop) {
1771 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1742 LOG(LS_ERROR) << "PA mainloop has already existed";
1772 " PA mainloop has already existed");
1773 return -1; 1743 return -1;
1774 } 1744 }
1775 _paMainloop = LATE(pa_threaded_mainloop_new)(); 1745 _paMainloop = LATE(pa_threaded_mainloop_new)();
1776 if (!_paMainloop) { 1746 if (!_paMainloop) {
1777 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1747 LOG(LS_ERROR) << "could not create mainloop";
1778 " could not create mainloop");
1779 return -1; 1748 return -1;
1780 } 1749 }
1781 1750
1782 // Start the threaded main loop 1751 // Start the threaded main loop
1783 retVal = LATE(pa_threaded_mainloop_start)(_paMainloop); 1752 retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
1784 if (retVal != PA_OK) { 1753 if (retVal != PA_OK) {
1785 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1754 LOG(LS_ERROR) << "failed to start main loop, error=" << retVal;
1786 " failed to start main loop, error=%d", retVal);
1787 return -1; 1755 return -1;
1788 } 1756 }
1789 1757
1790 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " mainloop running!"); 1758 LOG(LS_VERBOSE) << "mainloop running!";
1791 1759
1792 PaLock(); 1760 PaLock();
1793 1761
1794 _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop); 1762 _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
1795 if (!_paMainloopApi) { 1763 if (!_paMainloopApi) {
1796 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1764 LOG(LS_ERROR) << "could not create mainloop API";
1797 " could not create mainloop API");
1798 PaUnLock(); 1765 PaUnLock();
1799 return -1; 1766 return -1;
1800 } 1767 }
1801 1768
1802 // Create a new PulseAudio context 1769 // Create a new PulseAudio context
1803 if (_paContext) { 1770 if (_paContext) {
1804 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1771 LOG(LS_ERROR) << "PA context has already existed";
1805 " PA context has already existed");
1806 PaUnLock(); 1772 PaUnLock();
1807 return -1; 1773 return -1;
1808 } 1774 }
1809 _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine"); 1775 _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");
1810 1776
1811 if (!_paContext) { 1777 if (!_paContext) {
1812 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1778 LOG(LS_ERROR) << "could not create context";
1813 " could not create context");
1814 PaUnLock(); 1779 PaUnLock();
1815 return -1; 1780 return -1;
1816 } 1781 }
1817 1782
1818 // Set state callback function 1783 // Set state callback function
1819 LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback, this); 1784 LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback, this);
1820 1785
1821 // Connect the context to a server (default) 1786 // Connect the context to a server (default)
1822 _paStateChanged = false; 1787 _paStateChanged = false;
1823 retVal = 1788 retVal =
1824 LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL); 1789 LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL);
1825 1790
1826 if (retVal != PA_OK) { 1791 if (retVal != PA_OK) {
1827 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1792 LOG(LS_ERROR) << "failed to connect context, error=" << retVal;
1828 " failed to connect context, error=%d", retVal);
1829 PaUnLock(); 1793 PaUnLock();
1830 return -1; 1794 return -1;
1831 } 1795 }
1832 1796
1833 // Wait for state change 1797 // Wait for state change
1834 while (!_paStateChanged) { 1798 while (!_paStateChanged) {
1835 LATE(pa_threaded_mainloop_wait)(_paMainloop); 1799 LATE(pa_threaded_mainloop_wait)(_paMainloop);
1836 } 1800 }
1837 1801
1838 // Now check to see what final state we reached. 1802 // Now check to see what final state we reached.
1839 pa_context_state_t state = LATE(pa_context_get_state)(_paContext); 1803 pa_context_state_t state = LATE(pa_context_get_state)(_paContext);
1840 1804
1841 if (state != PA_CONTEXT_READY) { 1805 if (state != PA_CONTEXT_READY) {
1842 if (state == PA_CONTEXT_FAILED) { 1806 if (state == PA_CONTEXT_FAILED) {
1843 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1807 LOG(LS_ERROR) << "failed to connect to PulseAudio sound server";
1844 " failed to connect to PulseAudio sound server");
1845 } else if (state == PA_CONTEXT_TERMINATED) { 1808 } else if (state == PA_CONTEXT_TERMINATED) {
1846 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1809 LOG(LS_ERROR) << "PulseAudio connection terminated early";
1847 " PulseAudio connection terminated early");
1848 } else { 1810 } else {
1849 // Shouldn't happen, because we only signal on one of those three 1811 // Shouldn't happen, because we only signal on one of those three
1850 // states 1812 // states
1851 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1813 LOG(LS_ERROR) << "unknown problem connecting to PulseAudio";
1852 " unknown problem connecting to PulseAudio");
1853 } 1814 }
1854 PaUnLock(); 1815 PaUnLock();
1855 return -1; 1816 return -1;
1856 } 1817 }
1857 1818
1858 PaUnLock(); 1819 PaUnLock();
1859 1820
1860 // Give the objects to the mixer manager 1821 // Give the objects to the mixer manager
1861 _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext); 1822 _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);
1862 1823
1863 // Check the version 1824 // Check the version
1864 if (CheckPulseAudioVersion() < 0) { 1825 if (CheckPulseAudioVersion() < 0) {
1865 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1826 LOG(LS_ERROR) << "PulseAudio version " << _paServerVersion
1866 " PulseAudio version %s not supported", _paServerVersion); 1827 << " not supported";
1867 return -1; 1828 return -1;
1868 } 1829 }
1869 1830
1870 // Initialize sampling frequency 1831 // Initialize sampling frequency
1871 if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) { 1832 if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) {
1872 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1833 LOG(LS_ERROR) << "failed to initialize sampling frequency, set to "
1873 " failed to initialize sampling frequency," 1834 << sample_rate_hz_ << " Hz";
1874 " set to %d Hz",
1875 sample_rate_hz_);
1876 return -1; 1835 return -1;
1877 } 1836 }
1878 1837
1879 return 0; 1838 return 0;
1880 } 1839 }
1881 1840
1882 int32_t AudioDeviceLinuxPulse::TerminatePulseAudio() { 1841 int32_t AudioDeviceLinuxPulse::TerminatePulseAudio() {
1883 // Do nothing if the instance doesn't exist 1842 // Do nothing if the instance doesn't exist
1884 // likely PaSymbolTable.Load() fails 1843 // likely PaSymbolTable.Load() fails
1885 if (!_paMainloop) { 1844 if (!_paMainloop) {
(...skipping 20 matching lines...)
1906 LATE(pa_threaded_mainloop_stop)(_paMainloop); 1865 LATE(pa_threaded_mainloop_stop)(_paMainloop);
1907 } 1866 }
1908 1867
1909 // Free the mainloop 1868 // Free the mainloop
1910 if (_paMainloop) { 1869 if (_paMainloop) {
1911 LATE(pa_threaded_mainloop_free)(_paMainloop); 1870 LATE(pa_threaded_mainloop_free)(_paMainloop);
1912 } 1871 }
1913 1872
1914 _paMainloop = NULL; 1873 _paMainloop = NULL;
1915 1874
1916 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " PulseAudio terminated"); 1875 LOG(LS_VERBOSE) << "PulseAudio terminated";
1917 1876
1918 return 0; 1877 return 0;
1919 } 1878 }
1920 1879
1921 void AudioDeviceLinuxPulse::PaLock() { 1880 void AudioDeviceLinuxPulse::PaLock() {
1922 LATE(pa_threaded_mainloop_lock)(_paMainloop); 1881 LATE(pa_threaded_mainloop_lock)(_paMainloop);
1923 } 1882 }
1924 1883
1925 void AudioDeviceLinuxPulse::PaUnLock() { 1884 void AudioDeviceLinuxPulse::PaUnLock() {
1926 LATE(pa_threaded_mainloop_unlock)(_paMainloop); 1885 LATE(pa_threaded_mainloop_unlock)(_paMainloop);
1927 } 1886 }
1928 1887
1929 void AudioDeviceLinuxPulse::WaitForOperationCompletion( 1888 void AudioDeviceLinuxPulse::WaitForOperationCompletion(
1930 pa_operation* paOperation) const { 1889 pa_operation* paOperation) const {
1931 if (!paOperation) { 1890 if (!paOperation) {
1932 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1891 LOG(LS_ERROR) << "paOperation NULL in WaitForOperationCompletion";
1933 "paOperation NULL in WaitForOperationCompletion");
1934 return; 1892 return;
1935 } 1893 }
1936 1894
1937 while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) { 1895 while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) {
1938 LATE(pa_threaded_mainloop_wait)(_paMainloop); 1896 LATE(pa_threaded_mainloop_wait)(_paMainloop);
1939 } 1897 }
1940 1898
1941 LATE(pa_operation_unref)(paOperation); 1899 LATE(pa_operation_unref)(paOperation);
1942 } 1900 }
1943 1901
(...skipping 38 matching lines...)
1982 _timeEventPlay.Set(); 1940 _timeEventPlay.Set();
1983 } 1941 }
1984 1942
1985 void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream* /*unused*/, 1943 void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream* /*unused*/,
1986 void* pThis) { 1944 void* pThis) {
1987 static_cast<AudioDeviceLinuxPulse*>(pThis) 1945 static_cast<AudioDeviceLinuxPulse*>(pThis)
1988 ->PaStreamUnderflowCallbackHandler(); 1946 ->PaStreamUnderflowCallbackHandler();
1989 } 1947 }
1990 1948
1991 void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() { 1949 void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() {
1992 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " Playout underflow"); 1950 LOG(LS_WARNING) << "Playout underflow";
1993 1951
1994 if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { 1952 if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
1995 // We didn't configure a pa_buffer_attr before, so switching to 1953 // We didn't configure a pa_buffer_attr before, so switching to
1996 // one now would be questionable. 1954 // one now would be questionable.
1997 return; 1955 return;
1998 } 1956 }
1999 1957
2000 // Otherwise reconfigure the stream with a higher target latency. 1958 // Otherwise reconfigure the stream with a higher target latency.
2001 1959
2002 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); 1960 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
2003 if (!spec) { 1961 if (!spec) {
2004 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1962 LOG(LS_ERROR) << "pa_stream_get_sample_spec()";
2005 " pa_stream_get_sample_spec()");
2006 return; 1963 return;
2007 } 1964 }
2008 1965
2009 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); 1966 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
2010 uint32_t newLatency = 1967 uint32_t newLatency =
2011 _configuredLatencyPlay + bytesPerSec * 1968 _configuredLatencyPlay + bytesPerSec *
2012 WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / 1969 WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS /
2013 WEBRTC_PA_MSECS_PER_SEC; 1970 WEBRTC_PA_MSECS_PER_SEC;
2014 1971
2015 // Set the play buffer attributes 1972 // Set the play buffer attributes
2016 _playBufferAttr.maxlength = newLatency; 1973 _playBufferAttr.maxlength = newLatency;
2017 _playBufferAttr.tlength = newLatency; 1974 _playBufferAttr.tlength = newLatency;
2018 _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; 1975 _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
2019 _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq; 1976 _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;
2020 1977
2021 pa_operation* op = LATE(pa_stream_set_buffer_attr)( 1978 pa_operation* op = LATE(pa_stream_set_buffer_attr)(
2022 _playStream, &_playBufferAttr, NULL, NULL); 1979 _playStream, &_playBufferAttr, NULL, NULL);
2023 if (!op) { 1980 if (!op) {
2024 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1981 LOG(LS_ERROR) << "pa_stream_set_buffer_attr()";
2025 " pa_stream_set_buffer_attr()");
2026 return; 1982 return;
2027 } 1983 }
2028 1984
2029 // Don't need to wait for this to complete. 1985 // Don't need to wait for this to complete.
2030 LATE(pa_operation_unref)(op); 1986 LATE(pa_operation_unref)(op);
2031 1987
2032 // Save the new latency in case we underflow again. 1988 // Save the new latency in case we underflow again.
2033 _configuredLatencyPlay = newLatency; 1989 _configuredLatencyPlay = newLatency;
2034 } 1990 }
2035 1991
2036 void AudioDeviceLinuxPulse::EnableReadCallback() { 1992 void AudioDeviceLinuxPulse::EnableReadCallback() {
2037 LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this); 1993 LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this);
2038 } 1994 }
2039 1995
2040 void AudioDeviceLinuxPulse::DisableReadCallback() { 1996 void AudioDeviceLinuxPulse::DisableReadCallback() {
2041 LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL); 1997 LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
2042 } 1998 }
2043 1999
2044 void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream* /*unused1*/, 2000 void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream* /*unused1*/,
2045 size_t /*unused2*/, 2001 size_t /*unused2*/,
2046 void* pThis) { 2002 void* pThis) {
2047 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamReadCallbackHandler(); 2003 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamReadCallbackHandler();
2048 } 2004 }
2049 2005
2050 void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() { 2006 void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() {
2051 // We get the data pointer and size now in order to save one Lock/Unlock 2007 // We get the data pointer and size now in order to save one Lock/Unlock
2052 // in the worker thread. 2008 // in the worker thread.
2053 if (LATE(pa_stream_peek)(_recStream, &_tempSampleData, 2009 if (LATE(pa_stream_peek)(_recStream, &_tempSampleData,
2054 &_tempSampleDataSize) != 0) { 2010 &_tempSampleDataSize) != 0) {
2055 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Can't read data!"); 2011 LOG(LS_ERROR) << "Can't read data!";
2056 return; 2012 return;
2057 } 2013 }
2058 2014
2059 // Since we consume the data asynchronously on a different thread, we have 2015 // Since we consume the data asynchronously on a different thread, we have
2060 // to temporarily disable the read callback or else Pulse will call it 2016 // to temporarily disable the read callback or else Pulse will call it
2061 // continuously until we consume the data. We re-enable it below. 2017 // continuously until we consume the data. We re-enable it below.
2062 DisableReadCallback(); 2018 DisableReadCallback();
2063 _timeEventRec.Set(); 2019 _timeEventRec.Set();
2064 } 2020 }
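The handler above only peeks at the capture buffer and wakes the worker thread; the data is consumed, and the read callback re-enabled, in RecThreadProcess(), which is not part of this diff. A rough sketch of what that consuming side typically does with the values the handler stored, where DeliverRecordedData() is a hypothetical placeholder for the buffer hand-off:

    // Illustration only - not the code under review. _tempSampleData and
    // _tempSampleDataSize were filled by pa_stream_peek() in the handler.
    if (_tempSampleData) {  // a NULL pointer with size > 0 would indicate a hole
      DeliverRecordedData(_tempSampleData, _tempSampleDataSize);
    }
    LATE(pa_stream_drop)(_recStream);  // release the peeked fragment
    EnableReadCallback();              // let Pulse signal the next chunk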
2065 2021
2066 void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream* /*unused*/, 2022 void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream* /*unused*/,
2067 void* pThis) { 2023 void* pThis) {
2068 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamOverflowCallbackHandler(); 2024 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamOverflowCallbackHandler();
2069 } 2025 }
2070 2026
2071 void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() { 2027 void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() {
2072 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " Recording overflow"); 2028 LOG(LS_WARNING) << "Recording overflow";
2073 } 2029 }
2074 2030
2075 int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream* stream) { 2031 int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream* stream) {
2076 if (!WEBRTC_PA_REPORT_LATENCY) { 2032 if (!WEBRTC_PA_REPORT_LATENCY) {
2077 return 0; 2033 return 0;
2078 } 2034 }
2079 2035
2080 if (!stream) { 2036 if (!stream) {
2081 return 0; 2037 return 0;
2082 } 2038 }
2083 2039
2084 pa_usec_t latency; 2040 pa_usec_t latency;
2085 int negative; 2041 int negative;
2086 if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) { 2042 if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) {
2087 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Can't query latency"); 2043 LOG(LS_ERROR) << "Can't query latency";
2088 // We'd rather continue playout/capture with an incorrect delay than 2044 // We'd rather continue playout/capture with an incorrect delay than
2089 // stop it altogether, so return a valid value. 2045 // stop it altogether, so return a valid value.
2090 return 0; 2046 return 0;
2091 } 2047 }
2092 2048
2093 if (negative) { 2049 if (negative) {
2094 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, 2050 LOG(LS_VERBOSE) << "warning: pa_stream_get_latency reported negative delay";
2095 " warning: pa_stream_get_latency reported negative "
2096 "delay");
2097 2051
2098 // The delay can be negative for monitoring streams if the captured 2052 // The delay can be negative for monitoring streams if the captured
2099 // samples haven't been played yet. In such a case, "latency" 2053 // samples haven't been played yet. In such a case, "latency"
2100 // contains the magnitude, so we must negate it to get the real value. 2054 // contains the magnitude, so we must negate it to get the real value.
2101 int32_t tmpLatency = (int32_t)-latency; 2055 int32_t tmpLatency = (int32_t)-latency;
2102 if (tmpLatency < 0) { 2056 if (tmpLatency < 0) {
2103 // Make sure that we don't use a negative delay. 2057 // Make sure that we don't use a negative delay.
2104 tmpLatency = 0; 2058 tmpLatency = 0;
2105 } 2059 }
2106 2060
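Editor's note: for monitoring streams pa_stream_get_latency() reports the delay as a magnitude plus a negative flag, and the code above negates and clamps it. A tiny standalone sketch of the same arithmetic, using a hypothetical helper and plain uint64_t in place of pa_usec_t:

#include <cstdint>
#include <cstdio>

// Sketch of the sign handling above: `negative` is the flag that
// pa_stream_get_latency() would return, `latency_us` the reported magnitude.
int32_t EffectiveLatencyUsecs(uint64_t latency_us, int negative) {
  if (!negative)
    return static_cast<int32_t>(latency_us);
  // Monitoring streams report a negative delay as magnitude plus flag;
  // negate it and never pass a negative value downstream.
  int32_t tmp = -static_cast<int32_t>(latency_us);
  return tmp < 0 ? 0 : tmp;
}

int main() {
  std::printf("%d\n", static_cast<int>(EffectiveLatencyUsecs(20000, 0)));  // 20000
  std::printf("%d\n", static_cast<int>(EffectiveLatencyUsecs(20000, 1)));  // clamped to 0
  return 0;
}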
(...skipping 112 matching lines...)
2219 return -1; 2173 return -1;
2220 } 2174 }
2221 2175
2222 if (AGC()) { 2176 if (AGC()) {
2223 newMicLevel = _ptrAudioBuffer->NewMicLevel(); 2177 newMicLevel = _ptrAudioBuffer->NewMicLevel();
2224 if (newMicLevel != 0) { 2178 if (newMicLevel != 0) {
2225 // The VQE will only deliver non-zero microphone levels when a 2179 // The VQE will only deliver non-zero microphone levels when a
2226 // change is needed. 2180 // change is needed.
2227 // Set this new mic level (received from the observer as return 2181 // Set this new mic level (received from the observer as return
2228 // value in the callback). 2182 // value in the callback).
2229 WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, 2183 LOG(LS_VERBOSE) << "AGC change of volume: old=" << currentMicLevel
2230 " AGC change of volume: old=%u => new=%u", currentMicLevel, 2184 << " => new=" << newMicLevel;
2231 newMicLevel);
2232 if (SetMicrophoneVolume(newMicLevel) == -1) { 2185 if (SetMicrophoneVolume(newMicLevel) == -1) {
2233 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 2186 LOG(LS_WARNING)
2234 " the required modification of the microphone " 2187 << "the required modification of the microphone volume failed";
2235 "volume failed");
2236 } 2188 }
2237 } 2189 }
2238 } 2190 }
2239 2191
2240 return 0; 2192 return 0;
2241 } 2193 }
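Editor's note: the AGC branch above only acts when the audio buffer reports a non-zero target microphone level. A minimal sketch of that decision, with a hypothetical setter standing in for the SetMicrophoneVolume() call made above:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the SetMicrophoneVolume() call made above.
bool ApplyMicVolume(uint32_t level) {
  std::printf("applying mic volume %u\n", level);
  return true;
}

// Sketch of the AGC follow-up: a reported level of 0 means "no change
// requested"; anything else is pushed to the capture device.
void MaybeApplyAgcLevel(uint32_t current_level, uint32_t new_level) {
  if (new_level == 0)
    return;  // the VQE only reports non-zero levels when a change is needed
  std::printf("AGC change of volume: old=%u => new=%u\n", current_level,
              new_level);
  if (!ApplyMicVolume(new_level))
    std::printf("warning: microphone volume update failed\n");
}

int main() {
  MaybeApplyAgcLevel(128, 0);    // nothing to do
  MaybeApplyAgcLevel(128, 180);  // applied
  return 0;
}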
2242 2194
2243 bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis) { 2195 bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis) {
2244 return (static_cast<AudioDeviceLinuxPulse*>(pThis)->PlayThreadProcess()); 2196 return (static_cast<AudioDeviceLinuxPulse*>(pThis)->PlayThreadProcess());
2245 } 2197 }
2246 2198
2247 bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis) { 2199 bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis) {
2248 return (static_cast<AudioDeviceLinuxPulse*>(pThis)->RecThreadProcess()); 2200 return (static_cast<AudioDeviceLinuxPulse*>(pThis)->RecThreadProcess());
2249 } 2201 }
2250 2202
2251 bool AudioDeviceLinuxPulse::PlayThreadProcess() { 2203 bool AudioDeviceLinuxPulse::PlayThreadProcess() {
2252 switch (_timeEventPlay.Wait(1000)) { 2204 switch (_timeEventPlay.Wait(1000)) {
2253 case kEventSignaled: 2205 case kEventSignaled:
2254 break; 2206 break;
2255 case kEventError: 2207 case kEventError:
2256 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 2208 LOG(LS_WARNING) << "EventWrapper::Wait() failed";
2257 "EventWrapper::Wait() failed");
2258 return true; 2209 return true;
2259 case kEventTimeout: 2210 case kEventTimeout:
2260 return true; 2211 return true;
2261 } 2212 }
2262 2213
2263 rtc::CritScope lock(&_critSect); 2214 rtc::CritScope lock(&_critSect);
2264 2215
2265 if (_startPlay) { 2216 if (_startPlay) {
2266 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, 2217 LOG(LS_VERBOSE) << "_startPlay true, performing initial actions";
2267 "_startPlay true, performing initial actions");
2268 2218
2269 _startPlay = false; 2219 _startPlay = false;
2270 _playDeviceName = NULL; 2220 _playDeviceName = NULL;
2271 2221
2272 // Set if not default device 2222 // Set if not default device
2273 if (_outputDeviceIndex > 0) { 2223 if (_outputDeviceIndex > 0) {
2274 // Get the playout device name 2224 // Get the playout device name
2275 _playDeviceName = new char[kAdmMaxDeviceNameSize]; 2225 _playDeviceName = new char[kAdmMaxDeviceNameSize];
2276 _deviceIndex = _outputDeviceIndex; 2226 _deviceIndex = _outputDeviceIndex;
2277 PlayoutDevices(); 2227 PlayoutDevices();
(...skipping 27 matching lines...)
2305 // Set the same volume for all channels 2255 // Set the same volume for all channels
2306 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); 2256 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
2307 LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume); 2257 LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
2308 update_speaker_volume_at_startup_ = false; 2258 update_speaker_volume_at_startup_ = false;
2309 } 2259 }
2310 2260
2311 // Connect the stream to a sink 2261 // Connect the stream to a sink
2312 if (LATE(pa_stream_connect_playback)( 2262 if (LATE(pa_stream_connect_playback)(
2313 _playStream, _playDeviceName, &_playBufferAttr, 2263 _playStream, _playDeviceName, &_playBufferAttr,
2314 (pa_stream_flags_t)_playStreamFlags, ptr_cvolume, NULL) != PA_OK) { 2264 (pa_stream_flags_t)_playStreamFlags, ptr_cvolume, NULL) != PA_OK) {
2315 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 2265 LOG(LS_ERROR) << "failed to connect play stream, err="
2316 " failed to connect play stream, err=%d", 2266 << LATE(pa_context_errno)(_paContext);
2317 LATE(pa_context_errno)(_paContext));
2318 } 2267 }
2319 2268
2320 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, 2269 LOG(LS_VERBOSE) << "play stream connected";
2321 " play stream connected");
2322 2270
2323 // Wait for state change 2271 // Wait for state change
2324 while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) { 2272 while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) {
2325 LATE(pa_threaded_mainloop_wait)(_paMainloop); 2273 LATE(pa_threaded_mainloop_wait)(_paMainloop);
2326 } 2274 }
2327 2275
2328 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " play stream ready"); 2276 LOG(LS_VERBOSE) << "play stream ready";
2329 2277
2330 // We can now handle write callbacks 2278 // We can now handle write callbacks
2331 EnableWriteCallback(); 2279 EnableWriteCallback();
2332 2280
2333 PaUnLock(); 2281 PaUnLock();
2334 2282
2335 // Clear device name 2283 // Clear device name
2336 if (_playDeviceName) { 2284 if (_playDeviceName) {
2337 delete[] _playDeviceName; 2285 delete[] _playDeviceName;
2338 _playDeviceName = NULL; 2286 _playDeviceName = NULL;
(...skipping 17 matching lines...)
2356 write = _tempBufferSpace; 2304 write = _tempBufferSpace;
2357 } 2305 }
2358 2306
2359 PaLock(); 2307 PaLock();
2360 if (LATE(pa_stream_write)( 2308 if (LATE(pa_stream_write)(
2361 _playStream, (void*)&_playBuffer[_playbackBufferUnused], write, 2309 _playStream, (void*)&_playBuffer[_playbackBufferUnused], write,
2362 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) { 2310 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
2363 _writeErrors++; 2311 _writeErrors++;
2364 if (_writeErrors > 10) { 2312 if (_writeErrors > 10) {
2365 if (_playError == 1) { 2313 if (_playError == 1) {
2366 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, 2314 LOG(LS_WARNING) << "pending playout error exists";
2367 " pending playout error exists");
2368 } 2315 }
2369 // Triggers callback from module process thread. 2316 // Triggers callback from module process thread.
2370 _playError = 1; 2317 _playError = 1;
2371 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, 2318 LOG(LS_ERROR) << "kPlayoutError message posted: _writeErrors="
2372 " kPlayoutError message posted: " 2319 << _writeErrors
2373 "_writeErrors=%u, error=%d", 2320 << ", error=" << LATE(pa_context_errno)(_paContext);
2374 _writeErrors, LATE(pa_context_errno)(_paContext));
2375 _writeErrors = 0; 2321 _writeErrors = 0;
2376 } 2322 }
2377 } 2323 }
2378 PaUnLock(); 2324 PaUnLock();
2379 2325
2380 _playbackBufferUnused += write; 2326 _playbackBufferUnused += write;
2381 _tempBufferSpace -= write; 2327 _tempBufferSpace -= write;
2382 } 2328 }
2383 2329
2384 uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels); 2330 uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
2385 // Might have been reduced to zero by the above. 2331 // Might have been reduced to zero by the above.
2386 if (_tempBufferSpace > 0) { 2332 if (_tempBufferSpace > 0) {
2387 // Ask for new PCM data to be played out using the 2333 // Ask for new PCM data to be played out using the
2388 // AudioDeviceBuffer; ensure that this callback is executed 2334 // AudioDeviceBuffer; ensure that this callback is executed
2389 // without taking the audio-thread lock. 2335 // without taking the audio-thread lock.
2390 UnLock(); 2336 UnLock();
2391 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " requesting data"); 2337 LOG(LS_VERBOSE) << "requesting data";
2392 uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(numPlaySamples); 2338 uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
2393 Lock(); 2339 Lock();
2394 2340
2395 // We have been unlocked - check the flag again. 2341 // We have been unlocked - check the flag again.
2396 if (!_playing) { 2342 if (!_playing) {
2397 return true; 2343 return true;
2398 } 2344 }
2399 2345
2400 nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer); 2346 nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
2401 if (nSamples != numPlaySamples) { 2347 if (nSamples != numPlaySamples) {
2402 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 2348 LOG(LS_ERROR) << "invalid number of output samples(" << nSamples << ")";
2403 " invalid number of output samples(%d)", nSamples);
2404 } 2349 }
2405 2350
2406 size_t write = _playbackBufferSize; 2351 size_t write = _playbackBufferSize;
2407 if (_tempBufferSpace < write) { 2352 if (_tempBufferSpace < write) {
2408 write = _tempBufferSpace; 2353 write = _tempBufferSpace;
2409 } 2354 }
2410 2355
2411 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " will write"); 2356 LOG(LS_VERBOSE) << "will write";
2412 PaLock(); 2357 PaLock();
2413 if (LATE(pa_stream_write)(_playStream, (void*)&_playBuffer[0], write, 2358 if (LATE(pa_stream_write)(_playStream, (void*)&_playBuffer[0], write,
2414 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) { 2359 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
2415 _writeErrors++; 2360 _writeErrors++;
2416 if (_writeErrors > 10) { 2361 if (_writeErrors > 10) {
2417 if (_playError == 1) { 2362 if (_playError == 1) {
2418 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, 2363 LOG(LS_WARNING) << "pending playout error exists";
2419 " pending playout error exists");
2420 } 2364 }
2421 // Triggers callback from module process thread. 2365 // Triggers callback from module process thread.
2422 _playError = 1; 2366 _playError = 1;
2423 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, 2367 LOG(LS_ERROR) << "kPlayoutError message posted: _writeErrors="
2424 " kPlayoutError message posted: " 2368 << _writeErrors
2425 "_writeErrors=%u, error=%d", 2369 << ", error=" << LATE(pa_context_errno)(_paContext);
2426 _writeErrors, LATE(pa_context_errno)(_paContext));
2427 _writeErrors = 0; 2370 _writeErrors = 0;
2428 } 2371 }
2429 } 2372 }
2430 PaUnLock(); 2373 PaUnLock();
2431 2374
2432 _playbackBufferUnused = write; 2375 _playbackBufferUnused = write;
2433 } 2376 }
2434 2377
2435 _tempBufferSpace = 0; 2378 _tempBufferSpace = 0;
2436 PaLock(); 2379 PaLock();
2437 EnableWriteCallback(); 2380 EnableWriteCallback();
2438 PaUnLock(); 2381 PaUnLock();
2439 2382
2440 } // _playing 2383 } // _playing
2441 2384
2442 return true; 2385 return true;
2443 } 2386 }
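Editor's note: PlayThreadProcess() above writes at most _tempBufferSpace bytes per write callback and keeps the offset of the unwritten remainder in _playbackBufferUnused. A standalone sketch of that accounting, with hypothetical names and the pa_stream_write() call elided:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Sketch of the accounting only: Pulse grants `space` bytes per callback; we
// write min(space, pending) and track how far into the buffer we have gotten.
struct PlayoutState {
  size_t buffer_size = 960;  // bytes fetched from the AudioDeviceBuffer (example)
  size_t unused = 0;         // offset of the unwritten remainder (bytes already written)
};

size_t WriteChunk(PlayoutState& s, size_t space) {
  size_t pending = s.buffer_size - s.unused;  // bytes still waiting
  size_t write = std::min(space, pending);
  // ...this is where pa_stream_write(_playStream, buffer + s.unused, write,
  // NULL, 0, PA_SEEK_RELATIVE) would go...
  s.unused += write;
  if (s.unused == s.buffer_size)
    s.unused = 0;  // whole buffer flushed; next callback fetches fresh data
  return write;
}

int main() {
  PlayoutState s;
  std::printf("wrote %zu\n", WriteChunk(s, 400));   // partial write, 560 left
  std::printf("wrote %zu\n", WriteChunk(s, 1000));  // flushes the remaining 560
  return 0;
}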
2444 2387
2445 bool AudioDeviceLinuxPulse::RecThreadProcess() { 2388 bool AudioDeviceLinuxPulse::RecThreadProcess() {
2446 switch (_timeEventRec.Wait(1000)) { 2389 switch (_timeEventRec.Wait(1000)) {
2447 case kEventSignaled: 2390 case kEventSignaled:
2448 break; 2391 break;
2449 case kEventError: 2392 case kEventError:
2450 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 2393 LOG(LS_WARNING) << "EventWrapper::Wait() failed";
2451 "EventWrapper::Wait() failed");
2452 return true; 2394 return true;
2453 case kEventTimeout: 2395 case kEventTimeout:
2454 return true; 2396 return true;
2455 } 2397 }
2456 2398
2457 rtc::CritScope lock(&_critSect); 2399 rtc::CritScope lock(&_critSect);
2458 2400
2459 if (_startRec) { 2401 if (_startRec) {
2460 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, 2402 LOG(LS_VERBOSE) << "_startRec true, performing initial actions";
2461 "_startRec true, performing initial actions");
2462 2403
2463 _recDeviceName = NULL; 2404 _recDeviceName = NULL;
2464 2405
2465 // Set if not default device 2406 // Set if not default device
2466 if (_inputDeviceIndex > 0) { 2407 if (_inputDeviceIndex > 0) {
2467 // Get the recording device name 2408 // Get the recording device name
2468 _recDeviceName = new char[kAdmMaxDeviceNameSize]; 2409 _recDeviceName = new char[kAdmMaxDeviceNameSize];
2469 _deviceIndex = _inputDeviceIndex; 2410 _deviceIndex = _inputDeviceIndex;
2470 RecordingDevices(); 2411 RecordingDevices();
2471 } 2412 }
2472 2413
2473 PaLock(); 2414 PaLock();
2474 2415
2475 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " connecting stream"); 2416 LOG(LS_VERBOSE) << "connecting stream";
2476 2417
2477 // Connect the stream to a source 2418 // Connect the stream to a source
2478 if (LATE(pa_stream_connect_record)( 2419 if (LATE(pa_stream_connect_record)(
2479 _recStream, _recDeviceName, &_recBufferAttr, 2420 _recStream, _recDeviceName, &_recBufferAttr,
2480 (pa_stream_flags_t)_recStreamFlags) != PA_OK) { 2421 (pa_stream_flags_t)_recStreamFlags) != PA_OK) {
2481 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 2422 LOG(LS_ERROR) << "failed to connect rec stream, err="
2482 " failed to connect rec stream, err=%d", 2423 << LATE(pa_context_errno)(_paContext);
2483 LATE(pa_context_errno)(_paContext));
2484 } 2424 }
2485 2425
2486 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " connected"); 2426 LOG(LS_VERBOSE) << "connected";
2487 2427
2488 // Wait for state change 2428 // Wait for state change
2489 while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) { 2429 while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) {
2490 LATE(pa_threaded_mainloop_wait)(_paMainloop); 2430 LATE(pa_threaded_mainloop_wait)(_paMainloop);
2491 } 2431 }
2492 2432
2493 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " done"); 2433 LOG(LS_VERBOSE) << "done";
2494 2434
2495 // We can now handle read callbacks 2435 // We can now handle read callbacks
2496 EnableReadCallback(); 2436 EnableReadCallback();
2497 2437
2498 PaUnLock(); 2438 PaUnLock();
2499 2439
2500 // Clear device name 2440 // Clear device name
2501 if (_recDeviceName) { 2441 if (_recDeviceName) {
2502 delete[] _recDeviceName; 2442 delete[] _recDeviceName;
2503 _recDeviceName = NULL; 2443 _recDeviceName = NULL;
(...skipping 12 matching lines...)
2516 return true; 2456 return true;
2517 } 2457 }
2518 2458
2519 _tempSampleData = NULL; 2459 _tempSampleData = NULL;
2520 _tempSampleDataSize = 0; 2460 _tempSampleDataSize = 0;
2521 2461
2522 PaLock(); 2462 PaLock();
2523 while (true) { 2463 while (true) {
2524 // Ack the last thing we read 2464 // Ack the last thing we read
2525 if (LATE(pa_stream_drop)(_recStream) != 0) { 2465 if (LATE(pa_stream_drop)(_recStream) != 0) {
2526 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 2466 LOG(LS_WARNING) << "failed to drop, err="
2527 " failed to drop, err=%d\n", 2467 << LATE(pa_context_errno)(_paContext);
2528 LATE(pa_context_errno)(_paContext));
2529 } 2468 }
2530 2469
2531 if (LATE(pa_stream_readable_size)(_recStream) <= 0) { 2470 if (LATE(pa_stream_readable_size)(_recStream) <= 0) {
2532 // Then that was all the data 2471 // Then that was all the data
2533 break; 2472 break;
2534 } 2473 }
2535 2474
2536 // Else more data. 2475 // Else more data.
2537 const void* sampleData; 2476 const void* sampleData;
2538 size_t sampleDataSize; 2477 size_t sampleDataSize;
2539 2478
2540 if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) != 0) { 2479 if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) != 0) {
2541 _recError = 1; // triggers callback from module process thread 2480 _recError = 1; // triggers callback from module process thread
2542 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 2481 LOG(LS_ERROR) << "RECORD_ERROR message posted, error = "
2543 " RECORD_ERROR message posted, error = %d", 2482 << LATE(pa_context_errno)(_paContext);
2544 LATE(pa_context_errno)(_paContext));
2545 break; 2483 break;
2546 } 2484 }
2547 2485
2548 _sndCardRecDelay = (uint32_t)(LatencyUsecs(_recStream) / 1000); 2486 _sndCardRecDelay = (uint32_t)(LatencyUsecs(_recStream) / 1000);
2549 2487
2550 // Drop lock for sigslot dispatch, which could take a while. 2488 // Drop lock for sigslot dispatch, which could take a while.
2551 PaUnLock(); 2489 PaUnLock();
2552 // Read data and provide it to VoiceEngine 2490 // Read data and provide it to VoiceEngine
2553 if (ReadRecordedData(sampleData, sampleDataSize) == -1) { 2491 if (ReadRecordedData(sampleData, sampleDataSize) == -1) {
2554 return true; 2492 return true;
(...skipping 24 matching lines...)
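Editor's note: the loop above drains the record stream with the usual peek/drop pattern. A compile-only sketch of that pattern, assuming direct linking against libpulse (this file goes through the LATE() symbol table instead) and a record stream that is already PA_STREAM_READY, called with the threaded mainloop lock held:

#include <pulse/pulseaudio.h>

#include <cstdio>

// Compile-only sketch of the peek/drop drain loop; not tied to this file's
// member state or error reporting.
void DrainRecordStream(pa_stream* stream) {
  for (;;) {
    size_t readable = pa_stream_readable_size(stream);
    if (readable == 0 || readable == (size_t)-1)
      break;  // drained, or the stream is in an error state
    const void* data = nullptr;
    size_t size = 0;
    if (pa_stream_peek(stream, &data, &size) != 0) {
      std::fprintf(stderr, "pa_stream_peek failed\n");
      return;
    }
    if (size == 0)
      break;  // nothing returned; do not call pa_stream_drop() in that case
    if (data != nullptr) {
      // Hand `size` bytes at `data` to the consumer here.
    }
    // A NULL `data` with non-zero `size` marks a hole; the same
    // pa_stream_drop() that acknowledges real data skips it.
    pa_stream_drop(stream);
  }
}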
2579 2517
2580 // A bit change in keymap means a key is pressed 2518 // A bit change in keymap means a key is pressed
2581 for (i = 0; i < sizeof(szKey); i++) 2519 for (i = 0; i < sizeof(szKey); i++)
2582 state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i]; 2520 state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
2583 2521
2584 // Save old state 2522 // Save old state
2585 memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState)); 2523 memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
2586 return (state != 0); 2524 return (state != 0);
2587 } 2525 }
2588 } // namespace webrtc 2526 } // namespace webrtc
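Editor's note: KeyPressed() above detects new key presses by XOR-ing the fresh XQueryKeymap() snapshot against the previous one and masking with the new state. The same bit arithmetic on two plain 32-byte snapshots, with no X11 calls and hypothetical names:

#include <cstdio>
#include <cstring>

// Sketch of the bit test in KeyPressed(): a bit that is 0 in the old snapshot
// and 1 in the new one means that key went down since the last poll.
bool AnyNewKeyDown(const char (&old_state)[32], const char (&new_state)[32]) {
  char changed_and_down = 0;
  for (int i = 0; i < 32; ++i)
    changed_and_down |= (new_state[i] ^ old_state[i]) & new_state[i];
  return changed_and_down != 0;
}

int main() {
  char old_state[32] = {0};
  char new_state[32] = {0};
  new_state[3] = 0x10;  // pretend one key bit just turned on
  std::printf("%d\n", AnyNewKeyDown(old_state, new_state));  // 1
  std::memcpy(old_state, new_state, sizeof(old_state));      // save old state
  std::printf("%d\n", AnyNewKeyDown(old_state, new_state));  // 0
  return 0;
}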