Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(374)

Side by Side Diff: webrtc/modules/audio_device/linux/audio_device_pulse_linux.cc

Issue 2958273002: Remove some occurrences of WEBRTC_TRACE in webrtc/modules/audio_device/linux/ (Closed)
Patch Set: Remove include of WEBRTC_TRACE header and fix some edge cases. Created 3 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include <assert.h> 11 #include <assert.h>
12 12
13 #include "webrtc/base/checks.h" 13 #include "webrtc/base/checks.h"
14 #include "webrtc/base/logging.h" 14 #include "webrtc/base/logging.h"
15 #include "webrtc/modules/audio_device/audio_device_config.h" 15 #include "webrtc/modules/audio_device/audio_device_config.h"
16 #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h" 16 #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h"
17 #include "webrtc/system_wrappers/include/event_wrapper.h" 17 #include "webrtc/system_wrappers/include/event_wrapper.h"
18 #include "webrtc/system_wrappers/include/trace.h"
19 18
20 webrtc::adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable; 19 webrtc::adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable;
21 20
22 // Accesses Pulse functions through our late-binding symbol table instead of 21 // Accesses Pulse functions through our late-binding symbol table instead of
23 // directly. This way we don't have to link to libpulse, which means our binary 22 // directly. This way we don't have to link to libpulse, which means our binary
24 // will work on systems that don't have it. 23 // will work on systems that don't have it.
25 #define LATE(sym) \ 24 #define LATE(sym) \
26 LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, \ 25 LATESYM_GET(webrtc::adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, \
27 sym) 26 sym)
28 27
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
83 _configuredLatencyRec(0), 82 _configuredLatencyRec(0),
84 _paDeviceIndex(-1), 83 _paDeviceIndex(-1),
85 _paStateChanged(false), 84 _paStateChanged(false),
86 _paMainloop(NULL), 85 _paMainloop(NULL),
87 _paMainloopApi(NULL), 86 _paMainloopApi(NULL),
88 _paContext(NULL), 87 _paContext(NULL),
89 _recStream(NULL), 88 _recStream(NULL),
90 _playStream(NULL), 89 _playStream(NULL),
91 _recStreamFlags(0), 90 _recStreamFlags(0),
92 _playStreamFlags(0) { 91 _playStreamFlags(0) {
93 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__); 92 LOG(LS_INFO) << __FUNCTION__ << " created";
henrika_webrtc 2017/07/07 14:04:45 Nit, but why all these initial spaces? Could you p
saza WebRTC 2017/07/10 06:37:11 The prefixed spaces in this file indicate dependen
94 93
95 memset(_paServerVersion, 0, sizeof(_paServerVersion)); 94 memset(_paServerVersion, 0, sizeof(_paServerVersion));
96 memset(&_playBufferAttr, 0, sizeof(_playBufferAttr)); 95 memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
97 memset(&_recBufferAttr, 0, sizeof(_recBufferAttr)); 96 memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
98 memset(_oldKeyState, 0, sizeof(_oldKeyState)); 97 memset(_oldKeyState, 0, sizeof(_oldKeyState));
99 } 98 }
100 99
101 AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() { 100 AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse() {
102 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", 101 LOG(LS_INFO) << __FUNCTION__ << " destroyed";
103 __FUNCTION__);
104 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 102 RTC_DCHECK(thread_checker_.CalledOnValidThread());
105 Terminate(); 103 Terminate();
106 104
107 if (_recBuffer) { 105 if (_recBuffer) {
108 delete[] _recBuffer; 106 delete[] _recBuffer;
109 _recBuffer = NULL; 107 _recBuffer = NULL;
110 } 108 }
111 if (_playBuffer) { 109 if (_playBuffer) {
112 delete[] _playBuffer; 110 delete[] _playBuffer;
113 _playBuffer = NULL; 111 _playBuffer = NULL;
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after
217 if (_ptrThreadPlay) { 215 if (_ptrThreadPlay) {
218 rtc::PlatformThread* tmpThread = _ptrThreadPlay.release(); 216 rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();
219 217
220 _timeEventPlay.Set(); 218 _timeEventPlay.Set();
221 tmpThread->Stop(); 219 tmpThread->Stop();
222 delete tmpThread; 220 delete tmpThread;
223 } 221 }
224 222
225 // Terminate PulseAudio 223 // Terminate PulseAudio
226 if (TerminatePulseAudio() < 0) { 224 if (TerminatePulseAudio() < 0) {
227 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 225 LOG(LS_ERROR) << " failed to terminate PulseAudio";
henrika_webrtc 2017/07/07 14:04:45 Same here and in the rest of the file as well...
saza WebRTC 2017/07/10 06:37:11 Done.
228 " failed to terminate PulseAudio");
229 return -1; 226 return -1;
230 } 227 }
231 228
232 if (_XDisplay) { 229 if (_XDisplay) {
233 XCloseDisplay(_XDisplay); 230 XCloseDisplay(_XDisplay);
234 _XDisplay = NULL; 231 _XDisplay = NULL;
235 } 232 }
236 233
237 _initialized = false; 234 _initialized = false;
238 _outputDeviceIsSpecified = false; 235 _outputDeviceIsSpecified = false;
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after
372 return -1; 369 return -1;
373 } 370 }
374 371
375 volume = level; 372 volume = level;
376 373
377 return 0; 374 return 0;
378 } 375 }
379 376
380 int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(uint16_t volumeLeft, 377 int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(uint16_t volumeLeft,
381 uint16_t volumeRight) { 378 uint16_t volumeRight) {
382 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 379 LOG(LS_WARNING) << " API call not supported on this platform";
383 " API call not supported on this platform");
384 return -1; 380 return -1;
385 } 381 }
386 382
387 int32_t AudioDeviceLinuxPulse::WaveOutVolume(uint16_t& /*volumeLeft*/, 383 int32_t AudioDeviceLinuxPulse::WaveOutVolume(uint16_t& /*volumeLeft*/,
388 uint16_t& /*volumeRight*/) const { 384 uint16_t& /*volumeRight*/) const {
389 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 385 LOG(LS_WARNING) << " API call not supported on this platform";
390 " API call not supported on this platform");
391 return -1; 386 return -1;
392 } 387 }
393 388
394 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const { 389 int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(uint32_t& maxVolume) const {
395 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 390 RTC_DCHECK(thread_checker_.CalledOnValidThread());
396 uint32_t maxVol(0); 391 uint32_t maxVol(0);
397 392
398 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { 393 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) {
399 return -1; 394 return -1;
400 } 395 }
(...skipping 305 matching lines...) Expand 10 before | Expand all | Expand 10 after
706 } 701 }
707 702
708 int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) { 703 int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume) {
709 return (_mixerManager.SetMicrophoneVolume(volume)); 704 return (_mixerManager.SetMicrophoneVolume(volume));
710 } 705 }
711 706
712 int32_t AudioDeviceLinuxPulse::MicrophoneVolume(uint32_t& volume) const { 707 int32_t AudioDeviceLinuxPulse::MicrophoneVolume(uint32_t& volume) const {
713 uint32_t level(0); 708 uint32_t level(0);
714 709
715 if (_mixerManager.MicrophoneVolume(level) == -1) { 710 if (_mixerManager.MicrophoneVolume(level) == -1) {
716 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 711 LOG(LS_WARNING) << " failed to retrieve current microphone level";
717 " failed to retrive current microphone level");
718 return -1; 712 return -1;
719 } 713 }
720 714
721 volume = level; 715 volume = level;
722 716
723 return 0; 717 return 0;
724 } 718 }
725 719
726 int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const { 720 int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(uint32_t& maxVolume) const {
727 uint32_t maxVol(0); 721 uint32_t maxVol(0);
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
779 } 773 }
780 774
781 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) { 775 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index) {
782 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 776 RTC_DCHECK(thread_checker_.CalledOnValidThread());
783 if (_playIsInitialized) { 777 if (_playIsInitialized) {
784 return -1; 778 return -1;
785 } 779 }
786 780
787 const uint16_t nDevices = PlayoutDevices(); 781 const uint16_t nDevices = PlayoutDevices();
788 782
789 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, 783 LOG(LS_VERBOSE) << " number of availiable output devices is " << nDevices;
790 " number of availiable output devices is %u", nDevices);
791 784
792 if (index > (nDevices - 1)) { 785 if (index > (nDevices - 1)) {
793 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 786 LOG(LS_ERROR) << " device index is out of range [0," << (nDevices - 1)
794 " device index is out of range [0,%u]", (nDevices - 1)); 787 << "]";
795 return -1; 788 return -1;
796 } 789 }
797 790
798 _outputDeviceIndex = index; 791 _outputDeviceIndex = index;
799 _outputDeviceIsSpecified = true; 792 _outputDeviceIsSpecified = true;
800 793
801 return 0; 794 return 0;
802 } 795 }
803 796
804 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice( 797 int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
805 AudioDeviceModule::WindowsDeviceType /*device*/) { 798 AudioDeviceModule::WindowsDeviceType /*device*/) {
806 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 799 LOG(LS_ERROR) << "WindowsDeviceType not supported";
807 "WindowsDeviceType not supported");
808 return -1; 800 return -1;
809 } 801 }
810 802
811 int32_t AudioDeviceLinuxPulse::PlayoutDeviceName( 803 int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
812 uint16_t index, 804 uint16_t index,
813 char name[kAdmMaxDeviceNameSize], 805 char name[kAdmMaxDeviceNameSize],
814 char guid[kAdmMaxGuidSize]) { 806 char guid[kAdmMaxGuidSize]) {
815 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 807 RTC_DCHECK(thread_checker_.CalledOnValidThread());
816 const uint16_t nDevices = PlayoutDevices(); 808 const uint16_t nDevices = PlayoutDevices();
817 809
(...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after
902 } 894 }
903 895
904 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) { 896 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index) {
905 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 897 RTC_DCHECK(thread_checker_.CalledOnValidThread());
906 if (_recIsInitialized) { 898 if (_recIsInitialized) {
907 return -1; 899 return -1;
908 } 900 }
909 901
910 const uint16_t nDevices(RecordingDevices()); 902 const uint16_t nDevices(RecordingDevices());
911 903
912 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, 904 LOG(LS_VERBOSE) << " number of availiable input devices is " << nDevices;
913 " number of availiable input devices is %u", nDevices);
914 905
915 if (index > (nDevices - 1)) { 906 if (index > (nDevices - 1)) {
916 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 907 LOG(LS_ERROR) << " device index is out of range [0," << (nDevices - 1)
917 " device index is out of range [0,%u]", (nDevices - 1)); 908 << "]";
918 return -1; 909 return -1;
919 } 910 }
920 911
921 _inputDeviceIndex = index; 912 _inputDeviceIndex = index;
922 _inputDeviceIsSpecified = true; 913 _inputDeviceIsSpecified = true;
923 914
924 return 0; 915 return 0;
925 } 916 }
926 917
927 int32_t AudioDeviceLinuxPulse::SetRecordingDevice( 918 int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
928 AudioDeviceModule::WindowsDeviceType /*device*/) { 919 AudioDeviceModule::WindowsDeviceType /*device*/) {
929 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 920 LOG(LS_ERROR) << "WindowsDeviceType not supported";
930 "WindowsDeviceType not supported");
931 return -1; 921 return -1;
932 } 922 }
933 923
934 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) { 924 int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available) {
935 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 925 RTC_DCHECK(thread_checker_.CalledOnValidThread());
936 available = false; 926 available = false;
937 927
938 // Try to initialize the playout side 928 // Try to initialize the playout side
939 int32_t res = InitPlayout(); 929 int32_t res = InitPlayout();
940 930
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
975 if (!_outputDeviceIsSpecified) { 965 if (!_outputDeviceIsSpecified) {
976 return -1; 966 return -1;
977 } 967 }
978 968
979 if (_playIsInitialized) { 969 if (_playIsInitialized) {
980 return 0; 970 return 0;
981 } 971 }
982 972
983 // Initialize the speaker (devices might have been added or removed) 973 // Initialize the speaker (devices might have been added or removed)
984 if (InitSpeaker() == -1) { 974 if (InitSpeaker() == -1) {
985 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 975 LOG(LS_WARNING) << " InitSpeaker() failed";
986 " InitSpeaker() failed");
987 } 976 }
988 977
989 // Set the play sample specification 978 // Set the play sample specification
990 pa_sample_spec playSampleSpec; 979 pa_sample_spec playSampleSpec;
991 playSampleSpec.channels = _playChannels; 980 playSampleSpec.channels = _playChannels;
992 playSampleSpec.format = PA_SAMPLE_S16LE; 981 playSampleSpec.format = PA_SAMPLE_S16LE;
993 playSampleSpec.rate = sample_rate_hz_; 982 playSampleSpec.rate = sample_rate_hz_;
994 983
995 // Create a new play stream 984 // Create a new play stream
996 _playStream = 985 _playStream =
997 LATE(pa_stream_new)(_paContext, "playStream", &playSampleSpec, NULL); 986 LATE(pa_stream_new)(_paContext, "playStream", &playSampleSpec, NULL);
998 987
999 if (!_playStream) { 988 if (!_playStream) {
1000 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 989 LOG(LS_ERROR) << " failed to create play stream, err="
1001 " failed to create play stream, err=%d", 990 << LATE(pa_context_errno)(_paContext);
1002 LATE(pa_context_errno)(_paContext));
1003 return -1; 991 return -1;
1004 } 992 }
1005 993
1006 // Provide the playStream to the mixer 994 // Provide the playStream to the mixer
1007 _mixerManager.SetPlayStream(_playStream); 995 _mixerManager.SetPlayStream(_playStream);
1008 996
1009 if (_ptrAudioBuffer) { 997 if (_ptrAudioBuffer) {
1010 // Update audio buffer with the selected parameters 998 // Update audio buffer with the selected parameters
1011 _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_); 999 _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
1012 _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels); 1000 _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
1013 } 1001 }
1014 1002
1015 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stream state %d\n", 1003 LOG(LS_VERBOSE) << " stream state "
1016 LATE(pa_stream_get_state)(_playStream)); 1004 << LATE(pa_stream_get_state)(_playStream);
1017 1005
1018 // Set stream flags 1006 // Set stream flags
1019 _playStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE | 1007 _playStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE |
1020 PA_STREAM_INTERPOLATE_TIMING); 1008 PA_STREAM_INTERPOLATE_TIMING);
1021 1009
1022 if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { 1010 if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
1023 // If configuring a specific latency then we want to specify 1011 // If configuring a specific latency then we want to specify
1024 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters 1012 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
1025 // automatically to reach that target latency. However, that flag 1013 // automatically to reach that target latency. However, that flag
1026 // doesn't exist in Ubuntu 8.04 and many people still use that, 1014 // doesn't exist in Ubuntu 8.04 and many people still use that,
1027 // so we have to check the protocol version of libpulse. 1015 // so we have to check the protocol version of libpulse.
1028 if (LATE(pa_context_get_protocol_version)(_paContext) >= 1016 if (LATE(pa_context_get_protocol_version)(_paContext) >=
1029 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { 1017 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
1030 _playStreamFlags |= PA_STREAM_ADJUST_LATENCY; 1018 _playStreamFlags |= PA_STREAM_ADJUST_LATENCY;
1031 } 1019 }
1032 1020
1033 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); 1021 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
1034 if (!spec) { 1022 if (!spec) {
1035 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1023 LOG(LS_ERROR) << " pa_stream_get_sample_spec()";
1036 " pa_stream_get_sample_spec()");
1037 return -1; 1024 return -1;
1038 } 1025 }
1039 1026
1040 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); 1027 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
1041 uint32_t latency = bytesPerSec * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS / 1028 uint32_t latency = bytesPerSec * WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS /
1042 WEBRTC_PA_MSECS_PER_SEC; 1029 WEBRTC_PA_MSECS_PER_SEC;
1043 1030
1044 // Set the play buffer attributes 1031 // Set the play buffer attributes
1045 _playBufferAttr.maxlength = latency; // num bytes stored in the buffer 1032 _playBufferAttr.maxlength = latency; // num bytes stored in the buffer
1046 _playBufferAttr.tlength = latency; // target fill level of play buffer 1033 _playBufferAttr.tlength = latency; // target fill level of play buffer
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
1082 if (!_inputDeviceIsSpecified) { 1069 if (!_inputDeviceIsSpecified) {
1083 return -1; 1070 return -1;
1084 } 1071 }
1085 1072
1086 if (_recIsInitialized) { 1073 if (_recIsInitialized) {
1087 return 0; 1074 return 0;
1088 } 1075 }
1089 1076
1090 // Initialize the microphone (devices might have been added or removed) 1077 // Initialize the microphone (devices might have been added or removed)
1091 if (InitMicrophone() == -1) { 1078 if (InitMicrophone() == -1) {
1092 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 1079 LOG(LS_WARNING) << " InitMicrophone() failed";
1093 " InitMicrophone() failed");
1094 } 1080 }
1095 1081
1096 // Set the rec sample specification 1082 // Set the rec sample specification
1097 pa_sample_spec recSampleSpec; 1083 pa_sample_spec recSampleSpec;
1098 recSampleSpec.channels = _recChannels; 1084 recSampleSpec.channels = _recChannels;
1099 recSampleSpec.format = PA_SAMPLE_S16LE; 1085 recSampleSpec.format = PA_SAMPLE_S16LE;
1100 recSampleSpec.rate = sample_rate_hz_; 1086 recSampleSpec.rate = sample_rate_hz_;
1101 1087
1102 // Create a new rec stream 1088 // Create a new rec stream
1103 _recStream = 1089 _recStream =
1104 LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, NULL); 1090 LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec, NULL);
1105 if (!_recStream) { 1091 if (!_recStream) {
1106 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1092 LOG(LS_ERROR) << " failed to create rec stream, err="
1107 " failed to create rec stream, err=%d", 1093 << LATE(pa_context_errno)(_paContext);
1108 LATE(pa_context_errno)(_paContext));
1109 return -1; 1094 return -1;
1110 } 1095 }
1111 1096
1112 // Provide the recStream to the mixer 1097 // Provide the recStream to the mixer
1113 _mixerManager.SetRecStream(_recStream); 1098 _mixerManager.SetRecStream(_recStream);
1114 1099
1115 if (_ptrAudioBuffer) { 1100 if (_ptrAudioBuffer) {
1116 // Update audio buffer with the selected parameters 1101 // Update audio buffer with the selected parameters
1117 _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_); 1102 _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
1118 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels); 1103 _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
1119 } 1104 }
1120 1105
1121 if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { 1106 if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
1122 _recStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE | 1107 _recStreamFlags = (pa_stream_flags_t)(PA_STREAM_AUTO_TIMING_UPDATE |
1123 PA_STREAM_INTERPOLATE_TIMING); 1108 PA_STREAM_INTERPOLATE_TIMING);
1124 1109
1125 // If configuring a specific latency then we want to specify 1110 // If configuring a specific latency then we want to specify
1126 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters 1111 // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
1127 // automatically to reach that target latency. However, that flag 1112 // automatically to reach that target latency. However, that flag
1128 // doesn't exist in Ubuntu 8.04 and many people still use that, 1113 // doesn't exist in Ubuntu 8.04 and many people still use that,
1129 // so we have to check the protocol version of libpulse. 1114 // so we have to check the protocol version of libpulse.
1130 if (LATE(pa_context_get_protocol_version)(_paContext) >= 1115 if (LATE(pa_context_get_protocol_version)(_paContext) >=
1131 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) { 1116 WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION) {
1132 _recStreamFlags |= PA_STREAM_ADJUST_LATENCY; 1117 _recStreamFlags |= PA_STREAM_ADJUST_LATENCY;
1133 } 1118 }
1134 1119
1135 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_recStream); 1120 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_recStream);
1136 if (!spec) { 1121 if (!spec) {
1137 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1122 LOG(LS_ERROR) << " pa_stream_get_sample_spec(rec)";
1138 " pa_stream_get_sample_spec(rec)");
1139 return -1; 1123 return -1;
1140 } 1124 }
1141 1125
1142 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); 1126 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
1143 uint32_t latency = bytesPerSec * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / 1127 uint32_t latency = bytesPerSec * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS /
1144 WEBRTC_PA_MSECS_PER_SEC; 1128 WEBRTC_PA_MSECS_PER_SEC;
1145 1129
1146 // Set the rec buffer attributes 1130 // Set the rec buffer attributes
1147 // Note: fragsize specifies a maximum transfer size, not a minimum, so 1131 // Note: fragsize specifies a maximum transfer size, not a minimum, so
1148 // it is not possible to force a high latency setting, only a low one. 1132 // it is not possible to force a high latency setting, only a low one.
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
1185 _startRec = true; 1169 _startRec = true;
1186 1170
1187 // The audio thread will signal when recording has started. 1171 // The audio thread will signal when recording has started.
1188 _timeEventRec.Set(); 1172 _timeEventRec.Set();
1189 if (kEventTimeout == _recStartEvent.Wait(10000)) { 1173 if (kEventTimeout == _recStartEvent.Wait(10000)) {
1190 { 1174 {
1191 rtc::CritScope lock(&_critSect); 1175 rtc::CritScope lock(&_critSect);
1192 _startRec = false; 1176 _startRec = false;
1193 } 1177 }
1194 StopRecording(); 1178 StopRecording();
1195 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1179 LOG(LS_ERROR) << " failed to activate recording";
1196 " failed to activate recording");
1197 return -1; 1180 return -1;
1198 } 1181 }
1199 1182
1200 { 1183 {
1201 rtc::CritScope lock(&_critSect); 1184 rtc::CritScope lock(&_critSect);
1202 if (_recording) { 1185 if (_recording) {
1203 // The recording state is set by the audio thread after recording 1186 // The recording state is set by the audio thread after recording
1204 // has started. 1187 // has started.
1205 } else { 1188 } else {
1206 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1189 LOG(LS_ERROR) << " failed to activate recording";
1207 " failed to activate recording");
1208 return -1; 1190 return -1;
1209 } 1191 }
1210 } 1192 }
1211 1193
1212 return 0; 1194 return 0;
1213 } 1195 }
1214 1196
1215 int32_t AudioDeviceLinuxPulse::StopRecording() { 1197 int32_t AudioDeviceLinuxPulse::StopRecording() {
1216 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 1198 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1217 rtc::CritScope lock(&_critSect); 1199 rtc::CritScope lock(&_critSect);
1218 1200
1219 if (!_recIsInitialized) { 1201 if (!_recIsInitialized) {
1220 return 0; 1202 return 0;
1221 } 1203 }
1222 1204
1223 if (_recStream == NULL) { 1205 if (_recStream == NULL) {
1224 return -1; 1206 return -1;
1225 } 1207 }
1226 1208
1227 _recIsInitialized = false; 1209 _recIsInitialized = false;
1228 _recording = false; 1210 _recording = false;
1229 1211
1230 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stopping recording"); 1212 LOG(LS_VERBOSE) << " stopping recording";
1231 1213
1232 // Stop Recording 1214 // Stop Recording
1233 PaLock(); 1215 PaLock();
1234 1216
1235 DisableReadCallback(); 1217 DisableReadCallback();
1236 LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL); 1218 LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);
1237 1219
1238 // Unset this here so that we don't get a TERMINATED callback 1220 // Unset this here so that we don't get a TERMINATED callback
1239 LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL); 1221 LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);
1240 1222
1241 if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) { 1223 if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED) {
1242 // Disconnect the stream 1224 // Disconnect the stream
1243 if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) { 1225 if (LATE(pa_stream_disconnect)(_recStream) != PA_OK) {
1244 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1226 LOG(LS_ERROR) << " failed to disconnect rec stream, err="
1245 " failed to disconnect rec stream, err=%d\n", 1227 << LATE(pa_context_errno)(_paContext);
1246 LATE(pa_context_errno)(_paContext));
1247 PaUnLock(); 1228 PaUnLock();
1248 return -1; 1229 return -1;
1249 } 1230 }
1250 1231
1251 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, 1232 LOG(LS_VERBOSE) << " disconnected recording";
1252 " disconnected recording");
1253 } 1233 }
1254 1234
1255 LATE(pa_stream_unref)(_recStream); 1235 LATE(pa_stream_unref)(_recStream);
1256 _recStream = NULL; 1236 _recStream = NULL;
1257 1237
1258 PaUnLock(); 1238 PaUnLock();
1259 1239
1260 // Provide the recStream to the mixer 1240 // Provide the recStream to the mixer
1261 _mixerManager.SetRecStream(_recStream); 1241 _mixerManager.SetRecStream(_recStream);
1262 1242
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
1304 // accessed on the playout thread. 1284 // accessed on the playout thread.
1305 1285
1306 // The audio thread will signal when playout has started. 1286 // The audio thread will signal when playout has started.
1307 _timeEventPlay.Set(); 1287 _timeEventPlay.Set();
1308 if (kEventTimeout == _playStartEvent.Wait(10000)) { 1288 if (kEventTimeout == _playStartEvent.Wait(10000)) {
1309 { 1289 {
1310 rtc::CritScope lock(&_critSect); 1290 rtc::CritScope lock(&_critSect);
1311 _startPlay = false; 1291 _startPlay = false;
1312 } 1292 }
1313 StopPlayout(); 1293 StopPlayout();
1314 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1294 LOG(LS_ERROR) << " failed to activate playout";
1315 " failed to activate playout");
1316 return -1; 1295 return -1;
1317 } 1296 }
1318 1297
1319 { 1298 {
1320 rtc::CritScope lock(&_critSect); 1299 rtc::CritScope lock(&_critSect);
1321 if (_playing) { 1300 if (_playing) {
1322 // The playing state is set by the audio thread after playout 1301 // The playing state is set by the audio thread after playout
1323 // has started. 1302 // has started.
1324 } else { 1303 } else {
1325 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1304 LOG(LS_ERROR) << " failed to activate playing";
1326 " failed to activate playing");
1327 return -1; 1305 return -1;
1328 } 1306 }
1329 } 1307 }
1330 1308
1331 return 0; 1309 return 0;
1332 } 1310 }
1333 1311
1334 int32_t AudioDeviceLinuxPulse::StopPlayout() { 1312 int32_t AudioDeviceLinuxPulse::StopPlayout() {
1335 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 1313 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1336 rtc::CritScope lock(&_critSect); 1314 rtc::CritScope lock(&_critSect);
1337 1315
1338 if (!_playIsInitialized) { 1316 if (!_playIsInitialized) {
1339 return 0; 1317 return 0;
1340 } 1318 }
1341 1319
1342 if (_playStream == NULL) { 1320 if (_playStream == NULL) {
1343 return -1; 1321 return -1;
1344 } 1322 }
1345 1323
1346 _playIsInitialized = false; 1324 _playIsInitialized = false;
1347 _playing = false; 1325 _playing = false;
1348 _sndCardPlayDelay = 0; 1326 _sndCardPlayDelay = 0;
1349 _sndCardRecDelay = 0; 1327 _sndCardRecDelay = 0;
1350 1328
1351 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stopping playback"); 1329 LOG(LS_VERBOSE) << " stopping playback";
1352 1330
1353 // Stop Playout 1331 // Stop Playout
1354 PaLock(); 1332 PaLock();
1355 1333
1356 DisableWriteCallback(); 1334 DisableWriteCallback();
1357 LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL); 1335 LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);
1358 1336
1359 // Unset this here so that we don't get a TERMINATED callback 1337 // Unset this here so that we don't get a TERMINATED callback
1360 LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL); 1338 LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);
1361 1339
1362 if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) { 1340 if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED) {
1363 // Disconnect the stream 1341 // Disconnect the stream
1364 if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) { 1342 if (LATE(pa_stream_disconnect)(_playStream) != PA_OK) {
1365 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1343 LOG(LS_ERROR) << " failed to disconnect play stream, err="
1366 " failed to disconnect play stream, err=%d", 1344 << LATE(pa_context_errno)(_paContext);
1367 LATE(pa_context_errno)(_paContext));
1368 PaUnLock(); 1345 PaUnLock();
1369 return -1; 1346 return -1;
1370 } 1347 }
1371 1348
1372 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, 1349 LOG(LS_VERBOSE) << " disconnected playback";
1373 " disconnected playback");
1374 } 1350 }
1375 1351
1376 LATE(pa_stream_unref)(_playStream); 1352 LATE(pa_stream_unref)(_playStream);
1377 _playStream = NULL; 1353 _playStream = NULL;
1378 1354
1379 PaUnLock(); 1355 PaUnLock();
1380 1356
1381 // Provide the playStream to the mixer 1357 // Provide the playStream to the mixer
1382 _mixerManager.SetPlayStream(_playStream); 1358 _mixerManager.SetPlayStream(_playStream);
1383 1359
(...skipping 20 matching lines...) Expand all
1404 bool AudioDeviceLinuxPulse::Playing() const { 1380 bool AudioDeviceLinuxPulse::Playing() const {
1405 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 1381 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1406 return (_playing); 1382 return (_playing);
1407 } 1383 }
1408 1384
1409 int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer( 1385 int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer(
1410 const AudioDeviceModule::BufferType type, 1386 const AudioDeviceModule::BufferType type,
1411 uint16_t sizeMS) { 1387 uint16_t sizeMS) {
1412 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 1388 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1413 if (type != AudioDeviceModule::kFixedBufferSize) { 1389 if (type != AudioDeviceModule::kFixedBufferSize) {
1414 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1390 LOG(LS_ERROR) << " Adaptive buffer size not supported on this platform";
1415 " Adaptive buffer size not supported on this platform");
1416 return -1; 1391 return -1;
1417 } 1392 }
1418 1393
1419 _playBufType = type; 1394 _playBufType = type;
1420 _playBufDelayFixed = sizeMS; 1395 _playBufDelayFixed = sizeMS;
1421 1396
1422 return 0; 1397 return 0;
1423 } 1398 }
1424 1399
1425 int32_t AudioDeviceLinuxPulse::PlayoutBuffer( 1400 int32_t AudioDeviceLinuxPulse::PlayoutBuffer(
1426 AudioDeviceModule::BufferType& type, 1401 AudioDeviceModule::BufferType& type,
1427 uint16_t& sizeMS) const { 1402 uint16_t& sizeMS) const {
1428 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 1403 RTC_DCHECK(thread_checker_.CalledOnValidThread());
1429 type = _playBufType; 1404 type = _playBufType;
1430 sizeMS = _playBufDelayFixed; 1405 sizeMS = _playBufDelayFixed;
1431 1406
1432 return 0; 1407 return 0;
1433 } 1408 }
1434 1409
1435 int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const { 1410 int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const {
1436 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 1411 LOG(LS_WARNING) << " API call not supported on this platform";
1437 " API call not supported on this platform");
1438 return -1; 1412 return -1;
1439 } 1413 }
1440 1414
1441 bool AudioDeviceLinuxPulse::PlayoutWarning() const { 1415 bool AudioDeviceLinuxPulse::PlayoutWarning() const {
1442 rtc::CritScope lock(&_critSect); 1416 rtc::CritScope lock(&_critSect);
1443 return (_playWarning > 0); 1417 return (_playWarning > 0);
1444 } 1418 }
1445 1419
1446 bool AudioDeviceLinuxPulse::PlayoutError() const { 1420 bool AudioDeviceLinuxPulse::PlayoutError() const {
1447 rtc::CritScope lock(&_critSect); 1421 rtc::CritScope lock(&_critSect);
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after
1509 const pa_server_info* i, 1483 const pa_server_info* i,
1510 void* pThis) { 1484 void* pThis) {
1511 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaServerInfoCallbackHandler(i); 1485 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaServerInfoCallbackHandler(i);
1512 } 1486 }
1513 1487
1514 void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream* p, void* pThis) { 1488 void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream* p, void* pThis) {
1515 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamStateCallbackHandler(p); 1489 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamStateCallbackHandler(p);
1516 } 1490 }
1517 1491
1518 void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context* c) { 1492 void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context* c) {
1519 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " context state cb"); 1493 LOG(LS_VERBOSE) << " context state cb";
1520 1494
1521 pa_context_state_t state = LATE(pa_context_get_state)(c); 1495 pa_context_state_t state = LATE(pa_context_get_state)(c);
1522 switch (state) { 1496 switch (state) {
1523 case PA_CONTEXT_UNCONNECTED: 1497 case PA_CONTEXT_UNCONNECTED:
1524 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " unconnected"); 1498 LOG(LS_VERBOSE) << " unconnected";
1525 break; 1499 break;
1526 case PA_CONTEXT_CONNECTING: 1500 case PA_CONTEXT_CONNECTING:
1527 case PA_CONTEXT_AUTHORIZING: 1501 case PA_CONTEXT_AUTHORIZING:
1528 case PA_CONTEXT_SETTING_NAME: 1502 case PA_CONTEXT_SETTING_NAME:
1529 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " no state"); 1503 LOG(LS_VERBOSE) << " no state";
1530 break; 1504 break;
1531 case PA_CONTEXT_FAILED: 1505 case PA_CONTEXT_FAILED:
1532 case PA_CONTEXT_TERMINATED: 1506 case PA_CONTEXT_TERMINATED:
1533 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " failed"); 1507 LOG(LS_VERBOSE) << " failed";
1534 _paStateChanged = true; 1508 _paStateChanged = true;
1535 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); 1509 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1536 break; 1510 break;
1537 case PA_CONTEXT_READY: 1511 case PA_CONTEXT_READY:
1538 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " ready"); 1512 LOG(LS_VERBOSE) << " ready";
1539 _paStateChanged = true; 1513 _paStateChanged = true;
1540 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); 1514 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1541 break; 1515 break;
1542 } 1516 }
1543 } 1517 }
1544 1518
1545 void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info* i, 1519 void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info* i,
1546 int eol) { 1520 int eol) {
1547 if (eol) { 1521 if (eol) {
1548 // Signal that we are done 1522 // Signal that we are done
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
1619 // Copy the sink name 1593 // Copy the sink name
1620 strncpy(_playDisplayDeviceName, i->default_sink_name, 1594 strncpy(_playDisplayDeviceName, i->default_sink_name,
1621 kAdmMaxDeviceNameSize); 1595 kAdmMaxDeviceNameSize);
1622 _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0'; 1596 _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
1623 } 1597 }
1624 1598
1625 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); 1599 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1626 } 1600 }
1627 1601
1628 void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream* p) { 1602 void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream* p) {
1629 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " stream state cb"); 1603 LOG(LS_VERBOSE) << " stream state cb";
1630 1604
1631 pa_stream_state_t state = LATE(pa_stream_get_state)(p); 1605 pa_stream_state_t state = LATE(pa_stream_get_state)(p);
1632 switch (state) { 1606 switch (state) {
1633 case PA_STREAM_UNCONNECTED: 1607 case PA_STREAM_UNCONNECTED:
1634 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " unconnected"); 1608 LOG(LS_VERBOSE) << " unconnected";
1635 break; 1609 break;
1636 case PA_STREAM_CREATING: 1610 case PA_STREAM_CREATING:
1637 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " creating"); 1611 LOG(LS_VERBOSE) << " creating";
1638 break; 1612 break;
1639 case PA_STREAM_FAILED: 1613 case PA_STREAM_FAILED:
1640 case PA_STREAM_TERMINATED: 1614 case PA_STREAM_TERMINATED:
1641 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " failed"); 1615 LOG(LS_VERBOSE) << " failed";
1642 break; 1616 break;
1643 case PA_STREAM_READY: 1617 case PA_STREAM_READY:
1644 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " ready"); 1618 LOG(LS_VERBOSE) << " ready";
1645 break; 1619 break;
1646 } 1620 }
1647 1621
1648 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0); 1622 LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
1649 } 1623 }
1650 1624
1651 int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() { 1625 int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion() {
1652 PaLock(); 1626 PaLock();
1653 1627
1654 pa_operation* paOperation = NULL; 1628 pa_operation* paOperation = NULL;
1655 1629
1656 // get the server info and update deviceName 1630 // get the server info and update deviceName
1657 paOperation = 1631 paOperation =
1658 LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this); 1632 LATE(pa_context_get_server_info)(_paContext, PaServerInfoCallback, this);
1659 1633
1660 WaitForOperationCompletion(paOperation); 1634 WaitForOperationCompletion(paOperation);
1661 1635
1662 PaUnLock(); 1636 PaUnLock();
1663 1637
1664 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, 1638 LOG(LS_VERBOSE) << " checking PulseAudio version: " << _paServerVersion;
1665 " checking PulseAudio version: %s", _paServerVersion);
1666 1639
1667 return 0; 1640 return 0;
1668 } 1641 }
1669 1642
1670 int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() { 1643 int32_t AudioDeviceLinuxPulse::InitSamplingFrequency() {
1671 PaLock(); 1644 PaLock();
1672 1645
1673 pa_operation* paOperation = NULL; 1646 pa_operation* paOperation = NULL;
1674 1647
1675 // Get the server info and update sample_rate_hz_ 1648 // Get the server info and update sample_rate_hz_
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after
1753 return 0; 1726 return 0;
1754 } 1727 }
1755 1728
1756 int32_t AudioDeviceLinuxPulse::InitPulseAudio() { 1729 int32_t AudioDeviceLinuxPulse::InitPulseAudio() {
1757 int retVal = 0; 1730 int retVal = 0;
1758 1731
1759 // Load libpulse 1732 // Load libpulse
1760 if (!PaSymbolTable.Load()) { 1733 if (!PaSymbolTable.Load()) {
1761 // Most likely the Pulse library and sound server are not installed on 1734 // Most likely the Pulse library and sound server are not installed on
1762 // this system 1735 // this system
1763 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1736 LOG(LS_ERROR) << " failed to load symbol table";
1764 " failed to load symbol table");
1765 return -1; 1737 return -1;
1766 } 1738 }
1767 1739
1768 // Create a mainloop API and connection to the default server 1740 // Create a mainloop API and connection to the default server
1769 // the mainloop is the internal asynchronous API event loop 1741 // the mainloop is the internal asynchronous API event loop
1770 if (_paMainloop) { 1742 if (_paMainloop) {
1771 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1743 LOG(LS_ERROR) << " PA mainloop has already existed";
1772 " PA mainloop has already existed");
1773 return -1; 1744 return -1;
1774 } 1745 }
1775 _paMainloop = LATE(pa_threaded_mainloop_new)(); 1746 _paMainloop = LATE(pa_threaded_mainloop_new)();
1776 if (!_paMainloop) { 1747 if (!_paMainloop) {
1777 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1748 LOG(LS_ERROR) << " could not create mainloop";
1778 " could not create mainloop");
1779 return -1; 1749 return -1;
1780 } 1750 }
1781 1751
1782 // Start the threaded main loop 1752 // Start the threaded main loop
1783 retVal = LATE(pa_threaded_mainloop_start)(_paMainloop); 1753 retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
1784 if (retVal != PA_OK) { 1754 if (retVal != PA_OK) {
1785 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1755 LOG(LS_ERROR) << " failed to start main loop, error=" << retVal;
1786 " failed to start main loop, error=%d", retVal);
1787 return -1; 1756 return -1;
1788 } 1757 }
1789 1758
1790 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " mainloop running!"); 1759 LOG(LS_VERBOSE) << " mainloop running!";
1791 1760
1792 PaLock(); 1761 PaLock();
1793 1762
1794 _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop); 1763 _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
1795 if (!_paMainloopApi) { 1764 if (!_paMainloopApi) {
1796 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1765 LOG(LS_ERROR) << " could not create mainloop API";
1797 " could not create mainloop API");
1798 PaUnLock(); 1766 PaUnLock();
1799 return -1; 1767 return -1;
1800 } 1768 }
1801 1769
1802 // Create a new PulseAudio context 1770 // Create a new PulseAudio context
1803 if (_paContext) { 1771 if (_paContext) {
1804 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1772 LOG(LS_ERROR) << " PA context has already existed";
1805 " PA context has already existed");
1806 PaUnLock(); 1773 PaUnLock();
1807 return -1; 1774 return -1;
1808 } 1775 }
1809 _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine"); 1776 _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");
1810 1777
1811 if (!_paContext) { 1778 if (!_paContext) {
1812 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1779 LOG(LS_ERROR) << " could not create context";
1813 " could not create context");
1814 PaUnLock(); 1780 PaUnLock();
1815 return -1; 1781 return -1;
1816 } 1782 }
1817 1783
1818 // Set state callback function 1784 // Set state callback function
1819 LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback, this); 1785 LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback, this);
1820 1786
1821 // Connect the context to a server (default) 1787 // Connect the context to a server (default)
1822 _paStateChanged = false; 1788 _paStateChanged = false;
1823 retVal = 1789 retVal =
1824 LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL); 1790 LATE(pa_context_connect)(_paContext, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL);
1825 1791
1826 if (retVal != PA_OK) { 1792 if (retVal != PA_OK) {
1827 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1793 LOG(LS_ERROR) << " failed to connect context, error=" << retVal;
1828 " failed to connect context, error=%d", retVal);
1829 PaUnLock(); 1794 PaUnLock();
1830 return -1; 1795 return -1;
1831 } 1796 }
1832 1797
1833 // Wait for state change 1798 // Wait for state change
1834 while (!_paStateChanged) { 1799 while (!_paStateChanged) {
1835 LATE(pa_threaded_mainloop_wait)(_paMainloop); 1800 LATE(pa_threaded_mainloop_wait)(_paMainloop);
1836 } 1801 }
1837 1802
1838 // Now check to see what final state we reached. 1803 // Now check to see what final state we reached.
1839 pa_context_state_t state = LATE(pa_context_get_state)(_paContext); 1804 pa_context_state_t state = LATE(pa_context_get_state)(_paContext);
1840 1805
1841 if (state != PA_CONTEXT_READY) { 1806 if (state != PA_CONTEXT_READY) {
1842 if (state == PA_CONTEXT_FAILED) { 1807 if (state == PA_CONTEXT_FAILED) {
1843 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1808 LOG(LS_ERROR) << " failed to connect to PulseAudio sound server";
1844 " failed to connect to PulseAudio sound server");
1845 } else if (state == PA_CONTEXT_TERMINATED) { 1809 } else if (state == PA_CONTEXT_TERMINATED) {
1846 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1810 LOG(LS_ERROR) << " PulseAudio connection terminated early";
1847 " PulseAudio connection terminated early");
1848 } else { 1811 } else {
1849 // Shouldn't happen, because we only signal on one of those three 1812 // Shouldn't happen, because we only signal on one of those three
1850 // states 1813 // states
1851 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1814 LOG(LS_ERROR) << " unknown problem connecting to PulseAudio";
1852 " unknown problem connecting to PulseAudio");
1853 } 1815 }
1854 PaUnLock(); 1816 PaUnLock();
1855 return -1; 1817 return -1;
1856 } 1818 }
1857 1819
1858 PaUnLock(); 1820 PaUnLock();
1859 1821
1860 // Give the objects to the mixer manager 1822 // Give the objects to the mixer manager
1861 _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext); 1823 _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);
1862 1824
1863 // Check the version 1825 // Check the version
1864 if (CheckPulseAudioVersion() < 0) { 1826 if (CheckPulseAudioVersion() < 0) {
1865 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1827 LOG(LS_ERROR) << " PulseAudio version " << _paServerVersion
1866 " PulseAudio version %s not supported", _paServerVersion); 1828 << " not supported";
1867 return -1; 1829 return -1;
1868 } 1830 }
1869 1831
1870 // Initialize sampling frequency 1832 // Initialize sampling frequency
1871 if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) { 1833 if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0) {
1872 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1834 LOG(LS_ERROR) << " failed to initialize sampling frequency, set to "
1873 " failed to initialize sampling frequency," 1835 << sample_rate_hz_ << " Hz";
1874 " set to %d Hz",
1875 sample_rate_hz_);
1876 return -1; 1836 return -1;
1877 } 1837 }
1878 1838
1879 return 0; 1839 return 0;
1880 } 1840 }
1881 1841
1882 int32_t AudioDeviceLinuxPulse::TerminatePulseAudio() { 1842 int32_t AudioDeviceLinuxPulse::TerminatePulseAudio() {
1883 // Do nothing if the instance doesn't exist 1843 // Do nothing if the instance doesn't exist
1884 // likely PaSymbolTable.Load() fails 1844 // likely PaSymbolTable.Load() fails
1885 if (!_paMainloop) { 1845 if (!_paMainloop) {
(...skipping 20 matching lines...) Expand all
1906 LATE(pa_threaded_mainloop_stop)(_paMainloop); 1866 LATE(pa_threaded_mainloop_stop)(_paMainloop);
1907 } 1867 }
1908 1868
1909 // Free the mainloop 1869 // Free the mainloop
1910 if (_paMainloop) { 1870 if (_paMainloop) {
1911 LATE(pa_threaded_mainloop_free)(_paMainloop); 1871 LATE(pa_threaded_mainloop_free)(_paMainloop);
1912 } 1872 }
1913 1873
1914 _paMainloop = NULL; 1874 _paMainloop = NULL;
1915 1875
1916 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " PulseAudio terminated"); 1876 LOG(LS_VERBOSE) << " PulseAudio terminated";
1917 1877
1918 return 0; 1878 return 0;
1919 } 1879 }
1920 1880
1921 void AudioDeviceLinuxPulse::PaLock() { 1881 void AudioDeviceLinuxPulse::PaLock() {
1922 LATE(pa_threaded_mainloop_lock)(_paMainloop); 1882 LATE(pa_threaded_mainloop_lock)(_paMainloop);
1923 } 1883 }
1924 1884
1925 void AudioDeviceLinuxPulse::PaUnLock() { 1885 void AudioDeviceLinuxPulse::PaUnLock() {
1926 LATE(pa_threaded_mainloop_unlock)(_paMainloop); 1886 LATE(pa_threaded_mainloop_unlock)(_paMainloop);
1927 } 1887 }
1928 1888
1929 void AudioDeviceLinuxPulse::WaitForOperationCompletion( 1889 void AudioDeviceLinuxPulse::WaitForOperationCompletion(
1930 pa_operation* paOperation) const { 1890 pa_operation* paOperation) const {
1931 if (!paOperation) { 1891 if (!paOperation) {
1932 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1892 LOG(LS_ERROR) << "paOperation NULL in WaitForOperationCompletion";
1933 "paOperation NULL in WaitForOperationCompletion");
1934 return; 1893 return;
1935 } 1894 }
1936 1895
1937 while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) { 1896 while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING) {
1938 LATE(pa_threaded_mainloop_wait)(_paMainloop); 1897 LATE(pa_threaded_mainloop_wait)(_paMainloop);
1939 } 1898 }
1940 1899
1941 LATE(pa_operation_unref)(paOperation); 1900 LATE(pa_operation_unref)(paOperation);
1942 } 1901 }
1943 1902
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
1982 _timeEventPlay.Set(); 1941 _timeEventPlay.Set();
1983 } 1942 }
1984 1943
1985 void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream* /*unused*/, 1944 void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream* /*unused*/,
1986 void* pThis) { 1945 void* pThis) {
1987 static_cast<AudioDeviceLinuxPulse*>(pThis) 1946 static_cast<AudioDeviceLinuxPulse*>(pThis)
1988 ->PaStreamUnderflowCallbackHandler(); 1947 ->PaStreamUnderflowCallbackHandler();
1989 } 1948 }
1990 1949
1991 void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() { 1950 void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler() {
1992 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " Playout underflow"); 1951 LOG(LS_WARNING) << " Playout underflow";
1993 1952
1994 if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) { 1953 if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS) {
1995 // We didn't configure a pa_buffer_attr before, so switching to 1954 // We didn't configure a pa_buffer_attr before, so switching to
1996 // one now would be questionable. 1955 // one now would be questionable.
1997 return; 1956 return;
1998 } 1957 }
1999 1958
2000 // Otherwise reconfigure the stream with a higher target latency. 1959 // Otherwise reconfigure the stream with a higher target latency.
2001 1960
2002 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); 1961 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
2003 if (!spec) { 1962 if (!spec) {
2004 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1963 LOG(LS_ERROR) << " pa_stream_get_sample_spec()";
2005 " pa_stream_get_sample_spec()");
2006 return; 1964 return;
2007 } 1965 }
2008 1966
2009 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec); 1967 size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
2010 uint32_t newLatency = 1968 uint32_t newLatency =
2011 _configuredLatencyPlay + bytesPerSec * 1969 _configuredLatencyPlay + bytesPerSec *
2012 WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS / 1970 WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS /
2013 WEBRTC_PA_MSECS_PER_SEC; 1971 WEBRTC_PA_MSECS_PER_SEC;
2014 1972
2015 // Set the play buffer attributes 1973 // Set the play buffer attributes
2016 _playBufferAttr.maxlength = newLatency; 1974 _playBufferAttr.maxlength = newLatency;
2017 _playBufferAttr.tlength = newLatency; 1975 _playBufferAttr.tlength = newLatency;
2018 _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR; 1976 _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
2019 _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq; 1977 _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;
2020 1978
2021 pa_operation* op = LATE(pa_stream_set_buffer_attr)( 1979 pa_operation* op = LATE(pa_stream_set_buffer_attr)(
2022 _playStream, &_playBufferAttr, NULL, NULL); 1980 _playStream, &_playBufferAttr, NULL, NULL);
2023 if (!op) { 1981 if (!op) {
2024 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 1982 LOG(LS_ERROR) << " pa_stream_set_buffer_attr()";
2025 " pa_stream_set_buffer_attr()");
2026 return; 1983 return;
2027 } 1984 }
2028 1985
2029 // Don't need to wait for this to complete. 1986 // Don't need to wait for this to complete.
2030 LATE(pa_operation_unref)(op); 1987 LATE(pa_operation_unref)(op);
2031 1988
2032 // Save the new latency in case we underflow again. 1989 // Save the new latency in case we underflow again.
2033 _configuredLatencyPlay = newLatency; 1990 _configuredLatencyPlay = newLatency;
2034 } 1991 }
2035 1992
2036 void AudioDeviceLinuxPulse::EnableReadCallback() { 1993 void AudioDeviceLinuxPulse::EnableReadCallback() {
2037 LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this); 1994 LATE(pa_stream_set_read_callback)(_recStream, &PaStreamReadCallback, this);
2038 } 1995 }
2039 1996
2040 void AudioDeviceLinuxPulse::DisableReadCallback() { 1997 void AudioDeviceLinuxPulse::DisableReadCallback() {
2041 LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL); 1998 LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
2042 } 1999 }
2043 2000
2044 void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream* /*unused1*/, 2001 void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream* /*unused1*/,
2045 size_t /*unused2*/, 2002 size_t /*unused2*/,
2046 void* pThis) { 2003 void* pThis) {
2047 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamReadCallbackHandler(); 2004 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamReadCallbackHandler();
2048 } 2005 }
2049 2006
2050 void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() { 2007 void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler() {
2051 // We get the data pointer and size now in order to save one Lock/Unlock 2008 // We get the data pointer and size now in order to save one Lock/Unlock
2052 // in the worker thread. 2009 // in the worker thread.
2053 if (LATE(pa_stream_peek)(_recStream, &_tempSampleData, 2010 if (LATE(pa_stream_peek)(_recStream, &_tempSampleData,
2054 &_tempSampleDataSize) != 0) { 2011 &_tempSampleDataSize) != 0) {
2055 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Can't read data!"); 2012 LOG(LS_ERROR) << " Can't read data!";
2056 return; 2013 return;
2057 } 2014 }
2058 2015
2059 // Since we consume the data asynchronously on a different thread, we have 2016 // Since we consume the data asynchronously on a different thread, we have
2060 // to temporarily disable the read callback or else Pulse will call it 2017 // to temporarily disable the read callback or else Pulse will call it
2061 // continuously until we consume the data. We re-enable it below. 2018 // continuously until we consume the data. We re-enable it below.
2062 DisableReadCallback(); 2019 DisableReadCallback();
2063 _timeEventRec.Set(); 2020 _timeEventRec.Set();
2064 } 2021 }
2065 2022
2066 void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream* /*unused*/, 2023 void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream* /*unused*/,
2067 void* pThis) { 2024 void* pThis) {
2068 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamOverflowCallbackHandler(); 2025 static_cast<AudioDeviceLinuxPulse*>(pThis)->PaStreamOverflowCallbackHandler();
2069 } 2026 }
2070 2027
2071 void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() { 2028 void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler() {
2072 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " Recording overflow"); 2029 LOG(LS_WARNING) << " Recording overflow";
2073 } 2030 }
2074 2031
2075 int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream* stream) { 2032 int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream* stream) {
2076 if (!WEBRTC_PA_REPORT_LATENCY) { 2033 if (!WEBRTC_PA_REPORT_LATENCY) {
2077 return 0; 2034 return 0;
2078 } 2035 }
2079 2036
2080 if (!stream) { 2037 if (!stream) {
2081 return 0; 2038 return 0;
2082 } 2039 }
2083 2040
2084 pa_usec_t latency; 2041 pa_usec_t latency;
2085 int negative; 2042 int negative;
2086 if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) { 2043 if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0) {
2087 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Can't query latency"); 2044 LOG(LS_ERROR) << " Can't query latency";
2088 // We'd rather continue playout/capture with an incorrect delay than 2045 // We'd rather continue playout/capture with an incorrect delay than
2089 // stop it altogether, so return a valid value. 2046 // stop it altogether, so return a valid value.
2090 return 0; 2047 return 0;
2091 } 2048 }
2092 2049
2093 if (negative) { 2050 if (negative) {
2094 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, 2051 LOG(LS_VERBOSE)
2095 " warning: pa_stream_get_latency reported negative " 2052 << " warning: pa_stream_get_latency reported negative delay";
2096 "delay");
2097 2053
2098 // The delay can be negative for monitoring streams if the captured 2054 // The delay can be negative for monitoring streams if the captured
2099 // samples haven't been played yet. In such a case, "latency" 2055 // samples haven't been played yet. In such a case, "latency"
2100 // contains the magnitude, so we must negate it to get the real value. 2056 // contains the magnitude, so we must negate it to get the real value.
2101 int32_t tmpLatency = (int32_t)-latency; 2057 int32_t tmpLatency = (int32_t)-latency;
2102 if (tmpLatency < 0) { 2058 if (tmpLatency < 0) {
2103 // Make sure that we don't use a negative delay. 2059 // Make sure that we don't use a negative delay.
2104 tmpLatency = 0; 2060 tmpLatency = 0;
2105 } 2061 }
2106 2062
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after
2219 return -1; 2175 return -1;
2220 } 2176 }
2221 2177
2222 if (AGC()) { 2178 if (AGC()) {
2223 newMicLevel = _ptrAudioBuffer->NewMicLevel(); 2179 newMicLevel = _ptrAudioBuffer->NewMicLevel();
2224 if (newMicLevel != 0) { 2180 if (newMicLevel != 0) {
2225 // The VQE will only deliver non-zero microphone levels when a 2181 // The VQE will only deliver non-zero microphone levels when a
2226 // change is needed. 2182 // change is needed.
2227 // Set this new mic level (received from the observer as return 2183 // Set this new mic level (received from the observer as return
2228 // value in the callback). 2184 // value in the callback).
2229 WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, 2185 LOG(LS_VERBOSE) << " AGC change of volume: old=" << currentMicLevel
2230 " AGC change of volume: old=%u => new=%u", currentMicLevel, 2186 << " => new=" << newMicLevel;
2231 newMicLevel);
2232 if (SetMicrophoneVolume(newMicLevel) == -1) { 2187 if (SetMicrophoneVolume(newMicLevel) == -1) {
2233 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 2188 LOG(LS_WARNING)
2234 " the required modification of the microphone " 2189 << " the required modification of the microphone volume failed";
2235 "volume failed");
2236 } 2190 }
2237 } 2191 }
2238 } 2192 }
2239 2193
2240 return 0; 2194 return 0;
2241 } 2195 }
2242 2196
2243 bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis) { 2197 bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis) {
2244 return (static_cast<AudioDeviceLinuxPulse*>(pThis)->PlayThreadProcess()); 2198 return (static_cast<AudioDeviceLinuxPulse*>(pThis)->PlayThreadProcess());
2245 } 2199 }
2246 2200
2247 bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis) { 2201 bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis) {
2248 return (static_cast<AudioDeviceLinuxPulse*>(pThis)->RecThreadProcess()); 2202 return (static_cast<AudioDeviceLinuxPulse*>(pThis)->RecThreadProcess());
2249 } 2203 }
2250 2204
2251 bool AudioDeviceLinuxPulse::PlayThreadProcess() { 2205 bool AudioDeviceLinuxPulse::PlayThreadProcess() {
2252 switch (_timeEventPlay.Wait(1000)) { 2206 switch (_timeEventPlay.Wait(1000)) {
2253 case kEventSignaled: 2207 case kEventSignaled:
2254 break; 2208 break;
2255 case kEventError: 2209 case kEventError:
2256 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 2210 LOG(LS_WARNING) << "EventWrapper::Wait() failed";
2257 "EventWrapper::Wait() failed");
2258 return true; 2211 return true;
2259 case kEventTimeout: 2212 case kEventTimeout:
2260 return true; 2213 return true;
2261 } 2214 }
2262 2215
2263 rtc::CritScope lock(&_critSect); 2216 rtc::CritScope lock(&_critSect);
2264 2217
2265 if (_startPlay) { 2218 if (_startPlay) {
2266 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, 2219 LOG(LS_VERBOSE) << "_startPlay true, performing initial actions";
2267 "_startPlay true, performing initial actions");
2268 2220
2269 _startPlay = false; 2221 _startPlay = false;
2270 _playDeviceName = NULL; 2222 _playDeviceName = NULL;
2271 2223
2272 // Set if not default device 2224 // Set if not default device
2273 if (_outputDeviceIndex > 0) { 2225 if (_outputDeviceIndex > 0) {
2274 // Get the playout device name 2226 // Get the playout device name
2275 _playDeviceName = new char[kAdmMaxDeviceNameSize]; 2227 _playDeviceName = new char[kAdmMaxDeviceNameSize];
2276 _deviceIndex = _outputDeviceIndex; 2228 _deviceIndex = _outputDeviceIndex;
2277 PlayoutDevices(); 2229 PlayoutDevices();
(...skipping 27 matching lines...) Expand all
2305 // Set the same volume for all channels 2257 // Set the same volume for all channels
2306 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream); 2258 const pa_sample_spec* spec = LATE(pa_stream_get_sample_spec)(_playStream);
2307 LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume); 2259 LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
2308 update_speaker_volume_at_startup_ = false; 2260 update_speaker_volume_at_startup_ = false;
2309 } 2261 }
2310 2262
2311 // Connect the stream to a sink 2263 // Connect the stream to a sink
2312 if (LATE(pa_stream_connect_playback)( 2264 if (LATE(pa_stream_connect_playback)(
2313 _playStream, _playDeviceName, &_playBufferAttr, 2265 _playStream, _playDeviceName, &_playBufferAttr,
2314 (pa_stream_flags_t)_playStreamFlags, ptr_cvolume, NULL) != PA_OK) { 2266 (pa_stream_flags_t)_playStreamFlags, ptr_cvolume, NULL) != PA_OK) {
2315 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 2267 LOG(LS_ERROR) << " failed to connect play stream, err="
2316 " failed to connect play stream, err=%d", 2268 << LATE(pa_context_errno)(_paContext);
2317 LATE(pa_context_errno)(_paContext));
2318 } 2269 }
2319 2270
2320 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, 2271 LOG(LS_VERBOSE) << " play stream connected";
2321 " play stream connected");
2322 2272
2323 // Wait for state change 2273 // Wait for state change
2324 while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) { 2274 while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY) {
2325 LATE(pa_threaded_mainloop_wait)(_paMainloop); 2275 LATE(pa_threaded_mainloop_wait)(_paMainloop);
2326 } 2276 }
2327 2277
2328 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " play stream ready"); 2278 LOG(LS_VERBOSE) << " play stream ready";
2329 2279
2330 // We can now handle write callbacks 2280 // We can now handle write callbacks
2331 EnableWriteCallback(); 2281 EnableWriteCallback();
2332 2282
2333 PaUnLock(); 2283 PaUnLock();
2334 2284
2335 // Clear device name 2285 // Clear device name
2336 if (_playDeviceName) { 2286 if (_playDeviceName) {
2337 delete[] _playDeviceName; 2287 delete[] _playDeviceName;
2338 _playDeviceName = NULL; 2288 _playDeviceName = NULL;
(...skipping 17 matching lines...) Expand all
2356 write = _tempBufferSpace; 2306 write = _tempBufferSpace;
2357 } 2307 }
2358 2308
2359 PaLock(); 2309 PaLock();
2360 if (LATE(pa_stream_write)( 2310 if (LATE(pa_stream_write)(
2361 _playStream, (void*)&_playBuffer[_playbackBufferUnused], write, 2311 _playStream, (void*)&_playBuffer[_playbackBufferUnused], write,
2362 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) { 2312 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
2363 _writeErrors++; 2313 _writeErrors++;
2364 if (_writeErrors > 10) { 2314 if (_writeErrors > 10) {
2365 if (_playError == 1) { 2315 if (_playError == 1) {
2366 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, 2316 LOG(LS_WARNING) << " pending playout error exists";
2367 " pending playout error exists");
2368 } 2317 }
2369 // Triggers callback from module process thread. 2318 // Triggers callback from module process thread.
2370 _playError = 1; 2319 _playError = 1;
2371 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, 2320 LOG(LS_ERROR) << " kPlayoutError message posted: _writeErrors="
2372 " kPlayoutError message posted: " 2321 << _writeErrors
2373 "_writeErrors=%u, error=%d", 2322 << ", error=" << LATE(pa_context_errno)(_paContext);
2374 _writeErrors, LATE(pa_context_errno)(_paContext));
2375 _writeErrors = 0; 2323 _writeErrors = 0;
2376 } 2324 }
2377 } 2325 }
2378 PaUnLock(); 2326 PaUnLock();
2379 2327
2380 _playbackBufferUnused += write; 2328 _playbackBufferUnused += write;
2381 _tempBufferSpace -= write; 2329 _tempBufferSpace -= write;
2382 } 2330 }
2383 2331
2384 uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels); 2332 uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
2385 // Might have been reduced to zero by the above. 2333 // Might have been reduced to zero by the above.
2386 if (_tempBufferSpace > 0) { 2334 if (_tempBufferSpace > 0) {
2387 // Ask for new PCM data to be played out using the 2335 // Ask for new PCM data to be played out using the
2388 // AudioDeviceBuffer ensure that this callback is executed 2336 // AudioDeviceBuffer ensure that this callback is executed
2389 // without taking the audio-thread lock. 2337 // without taking the audio-thread lock.
2390 UnLock(); 2338 UnLock();
2391 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " requesting data"); 2339 LOG(LS_VERBOSE) << " requesting data";
2392 uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(numPlaySamples); 2340 uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
2393 Lock(); 2341 Lock();
2394 2342
2395 // We have been unlocked - check the flag again. 2343 // We have been unlocked - check the flag again.
2396 if (!_playing) { 2344 if (!_playing) {
2397 return true; 2345 return true;
2398 } 2346 }
2399 2347
2400 nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer); 2348 nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
2401 if (nSamples != numPlaySamples) { 2349 if (nSamples != numPlaySamples) {
2402 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 2350 LOG(LS_ERROR) << " invalid number of output samples(" << nSamples
2403 " invalid number of output samples(%d)", nSamples); 2351 << ")";
2404 } 2352 }
2405 2353
2406 size_t write = _playbackBufferSize; 2354 size_t write = _playbackBufferSize;
2407 if (_tempBufferSpace < write) { 2355 if (_tempBufferSpace < write) {
2408 write = _tempBufferSpace; 2356 write = _tempBufferSpace;
2409 } 2357 }
2410 2358
2411 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " will write"); 2359 LOG(LS_VERBOSE) << " will write";
2412 PaLock(); 2360 PaLock();
2413 if (LATE(pa_stream_write)(_playStream, (void*)&_playBuffer[0], write, 2361 if (LATE(pa_stream_write)(_playStream, (void*)&_playBuffer[0], write,
2414 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) { 2362 NULL, (int64_t)0, PA_SEEK_RELATIVE) != PA_OK) {
2415 _writeErrors++; 2363 _writeErrors++;
2416 if (_writeErrors > 10) { 2364 if (_writeErrors > 10) {
2417 if (_playError == 1) { 2365 if (_playError == 1) {
2418 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, 2366 LOG(LS_WARNING) << " pending playout error exists";
2419 " pending playout error exists");
2420 } 2367 }
2421 // Triggers callback from module process thread. 2368 // Triggers callback from module process thread.
2422 _playError = 1; 2369 _playError = 1;
2423 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, 2370 LOG(LS_ERROR) << " kPlayoutError message posted: _writeErrors="
2424 " kPlayoutError message posted: " 2371 << _writeErrors
2425 "_writeErrors=%u, error=%d", 2372 << ", error=" << LATE(pa_context_errno)(_paContext);
2426 _writeErrors, LATE(pa_context_errno)(_paContext));
2427 _writeErrors = 0; 2373 _writeErrors = 0;
2428 } 2374 }
2429 } 2375 }
2430 PaUnLock(); 2376 PaUnLock();
2431 2377
2432 _playbackBufferUnused = write; 2378 _playbackBufferUnused = write;
2433 } 2379 }
2434 2380
2435 _tempBufferSpace = 0; 2381 _tempBufferSpace = 0;
2436 PaLock(); 2382 PaLock();
2437 EnableWriteCallback(); 2383 EnableWriteCallback();
2438 PaUnLock(); 2384 PaUnLock();
2439 2385
2440 } // _playing 2386 } // _playing
2441 2387
2442 return true; 2388 return true;
2443 } 2389 }
2444 2390
2445 bool AudioDeviceLinuxPulse::RecThreadProcess() { 2391 bool AudioDeviceLinuxPulse::RecThreadProcess() {
2446 switch (_timeEventRec.Wait(1000)) { 2392 switch (_timeEventRec.Wait(1000)) {
2447 case kEventSignaled: 2393 case kEventSignaled:
2448 break; 2394 break;
2449 case kEventError: 2395 case kEventError:
2450 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 2396 LOG(LS_WARNING) << "EventWrapper::Wait() failed";
2451 "EventWrapper::Wait() failed");
2452 return true; 2397 return true;
2453 case kEventTimeout: 2398 case kEventTimeout:
2454 return true; 2399 return true;
2455 } 2400 }
2456 2401
2457 rtc::CritScope lock(&_critSect); 2402 rtc::CritScope lock(&_critSect);
2458 2403
2459 if (_startRec) { 2404 if (_startRec) {
2460 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, 2405 LOG(LS_VERBOSE) << "_startRec true, performing initial actions";
2461 "_startRec true, performing initial actions");
2462 2406
2463 _recDeviceName = NULL; 2407 _recDeviceName = NULL;
2464 2408
2465 // Set if not default device 2409 // Set if not default device
2466 if (_inputDeviceIndex > 0) { 2410 if (_inputDeviceIndex > 0) {
2467 // Get the recording device name 2411 // Get the recording device name
2468 _recDeviceName = new char[kAdmMaxDeviceNameSize]; 2412 _recDeviceName = new char[kAdmMaxDeviceNameSize];
2469 _deviceIndex = _inputDeviceIndex; 2413 _deviceIndex = _inputDeviceIndex;
2470 RecordingDevices(); 2414 RecordingDevices();
2471 } 2415 }
2472 2416
2473 PaLock(); 2417 PaLock();
2474 2418
2475 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " connecting stream"); 2419 LOG(LS_VERBOSE) << " connecting stream";
2476 2420
2477 // Connect the stream to a source 2421 // Connect the stream to a source
2478 if (LATE(pa_stream_connect_record)( 2422 if (LATE(pa_stream_connect_record)(
2479 _recStream, _recDeviceName, &_recBufferAttr, 2423 _recStream, _recDeviceName, &_recBufferAttr,
2480 (pa_stream_flags_t)_recStreamFlags) != PA_OK) { 2424 (pa_stream_flags_t)_recStreamFlags) != PA_OK) {
2481 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 2425 LOG(LS_ERROR) << " failed to connect rec stream, err="
2482 " failed to connect rec stream, err=%d", 2426 << LATE(pa_context_errno)(_paContext);
2483 LATE(pa_context_errno)(_paContext));
2484 } 2427 }
2485 2428
2486 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " connected"); 2429 LOG(LS_VERBOSE) << " connected";
2487 2430
2488 // Wait for state change 2431 // Wait for state change
2489 while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) { 2432 while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY) {
2490 LATE(pa_threaded_mainloop_wait)(_paMainloop); 2433 LATE(pa_threaded_mainloop_wait)(_paMainloop);
2491 } 2434 }
2492 2435
2493 WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " done"); 2436 LOG(LS_VERBOSE) << " done";
2494 2437
2495 // We can now handle read callbacks 2438 // We can now handle read callbacks
2496 EnableReadCallback(); 2439 EnableReadCallback();
2497 2440
2498 PaUnLock(); 2441 PaUnLock();
2499 2442
2500 // Clear device name 2443 // Clear device name
2501 if (_recDeviceName) { 2444 if (_recDeviceName) {
2502 delete[] _recDeviceName; 2445 delete[] _recDeviceName;
2503 _recDeviceName = NULL; 2446 _recDeviceName = NULL;
(...skipping 12 matching lines...) Expand all
2516 return true; 2459 return true;
2517 } 2460 }
2518 2461
2519 _tempSampleData = NULL; 2462 _tempSampleData = NULL;
2520 _tempSampleDataSize = 0; 2463 _tempSampleDataSize = 0;
2521 2464
2522 PaLock(); 2465 PaLock();
2523 while (true) { 2466 while (true) {
2524 // Ack the last thing we read 2467 // Ack the last thing we read
2525 if (LATE(pa_stream_drop)(_recStream) != 0) { 2468 if (LATE(pa_stream_drop)(_recStream) != 0) {
2526 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, 2469 LOG(LS_WARNING) << " failed to drop, err="
2527 " failed to drop, err=%d\n", 2470 << LATE(pa_context_errno)(_paContext);
2528 LATE(pa_context_errno)(_paContext));
2529 } 2471 }
2530 2472
2531 if (LATE(pa_stream_readable_size)(_recStream) <= 0) { 2473 if (LATE(pa_stream_readable_size)(_recStream) <= 0) {
2532 // Then that was all the data 2474 // Then that was all the data
2533 break; 2475 break;
2534 } 2476 }
2535 2477
2536 // Else more data. 2478 // Else more data.
2537 const void* sampleData; 2479 const void* sampleData;
2538 size_t sampleDataSize; 2480 size_t sampleDataSize;
2539 2481
2540 if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) != 0) { 2482 if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize) != 0) {
2541 _recError = 1; // triggers callback from module process thread 2483 _recError = 1; // triggers callback from module process thread
2542 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, 2484 LOG(LS_ERROR) << " RECORD_ERROR message posted, error = "
2543 " RECORD_ERROR message posted, error = %d", 2485 << LATE(pa_context_errno)(_paContext);
2544 LATE(pa_context_errno)(_paContext));
2545 break; 2486 break;
2546 } 2487 }
2547 2488
2548 _sndCardRecDelay = (uint32_t)(LatencyUsecs(_recStream) / 1000); 2489 _sndCardRecDelay = (uint32_t)(LatencyUsecs(_recStream) / 1000);
2549 2490
2550 // Drop lock for sigslot dispatch, which could take a while. 2491 // Drop lock for sigslot dispatch, which could take a while.
2551 PaUnLock(); 2492 PaUnLock();
2552 // Read data and provide it to VoiceEngine 2493 // Read data and provide it to VoiceEngine
2553 if (ReadRecordedData(sampleData, sampleDataSize) == -1) { 2494 if (ReadRecordedData(sampleData, sampleDataSize) == -1) {
2554 return true; 2495 return true;
(...skipping 24 matching lines...) Expand all
2579 2520
2580 // A bit change in keymap means a key is pressed 2521 // A bit change in keymap means a key is pressed
2581 for (i = 0; i < sizeof(szKey); i++) 2522 for (i = 0; i < sizeof(szKey); i++)
2582 state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i]; 2523 state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];
2583 2524
2584 // Save old state 2525 // Save old state
2585 memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState)); 2526 memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
2586 return (state != 0); 2527 return (state != 0);
2587 } 2528 }
2588 } // namespace webrtc 2529 } // namespace webrtc
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698