| Index: webrtc/modules/audio_device/mac/audio_device_mac.cc
|
| diff --git a/webrtc/modules/audio_device/mac/audio_device_mac.cc b/webrtc/modules/audio_device/mac/audio_device_mac.cc
|
| index e40d23c725844367a9de8dce19b432680f5d5792..59906a87458dfb8f338d946e9447a9bbdfd144ca 100644
|
| --- a/webrtc/modules/audio_device/mac/audio_device_mac.cc
|
| +++ b/webrtc/modules/audio_device/mac/audio_device_mac.cc
|
| @@ -13,10 +13,8 @@
|
| #include "webrtc/modules/audio_device/mac/portaudio/pa_ringbuffer.h"
|
| #include "webrtc/rtc_base/arraysize.h"
|
| #include "webrtc/rtc_base/checks.h"
|
| -#include "webrtc/rtc_base/logging.h"
|
| #include "webrtc/rtc_base/platform_thread.h"
|
| #include "webrtc/system_wrappers/include/event_wrapper.h"
|
| -#include "webrtc/system_wrappers/include/trace.h"
|
|
|
| #include <ApplicationServices/ApplicationServices.h>
|
| #include <libkern/OSAtomic.h> // OSAtomicCompareAndSwap()
|
| @@ -29,7 +27,7 @@ namespace webrtc {
|
| do { \
|
| err = expr; \
|
| if (err != noErr) { \
|
| - logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \
|
| + logCAMsg(rtc::LS_ERROR, "Error in " #expr, \
|
| (const char*) & err); \
|
| return -1; \
|
| } \
|
| @@ -39,7 +37,7 @@ namespace webrtc {
|
| do { \
|
| err = expr; \
|
| if (err != noErr) { \
|
| - logCAMsg(kTraceError, kTraceAudioDevice, _id, "Error in " #expr, \
|
| + logCAMsg(rtc::LS_ERROR, "Error in " #expr, \
|
| (const char*) & err); \
|
| } \
|
| } while (0)
|
| @@ -48,7 +46,7 @@ namespace webrtc {
|
| do { \
|
| err = expr; \
|
| if (err != noErr) { \
|
| - logCAMsg(kTraceWarning, kTraceAudioDevice, _id, "Error in " #expr, \
|
| + logCAMsg(rtc::LS_WARNING, "Error in " #expr, \
|
| (const char*) & err); \
|
| } \
|
| } while (0)
|
| @@ -74,29 +72,49 @@ int32_t AudioDeviceMac::AtomicGet32(int32_t* theValue) {
|
| }
|
|
|
| // CoreAudio errors are best interpreted as four character strings.
|
| -void AudioDeviceMac::logCAMsg(const TraceLevel level,
|
| - const TraceModule module,
|
| - const int32_t id,
|
| +void AudioDeviceMac::logCAMsg(const rtc::LoggingSeverity sev,
|
| const char* msg,
|
| const char* err) {
|
| RTC_DCHECK(msg != NULL);
|
| RTC_DCHECK(err != NULL);
|
|
|
| #ifdef WEBRTC_ARCH_BIG_ENDIAN
|
| - WEBRTC_TRACE(level, module, id, "%s: %.4s", msg, err);
|
| + switch (sev) {
|
| + case rtc::LS_ERROR:
|
| + LOG(LS_ERROR) << msg << ": " << err[0] << err[1] << err[2] << err[3];
|
| + break;
|
| + case rtc::LS_WARNING:
|
| + LOG(LS_WARNING) << msg << ": " << err[0] << err[1] << err[2] << err[3];
|
| + break;
|
| + case rtc::LS_VERBOSE:
|
| + LOG(LS_VERBOSE) << msg << ": " << err[0] << err[1] << err[2] << err[3];
|
| + break;
|
| + default:
|
| + break;
|
| + }
|
| #else
|
| // We need to flip the characters in this case.
|
| - WEBRTC_TRACE(level, module, id, "%s: %.1s%.1s%.1s%.1s", msg, err + 3, err + 2,
|
| - err + 1, err);
|
| + switch (sev) {
|
| + case rtc::LS_ERROR:
|
| + LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0];
|
| + break;
|
| + case rtc::LS_WARNING:
|
| + LOG(LS_WARNING) << msg << ": " << err[3] << err[2] << err[1] << err[0];
|
| + break;
|
| + case rtc::LS_VERBOSE:
|
| + LOG(LS_VERBOSE) << msg << ": " << err[3] << err[2] << err[1] << err[0];
|
| + break;
|
| + default:
|
| + break;
|
| + }
|
| #endif
|
| }
|
|
|
| -AudioDeviceMac::AudioDeviceMac(const int32_t id)
|
| +AudioDeviceMac::AudioDeviceMac()
|
| : _ptrAudioBuffer(NULL),
|
| _stopEventRec(*EventWrapper::Create()),
|
| _stopEvent(*EventWrapper::Create()),
|
| - _id(id),
|
| - _mixerManager(id),
|
| + _mixerManager(),
|
| _inputDeviceIndex(0),
|
| _outputDeviceIndex(0),
|
| _inputDeviceID(kAudioObjectUnknown),
|
| @@ -138,7 +156,7 @@ AudioDeviceMac::AudioDeviceMac(const int32_t id)
|
| _renderBufSizeSamples(0),
|
| prev_key_state_(),
|
| get_mic_volume_counter_ms_(0) {
|
| - WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__);
|
| + LOG(LS_INFO) << __FUNCTION__ << " created";
|
|
|
| RTC_DCHECK(&_stopEvent != NULL);
|
| RTC_DCHECK(&_stopEventRec != NULL);
|
| @@ -151,8 +169,7 @@ AudioDeviceMac::AudioDeviceMac(const int32_t id)
|
| }
|
|
|
| AudioDeviceMac::~AudioDeviceMac() {
|
| - WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed",
|
| - __FUNCTION__);
|
| + LOG(LS_INFO) << __FUNCTION__ << " destroyed";
|
|
|
| if (!_isShutDown) {
|
| Terminate();
|
| @@ -184,14 +201,12 @@ AudioDeviceMac::~AudioDeviceMac() {
|
| kern_return_t kernErr = KERN_SUCCESS;
|
| kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore);
|
| if (kernErr != KERN_SUCCESS) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " semaphore_destroy() error: %d", kernErr);
|
| + LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr;
|
| }
|
|
|
| kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore);
|
| if (kernErr != KERN_SUCCESS) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " semaphore_destroy() error: %d", kernErr);
|
| + LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr;
|
| }
|
|
|
| delete &_stopEvent;
|
| @@ -247,8 +262,7 @@ AudioDeviceGeneric::InitStatus AudioDeviceMac::Init() {
|
| bufSize = PaUtil_InitializeRingBuffer(
|
| _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData);
|
| if (bufSize == -1) {
|
| - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
|
| - " PaUtil_InitializeRingBuffer() error");
|
| + LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error";
|
| return InitStatus::PLAYOUT_ERROR;
|
| }
|
| }
|
| @@ -269,8 +283,7 @@ AudioDeviceGeneric::InitStatus AudioDeviceMac::Init() {
|
| PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32),
|
| _captureBufSizeSamples, _captureBufData);
|
| if (bufSize == -1) {
|
| - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
|
| - " PaUtil_InitializeRingBuffer() error");
|
| + LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error";
|
| return InitStatus::RECORDING_ERROR;
|
| }
|
| }
|
| @@ -279,16 +292,14 @@ AudioDeviceGeneric::InitStatus AudioDeviceMac::Init() {
|
| kernErr = semaphore_create(mach_task_self(), &_renderSemaphore,
|
| SYNC_POLICY_FIFO, 0);
|
| if (kernErr != KERN_SUCCESS) {
|
| - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
|
| - " semaphore_create() error: %d", kernErr);
|
| + LOG(LS_ERROR) << "semaphore_create() error: " << kernErr;
|
| return InitStatus::OTHER_ERROR;
|
| }
|
|
|
| kernErr = semaphore_create(mach_task_self(), &_captureSemaphore,
|
| SYNC_POLICY_FIFO, 0);
|
| if (kernErr != KERN_SUCCESS) {
|
| - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
|
| - " semaphore_create() error: %d", kernErr);
|
| + LOG(LS_ERROR) << "semaphore_create() error: " << kernErr;
|
| return InitStatus::OTHER_ERROR;
|
| }
|
|
|
| @@ -323,11 +334,9 @@ AudioDeviceGeneric::InitStatus AudioDeviceMac::Init() {
|
|
|
| int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0);
|
| if (intErr != 0) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Error in sysctlbyname(): %d", err);
|
| + LOG(LS_ERROR) << "Error in sysctlbyname(): " << err;
|
| } else {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Hardware model: %s",
|
| - buf);
|
| + LOG(LS_VERBOSE) << "Hardware model: " << buf;
|
| if (strncmp(buf, "MacBookPro", 10) == 0) {
|
| _macBookPro = true;
|
| }
|
| @@ -351,14 +360,12 @@ int32_t AudioDeviceMac::Terminate() {
|
| }
|
|
|
| if (_recording) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Recording must be stopped");
|
| + LOG(LS_ERROR) << "Recording must be stopped";
|
| return -1;
|
| }
|
|
|
| if (_playing) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Playback must be stopped");
|
| + LOG(LS_ERROR) << "Playback must be stopped";
|
| return -1;
|
| }
|
|
|
| @@ -377,7 +384,7 @@ int32_t AudioDeviceMac::Terminate() {
|
|
|
| err = AudioHardwareUnload();
|
| if (err != noErr) {
|
| - logCAMsg(kTraceError, kTraceAudioDevice, _id,
|
| + logCAMsg(rtc::LS_ERROR,
|
| "Error in AudioHardwareUnload()", (const char*)&err);
|
| retVal = -1;
|
| }
|
| @@ -543,15 +550,13 @@ int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const {
|
|
|
| int32_t AudioDeviceMac::SetWaveOutVolume(uint16_t volumeLeft,
|
| uint16_t volumeRight) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| + LOG(LS_WARNING) << "API call not supported on this platform";
|
| return -1;
|
| }
|
|
|
| int32_t AudioDeviceMac::WaveOutVolume(uint16_t& /*volumeLeft*/,
|
| uint16_t& /*volumeRight*/) const {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| + LOG(LS_WARNING) << "API call not supported on this platform";
|
| return -1;
|
| }
|
|
|
| @@ -850,8 +855,7 @@ int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const {
|
| uint32_t level(0);
|
|
|
| if (_mixerManager.MicrophoneVolume(level) == -1) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " failed to retrive current microphone level");
|
| + LOG(LS_WARNING) << "failed to retrieve current microphone level";
|
| return -1;
|
| }
|
|
|
| @@ -908,13 +912,12 @@ int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) {
|
| AudioDeviceID playDevices[MaxNumberDevices];
|
| uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput,
|
| playDevices, MaxNumberDevices);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " number of availiable waveform-audio output devices is %u",
|
| - nDevices);
|
| + LOG(LS_VERBOSE) << "number of available waveform-audio output devices is "
|
| + << nDevices;
|
|
|
| if (index > (nDevices - 1)) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " device index is out of range [0,%u]", (nDevices - 1));
|
| + LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
|
| + << "]";
|
| return -1;
|
| }
|
|
|
| @@ -926,8 +929,7 @@ int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) {
|
|
|
| int32_t AudioDeviceMac::SetPlayoutDevice(
|
| AudioDeviceModule::WindowsDeviceType /*device*/) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "WindowsDeviceType not supported");
|
| + LOG(LS_ERROR) << "WindowsDeviceType not supported";
|
| return -1;
|
| }
|
|
|
| @@ -981,13 +983,12 @@ int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) {
|
| AudioDeviceID recDevices[MaxNumberDevices];
|
| uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput,
|
| recDevices, MaxNumberDevices);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " number of availiable waveform-audio input devices is %u",
|
| - nDevices);
|
| + LOG(LS_VERBOSE) << "number of available waveform-audio input devices is "
|
| + << nDevices;
|
|
|
| if (index > (nDevices - 1)) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " device index is out of range [0,%u]", (nDevices - 1));
|
| + LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1)
|
| + << "]";
|
| return -1;
|
| }
|
|
|
| @@ -999,8 +1000,7 @@ int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) {
|
|
|
| int32_t AudioDeviceMac::SetRecordingDevice(
|
| AudioDeviceModule::WindowsDeviceType /*device*/) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "WindowsDeviceType not supported");
|
| + LOG(LS_ERROR) << "WindowsDeviceType not supported";
|
| return -1;
|
| }
|
|
|
| @@ -1067,8 +1067,7 @@ int32_t AudioDeviceMac::InitPlayout() {
|
|
|
| // Initialize the speaker (devices might have been added or removed)
|
| if (InitSpeaker() == -1) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " InitSpeaker() failed");
|
| + LOG(LS_WARNING) << "InitSpeaker() failed";
|
| }
|
|
|
| if (!MicrophoneIsInitialized()) {
|
| @@ -1076,8 +1075,7 @@ int32_t AudioDeviceMac::InitPlayout() {
|
| // one or two devices (_twoDevices)
|
| bool available = false;
|
| if (MicrophoneIsAvailable(available) == -1) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " MicrophoneIsAvailable() failed");
|
| + LOG(LS_WARNING) << "MicrophoneIsAvailable() failed";
|
| }
|
| }
|
|
|
| @@ -1108,12 +1106,10 @@ int32_t AudioDeviceMac::InitPlayout() {
|
|
|
| if (dataSource == 'ispk') {
|
| _macBookProPanRight = true;
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "MacBook Pro using internal speakers; stereo"
|
| - " panning right");
|
| + LOG(LS_VERBOSE)
|
| + << "MacBook Pro using internal speakers; stereo panning right";
|
| } else {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "MacBook Pro not using internal speakers");
|
| + LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
|
| }
|
|
|
| // Add a listener to determine if the status changes.
|
| @@ -1130,50 +1126,42 @@ int32_t AudioDeviceMac::InitPlayout() {
|
| _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat));
|
|
|
| if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) {
|
| - logCAMsg(kTraceError, kTraceAudioDevice, _id,
|
| + logCAMsg(rtc::LS_ERROR,
|
| "Unacceptable output stream format -> mFormatID",
|
| (const char*)&_outStreamFormat.mFormatID);
|
| return -1;
|
| }
|
|
|
| if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "Too many channels on output device (mChannelsPerFrame = %d)",
|
| - _outStreamFormat.mChannelsPerFrame);
|
| + LOG(LS_ERROR) << "Too many channels on output device (mChannelsPerFrame = "
|
| + << _outStreamFormat.mChannelsPerFrame << ")";
|
| return -1;
|
| }
|
|
|
| if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "Non-interleaved audio data is not supported.",
|
| - "AudioHardware streams should not have this format.");
|
| + LOG(LS_ERROR) << "Non-interleaved audio data is not supported."
|
| + << "AudioHardware streams should not have this format.";
|
| return -1;
|
| }
|
|
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Ouput stream format:");
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "mSampleRate = %f, mChannelsPerFrame = %u",
|
| - _outStreamFormat.mSampleRate,
|
| - _outStreamFormat.mChannelsPerFrame);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "mBytesPerPacket = %u, mFramesPerPacket = %u",
|
| - _outStreamFormat.mBytesPerPacket,
|
| - _outStreamFormat.mFramesPerPacket);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "mBytesPerFrame = %u, mBitsPerChannel = %u",
|
| - _outStreamFormat.mBytesPerFrame,
|
| - _outStreamFormat.mBitsPerChannel);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "mFormatFlags = %u",
|
| - _outStreamFormat.mFormatFlags);
|
| - logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
|
| + LOG(LS_VERBOSE) << "Ouput stream format:";
|
| + LOG(LS_VERBOSE) << "mSampleRate = " << _outStreamFormat.mSampleRate
|
| + << ", mChannelsPerFrame = "
|
| + << _outStreamFormat.mChannelsPerFrame;
|
| + LOG(LS_VERBOSE) << "mBytesPerPacket = " << _outStreamFormat.mBytesPerPacket
|
| + << ", mFramesPerPacket = "
|
| + << _outStreamFormat.mFramesPerPacket;
|
| + LOG(LS_VERBOSE) << "mBytesPerFrame = " << _outStreamFormat.mBytesPerFrame
|
| + << ", mBitsPerChannel = " << _outStreamFormat.mBitsPerChannel;
|
| + LOG(LS_VERBOSE) << "mFormatFlags = " << _outStreamFormat.mFormatFlags;
|
| + logCAMsg(rtc::LS_VERBOSE, "mFormatID",
|
| (const char*)&_outStreamFormat.mFormatID);
|
|
|
| // Our preferred format to work with.
|
| if (_outStreamFormat.mChannelsPerFrame < 2) {
|
| // Disable stereo playout when we only have one channel on the device.
|
| _playChannels = 1;
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "Stereo playout unavailable on this device");
|
| + LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
|
| }
|
| WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
|
|
|
| @@ -1214,8 +1202,7 @@ int32_t AudioDeviceMac::InitRecording() {
|
|
|
| // Initialize the microphone (devices might have been added or removed)
|
| if (InitMicrophone() == -1) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " InitMicrophone() failed");
|
| + LOG(LS_WARNING) << "InitMicrophone() failed";
|
| }
|
|
|
| if (!SpeakerIsInitialized()) {
|
| @@ -1223,8 +1210,7 @@ int32_t AudioDeviceMac::InitRecording() {
|
| // one or two devices (_twoDevices)
|
| bool available = false;
|
| if (SpeakerIsAvailable(available) == -1) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " SpeakerIsAvailable() failed");
|
| + LOG(LS_WARNING) << "SpeakerIsAvailable() failed";
|
| }
|
| }
|
|
|
| @@ -1247,16 +1233,15 @@ int32_t AudioDeviceMac::InitRecording() {
|
| _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));
|
|
|
| if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
|
| - logCAMsg(kTraceError, kTraceAudioDevice, _id,
|
| + logCAMsg(rtc::LS_ERROR,
|
| "Unacceptable input stream format -> mFormatID",
|
| (const char*)&_inStreamFormat.mFormatID);
|
| return -1;
|
| }
|
|
|
| if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "Too many channels on input device (mChannelsPerFrame = %d)",
|
| - _inStreamFormat.mChannelsPerFrame);
|
| + LOG(LS_ERROR) << "Too many channels on input device (mChannelsPerFrame = "
|
| + << _inStreamFormat.mChannelsPerFrame << ")";
|
| return -1;
|
| }
|
|
|
| @@ -1264,26 +1249,23 @@ int32_t AudioDeviceMac::InitRecording() {
|
| _inStreamFormat.mSampleRate / 100 *
|
| N_BLOCKS_IO;
|
| if (io_block_size_samples > _captureBufSizeSamples) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "Input IO block size (%d) is larger than ring buffer (%u)",
|
| - io_block_size_samples, _captureBufSizeSamples);
|
| + LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
|
| + << ") is larger than ring buffer (" << _captureBufSizeSamples
|
| + << ")";
|
| return -1;
|
| }
|
|
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Input stream format:");
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " mSampleRate = %f, mChannelsPerFrame = %u",
|
| - _inStreamFormat.mSampleRate, _inStreamFormat.mChannelsPerFrame);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " mBytesPerPacket = %u, mFramesPerPacket = %u",
|
| - _inStreamFormat.mBytesPerPacket,
|
| - _inStreamFormat.mFramesPerPacket);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " mBytesPerFrame = %u, mBitsPerChannel = %u",
|
| - _inStreamFormat.mBytesPerFrame, _inStreamFormat.mBitsPerChannel);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " mFormatFlags = %u",
|
| - _inStreamFormat.mFormatFlags);
|
| - logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
|
| + LOG(LS_VERBOSE) << "Input stream format:";
|
| + LOG(LS_VERBOSE) << "mSampleRate = " << _inStreamFormat.mSampleRate
|
| + << ", mChannelsPerFrame = "
|
| + << _inStreamFormat.mChannelsPerFrame;
|
| + LOG(LS_VERBOSE) << "mBytesPerPacket = " << _inStreamFormat.mBytesPerPacket
|
| + << ", mFramesPerPacket = "
|
| + << _inStreamFormat.mFramesPerPacket;
|
| + LOG(LS_VERBOSE) << "mBytesPerFrame = " << _inStreamFormat.mBytesPerFrame
|
| + << ", mBitsPerChannel = " << _inStreamFormat.mBitsPerChannel;
|
| + LOG(LS_VERBOSE) << "mFormatFlags = " << _inStreamFormat.mFormatFlags;
|
| + logCAMsg(rtc::LS_VERBOSE, "mFormatID",
|
| (const char*)&_inStreamFormat.mFormatID);
|
|
|
| // Our preferred format to work with
|
| @@ -1293,8 +1275,7 @@ int32_t AudioDeviceMac::InitRecording() {
|
| // Disable stereo recording when we only have one channel on the device.
|
| _inDesiredFormat.mChannelsPerFrame = 1;
|
| _recChannels = 1;
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "Stereo recording unavailable on this device");
|
| + LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
|
| }
|
|
|
| if (_ptrAudioBuffer) {
|
| @@ -1411,8 +1392,7 @@ int32_t AudioDeviceMac::StartRecording() {
|
| }
|
|
|
| if (!_initialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Recording worker thread has not been started");
|
| + LOG(LS_ERROR) << "Recording worker thread has not been started";
|
| return -1;
|
| }
|
|
|
| @@ -1454,9 +1434,9 @@ int32_t AudioDeviceMac::StopRecording() {
|
| _critSect.Leave(); // Cannot be under lock, risk of deadlock
|
| if (kEventTimeout == _stopEventRec.Wait(2000)) {
|
| rtc::CritScope critScoped(&_critSect);
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Timed out stopping the capture IOProc. "
|
| - "We may have failed to detect a device removal.");
|
| + LOG(LS_WARNING)
|
| + << "Timed out stopping the capture IOProc."
|
| + << "We may have failed to detect a device removal.";
|
|
|
| WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
|
| WEBRTC_CA_LOG_WARN(
|
| @@ -1464,7 +1444,7 @@ int32_t AudioDeviceMac::StopRecording() {
|
| }
|
| _critSect.Enter();
|
| _doStopRec = false;
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, " Recording stopped");
|
| + LOG(LS_VERBOSE) << "Recording stopped";
|
| }
|
| } else {
|
| // We signal a stop for a shared device even when rendering has
|
| @@ -1480,9 +1460,9 @@ int32_t AudioDeviceMac::StopRecording() {
|
| _critSect.Leave(); // Cannot be under lock, risk of deadlock
|
| if (kEventTimeout == _stopEvent.Wait(2000)) {
|
| rtc::CritScope critScoped(&_critSect);
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Timed out stopping the shared IOProc. "
|
| - "We may have failed to detect a device removal.");
|
| + LOG(LS_WARNING)
|
| + << "Timed out stopping the shared IOProc."
|
| + << "We may have failed to detect a device removal.";
|
|
|
| // We assume rendering on a shared device has stopped as well if
|
| // the IOProc times out.
|
| @@ -1492,8 +1472,7 @@ int32_t AudioDeviceMac::StopRecording() {
|
| }
|
| _critSect.Enter();
|
| _doStop = false;
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
|
| - " Recording stopped (shared)");
|
| + LOG(LS_VERBOSE) << "Recording stopped (shared)";
|
| }
|
| }
|
|
|
| @@ -1586,9 +1565,9 @@ int32_t AudioDeviceMac::StopPlayout() {
|
| _critSect.Leave(); // Cannot be under lock, risk of deadlock
|
| if (kEventTimeout == _stopEvent.Wait(2000)) {
|
| rtc::CritScope critScoped(&_critSect);
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Timed out stopping the render IOProc. "
|
| - "We may have failed to detect a device removal.");
|
| + LOG(LS_WARNING)
|
| + << "Timed out stopping the render IOProc."
|
| + << "We may have failed to detect a device removal.";
|
|
|
| // We assume capturing on a shared device has stopped as well if the
|
| // IOProc times out.
|
| @@ -1598,7 +1577,7 @@ int32_t AudioDeviceMac::StopPlayout() {
|
| }
|
| _critSect.Enter();
|
| _doStop = false;
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Playout stopped");
|
| + LOG(LS_VERBOSE) << "Playout stopped";
|
| }
|
|
|
| // Setting this signal will allow the worker thread to be stopped.
|
| @@ -1660,8 +1639,7 @@ int32_t AudioDeviceMac::SetPlayoutBuffer(
|
| const AudioDeviceModule::BufferType type,
|
| uint16_t sizeMS) {
|
| if (type != AudioDeviceModule::kFixedBufferSize) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Adaptive buffer size not supported on this platform");
|
| + LOG(LS_ERROR) << "Adaptive buffer size not supported on this platform";
|
| return -1;
|
| }
|
|
|
| @@ -1680,8 +1658,7 @@ int32_t AudioDeviceMac::PlayoutBuffer(AudioDeviceModule::BufferType& type,
|
|
|
| // Not implemented for Mac.
|
| int32_t AudioDeviceMac::CPULoad(uint16_t& /*load*/) const {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| + LOG(LS_WARNING) << "API call not supported on this platform";
|
|
|
| return -1;
|
| }
|
| @@ -1734,7 +1711,7 @@ int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
|
| WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize(
|
| kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size));
|
| if (size == 0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "No devices");
|
| + LOG(LS_WARNING) << "No devices";
|
| return 0;
|
| }
|
|
|
| @@ -1764,8 +1741,7 @@ int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
|
| scopedDeviceIds[numberScopedDevices] = usedID;
|
| numberScopedDevices++;
|
| } else {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - "GetNumberDevices(): Default device unknown");
|
| + LOG(LS_WARNING) << "GetNumberDevices(): Default device unknown";
|
| }
|
|
|
| // Then list the rest of the devices
|
| @@ -1801,8 +1777,7 @@ int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope,
|
|
|
| if (bufferList->mNumberBuffers > 0) {
|
| if (numberScopedDevices >= deviceListLength) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "Device list is not long enough");
|
| + LOG(LS_ERROR) << "Device list is not long enough";
|
| listOK = false;
|
| break;
|
| }
|
| @@ -1850,7 +1825,7 @@ int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
|
| if (numberDevices < 0) {
|
| return -1;
|
| } else if (numberDevices == 0) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "No devices");
|
| + LOG(LS_ERROR) << "No devices";
|
| return -1;
|
| }
|
|
|
| @@ -1874,8 +1849,7 @@ int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
|
| WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
|
| kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID));
|
| if (usedID == kAudioDeviceUnknown) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - "GetDeviceName(): Default device unknown");
|
| + LOG(LS_WARNING) << "GetDeviceName(): Default device unknown";
|
| } else {
|
| isDefaultDevice = true;
|
| }
|
| @@ -1932,8 +1906,7 @@ int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
|
| if (numberDevices < 0) {
|
| return -1;
|
| } else if (numberDevices == 0) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "InitDevice(): No devices");
|
| + LOG(LS_ERROR) << "InitDevice(): No devices";
|
| return -1;
|
| }
|
|
|
| @@ -1945,8 +1918,7 @@ int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
|
| WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
|
| kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId));
|
| if (deviceId == kAudioDeviceUnknown) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " No default device exists");
|
| + LOG(LS_WARNING) << "No default device exists";
|
| } else {
|
| isDefaultDevice = true;
|
| }
|
| @@ -1976,11 +1948,9 @@ int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
|
| 0, NULL, &size, devManf));
|
|
|
| if (isInput) {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Input device: %s %s",
|
| - devManf, devName);
|
| + LOG(LS_VERBOSE) << "Input device: " << devManf << " " << devName;
|
| } else {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " Output device: %s %s",
|
| - devManf, devName);
|
| + LOG(LS_VERBOSE) << "Output device: " << devManf << " " << devName;
|
| }
|
|
|
| return 0;
|
| @@ -2075,10 +2045,10 @@ OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() {
|
| _renderLatencyUs +=
|
| static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);
|
|
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " initial playout status: _renderDelayOffsetSamples=%d,"
|
| - " _renderDelayUs=%d, _renderLatencyUs=%d",
|
| - _renderDelayOffsetSamples, _renderDelayUs, _renderLatencyUs);
|
| + LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples="
|
| + << _renderDelayOffsetSamples << ", _renderDelayUs="
|
| + << _renderDelayUs << ", _renderLatencyUs="
|
| + << _renderLatencyUs;
|
| return 0;
|
| }
|
|
|
| @@ -2100,8 +2070,7 @@ OSStatus AudioDeviceMac::implObjectListenerProc(
|
| const AudioObjectID objectId,
|
| const UInt32 numberAddresses,
|
| const AudioObjectPropertyAddress addresses[]) {
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
|
| - "AudioDeviceMac::implObjectListenerProc()");
|
| + LOG(LS_VERBOSE) << "AudioDeviceMac::implObjectListenerProc()";
|
|
|
| for (UInt32 i = 0; i < numberAddresses; i++) {
|
| if (addresses[i].mSelector == kAudioHardwarePropertyDevices) {
|
| @@ -2121,8 +2090,7 @@ OSStatus AudioDeviceMac::implObjectListenerProc(
|
| int32_t AudioDeviceMac::HandleDeviceChange() {
|
| OSStatus err = noErr;
|
|
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
|
| - "kAudioHardwarePropertyDevices");
|
| + LOG(LS_VERBOSE) << "kAudioHardwarePropertyDevices";
|
|
|
| // A device has changed. Check if our registered devices have been removed.
|
| // Ensure the devices have been initialized, meaning the IDs are valid.
|
| @@ -2135,17 +2103,15 @@ int32_t AudioDeviceMac::HandleDeviceChange() {
|
| &size, &deviceIsAlive);
|
|
|
| if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - "Capture device is not alive (probably removed)");
|
| + LOG(LS_WARNING) << "Capture device is not alive (probably removed)";
|
| AtomicSet32(&_captureDeviceIsAlive, 0);
|
| _mixerManager.CloseMicrophone();
|
| if (_recError == 1) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " pending recording error exists");
|
| + LOG(LS_WARNING) << "pending recording error exists";
|
| }
|
| _recError = 1; // triggers callback from module process thread
|
| } else if (err != noErr) {
|
| - logCAMsg(kTraceError, kTraceAudioDevice, _id,
|
| + logCAMsg(rtc::LS_ERROR,
|
| "Error in AudioDeviceGetProperty()", (const char*)&err);
|
| return -1;
|
| }
|
| @@ -2160,17 +2126,15 @@ int32_t AudioDeviceMac::HandleDeviceChange() {
|
| &size, &deviceIsAlive);
|
|
|
| if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - "Render device is not alive (probably removed)");
|
| + LOG(LS_WARNING) << "Render device is not alive (probably removed)";
|
| AtomicSet32(&_renderDeviceIsAlive, 0);
|
| _mixerManager.CloseSpeaker();
|
| if (_playError == 1) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " pending playout error exists");
|
| + LOG(LS_WARNING) << "pending playout error exists";
|
| }
|
| _playError = 1; // triggers callback from module process thread
|
| } else if (err != noErr) {
|
| - logCAMsg(kTraceError, kTraceAudioDevice, _id,
|
| + logCAMsg(rtc::LS_ERROR,
|
| "Error in AudioDeviceGetProperty()", (const char*)&err);
|
| return -1;
|
| }
|
| @@ -2184,7 +2148,7 @@ int32_t AudioDeviceMac::HandleStreamFormatChange(
|
| const AudioObjectPropertyAddress propertyAddress) {
|
| OSStatus err = noErr;
|
|
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Stream format changed");
|
| + LOG(LS_VERBOSE) << "Stream format changed";
|
|
|
| if (objectId != _inputDeviceID && objectId != _outputDeviceID) {
|
| return 0;
|
| @@ -2197,32 +2161,27 @@ int32_t AudioDeviceMac::HandleStreamFormatChange(
|
| objectId, &propertyAddress, 0, NULL, &size, &streamFormat));
|
|
|
| if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
|
| - logCAMsg(kTraceError, kTraceAudioDevice, _id,
|
| + logCAMsg(rtc::LS_ERROR,
|
| "Unacceptable input stream format -> mFormatID",
|
| (const char*)&streamFormat.mFormatID);
|
| return -1;
|
| }
|
|
|
| if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "Too many channels on device (mChannelsPerFrame = %d)",
|
| - streamFormat.mChannelsPerFrame);
|
| + LOG(LS_ERROR) << "Too many channels on device (mChannelsPerFrame = "
|
| + << streamFormat.mChannelsPerFrame << ")";
|
| return -1;
|
| }
|
|
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Stream format:");
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "mSampleRate = %f, mChannelsPerFrame = %u",
|
| - streamFormat.mSampleRate, streamFormat.mChannelsPerFrame);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "mBytesPerPacket = %u, mFramesPerPacket = %u",
|
| - streamFormat.mBytesPerPacket, streamFormat.mFramesPerPacket);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "mBytesPerFrame = %u, mBitsPerChannel = %u",
|
| - streamFormat.mBytesPerFrame, streamFormat.mBitsPerChannel);
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "mFormatFlags = %u",
|
| - streamFormat.mFormatFlags);
|
| - logCAMsg(kTraceInfo, kTraceAudioDevice, _id, "mFormatID",
|
| + LOG(LS_VERBOSE) << "Stream format:";
|
| + LOG(LS_VERBOSE) << "mSampleRate = " << streamFormat.mSampleRate
|
| + << ", mChannelsPerFrame = " << streamFormat.mChannelsPerFrame;
|
| + LOG(LS_VERBOSE) << "mBytesPerPacket = " << streamFormat.mBytesPerPacket
|
| + << ", mFramesPerPacket = " << streamFormat.mFramesPerPacket;
|
| + LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame
|
| + << ", mBitsPerChannel = " << streamFormat.mBitsPerChannel;
|
| + LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags;
|
| + logCAMsg(rtc::LS_VERBOSE, "mFormatID",
|
| (const char*)&streamFormat.mFormatID);
|
|
|
| if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
|
| @@ -2230,9 +2189,9 @@ int32_t AudioDeviceMac::HandleStreamFormatChange(
|
| streamFormat.mSampleRate / 100 *
|
| N_BLOCKS_IO;
|
| if (io_block_size_samples > _captureBufSizeSamples) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "Input IO block size (%d) is larger than ring buffer (%u)",
|
| - io_block_size_samples, _captureBufSizeSamples);
|
| + LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
|
| + << ") is larger than ring buffer ("
|
| + << _captureBufSizeSamples << ")";
|
| return -1;
|
| }
|
|
|
| @@ -2244,8 +2203,7 @@ int32_t AudioDeviceMac::HandleStreamFormatChange(
|
| // Disable stereo recording when we only have one channel on the device.
|
| _inDesiredFormat.mChannelsPerFrame = 1;
|
| _recChannels = 1;
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "Stereo recording unavailable on this device");
|
| + LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
|
| }
|
|
|
| if (_ptrAudioBuffer) {
|
| @@ -2266,8 +2224,7 @@ int32_t AudioDeviceMac::HandleStreamFormatChange(
|
| // Our preferred format to work with
|
| if (_outStreamFormat.mChannelsPerFrame < 2) {
|
| _playChannels = 1;
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "Stereo playout unavailable on this device");
|
| + LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
|
| }
|
| WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
|
| }
|
| @@ -2281,7 +2238,7 @@ int32_t AudioDeviceMac::HandleDataSourceChange(
|
|
|
| if (_macBookPro &&
|
| propertyAddress.mScope == kAudioDevicePropertyScopeOutput) {
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id, "Data source changed");
|
| + LOG(LS_VERBOSE) << "Data source changed";
|
|
|
| _macBookProPanRight = false;
|
| UInt32 dataSource = 0;
|
| @@ -2290,11 +2247,10 @@ int32_t AudioDeviceMac::HandleDataSourceChange(
|
| objectId, &propertyAddress, 0, NULL, &size, &dataSource));
|
| if (dataSource == 'ispk') {
|
| _macBookProPanRight = true;
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "MacBook Pro using internal speakers; stereo panning right");
|
| + LOG(LS_VERBOSE)
|
| + << "MacBook Pro using internal speakers; stereo panning right";
|
| } else {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "MacBook Pro not using internal speakers");
|
| + LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
|
| }
|
| }
|
|
|
| @@ -2309,13 +2265,11 @@ int32_t AudioDeviceMac::HandleProcessorOverload(
|
| // We don't log the notification, as it's sent from the HAL's IO thread. We
|
| // don't want to slow it down even further.
|
| if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
|
| - // WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "Capture processor
|
| - // overload");
|
| + // LOG(LS_WARNING) << "Capture processor // overload";
|
| //_callback->ProblemIsReported(
|
| // SndCardStreamObserver::ERecordingProblem);
|
| } else {
|
| - // WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - // "Render processor overload");
|
| + // LOG(LS_WARNING) << "Render processor overload";
|
| //_callback->ProblemIsReported(
|
| // SndCardStreamObserver::EPlaybackProblem);
|
| }
|
| @@ -2406,8 +2360,7 @@ OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
|
| WEBRTC_CA_LOG_WARN(
|
| AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
|
| if (err == noErr) {
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
|
| - " Playout or shared device stopped");
|
| + LOG(LS_VERBOSE) << "Playout or shared device stopped";
|
| }
|
| }
|
|
|
| @@ -2436,11 +2389,10 @@ OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
|
| if (err != noErr) {
|
| if (err == 1) {
|
| // This is our own error.
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Error in AudioConverterFillComplexBuffer()");
|
| + LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()";
|
| return 1;
|
| } else {
|
| - logCAMsg(kTraceError, kTraceAudioDevice, _id,
|
| + logCAMsg(rtc::LS_ERROR,
|
| "Error in AudioConverterFillComplexBuffer()", (const char*)&err);
|
| return 1;
|
| }
|
| @@ -2478,8 +2430,7 @@ OSStatus AudioDeviceMac::implOutConverterProc(UInt32* numberDataPackets,
|
|
|
| kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
|
| if (kernErr != KERN_SUCCESS) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " semaphore_signal_all() error: %d", kernErr);
|
| + LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
|
| return 1;
|
| }
|
|
|
| @@ -2502,8 +2453,7 @@ OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData,
|
| WEBRTC_CA_LOG_WARN(
|
| AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
|
| if (err == noErr) {
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
|
| - " Recording device stopped");
|
| + LOG(LS_VERBOSE) << "Recording device stopped";
|
| }
|
|
|
| _doStopRec = false;
|
| @@ -2540,8 +2490,7 @@ OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData,
|
|
|
| kern_return_t kernErr = semaphore_signal_all(_captureSemaphore);
|
| if (kernErr != KERN_SUCCESS) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " semaphore_signal_all() error: %d", kernErr);
|
| + LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
|
| }
|
|
|
| return err;
|
| @@ -2567,8 +2516,7 @@ OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets,
|
| return 1;
|
| }
|
| } else if (kernErr != KERN_SUCCESS) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " semaphore_wait() error: %d", kernErr);
|
| + LOG(LS_ERROR) << "semaphore_wait() error: " << kernErr;
|
| }
|
| }
|
|
|
| @@ -2610,16 +2558,14 @@ bool AudioDeviceMac::RenderWorkerThread() {
|
| return false;
|
| }
|
| } else if (kernErr != KERN_SUCCESS) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " semaphore_timedwait() error: %d", kernErr);
|
| + LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr;
|
| }
|
| }
|
|
|
| int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
|
|
|
| if (!_ptrAudioBuffer) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " capture AudioBuffer is invalid");
|
| + LOG(LS_ERROR) << "capture AudioBuffer is invalid";
|
| return false;
|
| }
|
|
|
| @@ -2629,8 +2575,7 @@ bool AudioDeviceMac::RenderWorkerThread() {
|
|
|
| nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
|
| if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " invalid number of output samples(%d)", nSamples);
|
| + LOG(LS_ERROR) << "invalid number of output samples(" << nSamples << ")";
|
| }
|
|
|
| uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;
|
| @@ -2685,7 +2630,7 @@ bool AudioDeviceMac::CaptureWorkerThread() {
|
| // This is our own error.
|
| return false;
|
| } else {
|
| - logCAMsg(kTraceError, kTraceAudioDevice, _id,
|
| + logCAMsg(rtc::LS_ERROR,
|
| "Error in AudioConverterFillComplexBuffer()", (const char*)&err);
|
| return false;
|
| }
|
| @@ -2707,8 +2652,7 @@ bool AudioDeviceMac::CaptureWorkerThread() {
|
| static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);
|
|
|
| if (!_ptrAudioBuffer) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " capture AudioBuffer is invalid");
|
| + LOG(LS_ERROR) << "capture AudioBuffer is invalid";
|
| return false;
|
| }
|
|
|
| @@ -2744,13 +2688,11 @@ bool AudioDeviceMac::CaptureWorkerThread() {
|
| // a change is needed.
|
| // Set this new mic level (received from the observer as return
|
| // value in the callback).
|
| - WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
|
| - " AGC change of volume: old=%u => new=%u",
|
| - currentMicLevel, newMicLevel);
|
| + LOG(LS_VERBOSE) << "AGC change of volume: old=" << currentMicLevel
|
| + << " => new=" << newMicLevel;
|
| if (SetMicrophoneVolume(newMicLevel) == -1) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " the required modification of the microphone "
|
| - "volume failed");
|
| + LOG(LS_WARNING)
|
| + << "the required modification of the microphone volume failed";
|
| }
|
| }
|
| }
|
|
|