Chromium Code Reviews

Unified Diff: webrtc/modules/audio_device/win/audio_device_core_win.cc

Issue 2685783014: Replace NULL with nullptr in all C++ files. (Closed)
Patch Set: Fixing android. Created 3 years, 10 months ago
Index: webrtc/modules/audio_device/win/audio_device_core_win.cc
diff --git a/webrtc/modules/audio_device/win/audio_device_core_win.cc b/webrtc/modules/audio_device/win/audio_device_core_win.cc
index 9a70239895b8231873a1bd01fdfdf4ece210c565..9387c2352aac14c843670ca772ef1cb9c68c7f67 100644
--- a/webrtc/modules/audio_device/win/audio_device_core_win.cc
+++ b/webrtc/modules/audio_device/win/audio_device_core_win.cc
@@ -46,8 +46,14 @@
// Macro that continues to a COM error.
#define CONTINUE_ON_ERROR(hres) do { if (FAILED(hres)) goto Next; } while(0)
-// Macro that releases a COM object if not NULL.
-#define SAFE_RELEASE(p) do { if ((p)) { (p)->Release(); (p) = NULL; } } while(0)
+// Macro that releases a COM object if not null.
+#define SAFE_RELEASE(p) \
+ do { \
+ if ((p)) { \
+ (p)->Release(); \
+ (p) = nullptr; \
+ } \
+ } while (0)
#define ROUND(x) ((x) >=0 ? (int)((x) + 0.5) : (int)((x) - 0.5))
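Note on the motivation for this change: in C++, NULL is just an integral zero constant, while nullptr has its own type (std::nullptr_t) and cannot be silently treated as an int. A minimal, standalone sketch of the overload-resolution difference (not part of this patch):

#include <cstddef>
#include <iostream>
#include <type_traits>

void Take(int) { std::cout << "int overload\n"; }
void Take(void*) { std::cout << "pointer overload\n"; }

int main() {
  // Take(NULL) would pick the int overload (or be ambiguous), which is
  // rarely what a caller passing a "null pointer" intends.
  Take(nullptr);  // std::nullptr_t converts only to pointer types.
  static_assert(std::is_same<decltype(nullptr), std::nullptr_t>::value,
                "nullptr has its own distinct type");
  return 0;
}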
@@ -263,16 +269,16 @@ bool AudioDeviceWindowsCore::CoreAudioIsSupported()
// IMMDeviceEnumerator Provides methods for enumerating audio devices.
// IMMEndpoint Represents an audio endpoint device.
//
- IMMDeviceEnumerator* pIMMD(NULL);
+ IMMDeviceEnumerator* pIMMD(nullptr);
const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);
hr = CoCreateInstance(
- CLSID_MMDeviceEnumerator, // GUID value of MMDeviceEnumerator coclass
- NULL,
- CLSCTX_ALL,
- IID_IMMDeviceEnumerator, // GUID value of the IMMDeviceEnumerator interface
- (void**)&pIMMD );
+ CLSID_MMDeviceEnumerator, // GUID value of MMDeviceEnumerator coclass
+ nullptr, CLSCTX_ALL,
+ IID_IMMDeviceEnumerator, // GUID value of the IMMDeviceEnumerator
+ // interface
+ (void**)&pIMMD);
if (FAILED(hr))
{
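For context, the call above is the standard COM activation pattern: coclass CLSID, nullptr for the aggregating outer unknown, an execution-context mask, the IID of the requested interface, and an out pointer. A hedged sketch of the same probe in isolation (the function name is illustrative; assumes COM has been initialized on the calling thread and the Windows SDK headers are available):

#include <windows.h>
#include <mmdeviceapi.h>

// Sketch: check whether the Core Audio device enumerator can be created.
bool CoreAudioEnumeratorAvailable() {
  IMMDeviceEnumerator* enumerator = nullptr;
  HRESULT hr = CoCreateInstance(__uuidof(MMDeviceEnumerator),
                                nullptr,      // no aggregation
                                CLSCTX_ALL,
                                __uuidof(IMMDeviceEnumerator),
                                reinterpret_cast<void**>(&enumerator));
  if (FAILED(hr) || enumerator == nullptr)
    return false;
  enumerator->Release();  // same cleanup SAFE_RELEASE performs above
  return true;
}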
@@ -287,13 +293,8 @@ bool AudioDeviceWindowsCore::CoreAudioIsSupported()
// Gets the system's human readable message string for this HRESULT.
// All error message in English by default.
- DWORD messageLength = ::FormatMessageW(dwFlags,
- 0,
- hr,
- dwLangID,
- errorText,
- MAXERRORLENGTH,
- NULL);
+ DWORD messageLength = ::FormatMessageW(
+ dwFlags, 0, hr, dwLangID, errorText, MAXERRORLENGTH, nullptr);
assert(messageLength <= MAXERRORLENGTH);
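FormatMessageW takes an optional message source and an optional va_list of insert arguments; only the final Arguments parameter was NULL in this call, so only that slot changes. A self-contained sketch of rendering an HRESULT the same way (the flag and language values here are assumptions; the file defines dwFlags and dwLangID outside this hunk):

#include <windows.h>
#include <cwchar>

// Sketch: turn an HRESULT into human-readable text.
void PrintHresult(HRESULT hr) {
  wchar_t errorText[256] = {0};
  const DWORD dwFlags =
      FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
  const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
  DWORD messageLength = ::FormatMessageW(dwFlags,
                                         nullptr,   // lpSource: unused here
                                         hr,
                                         dwLangID,
                                         errorText,
                                         256,       // buffer size, in WCHARs
                                         nullptr);  // no insert arguments
  if (messageLength > 0)
    std::wprintf(L"0x%08X: %ls", static_cast<unsigned>(hr), errorText);
}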
@@ -325,9 +326,8 @@ bool AudioDeviceWindowsCore::CoreAudioIsSupported()
coreAudioIsSupported = false;
AudioDeviceWindowsCore* p = new AudioDeviceWindowsCore(-1);
- if (p == NULL)
- {
- return false;
+ if (p == nullptr) {
+ return false;
}
int ok(0);
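One nit the mechanical replacement preserves: plain operator new throws std::bad_alloc on failure rather than returning null, so the p == nullptr branch above can never be taken. For illustration only (not a suggested change to this patch), a null check is only meaningful with the nothrow form:

#include <new>

struct Widget { int value = 0; };  // hypothetical type for the example

Widget* AllocateChecked() {
  // std::nothrow makes operator new return nullptr instead of throwing,
  // which is the only case where checking the result against nullptr helps.
  Widget* w = new (std::nothrow) Widget();
  if (w == nullptr) {
    return nullptr;  // allocation failed
  }
  return w;
}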
@@ -404,165 +404,170 @@ bool AudioDeviceWindowsCore::CoreAudioIsSupported()
// AudioDeviceWindowsCore() - ctor
// ----------------------------------------------------------------------------
-AudioDeviceWindowsCore::AudioDeviceWindowsCore(const int32_t id) :
- _comInit(ScopedCOMInitializer::kMTA),
- _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
- _volumeMutex(*CriticalSectionWrapper::CreateCriticalSection()),
- _id(id),
- _ptrAudioBuffer(NULL),
- _ptrEnumerator(NULL),
- _ptrRenderCollection(NULL),
- _ptrCaptureCollection(NULL),
- _ptrDeviceOut(NULL),
- _ptrDeviceIn(NULL),
- _ptrClientOut(NULL),
- _ptrClientIn(NULL),
- _ptrRenderClient(NULL),
- _ptrCaptureClient(NULL),
- _ptrCaptureVolume(NULL),
- _ptrRenderSimpleVolume(NULL),
- _dmo(NULL),
- _mediaBuffer(NULL),
- _builtInAecEnabled(false),
- _playAudioFrameSize(0),
- _playSampleRate(0),
- _playBlockSize(0),
- _playChannels(2),
- _sndCardPlayDelay(0),
- _sndCardRecDelay(0),
- _writtenSamples(0),
- _readSamples(0),
- _playAcc(0),
- _recAudioFrameSize(0),
- _recSampleRate(0),
- _recBlockSize(0),
- _recChannels(2),
- _avrtLibrary(NULL),
- _winSupportAvrt(false),
- _hRenderSamplesReadyEvent(NULL),
- _hPlayThread(NULL),
- _hCaptureSamplesReadyEvent(NULL),
- _hRecThread(NULL),
- _hShutdownRenderEvent(NULL),
- _hShutdownCaptureEvent(NULL),
- _hRenderStartedEvent(NULL),
- _hCaptureStartedEvent(NULL),
- _hGetCaptureVolumeThread(NULL),
- _hSetCaptureVolumeThread(NULL),
- _hSetCaptureVolumeEvent(NULL),
- _hMmTask(NULL),
- _initialized(false),
- _recording(false),
- _playing(false),
- _recIsInitialized(false),
- _playIsInitialized(false),
- _speakerIsInitialized(false),
- _microphoneIsInitialized(false),
- _AGC(false),
- _playWarning(0),
- _playError(0),
- _recWarning(0),
- _recError(0),
- _playBufType(AudioDeviceModule::kAdaptiveBufferSize),
- _playBufDelay(80),
- _playBufDelayFixed(80),
- _usingInputDeviceIndex(false),
- _usingOutputDeviceIndex(false),
- _inputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
- _outputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
- _inputDeviceIndex(0),
- _outputDeviceIndex(0),
- _newMicLevel(0)
-{
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__);
- assert(_comInit.succeeded());
-
- // Try to load the Avrt DLL
- if (!_avrtLibrary)
- {
- // Get handle to the Avrt DLL module.
- _avrtLibrary = LoadLibrary(TEXT("Avrt.dll"));
- if (_avrtLibrary)
- {
- // Handle is valid (should only happen if OS larger than vista & win7).
- // Try to get the function addresses.
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() The Avrt DLL module is now loaded");
-
- _PAvRevertMmThreadCharacteristics = (PAvRevertMmThreadCharacteristics)GetProcAddress(_avrtLibrary, "AvRevertMmThreadCharacteristics");
- _PAvSetMmThreadCharacteristicsA = (PAvSetMmThreadCharacteristicsA)GetProcAddress(_avrtLibrary, "AvSetMmThreadCharacteristicsA");
- _PAvSetMmThreadPriority = (PAvSetMmThreadPriority)GetProcAddress(_avrtLibrary, "AvSetMmThreadPriority");
-
- if ( _PAvRevertMmThreadCharacteristics &&
- _PAvSetMmThreadCharacteristicsA &&
- _PAvSetMmThreadPriority)
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvRevertMmThreadCharacteristics() is OK");
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvSetMmThreadCharacteristicsA() is OK");
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvSetMmThreadPriority() is OK");
- _winSupportAvrt = true;
- }
- }
+AudioDeviceWindowsCore::AudioDeviceWindowsCore(const int32_t id)
+ : _comInit(ScopedCOMInitializer::kMTA),
+ _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+ _volumeMutex(*CriticalSectionWrapper::CreateCriticalSection()),
+ _id(id),
+ _ptrAudioBuffer(nullptr),
+ _ptrEnumerator(nullptr),
+ _ptrRenderCollection(nullptr),
+ _ptrCaptureCollection(nullptr),
+ _ptrDeviceOut(nullptr),
+ _ptrDeviceIn(nullptr),
+ _ptrClientOut(nullptr),
+ _ptrClientIn(nullptr),
+ _ptrRenderClient(nullptr),
+ _ptrCaptureClient(nullptr),
+ _ptrCaptureVolume(nullptr),
+ _ptrRenderSimpleVolume(nullptr),
+ _dmo(nullptr),
+ _mediaBuffer(nullptr),
+ _builtInAecEnabled(false),
+ _playAudioFrameSize(0),
+ _playSampleRate(0),
+ _playBlockSize(0),
+ _playChannels(2),
+ _sndCardPlayDelay(0),
+ _sndCardRecDelay(0),
+ _writtenSamples(0),
+ _readSamples(0),
+ _playAcc(0),
+ _recAudioFrameSize(0),
+ _recSampleRate(0),
+ _recBlockSize(0),
+ _recChannels(2),
+ _avrtLibrary(nullptr),
+ _winSupportAvrt(false),
+ _hRenderSamplesReadyEvent(nullptr),
+ _hPlayThread(nullptr),
+ _hCaptureSamplesReadyEvent(nullptr),
+ _hRecThread(nullptr),
+ _hShutdownRenderEvent(nullptr),
+ _hShutdownCaptureEvent(nullptr),
+ _hRenderStartedEvent(nullptr),
+ _hCaptureStartedEvent(nullptr),
+ _hGetCaptureVolumeThread(nullptr),
+ _hSetCaptureVolumeThread(nullptr),
+ _hSetCaptureVolumeEvent(nullptr),
+ _hMmTask(nullptr),
+ _initialized(false),
+ _recording(false),
+ _playing(false),
+ _recIsInitialized(false),
+ _playIsInitialized(false),
+ _speakerIsInitialized(false),
+ _microphoneIsInitialized(false),
+ _AGC(false),
+ _playWarning(0),
+ _playError(0),
+ _recWarning(0),
+ _recError(0),
+ _playBufType(AudioDeviceModule::kAdaptiveBufferSize),
+ _playBufDelay(80),
+ _playBufDelayFixed(80),
+ _usingInputDeviceIndex(false),
+ _usingOutputDeviceIndex(false),
+ _inputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
+ _outputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
+ _inputDeviceIndex(0),
+ _outputDeviceIndex(0),
+ _newMicLevel(0) {
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__);
+ assert(_comInit.succeeded());
+
+ // Try to load the Avrt DLL
+ if (!_avrtLibrary) {
+ // Get handle to the Avrt DLL module.
+ _avrtLibrary = LoadLibrary(TEXT("Avrt.dll"));
+ if (_avrtLibrary) {
+ // Handle is valid (should only happen if OS larger than vista & win7).
+ // Try to get the function addresses.
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioDeviceWindowsCore::AudioDeviceWindowsCore() The Avrt "
+ "DLL module is now loaded");
+
+ _PAvRevertMmThreadCharacteristics =
+ (PAvRevertMmThreadCharacteristics)GetProcAddress(
+ _avrtLibrary, "AvRevertMmThreadCharacteristics");
+ _PAvSetMmThreadCharacteristicsA =
+ (PAvSetMmThreadCharacteristicsA)GetProcAddress(
+ _avrtLibrary, "AvSetMmThreadCharacteristicsA");
+ _PAvSetMmThreadPriority = (PAvSetMmThreadPriority)GetProcAddress(
+ _avrtLibrary, "AvSetMmThreadPriority");
+
+ if (_PAvRevertMmThreadCharacteristics &&
+ _PAvSetMmThreadCharacteristicsA && _PAvSetMmThreadPriority) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioDeviceWindowsCore::AudioDeviceWindowsCore() "
+ "AvRevertMmThreadCharacteristics() is OK");
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioDeviceWindowsCore::AudioDeviceWindowsCore() "
+ "AvSetMmThreadCharacteristicsA() is OK");
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "AudioDeviceWindowsCore::AudioDeviceWindowsCore() "
+ "AvSetMmThreadPriority() is OK");
+ _winSupportAvrt = true;
+ }
}
+ }
- // Create our samples ready events - we want auto reset events that start in the not-signaled state.
- // The state of an auto-reset event object remains signaled until a single waiting thread is released,
- // at which time the system automatically sets the state to nonsignaled. If no threads are waiting,
- // the event object's state remains signaled.
- // (Except for _hShutdownCaptureEvent, which is used to shutdown multiple threads).
- _hRenderSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- _hCaptureSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- _hShutdownRenderEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- _hShutdownCaptureEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
- _hRenderStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- _hCaptureStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
- _hSetCaptureVolumeEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
-
- _perfCounterFreq.QuadPart = 1;
- _perfCounterFactor = 0.0;
- _avgCPULoad = 0.0;
-
- // list of number of channels to use on recording side
- _recChannelsPrioList[0] = 2; // stereo is prio 1
- _recChannelsPrioList[1] = 1; // mono is prio 2
-
- // list of number of channels to use on playout side
- _playChannelsPrioList[0] = 2; // stereo is prio 1
- _playChannelsPrioList[1] = 1; // mono is prio 2
-
- HRESULT hr;
-
- // We know that this API will work since it has already been verified in
- // CoreAudioIsSupported, hence no need to check for errors here as well.
-
- // Retrive the IMMDeviceEnumerator API (should load the MMDevAPI.dll)
- // TODO(henrika): we should probably move this allocation to Init() instead
- // and deallocate in Terminate() to make the implementation more symmetric.
- CoCreateInstance(
- __uuidof(MMDeviceEnumerator),
- NULL,
- CLSCTX_ALL,
- __uuidof(IMMDeviceEnumerator),
- reinterpret_cast<void**>(&_ptrEnumerator));
- assert(NULL != _ptrEnumerator);
-
- // DMO initialization for built-in WASAPI AEC.
- {
- IMediaObject* ptrDMO = NULL;
- hr = CoCreateInstance(CLSID_CWMAudioAEC,
- NULL,
- CLSCTX_INPROC_SERVER,
- IID_IMediaObject,
- reinterpret_cast<void**>(&ptrDMO));
- if (FAILED(hr) || ptrDMO == NULL)
- {
- // Since we check that _dmo is non-NULL in EnableBuiltInAEC(), the
- // feature is prevented from being enabled.
- _builtInAecEnabled = false;
- _TraceCOMError(hr);
- }
- _dmo = ptrDMO;
- SAFE_RELEASE(ptrDMO);
- }
+ // Create our samples ready events - we want auto reset events that start in
+ // the not-signaled state.
+ // The state of an auto-reset event object remains signaled until a single
+ // waiting thread is released,
+ // at which time the system automatically sets the state to nonsignaled. If no
+ // threads are waiting,
+ // the event object's state remains signaled.
+ // (Except for _hShutdownCaptureEvent, which is used to shutdown multiple
+ // threads).
+ _hRenderSamplesReadyEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+ _hCaptureSamplesReadyEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+ _hShutdownRenderEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+ _hShutdownCaptureEvent = CreateEvent(nullptr, TRUE, FALSE, nullptr);
+ _hRenderStartedEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+ _hCaptureStartedEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+ _hSetCaptureVolumeEvent = CreateEvent(nullptr, FALSE, FALSE, nullptr);
+
+ _perfCounterFreq.QuadPart = 1;
+ _perfCounterFactor = 0.0;
+ _avgCPULoad = 0.0;
+
+ // list of number of channels to use on recording side
+ _recChannelsPrioList[0] = 2; // stereo is prio 1
+ _recChannelsPrioList[1] = 1; // mono is prio 2
+
+ // list of number of channels to use on playout side
+ _playChannelsPrioList[0] = 2; // stereo is prio 1
+ _playChannelsPrioList[1] = 1; // mono is prio 2
+
+ HRESULT hr;
+
+ // We know that this API will work since it has already been verified in
+ // CoreAudioIsSupported, hence no need to check for errors here as well.
+
+ // Retrive the IMMDeviceEnumerator API (should load the MMDevAPI.dll)
+ // TODO(henrika): we should probably move this allocation to Init() instead
+ // and deallocate in Terminate() to make the implementation more symmetric.
+ CoCreateInstance(__uuidof(MMDeviceEnumerator), nullptr, CLSCTX_ALL,
+ __uuidof(IMMDeviceEnumerator),
+ reinterpret_cast<void**>(&_ptrEnumerator));
+ assert(nullptr != _ptrEnumerator);
+
+ // DMO initialization for built-in WASAPI AEC.
+ {
+ IMediaObject* ptrDMO = nullptr;
+ hr = CoCreateInstance(CLSID_CWMAudioAEC, nullptr, CLSCTX_INPROC_SERVER,
+ IID_IMediaObject, reinterpret_cast<void**>(&ptrDMO));
+ if (FAILED(hr) || ptrDMO == nullptr) {
+ // Since we check that _dmo is non-null in EnableBuiltInAEC(), the
+ // feature is prevented from being enabled.
+ _builtInAecEnabled = false;
+ _TraceCOMError(hr);
+ }
+ _dmo = ptrDMO;
+ SAFE_RELEASE(ptrDMO);
+ }
}
// ----------------------------------------------------------------------------
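The constructor above creates six auto-reset events and one manual-reset event (_hShutdownCaptureEvent), all with nullptr security attributes and names. A small sketch of the two flavors, since that distinction is what the reflowed comment describes (function name is illustrative):

#include <windows.h>

void EventFlavors() {
  // bManualReset = FALSE: auto-reset, releasing one waiter resets the event.
  HANDLE autoReset = CreateEvent(nullptr, FALSE, FALSE, nullptr);
  // bManualReset = TRUE: stays signaled until ResetEvent(), so a single
  // SetEvent() can wake several waiting threads (the shutdown case above).
  HANDLE manualReset = CreateEvent(nullptr, TRUE, FALSE, nullptr);
  SetEvent(manualReset);
  WaitForSingleObject(manualReset, 0);  // still signaled after this returns
  ResetEvent(manualReset);
  CloseHandle(autoReset);
  CloseHandle(manualReset);
}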
@@ -579,48 +584,41 @@ AudioDeviceWindowsCore::~AudioDeviceWindowsCore()
// it here and not in Terminate() since we don't recreate it in Init().
SAFE_RELEASE(_ptrEnumerator);
- _ptrAudioBuffer = NULL;
+ _ptrAudioBuffer = nullptr;
- if (NULL != _hRenderSamplesReadyEvent)
- {
- CloseHandle(_hRenderSamplesReadyEvent);
- _hRenderSamplesReadyEvent = NULL;
+ if (nullptr != _hRenderSamplesReadyEvent) {
+ CloseHandle(_hRenderSamplesReadyEvent);
+ _hRenderSamplesReadyEvent = nullptr;
}
- if (NULL != _hCaptureSamplesReadyEvent)
- {
- CloseHandle(_hCaptureSamplesReadyEvent);
- _hCaptureSamplesReadyEvent = NULL;
+ if (nullptr != _hCaptureSamplesReadyEvent) {
+ CloseHandle(_hCaptureSamplesReadyEvent);
+ _hCaptureSamplesReadyEvent = nullptr;
}
- if (NULL != _hRenderStartedEvent)
- {
- CloseHandle(_hRenderStartedEvent);
- _hRenderStartedEvent = NULL;
+ if (nullptr != _hRenderStartedEvent) {
+ CloseHandle(_hRenderStartedEvent);
+ _hRenderStartedEvent = nullptr;
}
- if (NULL != _hCaptureStartedEvent)
- {
- CloseHandle(_hCaptureStartedEvent);
- _hCaptureStartedEvent = NULL;
+ if (nullptr != _hCaptureStartedEvent) {
+ CloseHandle(_hCaptureStartedEvent);
+ _hCaptureStartedEvent = nullptr;
}
- if (NULL != _hShutdownRenderEvent)
- {
- CloseHandle(_hShutdownRenderEvent);
- _hShutdownRenderEvent = NULL;
+ if (nullptr != _hShutdownRenderEvent) {
+ CloseHandle(_hShutdownRenderEvent);
+ _hShutdownRenderEvent = nullptr;
}
- if (NULL != _hShutdownCaptureEvent)
- {
- CloseHandle(_hShutdownCaptureEvent);
- _hShutdownCaptureEvent = NULL;
+ if (nullptr != _hShutdownCaptureEvent) {
+ CloseHandle(_hShutdownCaptureEvent);
+ _hShutdownCaptureEvent = nullptr;
}
- if (NULL != _hSetCaptureVolumeEvent)
- {
- CloseHandle(_hSetCaptureVolumeEvent);
- _hSetCaptureVolumeEvent = NULL;
+ if (nullptr != _hSetCaptureVolumeEvent) {
+ CloseHandle(_hSetCaptureVolumeEvent);
+ _hSetCaptureVolumeEvent = nullptr;
}
if (_avrtLibrary)
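The destructor repeats the same close-and-null sequence for each event handle. Purely as an illustration of how the repetition could be factored if this code were being restructured rather than just converted to nullptr (helper name is hypothetical):

#include <windows.h>

// Close a Win32 handle if it is set, then clear the caller's copy.
inline void CloseAndClear(HANDLE& handle) {
  if (handle != nullptr) {
    CloseHandle(handle);
    handle = nullptr;
  }
}

// Usage sketch:
//   CloseAndClear(_hRenderSamplesReadyEvent);
//   CloseAndClear(_hCaptureSamplesReadyEvent);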
@@ -757,9 +755,8 @@ int32_t AudioDeviceWindowsCore::InitSpeaker()
return -1;
}
- if (_ptrDeviceOut == NULL)
- {
- return -1;
+ if (_ptrDeviceOut == nullptr) {
+ return -1;
}
if (_usingOutputDeviceIndex)
@@ -788,35 +785,32 @@ int32_t AudioDeviceWindowsCore::InitSpeaker()
ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
}
- if (ret != 0 || (_ptrDeviceOut == NULL))
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to initialize the rendering enpoint device");
- SAFE_RELEASE(_ptrDeviceOut);
- return -1;
+ if (ret != 0 || (_ptrDeviceOut == nullptr)) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "failed to initialize the rendering enpoint device");
+ SAFE_RELEASE(_ptrDeviceOut);
+ return -1;
}
- IAudioSessionManager* pManager = NULL;
- ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager),
- CLSCTX_ALL,
- NULL,
- (void**)&pManager);
- if (ret != 0 || pManager == NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " failed to initialize the render manager");
- SAFE_RELEASE(pManager);
- return -1;
+ IAudioSessionManager* pManager = nullptr;
+ ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL,
+ nullptr, (void**)&pManager);
+ if (ret != 0 || pManager == nullptr) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " failed to initialize the render manager");
+ SAFE_RELEASE(pManager);
+ return -1;
}
SAFE_RELEASE(_ptrRenderSimpleVolume);
- ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
- if (ret != 0 || _ptrRenderSimpleVolume == NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " failed to initialize the render simple volume");
- SAFE_RELEASE(pManager);
- SAFE_RELEASE(_ptrRenderSimpleVolume);
- return -1;
+ ret =
+ pManager->GetSimpleAudioVolume(nullptr, FALSE, &_ptrRenderSimpleVolume);
+ if (ret != 0 || _ptrRenderSimpleVolume == nullptr) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " failed to initialize the render simple volume");
+ SAFE_RELEASE(pManager);
+ SAFE_RELEASE(_ptrRenderSimpleVolume);
+ return -1;
}
SAFE_RELEASE(pManager);
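For reference, the converted calls above activate an IAudioSessionManager on the render endpoint and then fetch the default session's ISimpleAudioVolume; the two arguments that were NULL (activation parameters and session GUID) become nullptr. A compressed sketch of the same sequence (function name is illustrative; assumes an initialized COM apartment and a valid IMMDevice*):

#include <windows.h>
#include <mmdeviceapi.h>
#include <audiopolicy.h>

// Sketch: obtain the per-session volume control for a render device.
bool GetSessionVolume(IMMDevice* device, ISimpleAudioVolume** volume) {
  IAudioSessionManager* manager = nullptr;
  HRESULT hr = device->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL,
                                nullptr,  // no activation parameters
                                reinterpret_cast<void**>(&manager));
  if (FAILED(hr) || manager == nullptr)
    return false;
  // nullptr selects the default audio session; FALSE = not cross-process.
  hr = manager->GetSimpleAudioVolume(nullptr, FALSE, volume);
  manager->Release();
  return SUCCEEDED(hr) && *volume != nullptr;
}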
@@ -839,9 +833,8 @@ int32_t AudioDeviceWindowsCore::InitMicrophone()
return -1;
}
- if (_ptrDeviceIn == NULL)
- {
- return -1;
+ if (_ptrDeviceIn == nullptr) {
+ return -1;
}
if (_usingInputDeviceIndex)
@@ -870,23 +863,21 @@ int32_t AudioDeviceWindowsCore::InitMicrophone()
ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
}
- if (ret != 0 || (_ptrDeviceIn == NULL))
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to initialize the capturing enpoint device");
- SAFE_RELEASE(_ptrDeviceIn);
- return -1;
+ if (ret != 0 || (_ptrDeviceIn == nullptr)) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "failed to initialize the capturing enpoint device");
+ SAFE_RELEASE(_ptrDeviceIn);
+ return -1;
}
- ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume),
- CLSCTX_ALL,
- NULL,
- reinterpret_cast<void **>(&_ptrCaptureVolume));
- if (ret != 0 || _ptrCaptureVolume == NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " failed to initialize the capture volume");
- SAFE_RELEASE(_ptrCaptureVolume);
- return -1;
+ ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
+ nullptr,
+ reinterpret_cast<void**>(&_ptrCaptureVolume));
+ if (ret != 0 || _ptrCaptureVolume == nullptr) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " failed to initialize the capture volume");
+ SAFE_RELEASE(_ptrCaptureVolume);
+ return -1;
}
_microphoneIsInitialized = true;
@@ -923,19 +914,19 @@ int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available)
CriticalSectionScoped lock(&_critSect);
- if (_ptrDeviceOut == NULL)
- {
- return -1;
+ if (_ptrDeviceOut == nullptr) {
+ return -1;
}
HRESULT hr = S_OK;
- IAudioSessionManager* pManager = NULL;
- ISimpleAudioVolume* pVolume = NULL;
+ IAudioSessionManager* pManager = nullptr;
+ ISimpleAudioVolume* pVolume = nullptr;
- hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL, (void**)&pManager);
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL,
+ nullptr, (void**)&pManager);
EXIT_ON_ERROR(hr);
- hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
+ hr = pManager->GetSimpleAudioVolume(nullptr, FALSE, &pVolume);
EXIT_ON_ERROR(hr);
float volume(0.0f);
@@ -973,9 +964,8 @@ int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume)
return -1;
}
- if (_ptrDeviceOut == NULL)
- {
- return -1;
+ if (_ptrDeviceOut == nullptr) {
+ return -1;
}
}
@@ -990,7 +980,7 @@ int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume)
// scale input volume to valid range (0.0 to 1.0)
const float fLevel = (float)volume/MAX_CORE_SPEAKER_VOLUME;
_volumeMutex.Enter();
- hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel,NULL);
+ hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel, nullptr);
_volumeMutex.Leave();
EXIT_ON_ERROR(hr);
@@ -1016,9 +1006,8 @@ int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const
return -1;
}
- if (_ptrDeviceOut == NULL)
- {
- return -1;
+ if (_ptrDeviceOut == nullptr) {
+ return -1;
}
}
@@ -1123,17 +1112,16 @@ int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available)
CriticalSectionScoped lock(&_critSect);
- if (_ptrDeviceOut == NULL)
- {
- return -1;
+ if (_ptrDeviceOut == nullptr) {
+ return -1;
}
HRESULT hr = S_OK;
- IAudioEndpointVolume* pVolume = NULL;
+ IAudioEndpointVolume* pVolume = nullptr;
// Query the speaker system mute state.
- hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume),
- CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
+ nullptr, reinterpret_cast<void**>(&pVolume));
EXIT_ON_ERROR(hr);
BOOL mute;
@@ -1167,20 +1155,20 @@ int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable)
return -1;
}
- if (_ptrDeviceOut == NULL)
- {
- return -1;
+ if (_ptrDeviceOut == nullptr) {
+ return -1;
}
HRESULT hr = S_OK;
- IAudioEndpointVolume* pVolume = NULL;
+ IAudioEndpointVolume* pVolume = nullptr;
// Set the speaker system mute state.
- hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
+ nullptr, reinterpret_cast<void**>(&pVolume));
EXIT_ON_ERROR(hr);
const BOOL mute(enable);
- hr = pVolume->SetMute(mute, NULL);
+ hr = pVolume->SetMute(mute, nullptr);
EXIT_ON_ERROR(hr);
SAFE_RELEASE(pVolume);
@@ -1205,16 +1193,16 @@ int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const
return -1;
}
- if (_ptrDeviceOut == NULL)
- {
- return -1;
+ if (_ptrDeviceOut == nullptr) {
+ return -1;
}
HRESULT hr = S_OK;
- IAudioEndpointVolume* pVolume = NULL;
+ IAudioEndpointVolume* pVolume = nullptr;
// Query the speaker system mute state.
- hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
+ nullptr, reinterpret_cast<void**>(&pVolume));
EXIT_ON_ERROR(hr);
BOOL mute;
@@ -1242,16 +1230,16 @@ int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available)
CriticalSectionScoped lock(&_critSect);
- if (_ptrDeviceIn == NULL)
- {
- return -1;
+ if (_ptrDeviceIn == nullptr) {
+ return -1;
}
HRESULT hr = S_OK;
- IAudioEndpointVolume* pVolume = NULL;
+ IAudioEndpointVolume* pVolume = nullptr;
// Query the microphone system mute state.
- hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
+ nullptr, reinterpret_cast<void**>(&pVolume));
EXIT_ON_ERROR(hr);
BOOL mute;
@@ -1282,20 +1270,20 @@ int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable)
return -1;
}
- if (_ptrDeviceIn == NULL)
- {
- return -1;
+ if (_ptrDeviceIn == nullptr) {
+ return -1;
}
HRESULT hr = S_OK;
- IAudioEndpointVolume* pVolume = NULL;
+ IAudioEndpointVolume* pVolume = nullptr;
// Set the microphone system mute state.
- hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
+ nullptr, reinterpret_cast<void**>(&pVolume));
EXIT_ON_ERROR(hr);
const BOOL mute(enable);
- hr = pVolume->SetMute(mute, NULL);
+ hr = pVolume->SetMute(mute, nullptr);
EXIT_ON_ERROR(hr);
SAFE_RELEASE(pVolume);
@@ -1320,10 +1308,11 @@ int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const
}
HRESULT hr = S_OK;
- IAudioEndpointVolume* pVolume = NULL;
+ IAudioEndpointVolume* pVolume = nullptr;
// Query the microphone system mute state.
- hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
+ nullptr, reinterpret_cast<void**>(&pVolume));
EXIT_ON_ERROR(hr);
BOOL mute;
@@ -1514,15 +1503,15 @@ int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available)
CriticalSectionScoped lock(&_critSect);
- if (_ptrDeviceIn == NULL)
- {
- return -1;
+ if (_ptrDeviceIn == nullptr) {
+ return -1;
}
HRESULT hr = S_OK;
- IAudioEndpointVolume* pVolume = NULL;
+ IAudioEndpointVolume* pVolume = nullptr;
- hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
+ nullptr, reinterpret_cast<void**>(&pVolume));
EXIT_ON_ERROR(hr);
float volume(0.0f);
@@ -1558,9 +1547,8 @@ int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume)
return -1;
}
- if (_ptrDeviceIn == NULL)
- {
- return -1;
+ if (_ptrDeviceIn == nullptr) {
+ return -1;
}
}
@@ -1574,7 +1562,7 @@ int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume)
// scale input volume to valid range (0.0 to 1.0)
const float fLevel = static_cast<float>(volume)/MAX_CORE_MICROPHONE_VOLUME;
_volumeMutex.Enter();
- _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
+ _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, nullptr);
_volumeMutex.Leave();
EXIT_ON_ERROR(hr);
@@ -1599,9 +1587,8 @@ int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const
return -1;
}
- if (_ptrDeviceIn == NULL)
- {
- return -1;
+ if (_ptrDeviceIn == nullptr) {
+ return -1;
}
}
@@ -1722,7 +1709,7 @@ int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index)
HRESULT hr(S_OK);
- assert(_ptrRenderCollection != NULL);
+ assert(_ptrRenderCollection != nullptr);
// Select an endpoint rendering device given the specified index
SAFE_RELEASE(_ptrDeviceOut);
@@ -1780,7 +1767,7 @@ int32_t AudioDeviceWindowsCore::SetPlayoutDevice(AudioDeviceModule::WindowsDevic
HRESULT hr(S_OK);
- assert(_ptrEnumerator != NULL);
+ assert(_ptrEnumerator != nullptr);
// Select an endpoint rendering device given the specified role
SAFE_RELEASE(_ptrDeviceOut);
@@ -1831,16 +1818,14 @@ int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Default Communication endpoint device will be used");
}
- if ((index > (nDevices-1)) || (name == NULL))
- {
- return -1;
+ if ((index > (nDevices - 1)) || (name == nullptr)) {
+ return -1;
}
memset(name, 0, kAdmMaxDeviceNameSize);
- if (guid != NULL)
- {
- memset(guid, 0, kAdmMaxGuidSize);
+ if (guid != nullptr) {
+ memset(guid, 0, kAdmMaxGuidSize);
}
CriticalSectionScoped lock(&_critSect);
@@ -1862,9 +1847,11 @@ int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
if (ret == 0)
{
// Convert the endpoint device's friendly-name to UTF-8
- if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+ kAdmMaxDeviceNameSize, nullptr, nullptr) == 0) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "WideCharToMultiByte(CP_UTF8) failed with error code %d",
+ GetLastError());
}
}
@@ -1878,13 +1865,14 @@ int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
}
- if (guid != NULL && ret == 0)
- {
- // Convert the endpoint device's ID string to UTF-8
- if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
- }
+ if (guid != nullptr && ret == 0) {
+ // Convert the endpoint device's ID string to UTF-8
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid,
+ kAdmMaxGuidSize, nullptr, nullptr) == 0) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "WideCharToMultiByte(CP_UTF8) failed with error code %d",
+ GetLastError());
+ }
}
return ret;
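The two trailing arguments to WideCharToMultiByte (lpDefaultChar and lpUsedDefaultChar) must be null when the code page is CP_UTF8, so the NULL-to-nullptr change is purely cosmetic here. A standalone sketch of the same UTF-16 to UTF-8 conversion with size querying (function name is illustrative):

#include <windows.h>
#include <string>

std::string WideToUtf8(const wchar_t* wide) {
  if (wide == nullptr)
    return std::string();
  int bytes = ::WideCharToMultiByte(CP_UTF8, 0, wide, -1,
                                    nullptr, 0,         // query required size
                                    nullptr, nullptr);  // must be null for UTF-8
  if (bytes <= 0)
    return std::string();
  std::string utf8(bytes, '\0');
  ::WideCharToMultiByte(CP_UTF8, 0, wide, -1, &utf8[0], bytes,
                        nullptr, nullptr);
  utf8.resize(bytes - 1);  // drop the embedded terminating NUL
  return utf8;
}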
@@ -1911,16 +1899,14 @@ int32_t AudioDeviceWindowsCore::RecordingDeviceName(
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Default Communication endpoint device will be used");
}
- if ((index > (nDevices-1)) || (name == NULL))
- {
- return -1;
+ if ((index > (nDevices - 1)) || (name == nullptr)) {
+ return -1;
}
memset(name, 0, kAdmMaxDeviceNameSize);
- if (guid != NULL)
- {
- memset(guid, 0, kAdmMaxGuidSize);
+ if (guid != nullptr) {
+ memset(guid, 0, kAdmMaxGuidSize);
}
CriticalSectionScoped lock(&_critSect);
@@ -1942,9 +1928,11 @@ int32_t AudioDeviceWindowsCore::RecordingDeviceName(
if (ret == 0)
{
// Convert the endpoint device's friendly-name to UTF-8
- if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name,
+ kAdmMaxDeviceNameSize, nullptr, nullptr) == 0) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "WideCharToMultiByte(CP_UTF8) failed with error code %d",
+ GetLastError());
}
}
@@ -1958,13 +1946,14 @@ int32_t AudioDeviceWindowsCore::RecordingDeviceName(
ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
}
- if (guid != NULL && ret == 0)
- {
- // Convert the endpoint device's ID string to UTF-8
- if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
- }
+ if (guid != nullptr && ret == 0) {
+ // Convert the endpoint device's ID string to UTF-8
+ if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid,
+ kAdmMaxGuidSize, nullptr, nullptr) == 0) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "WideCharToMultiByte(CP_UTF8) failed with error code %d",
+ GetLastError());
+ }
}
return ret;
@@ -2012,7 +2001,7 @@ int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index)
HRESULT hr(S_OK);
- assert(_ptrCaptureCollection != NULL);
+ assert(_ptrCaptureCollection != nullptr);
// Select an endpoint capture device given the specified index
SAFE_RELEASE(_ptrDeviceIn);
@@ -2070,7 +2059,7 @@ int32_t AudioDeviceWindowsCore::SetRecordingDevice(AudioDeviceModule::WindowsDev
HRESULT hr(S_OK);
- assert(_ptrEnumerator != NULL);
+ assert(_ptrEnumerator != nullptr);
// Select an endpoint capture device given the specified role
SAFE_RELEASE(_ptrDeviceIn);
@@ -2165,9 +2154,8 @@ int32_t AudioDeviceWindowsCore::InitPlayout()
return 0;
}
- if (_ptrDeviceOut == NULL)
- {
- return -1;
+ if (_ptrDeviceOut == nullptr) {
+ return -1;
}
// Initialize the speaker (devices might have been added or removed)
@@ -2177,9 +2165,8 @@ int32_t AudioDeviceWindowsCore::InitPlayout()
}
// Ensure that the updated rendering endpoint device is valid
- if (_ptrDeviceOut == NULL)
- {
- return -1;
+ if (_ptrDeviceOut == nullptr) {
+ return -1;
}
if (_builtInAecEnabled && _recIsInitialized)
@@ -2193,17 +2180,14 @@ int32_t AudioDeviceWindowsCore::InitPlayout()
}
HRESULT hr = S_OK;
- WAVEFORMATEX* pWfxOut = NULL;
+ WAVEFORMATEX* pWfxOut = nullptr;
WAVEFORMATEX Wfx = WAVEFORMATEX();
- WAVEFORMATEX* pWfxClosestMatch = NULL;
+ WAVEFORMATEX* pWfxClosestMatch = nullptr;
// Create COM object with IAudioClient interface.
SAFE_RELEASE(_ptrClientOut);
- hr = _ptrDeviceOut->Activate(
- __uuidof(IAudioClient),
- CLSCTX_ALL,
- NULL,
- (void**)&_ptrClientOut);
+ hr = _ptrDeviceOut->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr,
+ (void**)&_ptrClientOut);
EXIT_ON_ERROR(hr);
// Retrieve the stream format that the audio engine uses for its internal
@@ -2319,20 +2303,25 @@ int32_t AudioDeviceWindowsCore::InitPlayout()
hnsBufferDuration = 30*10000;
}
hr = _ptrClientOut->Initialize(
- AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications
- AUDCLNT_STREAMFLAGS_EVENTCALLBACK, // processing of the audio buffer by the client will be event driven
- hnsBufferDuration, // requested buffer capacity as a time value (in 100-nanosecond units)
- 0, // periodicity
- &Wfx, // selected wave format
- NULL); // session GUID
+ AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK, // processing of the audio buffer by
+ // the client will be event driven
+ hnsBufferDuration, // requested buffer capacity as a time value (in
+ // 100-nanosecond units)
+ 0, // periodicity
+ &Wfx, // selected wave format
+ nullptr); // session GUID
if (FAILED(hr))
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "IAudioClient::Initialize() failed:");
- if (pWfxClosestMatch != NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "closest mix format: #channels=%d, samples/sec=%d, bits/sample=%d",
- pWfxClosestMatch->nChannels, pWfxClosestMatch->nSamplesPerSec, pWfxClosestMatch->wBitsPerSample);
+ if (pWfxClosestMatch != nullptr) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "closest mix format: #channels=%d, samples/sec=%d, "
+ "bits/sample=%d",
+ pWfxClosestMatch->nChannels,
+ pWfxClosestMatch->nSamplesPerSec,
+ pWfxClosestMatch->wBitsPerSample);
}
else
{
@@ -2404,7 +2393,7 @@ Exit:
int32_t AudioDeviceWindowsCore::InitRecordingDMO()
{
assert(_builtInAecEnabled);
- assert(_dmo != NULL);
+ assert(_dmo != nullptr);
if (SetDMOProperties() == -1)
{
@@ -2506,9 +2495,8 @@ int32_t AudioDeviceWindowsCore::InitRecording()
}
_perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart;
- if (_ptrDeviceIn == NULL)
- {
- return -1;
+ if (_ptrDeviceIn == nullptr) {
+ return -1;
}
// Initialize the microphone (devices might have been added or removed)
@@ -2518,9 +2506,8 @@ int32_t AudioDeviceWindowsCore::InitRecording()
}
// Ensure that the updated capturing endpoint device is valid
- if (_ptrDeviceIn == NULL)
- {
- return -1;
+ if (_ptrDeviceIn == nullptr) {
+ return -1;
}
if (_builtInAecEnabled)
@@ -2530,17 +2517,14 @@ int32_t AudioDeviceWindowsCore::InitRecording()
}
HRESULT hr = S_OK;
- WAVEFORMATEX* pWfxIn = NULL;
+ WAVEFORMATEX* pWfxIn = nullptr;
WAVEFORMATEX Wfx = WAVEFORMATEX();
- WAVEFORMATEX* pWfxClosestMatch = NULL;
+ WAVEFORMATEX* pWfxClosestMatch = nullptr;
// Create COM object with IAudioClient interface.
SAFE_RELEASE(_ptrClientIn);
- hr = _ptrDeviceIn->Activate(
- __uuidof(IAudioClient),
- CLSCTX_ALL,
- NULL,
- (void**)&_ptrClientIn);
+ hr = _ptrDeviceIn->Activate(__uuidof(IAudioClient), CLSCTX_ALL, nullptr,
+ (void**)&_ptrClientIn);
EXIT_ON_ERROR(hr);
// Retrieve the stream format that the audio engine uses for its internal
@@ -2625,22 +2609,28 @@ int32_t AudioDeviceWindowsCore::InitRecording()
// Create a capturing stream.
hr = _ptrClientIn->Initialize(
- AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications
- AUDCLNT_STREAMFLAGS_EVENTCALLBACK | // processing of the audio buffer by the client will be event driven
- AUDCLNT_STREAMFLAGS_NOPERSIST, // volume and mute settings for an audio session will not persist across system restarts
- 0, // required for event-driven shared mode
- 0, // periodicity
- &Wfx, // selected wave format
- NULL); // session GUID
-
+ AUDCLNT_SHAREMODE_SHARED, // share Audio Engine with other applications
+ AUDCLNT_STREAMFLAGS_EVENTCALLBACK | // processing of the audio buffer
+ // by the client will be event
+ // driven
+ AUDCLNT_STREAMFLAGS_NOPERSIST, // volume and mute settings for an
+ // audio session will not persist
+ // across system restarts
+ 0, // required for event-driven shared mode
+ 0, // periodicity
+ &Wfx, // selected wave format
+ nullptr); // session GUID
if (hr != S_OK)
{
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "IAudioClient::Initialize() failed:");
- if (pWfxClosestMatch != NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "closest mix format: #channels=%d, samples/sec=%d, bits/sample=%d",
- pWfxClosestMatch->nChannels, pWfxClosestMatch->nSamplesPerSec, pWfxClosestMatch->wBitsPerSample);
+ if (pWfxClosestMatch != nullptr) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "closest mix format: #channels=%d, samples/sec=%d, "
+ "bits/sample=%d",
+ pWfxClosestMatch->nChannels,
+ pWfxClosestMatch->nSamplesPerSec,
+ pWfxClosestMatch->wBitsPerSample);
}
else
{
@@ -2717,9 +2707,8 @@ int32_t AudioDeviceWindowsCore::StartRecording()
return -1;
}
- if (_hRecThread != NULL)
- {
- return 0;
+ if (_hRecThread != nullptr) {
+ return 0;
}
if (_recording)
@@ -2748,49 +2737,34 @@ int32_t AudioDeviceWindowsCore::StartRecording()
}
}
- assert(_hRecThread == NULL);
- _hRecThread = CreateThread(NULL,
- 0,
- lpStartAddress,
- this,
- 0,
- NULL);
- if (_hRecThread == NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "failed to create the recording thread");
- return -1;
+ assert(_hRecThread == nullptr);
+ _hRecThread =
+ CreateThread(nullptr, 0, lpStartAddress, this, 0, nullptr);
+ if (_hRecThread == nullptr) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "failed to create the recording thread");
+ return -1;
}
// Set thread priority to highest possible
SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);
- assert(_hGetCaptureVolumeThread == NULL);
- _hGetCaptureVolumeThread = CreateThread(NULL,
- 0,
- GetCaptureVolumeThread,
- this,
- 0,
- NULL);
- if (_hGetCaptureVolumeThread == NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " failed to create the volume getter thread");
- return -1;
+ assert(_hGetCaptureVolumeThread == nullptr);
+ _hGetCaptureVolumeThread =
+ CreateThread(nullptr, 0, GetCaptureVolumeThread, this, 0, nullptr);
+ if (_hGetCaptureVolumeThread == nullptr) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " failed to create the volume getter thread");
+ return -1;
}
- assert(_hSetCaptureVolumeThread == NULL);
- _hSetCaptureVolumeThread = CreateThread(NULL,
- 0,
- SetCaptureVolumeThread,
- this,
- 0,
- NULL);
- if (_hSetCaptureVolumeThread == NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " failed to create the volume setter thread");
- return -1;
+ assert(_hSetCaptureVolumeThread == nullptr);
+ _hSetCaptureVolumeThread =
+ CreateThread(nullptr, 0, SetCaptureVolumeThread, this, 0, nullptr);
+ if (_hSetCaptureVolumeThread == nullptr) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ " failed to create the volume setter thread");
+ return -1;
}
} // critScoped
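All three CreateThread calls pass nullptr for the optional security attributes and for the thread-id out parameter. A minimal sketch of that call shape (thread procedure and wrapper names are hypothetical):

#include <windows.h>

DWORD WINAPI ExampleThreadProc(LPVOID context) {
  (void)context;  // the 'this' pointer in the calls above
  return 0;
}

HANDLE StartWorker(void* context) {
  return CreateThread(nullptr,            // default security attributes
                      0,                  // default stack size
                      ExampleThreadProc,  // entry point
                      context,            // parameter handed to the thread
                      0,                  // run immediately
                      nullptr);           // thread id not needed
}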
@@ -2826,16 +2800,15 @@ int32_t AudioDeviceWindowsCore::StopRecording()
_Lock();
- if (_hRecThread == NULL)
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "no capturing stream is active => close down WASAPI only");
- SAFE_RELEASE(_ptrClientIn);
- SAFE_RELEASE(_ptrCaptureClient);
- _recIsInitialized = false;
- _recording = false;
- _UnLock();
- return 0;
+ if (_hRecThread == nullptr) {
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "no capturing stream is active => close down WASAPI only");
+ SAFE_RELEASE(_ptrClientIn);
+ SAFE_RELEASE(_ptrCaptureClient);
+ _recIsInitialized = false;
+ _recording = false;
+ _UnLock();
+ return 0;
}
// Stop the driving thread...
@@ -2889,8 +2862,8 @@ int32_t AudioDeviceWindowsCore::StopRecording()
ResetEvent(_hShutdownCaptureEvent); // Must be manually reset.
// Ensure that the thread has released these interfaces properly.
- assert(err == -1 || _ptrClientIn == NULL);
- assert(err == -1 || _ptrCaptureClient == NULL);
+ assert(err == -1 || _ptrClientIn == nullptr);
+ assert(err == -1 || _ptrCaptureClient == nullptr);
_recIsInitialized = false;
_recording = false;
@@ -2898,24 +2871,23 @@ int32_t AudioDeviceWindowsCore::StopRecording()
// These will create thread leaks in the result of an error,
// but we can at least resume the call.
CloseHandle(_hRecThread);
- _hRecThread = NULL;
+ _hRecThread = nullptr;
CloseHandle(_hGetCaptureVolumeThread);
- _hGetCaptureVolumeThread = NULL;
+ _hGetCaptureVolumeThread = nullptr;
CloseHandle(_hSetCaptureVolumeThread);
- _hSetCaptureVolumeThread = NULL;
+ _hSetCaptureVolumeThread = nullptr;
if (_builtInAecEnabled)
{
- assert(_dmo != NULL);
- // This is necessary. Otherwise the DMO can generate garbage render
- // audio even after rendering has stopped.
- HRESULT hr = _dmo->FreeStreamingResources();
- if (FAILED(hr))
- {
- _TraceCOMError(hr);
- err = -1;
+ assert(_dmo != nullptr);
+ // This is necessary. Otherwise the DMO can generate garbage render
+ // audio even after rendering has stopped.
+ HRESULT hr = _dmo->FreeStreamingResources();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ err = -1;
}
}
@@ -2967,9 +2939,8 @@ int32_t AudioDeviceWindowsCore::StartPlayout()
return -1;
}
- if (_hPlayThread != NULL)
- {
- return 0;
+ if (_hPlayThread != nullptr) {
+ return 0;
}
if (_playing)
@@ -2981,19 +2952,13 @@ int32_t AudioDeviceWindowsCore::StartPlayout()
CriticalSectionScoped critScoped(&_critSect);
// Create thread which will drive the rendering.
- assert(_hPlayThread == NULL);
- _hPlayThread = CreateThread(
- NULL,
- 0,
- WSAPIRenderThread,
- this,
- 0,
- NULL);
- if (_hPlayThread == NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "failed to create the playout thread");
- return -1;
+ assert(_hPlayThread == nullptr);
+ _hPlayThread =
+ CreateThread(nullptr, 0, WSAPIRenderThread, this, 0, nullptr);
+ if (_hPlayThread == nullptr) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "failed to create the playout thread");
+ return -1;
}
// Set thread priority to highest possible.
@@ -3030,15 +2995,15 @@ int32_t AudioDeviceWindowsCore::StopPlayout()
{
CriticalSectionScoped critScoped(&_critSect) ;
- if (_hPlayThread == NULL)
- {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "no rendering stream is active => close down WASAPI only");
- SAFE_RELEASE(_ptrClientOut);
- SAFE_RELEASE(_ptrRenderClient);
- _playIsInitialized = false;
- _playing = false;
- return 0;
+ if (_hPlayThread == nullptr) {
+ WEBRTC_TRACE(
+ kTraceInfo, kTraceAudioDevice, _id,
+ "no rendering stream is active => close down WASAPI only");
+ SAFE_RELEASE(_ptrClientOut);
+ SAFE_RELEASE(_ptrRenderClient);
+ _playIsInitialized = false;
+ _playing = false;
+ return 0;
}
// stop the driving thread...
@@ -3054,7 +3019,7 @@ int32_t AudioDeviceWindowsCore::StopPlayout()
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"failed to close down webrtc_core_audio_render_thread");
CloseHandle(_hPlayThread);
- _hPlayThread = NULL;
+ _hPlayThread = nullptr;
_playIsInitialized = false;
_playing = false;
return -1;
@@ -3077,7 +3042,7 @@ int32_t AudioDeviceWindowsCore::StopPlayout()
_playing = false;
CloseHandle(_hPlayThread);
- _hPlayThread = NULL;
+ _hPlayThread = nullptr;
if (_builtInAecEnabled && _recording)
{
@@ -3374,7 +3339,7 @@ DWORD AudioDeviceWindowsCore::DoRenderThread()
bool keepPlaying = true;
HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent};
HRESULT hr = S_OK;
- HANDLE hMmTask = NULL;
+ HANDLE hMmTask = nullptr;
LARGE_INTEGER t1;
LARGE_INTEGER t2;
@@ -3413,7 +3378,7 @@ DWORD AudioDeviceWindowsCore::DoRenderThread()
_Lock();
- IAudioClock* clock = NULL;
+ IAudioClock* clock = nullptr;
// Get size of rendering buffer (length is expressed as the number of audio frames the buffer can hold).
// This value is fixed during the rendering session.
@@ -3460,7 +3425,7 @@ DWORD AudioDeviceWindowsCore::DoRenderThread()
// Before starting the stream, fill the rendering buffer with silence.
//
- BYTE *pData = NULL;
+ BYTE* pData = nullptr;
hr = _ptrRenderClient->GetBuffer(bufferLength, &pData);
EXIT_ON_ERROR(hr);
@@ -3512,12 +3477,12 @@ DWORD AudioDeviceWindowsCore::DoRenderThread()
// Sanity check to ensure that essential states are not modified
// during the unlocked period.
- if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
- {
- _UnLock();
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- "output state has been modified during unlocked period");
- goto Exit;
+ if (_ptrRenderClient == nullptr || _ptrClientOut == nullptr) {
+ _UnLock();
+ WEBRTC_TRACE(
+ kTraceCritical, kTraceAudioDevice, _id,
+ "output state has been modified during unlocked period");
+ goto Exit;
}
// Get the number of frames of padding (queued up to play) in the endpoint buffer.
@@ -3564,11 +3529,13 @@ DWORD AudioDeviceWindowsCore::DoRenderThread()
}
// Sanity check to ensure that essential states are not modified during the unlocked period
- if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
- {
- _UnLock();
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, "output state has been modified during unlocked period");
- goto Exit;
+ if (_ptrRenderClient == nullptr ||
+ _ptrClientOut == nullptr) {
+ _UnLock();
+ WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+ "output state has been modified during "
+ "unlocked period");
+ goto Exit;
}
if (nSamples != static_cast<int32_t>(_playBlockSize))
{
@@ -3596,7 +3563,7 @@ DWORD AudioDeviceWindowsCore::DoRenderThread()
if (clock) {
UINT64 pos = 0;
UINT64 freq = 1;
- clock->GetPosition(&pos, NULL);
+ clock->GetPosition(&pos, nullptr);
clock->GetFrequency(&freq);
playout_delay = ROUND((double(_writtenSamples) /
_devicePlaySampleRate - double(pos) / freq) * 1000.0);
@@ -3624,9 +3591,8 @@ Exit:
if (_winSupportAvrt)
{
- if (NULL != hMmTask)
- {
- _PAvRevertMmThreadCharacteristics(hMmTask);
+ if (nullptr != hMmTask) {
+ _PAvRevertMmThreadCharacteristics(hMmTask);
}
}
@@ -3634,18 +3600,15 @@ Exit:
if (keepPlaying)
{
- if (_ptrClientOut != NULL)
- {
- hr = _ptrClientOut->Stop();
- if (FAILED(hr))
- {
- _TraceCOMError(hr);
- }
- hr = _ptrClientOut->Reset();
- if (FAILED(hr))
- {
- _TraceCOMError(hr);
- }
+ if (_ptrClientOut != nullptr) {
+ hr = _ptrClientOut->Stop();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
+ hr = _ptrClientOut->Reset();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
}
// Trigger callback from module process thread
_playError = 1;
@@ -3663,34 +3626,29 @@ Exit:
DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority()
{
- _hMmTask = NULL;
-
- rtc::SetCurrentThreadName("webrtc_core_audio_capture_thread");
-
- // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
- // priority.
- if (_winSupportAvrt)
- {
- DWORD taskIndex(0);
- _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
- if (_hMmTask)
- {
- if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL))
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "failed to boost rec-thread using MMCSS");
- }
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "capture thread is now registered with MMCSS (taskIndex=%d)",
- taskIndex);
- }
- else
- {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "failed to enable MMCSS on capture thread (err=%d)",
- GetLastError());
- _TraceCOMError(GetLastError());
- }
+ _hMmTask = nullptr;
+
+ rtc::SetCurrentThreadName("webrtc_core_audio_capture_thread");
+
+ // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
+ // priority.
+ if (_winSupportAvrt) {
+ DWORD taskIndex(0);
+ _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
+ if (_hMmTask) {
+ if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL)) {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ "failed to boost rec-thread using MMCSS");
+ }
+ WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
+ "capture thread is now registered with MMCSS (taskIndex=%d)",
+ taskIndex);
+ } else {
+ WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
+ "failed to enable MMCSS on capture thread (err=%d)",
+ GetLastError());
+ _TraceCOMError(GetLastError());
+ }
}
return S_OK;
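The block above boosts the capture thread through MMCSS using Avrt entry points resolved at runtime with GetProcAddress. A hedged sketch of the same idea using the avrt.h declarations directly (assumes linking against avrt.lib instead of the dynamic loading this file deliberately uses):

#include <windows.h>
#include <avrt.h>

// Sketch: raise a real-time audio thread's scheduling class via MMCSS.
HANDLE BoostToProAudio() {
  DWORD taskIndex = 0;
  HANDLE task = AvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
  if (task != nullptr) {
    AvSetMmThreadPriority(task, AVRT_PRIORITY_CRITICAL);
  }
  return task;  // hand back to AvRevertMmThreadCharacteristics() when done
}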
@@ -3700,26 +3658,25 @@ void AudioDeviceWindowsCore::RevertCaptureThreadPriority()
{
if (_winSupportAvrt)
{
- if (NULL != _hMmTask)
- {
- _PAvRevertMmThreadCharacteristics(_hMmTask);
+ if (nullptr != _hMmTask) {
+ _PAvRevertMmThreadCharacteristics(_hMmTask);
}
}
- _hMmTask = NULL;
+ _hMmTask = nullptr;
}
DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO()
{
- assert(_mediaBuffer != NULL);
- bool keepRecording = true;
+ assert(_mediaBuffer != nullptr);
+ bool keepRecording = true;
- // Initialize COM as MTA in this thread.
- ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
- if (!comInit.succeeded()) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "failed to initialize COM in polling DMO thread");
- return 1;
+ // Initialize COM as MTA in this thread.
+ ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
+ if (!comInit.succeeded()) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "failed to initialize COM in polling DMO thread");
+ return 1;
}
HRESULT hr = InitCaptureThreadPriority();
@@ -3867,7 +3824,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThread()
LARGE_INTEGER t2;
int32_t time(0);
- BYTE* syncBuffer = NULL;
+ BYTE* syncBuffer = nullptr;
UINT32 syncBufIndex = 0;
_readSamples = 0;
@@ -3892,8 +3849,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThread()
// This value is fixed during the capturing session.
//
UINT32 bufferLength = 0;
- if (_ptrClientIn == NULL)
- {
+ if (_ptrClientIn == nullptr) {
WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
"input state has been modified before capture loop starts.");
return 1;
@@ -3908,9 +3864,8 @@ DWORD AudioDeviceWindowsCore::DoCaptureThread()
//
const UINT32 syncBufferSize = 2*(bufferLength * _recAudioFrameSize);
syncBuffer = new BYTE[syncBufferSize];
- if (syncBuffer == NULL)
- {
- return (DWORD)E_POINTER;
+ if (syncBuffer == nullptr) {
+ return (DWORD)E_POINTER;
}
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] size of sync buffer : %u [bytes]", syncBufferSize);
@@ -3980,12 +3935,12 @@ DWORD AudioDeviceWindowsCore::DoCaptureThread()
// Sanity check to ensure that essential states are not modified
// during the unlocked period.
- if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
- {
- _UnLock();
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- "input state has been modified during unlocked period");
- goto Exit;
+ if (_ptrCaptureClient == nullptr || _ptrClientIn == nullptr) {
+ _UnLock();
+ WEBRTC_TRACE(
+ kTraceCritical, kTraceAudioDevice, _id,
+ "input state has been modified during unlocked period");
+ goto Exit;
}
// Find out how much capture data is available
@@ -4009,7 +3964,7 @@ DWORD AudioDeviceWindowsCore::DoCaptureThread()
{
// Treat all of the data in the packet as silence and ignore the actual data values.
WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "AUDCLNT_BUFFERFLAGS_SILENT");
- pData = NULL;
+ pData = nullptr;
}
assert(framesAvailable != 0);
@@ -4071,11 +4026,13 @@ DWORD AudioDeviceWindowsCore::DoCaptureThread()
_playAcc = 0;
// Sanity check to ensure that essential states are not modified during the unlocked period
- if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
- {
- _UnLock();
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, "input state has been modified during unlocked period");
- goto Exit;
+ if (_ptrCaptureClient == nullptr ||
+ _ptrClientIn == nullptr) {
+ _UnLock();
+ WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
+ "input state has been modified during "
+ "unlocked period");
+ goto Exit;
}
}
@@ -4137,18 +4094,15 @@ Exit:
if (keepRecording)
{
- if (_ptrClientIn != NULL)
- {
- hr = _ptrClientIn->Stop();
- if (FAILED(hr))
- {
- _TraceCOMError(hr);
- }
- hr = _ptrClientIn->Reset();
- if (FAILED(hr))
- {
- _TraceCOMError(hr);
- }
+ if (_ptrClientIn != nullptr) {
+ hr = _ptrClientIn->Stop();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
+ hr = _ptrClientIn->Reset();
+ if (FAILED(hr)) {
+ _TraceCOMError(hr);
+ }
}
// Trigger callback from module process thread
@@ -4183,11 +4137,11 @@ int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable)
return -1;
}
- if (_dmo == NULL)
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Built-in AEC DMO was not initialized properly at create time");
- return -1;
+ if (_dmo == nullptr) {
+ WEBRTC_TRACE(
+ kTraceError, kTraceAudioDevice, _id,
+ "Built-in AEC DMO was not initialized properly at create time");
+ return -1;
}
_builtInAecEnabled = enable;
@@ -4197,17 +4151,16 @@ int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable)
int AudioDeviceWindowsCore::SetDMOProperties()
{
HRESULT hr = S_OK;
- assert(_dmo != NULL);
+ assert(_dmo != nullptr);
rtc::scoped_refptr<IPropertyStore> ps;
{
- IPropertyStore* ptrPS = NULL;
- hr = _dmo->QueryInterface(IID_IPropertyStore,
- reinterpret_cast<void**>(&ptrPS));
- if (FAILED(hr) || ptrPS == NULL)
- {
- _TraceCOMError(hr);
- return -1;
+ IPropertyStore* ptrPS = nullptr;
+ hr = _dmo->QueryInterface(IID_IPropertyStore,
+ reinterpret_cast<void**>(&ptrPS));
+ if (FAILED(hr) || ptrPS == nullptr) {
+ _TraceCOMError(hr);
+ return -1;
}
ps = ptrPS;
SAFE_RELEASE(ptrPS);
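The block above queries the DMO for IPropertyStore, hands the raw pointer to rtc::scoped_refptr (which takes its own reference), and then drops the temporary with SAFE_RELEASE. A sketch of the underlying QueryInterface contract in isolation (function name is illustrative):

#include <windows.h>
#include <propsys.h>

// Sketch: fetch IPropertyStore from any COM object; on success the caller
// owns exactly one reference and must Release() it.
IPropertyStore* QueryPropertyStore(IUnknown* object) {
  IPropertyStore* store = nullptr;
  HRESULT hr = object->QueryInterface(__uuidof(IPropertyStore),
                                      reinterpret_cast<void**>(&store));
  if (FAILED(hr) || store == nullptr) {
    return nullptr;  // interface not supported
  }
  return store;
}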
@@ -4364,10 +4317,10 @@ int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir)
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
HRESULT hr = S_OK;
- IMMDeviceCollection *pCollection = NULL;
+ IMMDeviceCollection* pCollection = nullptr;
assert(dir == eRender || dir == eCapture);
- assert(_ptrEnumerator != NULL);
+ assert(_ptrEnumerator != nullptr);
// Create a fresh list of devices using the specified direction
hr = _ptrEnumerator->EnumAudioEndpoints(
@@ -4411,13 +4364,10 @@ int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir)
assert(eRender == dir || eCapture == dir);
- if (eRender == dir && NULL != _ptrRenderCollection)
- {
- hr = _ptrRenderCollection->GetCount(&count);
- }
- else if (NULL != _ptrCaptureCollection)
- {
- hr = _ptrCaptureCollection->GetCount(&count);
+ if (eRender == dir && nullptr != _ptrRenderCollection) {
+ hr = _ptrRenderCollection->GetCount(&count);
+ } else if (nullptr != _ptrCaptureCollection) {
+ hr = _ptrCaptureCollection->GetCount(&count);
}
if (FAILED(hr))
@@ -4445,17 +4395,14 @@ int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir, int index, LPW
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
HRESULT hr = S_OK;
- IMMDevice *pDevice = NULL;
+ IMMDevice* pDevice = nullptr;
assert(dir == eRender || dir == eCapture);
- if (eRender == dir && NULL != _ptrRenderCollection)
- {
- hr = _ptrRenderCollection->Item(index, &pDevice);
- }
- else if (NULL != _ptrCaptureCollection)
- {
- hr = _ptrCaptureCollection->Item(index, &pDevice);
+ if (eRender == dir && nullptr != _ptrRenderCollection) {
+ hr = _ptrRenderCollection->Item(index, &pDevice);
+ } else if (nullptr != _ptrCaptureCollection) {
+ hr = _ptrCaptureCollection->Item(index, &pDevice);
}
if (FAILED(hr))
@@ -4484,11 +4431,11 @@ int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir, ERole role,
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
HRESULT hr = S_OK;
- IMMDevice *pDevice = NULL;
+ IMMDevice* pDevice = nullptr;
assert(dir == eRender || dir == eCapture);
assert(role == eConsole || role == eCommunications);
- assert(_ptrEnumerator != NULL);
+ assert(_ptrEnumerator != nullptr);
hr = _ptrEnumerator->GetDefaultAudioEndpoint(
dir,
@@ -4523,17 +4470,14 @@ int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir, int index, LPWST
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
HRESULT hr = S_OK;
- IMMDevice *pDevice = NULL;
+ IMMDevice* pDevice = nullptr;
assert(dir == eRender || dir == eCapture);
- if (eRender == dir && NULL != _ptrRenderCollection)
- {
- hr = _ptrRenderCollection->Item(index, &pDevice);
- }
- else if (NULL != _ptrCaptureCollection)
- {
- hr = _ptrCaptureCollection->Item(index, &pDevice);
+ if (eRender == dir && nullptr != _ptrRenderCollection) {
+ hr = _ptrRenderCollection->Item(index, &pDevice);
+ } else if (nullptr != _ptrCaptureCollection) {
+ hr = _ptrCaptureCollection->Item(index, &pDevice);
}
if (FAILED(hr))
@@ -4562,11 +4506,11 @@ int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir, ERole role, L
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
HRESULT hr = S_OK;
- IMMDevice *pDevice = NULL;
+ IMMDevice* pDevice = nullptr;
assert(dir == eRender || dir == eCapture);
assert(role == eConsole || role == eCommunications);
- assert(_ptrEnumerator != NULL);
+ assert(_ptrEnumerator != nullptr);
hr = _ptrEnumerator->GetDefaultAudioEndpoint(
dir,
@@ -4634,12 +4578,11 @@ int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir,
memset(szDeviceID, 0, sizeof(szDeviceID));
rtc::scoped_refptr<IMMDevice> device;
{
- IMMDevice* ptrDevice = NULL;
- hr = collection->Item(i, &ptrDevice);
- if (FAILED(hr) || ptrDevice == NULL)
- {
- _TraceCOMError(hr);
- return -1;
+ IMMDevice* ptrDevice = nullptr;
+ hr = collection->Item(i, &ptrDevice);
+ if (FAILED(hr) || ptrDevice == nullptr) {
+ _TraceCOMError(hr);
+ return -1;
}
device = ptrDevice;
SAFE_RELEASE(ptrDevice);
@@ -4682,20 +4625,18 @@ int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice,
static const WCHAR szDefault[] = L"<Device not available>";
HRESULT hr = E_FAIL;
- IPropertyStore *pProps = NULL;
+ IPropertyStore* pProps = nullptr;
PROPVARIANT varName;
- assert(pszBuffer != NULL);
+ assert(pszBuffer != nullptr);
assert(bufferLen > 0);
- if (pDevice != NULL)
- {
- hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
- if (FAILED(hr))
- {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "IMMDevice::OpenPropertyStore failed, hr = 0x%08X", hr);
- }
+ if (pDevice != nullptr) {
+ hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
+ if (FAILED(hr)) {
+ WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
+ "IMMDevice::OpenPropertyStore failed, hr = 0x%08X", hr);
+ }
}
// Initialize container for property value.
@@ -4727,10 +4668,9 @@ int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice,
"IPropertyStore::GetValue returned unexpected type, hr = 0x%08X", hr);
}
- if (SUCCEEDED(hr) && (varName.pwszVal != NULL))
- {
- // Copy the valid device name to the provided ouput buffer.
- wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
+ if (SUCCEEDED(hr) && (varName.pwszVal != nullptr)) {
+ // Copy the valid device name to the provided ouput buffer.
+ wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
}
else
{
@@ -4755,14 +4695,13 @@ int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice, LPWSTR pszBuffe
static const WCHAR szDefault[] = L"<Device not available>";
HRESULT hr = E_FAIL;
- LPWSTR pwszID = NULL;
+ LPWSTR pwszID = nullptr;
- assert(pszBuffer != NULL);
+ assert(pszBuffer != nullptr);
assert(bufferLen > 0);
- if (pDevice != NULL)
- {
- hr = pDevice->GetId(&pwszID);
+ if (pDevice != nullptr) {
+ hr = pDevice->GetId(&pwszID);
}
if (hr == S_OK)
@@ -4790,7 +4729,7 @@ int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir, ERole role, IMM
HRESULT hr(S_OK);
- assert(_ptrEnumerator != NULL);
+ assert(_ptrEnumerator != nullptr);
hr = _ptrEnumerator->GetDefaultAudioEndpoint(
dir,
@@ -4813,9 +4752,9 @@ int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir, int index, IMMDevi
{
HRESULT hr(S_OK);
- assert(_ptrEnumerator != NULL);
+ assert(_ptrEnumerator != nullptr);
- IMMDeviceCollection *pCollection = NULL;
+ IMMDeviceCollection* pCollection = nullptr;
hr = _ptrEnumerator->EnumAudioEndpoints(
dir,
@@ -4849,14 +4788,14 @@ int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(EDataFlow dataFlow)
{
WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- assert(_ptrEnumerator != NULL);
+ assert(_ptrEnumerator != nullptr);
HRESULT hr = S_OK;
- IMMDeviceCollection *pCollection = NULL;
- IMMDevice *pEndpoint = NULL;
- IPropertyStore *pProps = NULL;
- IAudioEndpointVolume* pEndpointVolume = NULL;
- LPWSTR pwszID = NULL;
+ IMMDeviceCollection* pCollection = nullptr;
+ IMMDevice* pEndpoint = nullptr;
+ IPropertyStore* pProps = nullptr;
+ IAudioEndpointVolume* pEndpointVolume = nullptr;
+ LPWSTR pwszID = nullptr;
// Generate a collection of audio endpoint devices in the system.
// Get states for *all* endpoint devices.
@@ -4941,7 +4880,7 @@ int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(EDataFlow dataFlow)
// Check the hardware volume capabilities.
DWORD dwHwSupportMask = 0;
hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
- NULL, (void**)&pEndpointVolume);
+ nullptr, (void**)&pEndpointVolume);
CONTINUE_ON_ERROR(hr);
hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
CONTINUE_ON_ERROR(hr);
@@ -5004,7 +4943,7 @@ Next:
"Error when logging device information");
}
CoTaskMemFree(pwszID);
- pwszID = NULL;
+ pwszID = nullptr;
PropVariantClear(&varName);
SAFE_RELEASE(pProps);
SAFE_RELEASE(pEndpoint);
@@ -5016,7 +4955,7 @@ Next:
Exit:
_TraceCOMError(hr);
CoTaskMemFree(pwszID);
- pwszID = NULL;
+ pwszID = nullptr;
SAFE_RELEASE(pCollection);
SAFE_RELEASE(pEndpoint);
SAFE_RELEASE(pEndpointVolume);
@@ -5039,13 +4978,8 @@ void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const
// Gets the system's human readable message string for this HRESULT.
// All error message in English by default.
- DWORD messageLength = ::FormatMessageW(dwFlags,
- 0,
- hr,
- dwLangID,
- errorText,
- MAXERRORLENGTH,
- NULL);
+ DWORD messageLength = ::FormatMessageW(dwFlags, 0, hr, dwLangID, errorText,
+ MAXERRORLENGTH, nullptr);
assert(messageLength <= MAXERRORLENGTH);
