OLD | NEW |
| (Empty) |
1 /* | |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license | |
5 * that can be found in the LICENSE file in the root of the source | |
6 * tree. An additional intellectual property rights grant can be found | |
7 * in the file PATENTS. All contributing project authors may | |
8 * be found in the AUTHORS file in the root of the source tree. | |
9 */ | |
10 | |
11 #include "webrtc/base/logging.h" | |
12 #include "webrtc/base/timeutils.h" | |
13 #include "webrtc/modules/audio_device/audio_device_config.h" | |
14 #include "webrtc/modules/audio_device/win/audio_device_wave_win.h" | |
15 | |
16 #include "webrtc/system_wrappers/include/event_wrapper.h" | |
17 #include "webrtc/system_wrappers/include/trace.h" | |
18 | |
19 #include <windows.h> | |
20 #include <objbase.h> // CoTaskMemAlloc, CoTaskMemFree | |
21 #include <strsafe.h> // StringCchCopy(), StringCchCat(), StringCchPrintf() | |
22 #include <assert.h> | |
23 | |
24 // Avoids the need for the Windows 7 SDK | |
25 #ifndef WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE | |
26 #define WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE 0x0010 | |
27 #endif | |
28 | |
29 // Supported in Windows Vista and Windows 7. | |
30 // http://msdn.microsoft.com/en-us/library/dd370819(v=VS.85).aspx | |
31 // Taken from Mmddk.h. | |
32 #define DRV_RESERVED 0x0800 | |
33 #define DRV_QUERYFUNCTIONINSTANCEID (DRV_RESERVED + 17) | |
34 #define DRV_QUERYFUNCTIONINSTANCEIDSIZE (DRV_RESERVED + 18) | |
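// With DRV_RESERVED defined as 0x0800 above, these messages resolve to 0x0811 and 0x0812. | |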
35 | |
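// POW2(A) evaluates to 2^A for A >= 1, e.g. POW2(4) == (2 << 3) == 16. | |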
36 #define POW2(A) (2 << ((A) - 1)) | |
37 | |
38 namespace webrtc { | |
39 | |
40 // ============================================================================ | |
41 // Construction & Destruction | |
42 // ============================================================================ | |
43 | |
44 // ---------------------------------------------------------------------------- | |
45 // AudioDeviceWindowsWave - ctor | |
46 // ---------------------------------------------------------------------------- | |
47 | |
48 AudioDeviceWindowsWave::AudioDeviceWindowsWave(const int32_t id) : | |
49 _ptrAudioBuffer(NULL), | |
50 _critSect(*CriticalSectionWrapper::CreateCriticalSection()), | |
51 _timeEvent(*EventTimerWrapper::Create()), | |
52 _recStartEvent(*EventWrapper::Create()), | |
53 _playStartEvent(*EventWrapper::Create()), | |
54 _hGetCaptureVolumeThread(NULL), | |
55 _hShutdownGetVolumeEvent(NULL), | |
56 _hSetCaptureVolumeThread(NULL), | |
57 _hShutdownSetVolumeEvent(NULL), | |
58 _hSetCaptureVolumeEvent(NULL), | |
59 _critSectCb(*CriticalSectionWrapper::CreateCriticalSection()), | |
60 _id(id), | |
61 _mixerManager(id), | |
62 _usingInputDeviceIndex(false), | |
63 _usingOutputDeviceIndex(false), | |
64 _inputDevice(AudioDeviceModule::kDefaultDevice), | |
65 _outputDevice(AudioDeviceModule::kDefaultDevice), | |
66 _inputDeviceIndex(0), | |
67 _outputDeviceIndex(0), | |
68 _inputDeviceIsSpecified(false), | |
69 _outputDeviceIsSpecified(false), | |
70 _initialized(false), | |
71 _recIsInitialized(false), | |
72 _playIsInitialized(false), | |
73 _recording(false), | |
74 _playing(false), | |
75 _startRec(false), | |
76 _stopRec(false), | |
77 _startPlay(false), | |
78 _stopPlay(false), | |
79 _AGC(false), | |
80 _hWaveIn(NULL), | |
81 _hWaveOut(NULL), | |
82 _recChannels(N_REC_CHANNELS), | |
83 _playChannels(N_PLAY_CHANNELS), | |
84 _recBufCount(0), | |
85 _recPutBackDelay(0), | |
86 _recDelayCount(0), | |
87 _playBufCount(0), | |
88 _prevPlayTime(0), | |
89 _prevRecTime(0), | |
90 _prevTimerCheckTime(0), | |
91 _timesdwBytes(0), | |
92 _timerFaults(0), | |
93 _timerRestartAttempts(0), | |
94 _no_of_msecleft_warnings(0), | |
95 _MAX_minBuffer(65), | |
96 _useHeader(0), | |
97 _dTcheckPlayBufDelay(10), | |
98 _playBufDelay(80), | |
99 _playBufDelayFixed(80), | |
100 _minPlayBufDelay(20), | |
101 _avgCPULoad(0), | |
102 _sndCardPlayDelay(0), | |
103 _sndCardRecDelay(0), | |
104 _plSampOld(0), | |
105 _rcSampOld(0), | |
106 _playBufType(AudioDeviceModule::kAdaptiveBufferSize), | |
107 _recordedBytes(0), | |
108 _playWarning(0), | |
109 _playError(0), | |
110 _recWarning(0), | |
111 _recError(0), | |
112 _newMicLevel(0), | |
113 _minMicVolume(0), | |
114 _maxMicVolume(0) | |
115 { | |
116 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__); | |
117 | |
118 // Initialize the performance counter frequency; set it to 0 if the query fails | |
119 if (!QueryPerformanceFrequency(&_perfFreq)) | |
120 { | |
121 _perfFreq.QuadPart = 0; | |
122 } | |
123 | |
124 _hShutdownGetVolumeEvent = CreateEvent(NULL, FALSE, FALSE, NULL); | |
125 _hShutdownSetVolumeEvent = CreateEvent(NULL, FALSE, FALSE, NULL); | |
126 _hSetCaptureVolumeEvent = CreateEvent(NULL, FALSE, FALSE, NULL); | |
127 } | |
128 | |
129 // ---------------------------------------------------------------------------- | |
130 // AudioDeviceWindowsWave - dtor | |
131 // ---------------------------------------------------------------------------- | |
132 | |
133 AudioDeviceWindowsWave::~AudioDeviceWindowsWave() | |
134 { | |
135 WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", __FUNCTION__); | |
136 | |
137 Terminate(); | |
138 | |
139 delete &_recStartEvent; | |
140 delete &_playStartEvent; | |
141 delete &_timeEvent; | |
142 delete &_critSect; | |
143 delete &_critSectCb; | |
144 | |
145 if (NULL != _hShutdownGetVolumeEvent) | |
146 { | |
147 CloseHandle(_hShutdownGetVolumeEvent); | |
148 _hShutdownGetVolumeEvent = NULL; | |
149 } | |
150 | |
151 if (NULL != _hShutdownSetVolumeEvent) | |
152 { | |
153 CloseHandle(_hShutdownSetVolumeEvent); | |
154 _hShutdownSetVolumeEvent = NULL; | |
155 } | |
156 | |
157 if (NULL != _hSetCaptureVolumeEvent) | |
158 { | |
159 CloseHandle(_hSetCaptureVolumeEvent); | |
160 _hSetCaptureVolumeEvent = NULL; | |
161 } | |
162 } | |
163 | |
164 // ============================================================================ | |
165 // API | |
166 // ============================================================================ | |
167 | |
168 // ---------------------------------------------------------------------------- | |
169 // AttachAudioBuffer | |
170 // ---------------------------------------------------------------------------- | |
171 | |
172 void AudioDeviceWindowsWave::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) | |
173 { | |
174 | |
175 CriticalSectionScoped lock(&_critSect); | |
176 | |
177 _ptrAudioBuffer = audioBuffer; | |
178 | |
179 // inform the AudioBuffer about default settings for this implementation | |
180 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); | |
181 _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); | |
182 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS); | |
183 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS); | |
184 } | |
185 | |
186 // ---------------------------------------------------------------------------- | |
187 // ActiveAudioLayer | |
188 // ---------------------------------------------------------------------------- | |
189 | |
190 int32_t AudioDeviceWindowsWave::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const | |
191 { | |
192 audioLayer = AudioDeviceModule::kWindowsWaveAudio; | |
193 return 0; | |
194 } | |
195 | |
196 // ---------------------------------------------------------------------------- | |
197 // Init | |
198 // ---------------------------------------------------------------------------- | |
199 | |
200 AudioDeviceGeneric::InitStatus AudioDeviceWindowsWave::Init() { | |
201 CriticalSectionScoped lock(&_critSect); | |
202 | |
203 if (_initialized) { | |
204 return InitStatus::OK; | |
205 } | |
206 | |
207 const uint32_t nowTime(rtc::TimeMillis()); | |
208 | |
209 _recordedBytes = 0; | |
210 _prevRecByteCheckTime = nowTime; | |
211 _prevRecTime = nowTime; | |
212 _prevPlayTime = nowTime; | |
213 _prevTimerCheckTime = nowTime; | |
214 | |
215 _playWarning = 0; | |
216 _playError = 0; | |
217 _recWarning = 0; | |
218 _recError = 0; | |
219 | |
220 _mixerManager.EnumerateAll(); | |
221 | |
222 if (_ptrThread) { | |
223 // thread is already created and active | |
224 return InitStatus::OK; | |
225 } | |
226 | |
227 const char* threadName = "webrtc_audio_module_thread"; | |
228 _ptrThread.reset(new rtc::PlatformThread(ThreadFunc, this, threadName)); | |
229 _ptrThread->Start(); | |
230 _ptrThread->SetPriority(rtc::kRealtimePriority); | |
231 | |
232 const bool periodic(true); | |
233 if (!_timeEvent.StartTimer(periodic, TIMER_PERIOD_MS)) { | |
234 LOG(LS_ERROR) << "failed to start the timer event"; | |
235 _ptrThread->Stop(); | |
236 _ptrThread.reset(); | |
237 return InitStatus::OTHER_ERROR; | |
238 } | |
239 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
240 "periodic timer (dT=%d) is now active", TIMER_PERIOD_MS); | |
241 | |
242 _hGetCaptureVolumeThread = | |
243 CreateThread(NULL, 0, GetCaptureVolumeThread, this, 0, NULL); | |
244 if (_hGetCaptureVolumeThread == NULL) { | |
245 LOG(LS_ERROR) << " failed to create the volume getter thread"; | |
246 return InitStatus::OTHER_ERROR; | |
247 } | |
248 | |
249 SetThreadPriority(_hGetCaptureVolumeThread, THREAD_PRIORITY_NORMAL); | |
250 | |
251 _hSetCaptureVolumeThread = | |
252 CreateThread(NULL, 0, SetCaptureVolumeThread, this, 0, NULL); | |
253 if (_hSetCaptureVolumeThread == NULL) { | |
254 LOG(LS_ERROR) << " failed to create the volume setter thread"; | |
255 return InitStatus::OTHER_ERROR; | |
256 } | |
257 | |
258 SetThreadPriority(_hSetCaptureVolumeThread, THREAD_PRIORITY_NORMAL); | |
259 | |
260 _initialized = true; | |
261 | |
262 return InitStatus::OK; | |
263 } | |
264 | |
265 // ---------------------------------------------------------------------------- | |
266 // Terminate | |
267 // ---------------------------------------------------------------------------- | |
268 | |
269 int32_t AudioDeviceWindowsWave::Terminate() | |
270 { | |
271 | |
272 if (!_initialized) | |
273 { | |
274 return 0; | |
275 } | |
276 | |
277 _critSect.Enter(); | |
278 | |
279 _mixerManager.Close(); | |
280 | |
281 if (_ptrThread) | |
282 { | |
283 rtc::PlatformThread* tmpThread = _ptrThread.release(); | |
284 _critSect.Leave(); | |
285 | |
286 _timeEvent.Set(); | |
287 | |
288 tmpThread->Stop(); | |
289 delete tmpThread; | |
290 } | |
291 else | |
292 { | |
293 _critSect.Leave(); | |
294 } | |
295 | |
296 _critSect.Enter(); | |
297 SetEvent(_hShutdownGetVolumeEvent); | |
298 _critSect.Leave(); | |
299 int32_t ret = WaitForSingleObject(_hGetCaptureVolumeThread, 2000); | |
300 if (ret != WAIT_OBJECT_0) | |
301 { | |
302 // the thread did not stop as it should | |
303 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
304 " failed to close down volume getter thread"); | |
305 CloseHandle(_hGetCaptureVolumeThread); | |
306 _hGetCaptureVolumeThread = NULL; | |
307 return -1; | |
308 } | |
309 _critSect.Enter(); | |
310 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
311 " volume getter thread is now closed"); | |
312 | |
313 SetEvent(_hShutdownSetVolumeEvent); | |
314 _critSect.Leave(); | |
315 ret = WaitForSingleObject(_hSetCaptureVolumeThread, 2000); | |
316 if (ret != WAIT_OBJECT_0) | |
317 { | |
318 // the thread did not stop as it should | |
319 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, | |
320 " failed to close down volume setter thread"); | |
321 CloseHandle(_hSetCaptureVolumeThread); | |
322 _hSetCaptureVolumeThread = NULL; | |
323 return -1; | |
324 } | |
325 _critSect.Enter(); | |
326 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, | |
327 " volume setter thread is now closed"); | |
328 | |
329 CloseHandle(_hGetCaptureVolumeThread); | |
330 _hGetCaptureVolumeThread = NULL; | |
331 | |
332 CloseHandle(_hSetCaptureVolumeThread); | |
333 _hSetCaptureVolumeThread = NULL; | |
334 | |
335 _critSect.Leave(); | |
336 | |
337 _timeEvent.StopTimer(); | |
338 | |
339 _initialized = false; | |
340 _outputDeviceIsSpecified = false; | |
341 _inputDeviceIsSpecified = false; | |
342 | |
343 return 0; | |
344 } | |
345 | |
346 | |
347 DWORD WINAPI AudioDeviceWindowsWave::GetCaptureVolumeThread(LPVOID context) | |
348 { | |
349 return(((AudioDeviceWindowsWave*)context)->DoGetCaptureVolumeThread()); | |
350 } | |
351 | |
352 DWORD WINAPI AudioDeviceWindowsWave::SetCaptureVolumeThread(LPVOID context) | |
353 { | |
354 return(((AudioDeviceWindowsWave*)context)->DoSetCaptureVolumeThread()); | |
355 } | |
356 | |
357 DWORD AudioDeviceWindowsWave::DoGetCaptureVolumeThread() | |
358 { | |
359 HANDLE waitObject = _hShutdownGetVolumeEvent; | |
360 | |
361 while (1) | |
362 { | |
363 DWORD waitResult = WaitForSingleObject(waitObject, | |
364 GET_MIC_VOLUME_INTERVAL_MS); | |
365 switch (waitResult) | |
366 { | |
367 case WAIT_OBJECT_0: // _hShutdownGetVolumeEvent | |
368 return 0; | |
369 case WAIT_TIMEOUT: // timeout notification | |
370 break; | |
371 default: // unexpected error | |
372 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
373 " unknown wait termination on get volume thread"); | |
374 return 1; | |
375 } | |
376 | |
377 if (AGC()) | |
378 { | |
379 uint32_t currentMicLevel = 0; | |
380 if (MicrophoneVolume(currentMicLevel) == 0) | |
381 { | |
382 // This doesn't set the system volume, just stores it. | |
383 _critSect.Enter(); | |
384 if (_ptrAudioBuffer) | |
385 { | |
386 _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel); | |
387 } | |
388 _critSect.Leave(); | |
389 } | |
390 } | |
391 } | |
392 } | |
393 | |
394 DWORD AudioDeviceWindowsWave::DoSetCaptureVolumeThread() | |
395 { | |
396 HANDLE waitArray[2] = {_hShutdownSetVolumeEvent, _hSetCaptureVolumeEvent}; | |
397 | |
398 while (1) | |
399 { | |
400 DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, INFINITE); | |
401 switch (waitResult) | |
402 { | |
403 case WAIT_OBJECT_0: // _hShutdownSetVolumeEvent | |
404 return 0; | |
405 case WAIT_OBJECT_0 + 1: // _hSetCaptureVolumeEvent | |
406 break; | |
407 default: // unexpected error | |
408 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
409 " unknown wait termination on set volume thread"); | |
410 return 1; | |
411 } | |
412 | |
413 _critSect.Enter(); | |
414 uint32_t newMicLevel = _newMicLevel; | |
415 _critSect.Leave(); | |
416 | |
417 if (SetMicrophoneVolume(newMicLevel) == -1) | |
418 { | |
419 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
420 " the required modification of the microphone volume failed"); | |
421 } | |
422 } | |
423 return 0; | |
424 } | |
425 | |
426 // ---------------------------------------------------------------------------- | |
427 // Initialized | |
428 // ---------------------------------------------------------------------------- | |
429 | |
430 bool AudioDeviceWindowsWave::Initialized() const | |
431 { | |
432 return (_initialized); | |
433 } | |
434 | |
435 // ---------------------------------------------------------------------------- | |
436 // InitSpeaker | |
437 // ---------------------------------------------------------------------------- | |
438 | |
439 int32_t AudioDeviceWindowsWave::InitSpeaker() | |
440 { | |
441 | |
442 CriticalSectionScoped lock(&_critSect); | |
443 | |
444 if (_playing) | |
445 { | |
446 return -1; | |
447 } | |
448 | |
449 if (_mixerManager.EnumerateSpeakers() == -1) | |
450 { | |
451 // failed to locate any valid/controllable speaker | |
452 return -1; | |
453 } | |
454 | |
455 if (IsUsingOutputDeviceIndex()) | |
456 { | |
457 if (_mixerManager.OpenSpeaker(OutputDeviceIndex()) == -1) | |
458 { | |
459 return -1; | |
460 } | |
461 } | |
462 else | |
463 { | |
464 if (_mixerManager.OpenSpeaker(OutputDevice()) == -1) | |
465 { | |
466 return -1; | |
467 } | |
468 } | |
469 | |
470 return 0; | |
471 } | |
472 | |
473 // ---------------------------------------------------------------------------- | |
474 // InitMicrophone | |
475 // ---------------------------------------------------------------------------- | |
476 | |
477 int32_t AudioDeviceWindowsWave::InitMicrophone() | |
478 { | |
479 | |
480 CriticalSectionScoped lock(&_critSect); | |
481 | |
482 if (_recording) | |
483 { | |
484 return -1; | |
485 } | |
486 | |
487 if (_mixerManager.EnumerateMicrophones() == -1) | |
488 { | |
489 // failed to locate any valid/controllable microphone | |
490 return -1; | |
491 } | |
492 | |
493 if (IsUsingInputDeviceIndex()) | |
494 { | |
495 if (_mixerManager.OpenMicrophone(InputDeviceIndex()) == -1) | |
496 { | |
497 return -1; | |
498 } | |
499 } | |
500 else | |
501 { | |
502 if (_mixerManager.OpenMicrophone(InputDevice()) == -1) | |
503 { | |
504 return -1; | |
505 } | |
506 } | |
507 | |
508 uint32_t maxVol = 0; | |
509 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) | |
510 { | |
511 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
512 " unable to retrieve max microphone volume"); | |
513 } | |
514 _maxMicVolume = maxVol; | |
515 | |
516 uint32_t minVol = 0; | |
517 if (_mixerManager.MinMicrophoneVolume(minVol) == -1) | |
518 { | |
519 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, | |
520 " unable to retrieve min microphone volume"); | |
521 } | |
522 _minMicVolume = minVol; | |
523 | |
524 return 0; | |
525 } | |
526 | |
527 // ---------------------------------------------------------------------------- | |
528 // SpeakerIsInitialized | |
529 // ---------------------------------------------------------------------------- | |
530 | |
531 bool AudioDeviceWindowsWave::SpeakerIsInitialized() const | |
532 { | |
533 return (_mixerManager.SpeakerIsInitialized()); | |
534 } | |
535 | |
536 // ---------------------------------------------------------------------------- | |
537 // MicrophoneIsInitialized | |
538 // ---------------------------------------------------------------------------- | |
539 | |
540 bool AudioDeviceWindowsWave::MicrophoneIsInitialized() const | |
541 { | |
542 return (_mixerManager.MicrophoneIsInitialized()); | |
543 } | |
544 | |
545 // ---------------------------------------------------------------------------- | |
546 // SpeakerVolumeIsAvailable | |
547 // ---------------------------------------------------------------------------- | |
548 | |
549 int32_t AudioDeviceWindowsWave::SpeakerVolumeIsAvailable(bool& available) | |
550 { | |
551 | |
552 bool isAvailable(false); | |
553 | |
554 // Enumerate all available speakers and make an attempt to open up the | |
555 // output mixer corresponding to the currently selected output device. | |
556 // | |
557 if (InitSpeaker() == -1) | |
558 { | |
559 // failed to find a valid speaker | |
560 available = false; | |
561 return 0; | |
562 } | |
563 | |
564 // Check if the selected speaker has a volume control | |
565 // | |
566 _mixerManager.SpeakerVolumeIsAvailable(isAvailable); | |
567 available = isAvailable; | |
568 | |
569 // Close the initialized output mixer | |
570 // | |
571 _mixerManager.CloseSpeaker(); | |
572 | |
573 return 0; | |
574 } | |
575 | |
576 // ---------------------------------------------------------------------------- | |
577 // SetSpeakerVolume | |
578 // ---------------------------------------------------------------------------- | |
579 | |
580 int32_t AudioDeviceWindowsWave::SetSpeakerVolume(uint32_t volume) | |
581 { | |
582 | |
583 return (_mixerManager.SetSpeakerVolume(volume)); | |
584 } | |
585 | |
586 // ---------------------------------------------------------------------------- | |
587 // SpeakerVolume | |
588 // ---------------------------------------------------------------------------- | |
589 | |
590 int32_t AudioDeviceWindowsWave::SpeakerVolume(uint32_t& volume) const | |
591 { | |
592 | |
593 uint32_t level(0); | |
594 | |
595 if (_mixerManager.SpeakerVolume(level) == -1) | |
596 { | |
597 return -1; | |
598 } | |
599 | |
600 volume = level; | |
601 return 0; | |
602 } | |
603 | |
604 // ---------------------------------------------------------------------------- | |
605 // SetWaveOutVolume | |
606 // | |
607 // The low-order word contains the left-channel volume setting, and the | |
608 // high-order word contains the right-channel setting. | |
609 // A value of 0xFFFF represents full volume, and a value of 0x0000 is silence. | |
610 // | |
611 // If a device does not support both left and right volume control, | |
612 // the low-order word of dwVolume specifies the volume level, | |
613 // and the high-order word is ignored. | |
614 // | |
615 // Most devices do not support the full 16 bits of volume-level control | |
616 // and will not use the least-significant bits of the requested volume setting. | |
617 // For example, if a device supports 4 bits of volume control, the values | |
618 // 0x4000, 0x4FFF, and 0x43BE will all be truncated to 0x4000. | |
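// For example, volumeLeft = 0xFFFF and volumeRight = 0x8000 are packed below as | |
// dwVolume = 0x8000FFFF (right channel in the high-order word). | |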
619 // ---------------------------------------------------------------------------- | |
620 | |
621 int32_t AudioDeviceWindowsWave::SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight) | |
622 { | |
623 | |
624 MMRESULT res(0); | |
625 WAVEOUTCAPS caps; | |
626 | |
627 CriticalSectionScoped lock(&_critSect); | |
628 | |
629 if (_hWaveOut == NULL) | |
630 { | |
631 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "no open playout device exists => using default"); | |
632 } | |
633 | |
634 // To determine whether the device supports volume control on both | |
635 // the left and right channels, use the WAVECAPS_LRVOLUME flag. | |
636 // | |
637 res = waveOutGetDevCaps((UINT_PTR)_hWaveOut, &caps, sizeof(WAVEOUTCAPS)); | |
638 if (MMSYSERR_NOERROR != res) | |
639 { | |
640 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetDevCaps() failed (err=%d)", res); | |
641 TraceWaveOutError(res); | |
642 } | |
643 if (!(caps.dwSupport & WAVECAPS_VOLUME)) | |
644 { | |
645 // this device does not support volume control using the waveOutSetVolume API | |
646 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device does not support volume control using the Wave API"); | |
647 return -1; | |
648 } | |
649 if (!(caps.dwSupport & WAVECAPS_LRVOLUME)) | |
650 { | |
651 // high-order word (right channel) is ignored | |
652 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "device does not support volume control on both channels"); | |
653 } | |
654 | |
655 DWORD dwVolume(0x00000000); | |
656 dwVolume = (DWORD)(((volumeRight & 0xFFFF) << 16) | (volumeLeft & 0xFFFF)); | |
657 | |
658 res = waveOutSetVolume(_hWaveOut, dwVolume); | |
659 if (MMSYSERR_NOERROR != res) | |
660 { | |
661 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveOutSetVolume() failed (err=%d)", res); | |
662 TraceWaveOutError(res); | |
663 return -1; | |
664 } | |
665 | |
666 return 0; | |
667 } | |
668 | |
669 // ---------------------------------------------------------------------------- | |
670 // WaveOutVolume | |
671 // | |
672 // The low-order word of this location contains the left-channel volume setting, | |
673 // and the high-order word contains the right-channel setting. | |
674 // A value of 0xFFFF (65535) represents full volume, and a value of 0x0000 | |
675 // is silence. | |
676 // | |
677 // If a device does not support both left and right volume control, | |
678 // the low-order word of the specified location contains the mono volume level. | |
679 // | |
680 // The full 16-bit setting(s) set with the waveOutSetVolume function is returned, | |
681 // regardless of whether the device supports the full 16 bits of volume-level | |
682 // control. | |
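// For example, a returned dwVolume of 0x8000FFFF corresponds to volumeLeft = 0xFFFF | |
// (low-order word) and volumeRight = 0x8000 (high-order word). | |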
683 // ---------------------------------------------------------------------------- | |
684 | |
685 int32_t AudioDeviceWindowsWave::WaveOutVolume(uint16_t& volumeLeft, uint16_t& volumeRight) const | |
686 { | |
687 | |
688 MMRESULT res(0); | |
689 WAVEOUTCAPS caps; | |
690 | |
691 CriticalSectionScoped lock(&_critSect); | |
692 | |
693 if (_hWaveOut == NULL) | |
694 { | |
695 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "no open playout device exists => using default"); | |
696 } | |
697 | |
698 // To determine whether the device supports volume control on both | |
699 // the left and right channels, use the WAVECAPS_LRVOLUME flag. | |
700 // | |
701 res = waveOutGetDevCaps((UINT_PTR)_hWaveOut, &caps, sizeof(WAVEOUTCAPS)); | |
702 if (MMSYSERR_NOERROR != res) | |
703 { | |
704 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetDevCaps() failed (err=%d)", res); | |
705 TraceWaveOutError(res); | |
706 } | |
707 if (!(caps.dwSupport & WAVECAPS_VOLUME)) | |
708 { | |
709 // this device does not support volume control using the waveOutSetVolume API | |
710 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device does not support volume control using the Wave API"); | |
711 return -1; | |
712 } | |
713 if (!(caps.dwSupport & WAVECAPS_LRVOLUME)) | |
714 { | |
715 // high-order word (right channel) is ignored | |
716 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "device does not support volume control on both channels"); | |
717 } | |
718 | |
719 DWORD dwVolume(0x00000000); | |
720 | |
721 res = waveOutGetVolume(_hWaveOut, &dwVolume); | |
722 if (MMSYSERR_NOERROR != res) | |
723 { | |
724 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveOutGetVolume() failed (err=%d)", res); | |
725 TraceWaveOutError(res); | |
726 return -1; | |
727 } | |
728 | |
729 WORD wVolumeLeft = LOWORD(dwVolume); | |
730 WORD wVolumeRight = HIWORD(dwVolume); | |
731 | |
732 volumeLeft = static_cast<uint16_t> (wVolumeLeft); | |
733 volumeRight = static_cast<uint16_t> (wVolumeRight); | |
734 | |
735 return 0; | |
736 } | |
737 | |
738 // ---------------------------------------------------------------------------- | |
739 // MaxSpeakerVolume | |
740 // ---------------------------------------------------------------------------- | |
741 | |
742 int32_t AudioDeviceWindowsWave::MaxSpeakerVolume(uint32_t& maxVolume) const | |
743 { | |
744 | |
745 uint32_t maxVol(0); | |
746 | |
747 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) | |
748 { | |
749 return -1; | |
750 } | |
751 | |
752 maxVolume = maxVol; | |
753 return 0; | |
754 } | |
755 | |
756 // ---------------------------------------------------------------------------- | |
757 // MinSpeakerVolume | |
758 // ---------------------------------------------------------------------------- | |
759 | |
760 int32_t AudioDeviceWindowsWave::MinSpeakerVolume(uint32_t& minVolume) const | |
761 { | |
762 | |
763 uint32_t minVol(0); | |
764 | |
765 if (_mixerManager.MinSpeakerVolume(minVol) == -1) | |
766 { | |
767 return -1; | |
768 } | |
769 | |
770 minVolume = minVol; | |
771 return 0; | |
772 } | |
773 | |
774 // ---------------------------------------------------------------------------- | |
775 // SpeakerVolumeStepSize | |
776 // ---------------------------------------------------------------------------- | |
777 | |
778 int32_t AudioDeviceWindowsWave::SpeakerVolumeStepSize(uint16_t& stepSize) const | |
779 { | |
780 | |
781 uint16_t delta(0); | |
782 | |
783 if (_mixerManager.SpeakerVolumeStepSize(delta) == -1) | |
784 { | |
785 return -1; | |
786 } | |
787 | |
788 stepSize = delta; | |
789 return 0; | |
790 } | |
791 | |
792 // ---------------------------------------------------------------------------- | |
793 // SpeakerMuteIsAvailable | |
794 // ---------------------------------------------------------------------------- | |
795 | |
796 int32_t AudioDeviceWindowsWave::SpeakerMuteIsAvailable(bool& available) | |
797 { | |
798 | |
799 bool isAvailable(false); | |
800 | |
801 // Enumerate all available speakers and make an attempt to open up the | |
802 // output mixer corresponding to the currently selected output device. | |
803 // | |
804 if (InitSpeaker() == -1) | |
805 { | |
806 // If we end up here it means that the selected speaker has no volume | |
807 // control, hence it is safe to state that there is no mute control | |
808 // already at this stage. | |
809 available = false; | |
810 return 0; | |
811 } | |
812 | |
813 // Check if the selected speaker has a mute control | |
814 // | |
815 _mixerManager.SpeakerMuteIsAvailable(isAvailable); | |
816 available = isAvailable; | |
817 | |
818 // Close the initialized output mixer | |
819 // | |
820 _mixerManager.CloseSpeaker(); | |
821 | |
822 return 0; | |
823 } | |
824 | |
825 // ---------------------------------------------------------------------------- | |
826 // SetSpeakerMute | |
827 // ---------------------------------------------------------------------------- | |
828 | |
829 int32_t AudioDeviceWindowsWave::SetSpeakerMute(bool enable) | |
830 { | |
831 return (_mixerManager.SetSpeakerMute(enable)); | |
832 } | |
833 | |
834 // ---------------------------------------------------------------------------- | |
835 // SpeakerMute | |
836 // ---------------------------------------------------------------------------- | |
837 | |
838 int32_t AudioDeviceWindowsWave::SpeakerMute(bool& enabled) const | |
839 { | |
840 | |
841 bool muted(0); | |
842 | |
843 if (_mixerManager.SpeakerMute(muted) == -1) | |
844 { | |
845 return -1; | |
846 } | |
847 | |
848 enabled = muted; | |
849 return 0; | |
850 } | |
851 | |
852 // ---------------------------------------------------------------------------- | |
853 // MicrophoneMuteIsAvailable | |
854 // ---------------------------------------------------------------------------- | |
855 | |
856 int32_t AudioDeviceWindowsWave::MicrophoneMuteIsAvailable(bool& available) | |
857 { | |
858 | |
859 bool isAvailable(false); | |
860 | |
861 // Enumerate all available microphones and make an attempt to open up the | |
862 // input mixer corresponding to the currently selected input device. | |
863 // | |
864 if (InitMicrophone() == -1) | |
865 { | |
866 // If we end up here it means that the selected microphone has no volume | |
867 // control, hence it is safe to state that there is no mute control | |
868 // already at this stage. | |
869 available = false; | |
870 return 0; | |
871 } | |
872 | |
873 // Check if the selected microphone has a mute control | |
874 // | |
875 _mixerManager.MicrophoneMuteIsAvailable(isAvailable); | |
876 available = isAvailable; | |
877 | |
878 // Close the initialized input mixer | |
879 // | |
880 _mixerManager.CloseMicrophone(); | |
881 | |
882 return 0; | |
883 } | |
884 | |
885 // ---------------------------------------------------------------------------- | |
886 // SetMicrophoneMute | |
887 // ---------------------------------------------------------------------------- | |
888 | |
889 int32_t AudioDeviceWindowsWave::SetMicrophoneMute(bool enable) | |
890 { | |
891 return (_mixerManager.SetMicrophoneMute(enable)); | |
892 } | |
893 | |
894 // ---------------------------------------------------------------------------- | |
895 // MicrophoneMute | |
896 // ---------------------------------------------------------------------------- | |
897 | |
898 int32_t AudioDeviceWindowsWave::MicrophoneMute(bool& enabled) const | |
899 { | |
900 | |
901 bool muted(0); | |
902 | |
903 if (_mixerManager.MicrophoneMute(muted) == -1) | |
904 { | |
905 return -1; | |
906 } | |
907 | |
908 enabled = muted; | |
909 return 0; | |
910 } | |
911 | |
912 // ---------------------------------------------------------------------------- | |
913 // MicrophoneBoostIsAvailable | |
914 // ---------------------------------------------------------------------------- | |
915 | |
916 int32_t AudioDeviceWindowsWave::MicrophoneBoostIsAvailable(bool& available) | |
917 { | |
918 | |
919 bool isAvailable(false); | |
920 | |
921 // Enumerate all available microphones and make an attempt to open up the | |
922 // input mixer corresponding to the currently selected input device. | |
923 // | |
924 if (InitMicrophone() == -1) | |
925 { | |
926 // If we end up here it means that the selected microphone has no volume | |
927 // control, hence it is safe to state that there is no boost control | |
928 // already at this stage. | |
929 available = false; | |
930 return 0; | |
931 } | |
932 | |
933 // Check if the selected microphone has a boost control | |
934 // | |
935 _mixerManager.MicrophoneBoostIsAvailable(isAvailable); | |
936 available = isAvailable; | |
937 | |
938 // Close the initialized input mixer | |
939 // | |
940 _mixerManager.CloseMicrophone(); | |
941 | |
942 return 0; | |
943 } | |
944 | |
945 // ---------------------------------------------------------------------------- | |
946 // SetMicrophoneBoost | |
947 // ---------------------------------------------------------------------------- | |
948 | |
949 int32_t AudioDeviceWindowsWave::SetMicrophoneBoost(bool enable) | |
950 { | |
951 | |
952 return (_mixerManager.SetMicrophoneBoost(enable)); | |
953 } | |
954 | |
955 // ---------------------------------------------------------------------------- | |
956 // MicrophoneBoost | |
957 // ---------------------------------------------------------------------------- | |
958 | |
959 int32_t AudioDeviceWindowsWave::MicrophoneBoost(bool& enabled) const | |
960 { | |
961 | |
962 bool onOff(0); | |
963 | |
964 if (_mixerManager.MicrophoneBoost(onOff) == -1) | |
965 { | |
966 return -1; | |
967 } | |
968 | |
969 enabled = onOff; | |
970 return 0; | |
971 } | |
972 | |
973 // ---------------------------------------------------------------------------- | |
974 // StereoRecordingIsAvailable | |
975 // ---------------------------------------------------------------------------- | |
976 | |
977 int32_t AudioDeviceWindowsWave::StereoRecordingIsAvailable(bool& available) | |
978 { | |
979 available = true; | |
980 return 0; | |
981 } | |
982 | |
983 // ---------------------------------------------------------------------------- | |
984 // SetStereoRecording | |
985 // ---------------------------------------------------------------------------- | |
986 | |
987 int32_t AudioDeviceWindowsWave::SetStereoRecording(bool enable) | |
988 { | |
989 | |
990 if (enable) | |
991 _recChannels = 2; | |
992 else | |
993 _recChannels = 1; | |
994 | |
995 return 0; | |
996 } | |
997 | |
998 // ---------------------------------------------------------------------------- | |
999 // StereoRecording | |
1000 // ---------------------------------------------------------------------------- | |
1001 | |
1002 int32_t AudioDeviceWindowsWave::StereoRecording(bool& enabled) const | |
1003 { | |
1004 | |
1005 if (_recChannels == 2) | |
1006 enabled = true; | |
1007 else | |
1008 enabled = false; | |
1009 | |
1010 return 0; | |
1011 } | |
1012 | |
1013 // ---------------------------------------------------------------------------- | |
1014 // StereoPlayoutIsAvailable | |
1015 // ---------------------------------------------------------------------------- | |
1016 | |
1017 int32_t AudioDeviceWindowsWave::StereoPlayoutIsAvailable(bool& available) | |
1018 { | |
1019 available = true; | |
1020 return 0; | |
1021 } | |
1022 | |
1023 // ---------------------------------------------------------------------------- | |
1024 // SetStereoPlayout | |
1025 // | |
1026 // Specifies the number of output channels. | |
1027 // | |
1028 // NOTE - the setting will only have an effect after InitPlayout has | |
1029 // been called. | |
1030 // | |
1031 // 16-bit mono: | |
1032 // | |
1033 // Each sample is 2 bytes. Sample 1 is followed by samples 2, 3, 4, and so on. | |
1034 // For each sample, the first byte is the low-order byte of channel 0 and the | |
1035 // second byte is the high-order byte of channel 0. | |
1036 // | |
1037 // 16-bit stereo: | |
1038 // | |
1039 // Each sample is 4 bytes. Sample 1 is followed by samples 2, 3, 4, and so on. | |
1040 // For each sample, the first byte is the low-order byte of channel 0 (left channel); | |
1041 // the second byte is the high-order byte of channel 0; the third byte is the | |
1042 // low-order byte of channel 1 (right channel); and the fourth byte is the | |
1043 // high-order byte of channel 1. | |
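// For example, two consecutive stereo samples L0/R0 and L1/R1 are laid out as the | |
// byte sequence: L0-low, L0-high, R0-low, R0-high, L1-low, L1-high, R1-low, R1-high. | |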
1044 // ---------------------------------------------------------------------------- | |
1045 | |
1046 int32_t AudioDeviceWindowsWave::SetStereoPlayout(bool enable) | |
1047 { | |
1048 | |
1049 if (enable) | |
1050 _playChannels = 2; | |
1051 else | |
1052 _playChannels = 1; | |
1053 | |
1054 return 0; | |
1055 } | |
1056 | |
1057 // ---------------------------------------------------------------------------- | |
1058 // StereoPlayout | |
1059 // ---------------------------------------------------------------------------- | |
1060 | |
1061 int32_t AudioDeviceWindowsWave::StereoPlayout(bool& enabled) const | |
1062 { | |
1063 | |
1064 if (_playChannels == 2) | |
1065 enabled = true; | |
1066 else | |
1067 enabled = false; | |
1068 | |
1069 return 0; | |
1070 } | |
1071 | |
1072 // ---------------------------------------------------------------------------- | |
1073 // SetAGC | |
1074 // ---------------------------------------------------------------------------- | |
1075 | |
1076 int32_t AudioDeviceWindowsWave::SetAGC(bool enable) | |
1077 { | |
1078 | |
1079 _AGC = enable; | |
1080 | |
1081 return 0; | |
1082 } | |
1083 | |
1084 // ---------------------------------------------------------------------------- | |
1085 // AGC | |
1086 // ---------------------------------------------------------------------------- | |
1087 | |
1088 bool AudioDeviceWindowsWave::AGC() const | |
1089 { | |
1090 return _AGC; | |
1091 } | |
1092 | |
1093 // ---------------------------------------------------------------------------- | |
1094 // MicrophoneVolumeIsAvailable | |
1095 // ---------------------------------------------------------------------------- | |
1096 | |
1097 int32_t AudioDeviceWindowsWave::MicrophoneVolumeIsAvailable(bool& available) | |
1098 { | |
1099 | |
1100 bool isAvailable(false); | |
1101 | |
1102 // Enumerate all available microphones and make an attempt to open up the | |
1103 // input mixer corresponding to the currently selected input device. | |
1104 // | |
1105 if (InitMicrophone() == -1) | |
1106 { | |
1107 // Failed to find valid microphone | |
1108 available = false; | |
1109 return 0; | |
1110 } | |
1111 | |
1112 // Check if the selected microphone has a volume control | |
1113 // | |
1114 _mixerManager.MicrophoneVolumeIsAvailable(isAvailable); | |
1115 available = isAvailable; | |
1116 | |
1117 // Close the initialized input mixer | |
1118 // | |
1119 _mixerManager.CloseMicrophone(); | |
1120 | |
1121 return 0; | |
1122 } | |
1123 | |
1124 // ---------------------------------------------------------------------------- | |
1125 // SetMicrophoneVolume | |
1126 // ---------------------------------------------------------------------------- | |
1127 | |
1128 int32_t AudioDeviceWindowsWave::SetMicrophoneVolume(uint32_t volume) | |
1129 { | |
1130 return (_mixerManager.SetMicrophoneVolume(volume)); | |
1131 } | |
1132 | |
1133 // ---------------------------------------------------------------------------- | |
1134 // MicrophoneVolume | |
1135 // ---------------------------------------------------------------------------- | |
1136 | |
1137 int32_t AudioDeviceWindowsWave::MicrophoneVolume(uint32_t& volume) const | |
1138 { | |
1139 uint32_t level(0); | |
1140 | |
1141 if (_mixerManager.MicrophoneVolume(level) == -1) | |
1142 { | |
1143 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "failed to retrieve current microphone level"); | |
1144 return -1; | |
1145 } | |
1146 | |
1147 volume = level; | |
1148 return 0; | |
1149 } | |
1150 | |
1151 // ---------------------------------------------------------------------------- | |
1152 // MaxMicrophoneVolume | |
1153 // ---------------------------------------------------------------------------- | |
1154 | |
1155 int32_t AudioDeviceWindowsWave::MaxMicrophoneVolume(uint32_t& maxVolume) const | |
1156 { | |
1157 // _maxMicVolume can be zero in AudioMixerManager::MaxMicrophoneVolume(): | |
1158 // (1) API GetLineControl() returns failure at querying the max Mic level. | |
1159 // (2) API GetLineControl() returns maxVolume as zero in rare cases. | |
1160 // Both cases show we don't have access to the mixer controls. | |
1161 // We return -1 here to indicate that. | |
1162 if (_maxMicVolume == 0) | |
1163 { | |
1164 return -1; | |
1165 } | |
1166 | |
1167 maxVolume = _maxMicVolume; | |
1168 return 0; | |
1169 } | |
1170 | |
1171 // ---------------------------------------------------------------------------- | |
1172 // MinMicrophoneVolume | |
1173 // ---------------------------------------------------------------------------- | |
1174 | |
1175 int32_t AudioDeviceWindowsWave::MinMicrophoneVolume(uint32_t& minVolume) const | |
1176 { | |
1177 minVolume = _minMicVolume; | |
1178 return 0; | |
1179 } | |
1180 | |
1181 // ---------------------------------------------------------------------------- | |
1182 // MicrophoneVolumeStepSize | |
1183 // ---------------------------------------------------------------------------- | |
1184 | |
1185 int32_t AudioDeviceWindowsWave::MicrophoneVolumeStepSize(uint16_t& stepSize) const | |
1186 { | |
1187 | |
1188 uint16_t delta(0); | |
1189 | |
1190 if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1) | |
1191 { | |
1192 return -1; | |
1193 } | |
1194 | |
1195 stepSize = delta; | |
1196 return 0; | |
1197 } | |
1198 | |
1199 // ---------------------------------------------------------------------------- | |
1200 // PlayoutDevices | |
1201 // ---------------------------------------------------------------------------- | |
1202 | |
1203 int16_t AudioDeviceWindowsWave::PlayoutDevices() | |
1204 { | |
1205 | |
1206 return (waveOutGetNumDevs()); | |
1207 } | |
1208 | |
1209 // ---------------------------------------------------------------------------- | |
1210 // SetPlayoutDevice I (II) | |
1211 // ---------------------------------------------------------------------------- | |
1212 | |
1213 int32_t AudioDeviceWindowsWave::SetPlayoutDevice(uint16_t index) | |
1214 { | |
1215 | |
1216 if (_playIsInitialized) | |
1217 { | |
1218 return -1; | |
1219 } | |
1220 | |
1221 UINT nDevices = waveOutGetNumDevs(); | |
1222 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "number of available waveform-audio output devices is %u", nDevices); | |
1223 | |
1224 if (index < 0 || index > (nDevices-1)) | |
1225 { | |
1226 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device index is out of range [0,%u]", (nDevices-1)); | |
1227 return -1; | |
1228 } | |
1229 | |
1230 _usingOutputDeviceIndex = true; | |
1231 _outputDeviceIndex = index; | |
1232 _outputDeviceIsSpecified = true; | |
1233 | |
1234 return 0; | |
1235 } | |
1236 | |
1237 // ---------------------------------------------------------------------------- | |
1238 // SetPlayoutDevice II (II) | |
1239 // ---------------------------------------------------------------------------- | |
1240 | |
1241 int32_t AudioDeviceWindowsWave::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device) | |
1242 { | |
1243 if (_playIsInitialized) | |
1244 { | |
1245 return -1; | |
1246 } | |
1247 | |
1248 if (device == AudioDeviceModule::kDefaultDevice) | |
1249 { | |
1250 } | |
1251 else if (device == AudioDeviceModule::kDefaultCommunicationDevice) | |
1252 { | |
1253 } | |
1254 | |
1255 _usingOutputDeviceIndex = false; | |
1256 _outputDevice = device; | |
1257 _outputDeviceIsSpecified = true; | |
1258 | |
1259 return 0; | |
1260 } | |
1261 | |
1262 // ---------------------------------------------------------------------------- | |
1263 // PlayoutDeviceName | |
1264 // ---------------------------------------------------------------------------- | |
1265 | |
1266 int32_t AudioDeviceWindowsWave::PlayoutDeviceName( | |
1267 uint16_t index, | |
1268 char name[kAdmMaxDeviceNameSize], | |
1269 char guid[kAdmMaxGuidSize]) | |
1270 { | |
1271 | |
1272 uint16_t nDevices(PlayoutDevices()); | |
1273 | |
1274 // Special fix for the case when the user asks for the name of the default device. | |
1275 // | |
1276 if (index == (uint16_t)(-1)) | |
1277 { | |
1278 index = 0; | |
1279 } | |
1280 | |
1281 if ((index > (nDevices-1)) || (name == NULL)) | |
1282 { | |
1283 return -1; | |
1284 } | |
1285 | |
1286 memset(name, 0, kAdmMaxDeviceNameSize); | |
1287 | |
1288 if (guid != NULL) | |
1289 { | |
1290 memset(guid, 0, kAdmMaxGuidSize); | |
1291 } | |
1292 | |
1293 WAVEOUTCAPSW caps; // szPname member (product name, NULL-terminated) is a WCHAR string | |
1294 MMRESULT res; | |
1295 | |
1296 res = waveOutGetDevCapsW(index, &caps, sizeof(WAVEOUTCAPSW)); | |
1297 if (res != MMSYSERR_NOERROR) | |
1298 { | |
1299 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetDevCapsW() failed (err=%d)", res); | |
1300 return -1; | |
1301 } | |
1302 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0) | |
1303 { | |
1304 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 1", GetLastError()); | |
1305 } | |
1306 | |
1307 if (guid == NULL) | |
1308 { | |
1309 return 0; | |
1310 } | |
1311 | |
1312 // It is possible to get the unique endpoint ID string using the Wave API. | |
1313 // However, it is only supported on Windows Vista and Windows 7. | |
1314 | |
1315 size_t cbEndpointId(0); | |
1316 | |
1317 // Get the size (including the terminating null) of the endpoint ID string of the waveOut device. | |
1318 // Windows Vista supports the DRV_QUERYFUNCTIONINSTANCEIDSIZE and DRV_QUERYFUNCTIONINSTANCEID messages. | |
1319 res = waveOutMessage((HWAVEOUT)IntToPtr(index), | |
1320 DRV_QUERYFUNCTIONINSTANCEIDSIZE, | |
1321 (DWORD_PTR)&cbEndpointId, NULL); | |
1322 if (res != MMSYSERR_NOERROR) | |
1323 { | |
1324 // DRV_QUERYFUNCTIONINSTANCEIDSIZE is not supported <=> earlier version of Windows than Vista | |
1325 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "waveOutMessage(DRV_QUERYFUNCTIONINSTANCEIDSIZE) failed (err=%d)", res); | |
1326 TraceWaveOutError(res); | |
1327 // Best we can do is to copy the friendly name and use it as guid | |
1328 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0) | |
1329 { | |
1330 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 2", GetLastError()); | |
1331 } | |
1332 return 0; | |
1333 } | |
1334 | |
1335 // waveOutMessage(DRV_QUERYFUNCTIONINSTANCEIDSIZE) worked => we are on a Vista or Windows 7 device | |
1336 | |
1337 WCHAR *pstrEndpointId = NULL; | |
1338 pstrEndpointId = (WCHAR*)CoTaskMemAlloc(cbEndpointId); | |
1339 | |
1340 // Get the endpoint ID string for this waveOut device. | |
1341 res = waveOutMessage((HWAVEOUT)IntToPtr(index), | |
1342 DRV_QUERYFUNCTIONINSTANCEID, | |
1343 (DWORD_PTR)pstrEndpointId, | |
1344 cbEndpointId); | |
1345 if (res != MMSYSERR_NOERROR) | |
1346 { | |
1347 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "waveOutMessage(DRV_QUERYFUNCTIONINSTANCEID) failed (err=%d)", res); | |
1348 TraceWaveOutError(res); | |
1349 // Best we can do is to copy the friendly name and use it as guid | |
1350 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0) | |
1351 { | |
1352 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 3", GetLastError()); | |
1353 } | |
1354 CoTaskMemFree(pstrEndpointId); | |
1355 return 0; | |
1356 } | |
1357 | |
1358 if (WideCharToMultiByte(CP_UTF8, 0, pstrEndpointId, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0) | |
1359 { | |
1360 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 4", GetLastError()); | |
1361 } | |
1362 CoTaskMemFree(pstrEndpointId); | |
1363 | |
1364 return 0; | |
1365 } | |
1366 | |
1367 // ---------------------------------------------------------------------------- | |
1368 // RecordingDeviceName | |
1369 // ---------------------------------------------------------------------------- | |
1370 | |
1371 int32_t AudioDeviceWindowsWave::RecordingDeviceName( | |
1372 uint16_t index, | |
1373 char name[kAdmMaxDeviceNameSize], | |
1374 char guid[kAdmMaxGuidSize]) | |
1375 { | |
1376 | |
1377 uint16_t nDevices(RecordingDevices()); | |
1378 | |
1379 // Special fix for the case when the user asks for the name of the default device. | |
1380 // | |
1381 if (index == (uint16_t)(-1)) | |
1382 { | |
1383 index = 0; | |
1384 } | |
1385 | |
1386 if ((index > (nDevices-1)) || (name == NULL)) | |
1387 { | |
1388 return -1; | |
1389 } | |
1390 | |
1391 memset(name, 0, kAdmMaxDeviceNameSize); | |
1392 | |
1393 if (guid != NULL) | |
1394 { | |
1395 memset(guid, 0, kAdmMaxGuidSize); | |
1396 } | |
1397 | |
1398 WAVEINCAPSW caps; // szPname member (product name, NULL-terminated) is a WCHAR string | |
1399 MMRESULT res; | |
1400 | |
1401 res = waveInGetDevCapsW(index, &caps, sizeof(WAVEINCAPSW)); | |
1402 if (res != MMSYSERR_NOERROR) | |
1403 { | |
1404 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetDevCapsW() failed (err=%d)", res); | |
1405 return -1; | |
1406 } | |
1407 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0) | |
1408 { | |
1409 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 1", GetLastError()); | |
1410 } | |
1411 | |
1412 if (guid == NULL) | |
1413 { | |
1414 return 0; | |
1415 } | |
1416 | |
1417 // It is possible to get the unique endpoint ID string using the Wave API. | |
1418 // However, it is only supported on Windows Vista and Windows 7. | |
1419 | |
1420 size_t cbEndpointId(0); | |
1421 | |
1422 // Get the size (including the terminating null) of the endpoint ID string of the waveIn device. | |
1423 // Windows Vista supports the DRV_QUERYFUNCTIONINSTANCEIDSIZE and DRV_QUERYFUNCTIONINSTANCEID messages. | |
1424 res = waveInMessage((HWAVEIN)IntToPtr(index), | |
1425 DRV_QUERYFUNCTIONINSTANCEIDSIZE, | |
1426 (DWORD_PTR)&cbEndpointId, NULL); | |
1427 if (res != MMSYSERR_NOERROR) | |
1428 { | |
1429 // DRV_QUERYFUNCTIONINSTANCEIDSIZE is not supported <=> earlier version of Windows than Vista | |
1430 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "waveInMessage(DRV_QUERYFUNCTIONINSTANCEIDSIZE) failed (err=%d)", res); | |
1431 TraceWaveInError(res); | |
1432 // Best we can do is to copy the friendly name and use it as guid | |
1433 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0) | |
1434 { | |
1435 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 2", GetLastError()); | |
1436 } | |
1437 return 0; | |
1438 } | |
1439 | |
1440 // waveInMessage(DRV_QUERYFUNCTIONINSTANCEIDSIZE) worked => we are on a Vista or Windows 7 device | |
1441 | |
1442 WCHAR *pstrEndpointId = NULL; | |
1443 pstrEndpointId = (WCHAR*)CoTaskMemAlloc(cbEndpointId); | |
1444 | |
1445 // Get the endpoint ID string for this waveIn device. | |
1446 res = waveInMessage((HWAVEIN)IntToPtr(index), | |
1447 DRV_QUERYFUNCTIONINSTANCEID, | |
1448 (DWORD_PTR)pstrEndpointId, | |
1449 cbEndpointId); | |
1450 if (res != MMSYSERR_NOERROR) | |
1451 { | |
1452 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "waveInMessage(DRV_QUERYFUNCTIONINSTANCEID) failed (err=%d)", res); | |
1453 TraceWaveInError(res); | |
1454 // Best we can do is to copy the friendly name and use it as guid | |
1455 if (WideCharToMultiByte(CP_UTF8, 0, caps.szPname, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0) | |
1456 { | |
1457 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 3", GetLastError()); | |
1458 } | |
1459 CoTaskMemFree(pstrEndpointId); | |
1460 return 0; | |
1461 } | |
1462 | |
1463 if (WideCharToMultiByte(CP_UTF8, 0, pstrEndpointId, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0) | |
1464 { | |
1465 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d - 4", GetLastError()); | |
1466 } | |
1467 CoTaskMemFree(pstrEndpointId); | |
1468 | |
1469 return 0; | |
1470 } | |
1471 | |
1472 // ---------------------------------------------------------------------------- | |
1473 // RecordingDevices | |
1474 // ---------------------------------------------------------------------------- | |
1475 | |
1476 int16_t AudioDeviceWindowsWave::RecordingDevices() | |
1477 { | |
1478 | |
1479 return (waveInGetNumDevs()); | |
1480 } | |
1481 | |
1482 // ---------------------------------------------------------------------------- | |
1483 // SetRecordingDevice I (II) | |
1484 // ---------------------------------------------------------------------------- | |
1485 | |
1486 int32_t AudioDeviceWindowsWave::SetRecordingDevice(uint16_t index) | |
1487 { | |
1488 | |
1489 if (_recIsInitialized) | |
1490 { | |
1491 return -1; | |
1492 } | |
1493 | |
1494 UINT nDevices = waveInGetNumDevs(); | |
1495 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "number of available waveform-audio input devices is %u", nDevices); | |
1496 | |
1497 if (index < 0 || index > (nDevices-1)) | |
1498 { | |
1499 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device index is out of range [0,%u]", (nDevices-1)); | |
1500 return -1; | |
1501 } | |
1502 | |
1503 _usingInputDeviceIndex = true; | |
1504 _inputDeviceIndex = index; | |
1505 _inputDeviceIsSpecified = true; | |
1506 | |
1507 return 0; | |
1508 } | |
1509 | |
1510 // ---------------------------------------------------------------------------- | |
1511 // SetRecordingDevice II (II) | |
1512 // ---------------------------------------------------------------------------- | |
1513 | |
1514 int32_t AudioDeviceWindowsWave::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device) | |
1515 { | |
1516 if (device == AudioDeviceModule::kDefaultDevice) | |
1517 { | |
1518 } | |
1519 else if (device == AudioDeviceModule::kDefaultCommunicationDevice) | |
1520 { | |
1521 } | |
1522 | |
1523 if (_recIsInitialized) | |
1524 { | |
1525 return -1; | |
1526 } | |
1527 | |
1528 _usingInputDeviceIndex = false; | |
1529 _inputDevice = device; | |
1530 _inputDeviceIsSpecified = true; | |
1531 | |
1532 return 0; | |
1533 } | |
1534 | |
1535 // ---------------------------------------------------------------------------- | |
1536 // PlayoutIsAvailable | |
1537 // ---------------------------------------------------------------------------- | |
1538 | |
1539 int32_t AudioDeviceWindowsWave::PlayoutIsAvailable(bool& available) | |
1540 { | |
1541 | |
1542 available = false; | |
1543 | |
1544 // Try to initialize the playout side | |
1545 int32_t res = InitPlayout(); | |
1546 | |
1547 // Cancel effect of initialization | |
1548 StopPlayout(); | |
1549 | |
1550 if (res != -1) | |
1551 { | |
1552 available = true; | |
1553 } | |
1554 | |
1555 return 0; | |
1556 } | |
1557 | |
1558 // ---------------------------------------------------------------------------- | |
1559 // RecordingIsAvailable | |
1560 // ---------------------------------------------------------------------------- | |
1561 | |
1562 int32_t AudioDeviceWindowsWave::RecordingIsAvailable(bool& available) | |
1563 { | |
1564 | |
1565 available = false; | |
1566 | |
1567 // Try to initialize the recording side | |
1568 int32_t res = InitRecording(); | |
1569 | |
1570 // Cancel effect of initialization | |
1571 StopRecording(); | |
1572 | |
1573 if (res != -1) | |
1574 { | |
1575 available = true; | |
1576 } | |
1577 | |
1578 return 0; | |
1579 } | |
1580 | |
1581 // ---------------------------------------------------------------------------- | |
1582 // InitPlayout | |
1583 // ---------------------------------------------------------------------------- | |
1584 | |
1585 int32_t AudioDeviceWindowsWave::InitPlayout() | |
1586 { | |
1587 | |
1588 CriticalSectionScoped lock(&_critSect); | |
1589 | |
1590 if (_playing) | |
1591 { | |
1592 return -1; | |
1593 } | |
1594 | |
1595 if (!_outputDeviceIsSpecified) | |
1596 { | |
1597 return -1; | |
1598 } | |
1599 | |
1600 if (_playIsInitialized) | |
1601 { | |
1602 return 0; | |
1603 } | |
1604 | |
1605 // Initialize the speaker (devices might have been added or removed) | |
1606 if (InitSpeaker() == -1) | |
1607 { | |
1608 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "InitSpeaker() failed"); | |
1609 } | |
1610 | |
1611 // Enumerate all available output devices | |
1612 EnumeratePlayoutDevices(); | |
1613 | |
1614 // Start by closing any existing wave-output devices | |
1615 // | |
1616 MMRESULT res(MMSYSERR_ERROR); | |
1617 | |
1618 if (_hWaveOut != NULL) | |
1619 { | |
1620 res = waveOutClose(_hWaveOut); | |
1621 if (MMSYSERR_NOERROR != res) | |
1622 { | |
1623 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutClose() failed (err=%d)", res); | |
1624 TraceWaveOutError(res); | |
1625 } | |
1626 } | |
1627 | |
1628 // Set the output wave format | |
1629 // | |
1630 WAVEFORMATEX waveFormat; | |
1631 | |
1632 waveFormat.wFormatTag = WAVE_FORMAT_PCM; | |
1633 waveFormat.nChannels = _playChannels; // mono <=> 1, stereo <=> 2 | |
1634 waveFormat.nSamplesPerSec = N_PLAY_SAMPLES_PER_SEC; | |
1635 waveFormat.wBitsPerSample = 16; | |
1636 waveFormat.nBlockAlign = waveFormat.nChannels * (waveFormat.wBitsPerSample/8); | |
1637 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign; | |
1638 waveFormat.cbSize = 0; | |
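    // Example: stereo (nChannels = 2) 16-bit PCM at 48 kHz gives
    // nBlockAlign = 2 * (16/8) = 4 bytes per frame and
    // nAvgBytesPerSec = 48000 * 4 = 192000 bytes per second.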
1639 | |
1640 // Open the given waveform-audio output device for playout | |
1641 // | |
1642 HWAVEOUT hWaveOut(NULL); | |
1643 | |
1644 if (IsUsingOutputDeviceIndex()) | |
1645 { | |
1646 // verify settings first | |
1647         res = waveOutOpen(NULL, _outputDeviceIndex, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_FORMAT_QUERY); | |
1648         if (MMSYSERR_NOERROR == res) | |
1649         { | |
1650             // open the given waveform-audio output device for playout | |
1651             res = waveOutOpen(&hWaveOut, _outputDeviceIndex, &waveFormat, 0, 0, CALLBACK_NULL); | |
1652             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening output device corresponding to device ID %u", _outputDeviceIndex); | |
1653 } | |
1654 } | |
1655 else | |
1656 { | |
1657 if (_outputDevice == AudioDeviceModule::kDefaultCommunicationDevice) | |
1658 { | |
1659             // check if it is possible to open the default communication device (supported on Windows 7) | |
1660             res = waveOutOpen(NULL, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE | WAVE_FORMAT_QUERY); | |
1661             if (MMSYSERR_NOERROR == res) | |
1662             { | |
1663                 // if so, open the default communication device for real | |
1664                 res = waveOutOpen(&hWaveOut, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE); | |
1665                 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening default communication device"); | |
1666             } | |
1667             else | |
1668             { | |
1669                 // use the default device since the default communication device was not available | |
1670                 res = waveOutOpen(&hWaveOut, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL); | |
1671                 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "unable to open default communication device => using default instead"); | |
1672 } | |
1673 } | |
1674 else if (_outputDevice == AudioDeviceModule::kDefaultDevice) | |
1675 { | |
1676 // open default device since it has been requested | |
1677             res = waveOutOpen(NULL, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_FORMAT_QUERY); | |
1678             if (MMSYSERR_NOERROR == res) | |
1679             { | |
1680                 res = waveOutOpen(&hWaveOut, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL); | |
1681                 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening default output device"); | |
1682 } | |
1683 } | |
1684 } | |
1685 | |
1686 if (MMSYSERR_NOERROR != res) | |
1687 { | |
1688         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveOutOpen() failed (err=%d)", res); | |
1689 TraceWaveOutError(res); | |
1690 return -1; | |
1691 } | |
1692 | |
1693     // Log information about the acquired output device | |
1694 // | |
1695 WAVEOUTCAPS caps; | |
1696 | |
1697 res = waveOutGetDevCaps((UINT_PTR)hWaveOut, &caps, sizeof(WAVEOUTCAPS)); | |
1698 if (res != MMSYSERR_NOERROR) | |
1699 { | |
1700         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetDevCaps() failed (err=%d)", res); | |
1701 TraceWaveOutError(res); | |
1702 } | |
1703 | |
1704 UINT deviceID(0); | |
1705 res = waveOutGetID(hWaveOut, &deviceID); | |
1706 if (res != MMSYSERR_NOERROR) | |
1707 { | |
1708         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetID() failed (err=%d)", res); | |
1709 TraceWaveOutError(res); | |
1710 } | |
1711     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "utilized device ID : %u", deviceID); | |
1712     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product name       : %s", caps.szPname); | |
1713 | |
1714 // Store valid handle for the open waveform-audio output device | |
1715 _hWaveOut = hWaveOut; | |
1716 | |
1717     // Store the output wave format as well | |
1718 _waveFormatOut = waveFormat; | |
1719 | |
1720 // Prepare wave-out headers | |
1721 // | |
1722 const uint8_t bytesPerSample = 2*_playChannels; | |
1723 | |
1724 for (int n = 0; n < N_BUFFERS_OUT; n++) | |
1725 { | |
1726 // set up the output wave header | |
1727         _waveHeaderOut[n].lpData          = reinterpret_cast<LPSTR>(&_playBuffer[n]); | |
1728         _waveHeaderOut[n].dwBufferLength  = bytesPerSample*PLAY_BUF_SIZE_IN_SAMPLES; | |
1729 _waveHeaderOut[n].dwFlags = 0; | |
1730 _waveHeaderOut[n].dwLoops = 0; | |
1731 | |
1732 memset(_playBuffer[n], 0, bytesPerSample*PLAY_BUF_SIZE_IN_SAMPLES); | |
1733 | |
1734         // The waveOutPrepareHeader function prepares a waveform-audio data block for playback. | |
1735         // The lpData, dwBufferLength, and dwFlags members of the WAVEHDR structure must be set | |
1736         // before calling this function. | |
1737         // | |
1738         res = waveOutPrepareHeader(_hWaveOut, &_waveHeaderOut[n], sizeof(WAVEHDR)); | |
1739 if (MMSYSERR_NOERROR != res) | |
1740 { | |
1741             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutPrepareHeader(%d) failed (err=%d)", n, res); | |
1742 TraceWaveOutError(res); | |
1743 } | |
1744 | |
1745 // perform extra check to ensure that the header is prepared | |
1746 if (_waveHeaderOut[n].dwFlags != WHDR_PREPARED) | |
1747 { | |
1748             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutPrepareHeader(%d) failed (dwFlags != WHDR_PREPARED)", n); | |
1749 } | |
1750 } | |
1751 | |
1752 // Mark playout side as initialized | |
1753 _playIsInitialized = true; | |
1754 | |
1755     _dTcheckPlayBufDelay = 10;  // check playback buffer delay every 10 ms | |
1756     _playBufCount = 0;          // index of active output wave header (<=> output buffer index) | |
1757     _playBufDelay = 80;         // buffer delay/size is initialized to 80 ms and slowly decreased until er < 25 | |
1758     _minPlayBufDelay = 25;      // minimum playout buffer delay | |
1759     _MAX_minBuffer = 65;        // adaptive minimum playout buffer delay cannot be larger than this value | |
1760     _intro = 1;                 // used to make sure that adaptation starts after (2000-1700)/100 seconds | |
1761     _waitCounter = 1700;        // counter for start of adaptation of the playback buffer | |
1762     _erZeroCounter = 0;         // logs how many times er = 0 in consecutive calls to RecTimeProc | |
1763     _useHeader = 0;             // counts number of "useHeader" detections; stops at 2 | |
1764 | |
1765 _writtenSamples = 0; | |
1766 _writtenSamplesOld = 0; | |
1767 _playedSamplesOld = 0; | |
1768 _sndCardPlayDelay = 0; | |
1769 _sndCardRecDelay = 0; | |
1770 | |
1771     WEBRTC_TRACE(kTraceInfo, kTraceUtility, _id, "initial playout status: _playBufDelay=%d, _minPlayBufDelay=%d", | |
1772                  _playBufDelay, _minPlayBufDelay); | |
1773 | |
1774 return 0; | |
1775 } | |
1776 | |
1777 // ---------------------------------------------------------------------------- | |
1778 // InitRecording | |
1779 // ---------------------------------------------------------------------------- | |
1780 | |
1781 int32_t AudioDeviceWindowsWave::InitRecording() | |
1782 { | |
1783 | |
1784 CriticalSectionScoped lock(&_critSect); | |
1785 | |
1786 if (_recording) | |
1787 { | |
1788 return -1; | |
1789 } | |
1790 | |
1791 if (!_inputDeviceIsSpecified) | |
1792 { | |
1793 return -1; | |
1794 } | |
1795 | |
1796 if (_recIsInitialized) | |
1797 { | |
1798 return 0; | |
1799 } | |
1800 | |
1801 _avgCPULoad = 0; | |
1802 _playAcc = 0; | |
1803 | |
1804 // Initialize the microphone (devices might have been added or removed) | |
1805 if (InitMicrophone() == -1) | |
1806 { | |
1807         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "InitMicrophone() failed"); | |
1808 } | |
1809 | |
1810     // Enumerate all available input devices | |
1811 EnumerateRecordingDevices(); | |
1812 | |
1813 // Start by closing any existing wave-input devices | |
1814 // | |
1815 MMRESULT res(MMSYSERR_ERROR); | |
1816 | |
1817 if (_hWaveIn != NULL) | |
1818 { | |
1819 res = waveInClose(_hWaveIn); | |
1820 if (MMSYSERR_NOERROR != res) | |
1821 { | |
1822             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInClose() failed (err=%d)", res); | |
1823 TraceWaveInError(res); | |
1824 } | |
1825 } | |
1826 | |
1827 // Set the input wave format | |
1828 // | |
1829 WAVEFORMATEX waveFormat; | |
1830 | |
1831 waveFormat.wFormatTag = WAVE_FORMAT_PCM; | |
1832 waveFormat.nChannels = _recChannels; // mono <=> 1, stereo <=> 2 | |
1833 waveFormat.nSamplesPerSec = N_REC_SAMPLES_PER_SEC; | |
1834 waveFormat.wBitsPerSample = 16; | |
1835     waveFormat.nBlockAlign      = waveFormat.nChannels * (waveFormat.wBitsPerSample/8); | |
1836     waveFormat.nAvgBytesPerSec  = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign; | |
1837 waveFormat.cbSize = 0; | |
1838 | |
1839 // Open the given waveform-audio input device for recording | |
1840 // | |
1841 HWAVEIN hWaveIn(NULL); | |
1842 | |
1843 if (IsUsingInputDeviceIndex()) | |
1844 { | |
1845 // verify settings first | |
1846         res = waveInOpen(NULL, _inputDeviceIndex, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_FORMAT_QUERY); | |
1847         if (MMSYSERR_NOERROR == res) | |
1848         { | |
1849             // open the given waveform-audio input device for recording | |
1850             res = waveInOpen(&hWaveIn, _inputDeviceIndex, &waveFormat, 0, 0, CALLBACK_NULL); | |
1851             WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening input device corresponding to device ID %u", _inputDeviceIndex); | |
1852 } | |
1853 } | |
1854 else | |
1855 { | |
1856 if (_inputDevice == AudioDeviceModule::kDefaultCommunicationDevice) | |
1857 { | |
1858             // check if it is possible to open the default communication device (supported on Windows 7) | |
1859             res = waveInOpen(NULL, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE | WAVE_FORMAT_QUERY); | |
1860             if (MMSYSERR_NOERROR == res) | |
1861             { | |
1862                 // if so, open the default communication device for real | |
1863                 res = waveInOpen(&hWaveIn, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_MAPPED_DEFAULT_COMMUNICATION_DEVICE); | |
1864                 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening default communication device"); | |
1865             } | |
1866             else | |
1867             { | |
1868                 // use the default device since the default communication device was not available | |
1869                 res = waveInOpen(&hWaveIn, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL); | |
1870                 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "unable to open default communication device => using default instead"); | |
1871 } | |
1872 } | |
1873 else if (_inputDevice == AudioDeviceModule::kDefaultDevice) | |
1874 { | |
1875 // open default device since it has been requested | |
1876             res = waveInOpen(NULL, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL | WAVE_FORMAT_QUERY); | |
1877             if (MMSYSERR_NOERROR == res) | |
1878             { | |
1879                 res = waveInOpen(&hWaveIn, WAVE_MAPPER, &waveFormat, 0, 0, CALLBACK_NULL); | |
1880                 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "opening default input device"); | |
1881 } | |
1882 } | |
1883 } | |
1884 | |
1885 if (MMSYSERR_NOERROR != res) | |
1886 { | |
1887         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveInOpen() failed (err=%d)", res); | |
1888 TraceWaveInError(res); | |
1889 return -1; | |
1890 } | |
1891 | |
1892     // Log information about the acquired input device | |
1893 // | |
1894 WAVEINCAPS caps; | |
1895 | |
1896 res = waveInGetDevCaps((UINT_PTR)hWaveIn, &caps, sizeof(WAVEINCAPS)); | |
1897 if (res != MMSYSERR_NOERROR) | |
1898 { | |
1899         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetDevCaps() failed (err=%d)", res); | |
1900 TraceWaveInError(res); | |
1901 } | |
1902 | |
1903 UINT deviceID(0); | |
1904 res = waveInGetID(hWaveIn, &deviceID); | |
1905 if (res != MMSYSERR_NOERROR) | |
1906 { | |
1907         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetID() failed (err=%d)", res); | |
1908 TraceWaveInError(res); | |
1909 } | |
1910     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "utilized device ID : %u", deviceID); | |
1911     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product name       : %s", caps.szPname); | |
1912 | |
1913 // Store valid handle for the open waveform-audio input device | |
1914 _hWaveIn = hWaveIn; | |
1915 | |
1916     // Store the input wave format as well | |
1917 _waveFormatIn = waveFormat; | |
1918 | |
1919 // Mark recording side as initialized | |
1920 _recIsInitialized = true; | |
1921 | |
1922     _recBufCount = 0;     // index of active input wave header (<=> input buffer index) | |
1923     _recDelayCount = 0;   // ensures that input buffers are returned with a certain delay | |
1924 | |
1925 return 0; | |
1926 } | |
1927 | |
1928 // ---------------------------------------------------------------------------- | |
1929 // StartRecording | |
1930 // ---------------------------------------------------------------------------- | |
1931 | |
1932 int32_t AudioDeviceWindowsWave::StartRecording() | |
1933 { | |
1934 | |
1935 if (!_recIsInitialized) | |
1936 { | |
1937 return -1; | |
1938 } | |
1939 | |
1940 if (_recording) | |
1941 { | |
1942 return 0; | |
1943 } | |
1944 | |
1945 // set state to ensure that the recording starts from the audio thread | |
1946 _startRec = true; | |
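    // ThreadProcess() observes _startRec, calls PrepareStartRecording() and,
    // on success, sets _recording and signals _recStartEvent.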
1947 | |
1948     // the audio thread will signal when recording has started | |
1949 if (kEventTimeout == _recStartEvent.Wait(10000)) | |
1950 { | |
1951 _startRec = false; | |
1952 StopRecording(); | |
1953         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to activate recording"); | |
1954 return -1; | |
1955 } | |
1956 | |
1957 if (_recording) | |
1958 { | |
1959         // the recording state is set by the audio thread after recording has started | |
1960 } | |
1961 else | |
1962 { | |
1963         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to activate recording"); | |
1964 return -1; | |
1965 } | |
1966 | |
1967 return 0; | |
1968 } | |
1969 | |
1970 // ---------------------------------------------------------------------------- | |
1971 // StopRecording | |
1972 // ---------------------------------------------------------------------------- | |
1973 | |
1974 int32_t AudioDeviceWindowsWave::StopRecording() | |
1975 { | |
1976 | |
1977 CriticalSectionScoped lock(&_critSect); | |
1978 | |
1979 if (!_recIsInitialized) | |
1980 { | |
1981 return 0; | |
1982 } | |
1983 | |
1984 if (_hWaveIn == NULL) | |
1985 { | |
1986 return -1; | |
1987 } | |
1988 | |
1989 bool wasRecording = _recording; | |
1990 _recIsInitialized = false; | |
1991 _recording = false; | |
1992 | |
1993 MMRESULT res; | |
1994 | |
1995     // Stop waveform-audio input. If there are any buffers in the queue, the | |
1996 // current buffer will be marked as done (the dwBytesRecorded member in | |
1997 // the header will contain the length of data), but any empty buffers in | |
1998 // the queue will remain there. | |
1999 // | |
2000 res = waveInStop(_hWaveIn); | |
2001 if (MMSYSERR_NOERROR != res) | |
2002 { | |
2003         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInStop() failed (err=%d)", res); | |
2004 TraceWaveInError(res); | |
2005 } | |
2006 | |
2007     // Stops input on the given waveform-audio input device and resets the current | |
2008 // position to zero. All pending buffers are marked as done and returned to | |
2009 // the application. | |
2010 // | |
2011 res = waveInReset(_hWaveIn); | |
2012 if (MMSYSERR_NOERROR != res) | |
2013 { | |
2014         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInReset() failed (err=%d)", res); | |
2015 TraceWaveInError(res); | |
2016 } | |
2017 | |
2018 // Clean up the preparation performed by the waveInPrepareHeader function. | |
2019     // Only unprepare header if recording was ever started (and headers are prepared). | |
2020 // | |
2021 if (wasRecording) | |
2022 { | |
2023         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "waveInUnprepareHeader() will be performed"); | |
2024 for (int n = 0; n < N_BUFFERS_IN; n++) | |
2025 { | |
2026             res = waveInUnprepareHeader(_hWaveIn, &_waveHeaderIn[n], sizeof(WAVEHDR)); | |
2027 if (MMSYSERR_NOERROR != res) | |
2028 { | |
2029                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInUnprepareHeader() failed (err=%d)", res); | |
2030 TraceWaveInError(res); | |
2031 } | |
2032 } | |
2033 } | |
2034 | |
2035 // Close the given waveform-audio input device. | |
2036 // | |
2037 res = waveInClose(_hWaveIn); | |
2038 if (MMSYSERR_NOERROR != res) | |
2039 { | |
2040         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInClose() failed (err=%d)", res); | |
2041 TraceWaveInError(res); | |
2042 } | |
2043 | |
2044 // Set the wave input handle to NULL | |
2045 // | |
2046 _hWaveIn = NULL; | |
2047     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_hWaveIn is now set to NULL"); | |
2048 | |
2049 return 0; | |
2050 } | |
2051 | |
2052 // ---------------------------------------------------------------------------- | |
2053 // RecordingIsInitialized | |
2054 // ---------------------------------------------------------------------------- | |
2055 | |
2056 bool AudioDeviceWindowsWave::RecordingIsInitialized() const | |
2057 { | |
2058 return (_recIsInitialized); | |
2059 } | |
2060 | |
2061 // ---------------------------------------------------------------------------- | |
2062 // Recording | |
2063 // ---------------------------------------------------------------------------- | |
2064 | |
2065 bool AudioDeviceWindowsWave::Recording() const | |
2066 { | |
2067 return (_recording); | |
2068 } | |
2069 | |
2070 // ---------------------------------------------------------------------------- | |
2071 // PlayoutIsInitialized | |
2072 // ---------------------------------------------------------------------------- | |
2073 | |
2074 bool AudioDeviceWindowsWave::PlayoutIsInitialized() const | |
2075 { | |
2076 return (_playIsInitialized); | |
2077 } | |
2078 | |
2079 // ---------------------------------------------------------------------------- | |
2080 // StartPlayout | |
2081 // ---------------------------------------------------------------------------- | |
2082 | |
2083 int32_t AudioDeviceWindowsWave::StartPlayout() | |
2084 { | |
2085 | |
2086 if (!_playIsInitialized) | |
2087 { | |
2088 return -1; | |
2089 } | |
2090 | |
2091 if (_playing) | |
2092 { | |
2093 return 0; | |
2094 } | |
2095 | |
2096 // set state to ensure that playout starts from the audio thread | |
2097 _startPlay = true; | |
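    // ThreadProcess() observes _startPlay, calls PrepareStartPlayout() and,
    // on success, sets _playing and signals _playStartEvent.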
2098 | |
2099     // the audio thread will signal when playout has started | |
2100 if (kEventTimeout == _playStartEvent.Wait(10000)) | |
2101 { | |
2102 _startPlay = false; | |
2103 StopPlayout(); | |
2104         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to activate playout"); | |
2105 return -1; | |
2106 } | |
2107 | |
2108 if (_playing) | |
2109 { | |
2110         // the playing state is set by the audio thread after playout has started | |
2111 } | |
2112 else | |
2113 { | |
2114         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to activate playing"); | |
2115 return -1; | |
2116 } | |
2117 | |
2118 return 0; | |
2119 } | |
2120 | |
2121 // ---------------------------------------------------------------------------- | |
2122 // StopPlayout | |
2123 // ---------------------------------------------------------------------------- | |
2124 | |
2125 int32_t AudioDeviceWindowsWave::StopPlayout() | |
2126 { | |
2127 | |
2128 CriticalSectionScoped lock(&_critSect); | |
2129 | |
2130 if (!_playIsInitialized) | |
2131 { | |
2132 return 0; | |
2133 } | |
2134 | |
2135 if (_hWaveOut == NULL) | |
2136 { | |
2137 return -1; | |
2138 } | |
2139 | |
2140 _playIsInitialized = false; | |
2141 _playing = false; | |
2142 _sndCardPlayDelay = 0; | |
2143 _sndCardRecDelay = 0; | |
2144 | |
2145 MMRESULT res; | |
2146 | |
2147 // The waveOutReset function stops playback on the given waveform-audio | |
2148 // output device and resets the current position to zero. All pending | |
2149     // playback buffers are marked as done (WHDR_DONE) and returned to the application. | |
2150     // After this function returns, the application can send new playback buffers | |
2151     // to the device by calling waveOutWrite, or close the device by calling waveOutClose. | |
2152 // | |
2153 res = waveOutReset(_hWaveOut); | |
2154 if (MMSYSERR_NOERROR != res) | |
2155 { | |
2156         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutReset() failed (err=%d)", res); | |
2157 TraceWaveOutError(res); | |
2158 } | |
2159 | |
2160 // The waveOutUnprepareHeader function cleans up the preparation performed | |
2161 // by the waveOutPrepareHeader function. This function must be called after | |
2162 // the device driver is finished with a data block. | |
2163 // You must call this function before freeing the buffer. | |
2164 // | |
2165 for (int n = 0; n < N_BUFFERS_OUT; n++) | |
2166 { | |
2167         res = waveOutUnprepareHeader(_hWaveOut, &_waveHeaderOut[n], sizeof(WAVEHDR)); | |
2168 if (MMSYSERR_NOERROR != res) | |
2169 { | |
2170             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutUnprepareHeader() failed (err=%d)", res); | |
2171 TraceWaveOutError(res); | |
2172 } | |
2173 } | |
2174 | |
2175 // The waveOutClose function closes the given waveform-audio output device. | |
2176 // The close operation fails if the device is still playing a waveform-audio | |
2177 // buffer that was previously sent by calling waveOutWrite. Before calling | |
2178 // waveOutClose, the application must wait for all buffers to finish playing | |
2179 // or call the waveOutReset function to terminate playback. | |
2180 // | |
2181 res = waveOutClose(_hWaveOut); | |
2182 if (MMSYSERR_NOERROR != res) | |
2183 { | |
2184         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutClose() failed (err=%d)", res); | |
2185 TraceWaveOutError(res); | |
2186 } | |
2187 | |
2188 _hWaveOut = NULL; | |
2189     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_hWaveOut is now set to NULL"); | |
2190 | |
2191 return 0; | |
2192 } | |
2193 | |
2194 // ---------------------------------------------------------------------------- | |
2195 // PlayoutDelay | |
2196 // ---------------------------------------------------------------------------- | |
2197 | |
2198 int32_t AudioDeviceWindowsWave::PlayoutDelay(uint16_t& delayMS) const | |
2199 { | |
2200 CriticalSectionScoped lock(&_critSect); | |
2201 delayMS = (uint16_t)_sndCardPlayDelay; | |
2202 return 0; | |
2203 } | |
2204 | |
2205 // ---------------------------------------------------------------------------- | |
2206 // RecordingDelay | |
2207 // ---------------------------------------------------------------------------- | |
2208 | |
2209 int32_t AudioDeviceWindowsWave::RecordingDelay(uint16_t& delayMS) const | |
2210 { | |
2211 CriticalSectionScoped lock(&_critSect); | |
2212 delayMS = (uint16_t)_sndCardRecDelay; | |
2213 return 0; | |
2214 } | |
2215 | |
2216 // ---------------------------------------------------------------------------- | |
2217 // Playing | |
2218 // ---------------------------------------------------------------------------- | |
2219 | |
2220 bool AudioDeviceWindowsWave::Playing() const | |
2221 { | |
2222 return (_playing); | |
2223 } | |
2224 // ---------------------------------------------------------------------------- | |
2225 // SetPlayoutBuffer | |
2226 // ---------------------------------------------------------------------------- | |
2227 | |
2228 int32_t AudioDeviceWindowsWave::SetPlayoutBuffer(const AudioDeviceModule::BufferType type, uint16_t sizeMS) | |
2229 { | |
2230 CriticalSectionScoped lock(&_critSect); | |
2231 _playBufType = type; | |
2232 if (type == AudioDeviceModule::kFixedBufferSize) | |
2233 { | |
2234 _playBufDelayFixed = sizeMS; | |
2235 } | |
2236 return 0; | |
2237 } | |
2238 | |
2239 // ---------------------------------------------------------------------------- | |
2240 // PlayoutBuffer | |
2241 // ---------------------------------------------------------------------------- | |
2242 | |
2243 int32_t AudioDeviceWindowsWave::PlayoutBuffer(AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const | |
2244 { | |
2245 CriticalSectionScoped lock(&_critSect); | |
2246 type = _playBufType; | |
2247 if (type == AudioDeviceModule::kFixedBufferSize) | |
2248 { | |
2249 sizeMS = _playBufDelayFixed; | |
2250 } | |
2251 else | |
2252 { | |
2253 sizeMS = _playBufDelay; | |
2254 } | |
2255 | |
2256 return 0; | |
2257 } | |
2258 | |
2259 // ---------------------------------------------------------------------------- | |
2260 // CPULoad | |
2261 // ---------------------------------------------------------------------------- | |
2262 | |
2263 int32_t AudioDeviceWindowsWave::CPULoad(uint16_t& load) const | |
2264 { | |
2265 | |
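    // _avgCPULoad is an exponentially filtered CPU-load estimate that is
    // updated in ThreadProcess(); it is reported here as a percentage.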
2266 load = static_cast<uint16_t>(100*_avgCPULoad); | |
2267 | |
2268 return 0; | |
2269 } | |
2270 | |
2271 // ---------------------------------------------------------------------------- | |
2272 // PlayoutWarning | |
2273 // ---------------------------------------------------------------------------- | |
2274 | |
2275 bool AudioDeviceWindowsWave::PlayoutWarning() const | |
2276 { | |
2277 return ( _playWarning > 0); | |
2278 } | |
2279 | |
2280 // ---------------------------------------------------------------------------- | |
2281 // PlayoutError | |
2282 // ---------------------------------------------------------------------------- | |
2283 | |
2284 bool AudioDeviceWindowsWave::PlayoutError() const | |
2285 { | |
2286 return ( _playError > 0); | |
2287 } | |
2288 | |
2289 // ---------------------------------------------------------------------------- | |
2290 // RecordingWarning | |
2291 // ---------------------------------------------------------------------------- | |
2292 | |
2293 bool AudioDeviceWindowsWave::RecordingWarning() const | |
2294 { | |
2295 return ( _recWarning > 0); | |
2296 } | |
2297 | |
2298 // ---------------------------------------------------------------------------- | |
2299 // RecordingError | |
2300 // ---------------------------------------------------------------------------- | |
2301 | |
2302 bool AudioDeviceWindowsWave::RecordingError() const | |
2303 { | |
2304 return ( _recError > 0); | |
2305 } | |
2306 | |
2307 // ---------------------------------------------------------------------------- | |
2308 // ClearPlayoutWarning | |
2309 // ---------------------------------------------------------------------------- | |
2310 | |
2311 void AudioDeviceWindowsWave::ClearPlayoutWarning() | |
2312 { | |
2313 _playWarning = 0; | |
2314 } | |
2315 | |
2316 // ---------------------------------------------------------------------------- | |
2317 // ClearPlayoutError | |
2318 // ---------------------------------------------------------------------------- | |
2319 | |
2320 void AudioDeviceWindowsWave::ClearPlayoutError() | |
2321 { | |
2322 _playError = 0; | |
2323 } | |
2324 | |
2325 // ---------------------------------------------------------------------------- | |
2326 // ClearRecordingWarning | |
2327 // ---------------------------------------------------------------------------- | |
2328 | |
2329 void AudioDeviceWindowsWave::ClearRecordingWarning() | |
2330 { | |
2331 _recWarning = 0; | |
2332 } | |
2333 | |
2334 // ---------------------------------------------------------------------------- | |
2335 // ClearRecordingError | |
2336 // ---------------------------------------------------------------------------- | |
2337 | |
2338 void AudioDeviceWindowsWave::ClearRecordingError() | |
2339 { | |
2340 _recError = 0; | |
2341 } | |
2342 | |
2343 // ============================================================================ | |
2344 // Private Methods | |
2345 // ============================================================================ | |
2346 | |
2347 // ---------------------------------------------------------------------------- | |
2348 // InputSanityCheckAfterUnlockedPeriod | |
2349 // ---------------------------------------------------------------------------- | |
2350 | |
2351 int32_t AudioDeviceWindowsWave::InputSanityCheckAfterUnlockedPeriod() const | |
2352 { | |
2353 if (_hWaveIn == NULL) | |
2354 { | |
2355         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "input state has been modified during unlocked period"); | |
2356 return -1; | |
2357 } | |
2358 return 0; | |
2359 } | |
2360 | |
2361 // ---------------------------------------------------------------------------- | |
2362 // OutputSanityCheckAfterUnlockedPeriod | |
2363 // ---------------------------------------------------------------------------- | |
2364 | |
2365 int32_t AudioDeviceWindowsWave::OutputSanityCheckAfterUnlockedPeriod() const | |
2366 { | |
2367 if (_hWaveOut == NULL) | |
2368 { | |
2369         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "output state has been modified during unlocked period"); | |
2370 return -1; | |
2371 } | |
2372 return 0; | |
2373 } | |
2374 | |
2375 // ---------------------------------------------------------------------------- | |
2376 // EnumeratePlayoutDevices | |
2377 // ---------------------------------------------------------------------------- | |
2378 | |
2379 int32_t AudioDeviceWindowsWave::EnumeratePlayoutDevices() | |
2380 { | |
2381 | |
2382 uint16_t nDevices(PlayoutDevices()); | |
2383 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "==========================
====================================="); | |
2384 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#output devices: %u", nDev
ices); | |
2385 | |
2386 WAVEOUTCAPS caps; | |
2387 MMRESULT res; | |
2388 | |
2389 for (UINT deviceID = 0; deviceID < nDevices; deviceID++) | |
2390 { | |
2391 res = waveOutGetDevCaps(deviceID, &caps, sizeof(WAVEOUTCAPS)); | |
2392 if (res != MMSYSERR_NOERROR) | |
2393 { | |
2394 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetDevCa
ps() failed (err=%d)", res); | |
2395 } | |
2396 | |
2397 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "======================
========================================="); | |
2398 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Device ID %u:", device
ID); | |
2399 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "manufacturer ID :
%u", caps.wMid); | |
2400 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product ID :
%u",caps.wPid); | |
2401 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "version of driver :
%u.%u", HIBYTE(caps.vDriverVersion), LOBYTE(caps.vDriverVersion)); | |
2402 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product name :
%s", caps.szPname); | |
2403 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "dwFormats :
0x%x", caps.dwFormats); | |
2404 if (caps.dwFormats & WAVE_FORMAT_48S16) | |
2405 { | |
2406 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " 48kHz,stereo,16b
it : SUPPORTED"); | |
2407 } | |
2408 else | |
2409 { | |
2410 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " 48kHz,ster
eo,16bit : *NOT* SUPPORTED"); | |
2411 } | |
2412 if (caps.dwFormats & WAVE_FORMAT_48M16) | |
2413 { | |
2414 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " 48kHz,mono,16bit
: SUPPORTED"); | |
2415 } | |
2416 else | |
2417 { | |
2418 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " 48kHz,mono
,16bit : *NOT* SUPPORTED"); | |
2419 } | |
2420 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wChannels :
%u", caps.wChannels); | |
2421 TraceSupportFlags(caps.dwSupport); | |
2422 } | |
2423 | |
2424 return 0; | |
2425 } | |
2426 | |
2427 // ---------------------------------------------------------------------------- | |
2428 // EnumerateRecordingDevices | |
2429 // ---------------------------------------------------------------------------- | |
2430 | |
2431 int32_t AudioDeviceWindowsWave::EnumerateRecordingDevices() | |
2432 { | |
2433 | |
2434 uint16_t nDevices(RecordingDevices()); | |
2435 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "==========================
====================================="); | |
2436 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#input devices: %u", nDevi
ces); | |
2437 | |
2438 WAVEINCAPS caps; | |
2439 MMRESULT res; | |
2440 | |
2441 for (UINT deviceID = 0; deviceID < nDevices; deviceID++) | |
2442 { | |
2443 res = waveInGetDevCaps(deviceID, &caps, sizeof(WAVEINCAPS)); | |
2444 if (res != MMSYSERR_NOERROR) | |
2445 { | |
2446 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetDevCap
s() failed (err=%d)", res); | |
2447 } | |
2448 | |
2449 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "======================
========================================="); | |
2450 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Device ID %u:", device
ID); | |
2451 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "manufacturer ID :
%u", caps.wMid); | |
2452 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product ID :
%u",caps.wPid); | |
2453 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "version of driver :
%u.%u", HIBYTE(caps.vDriverVersion), LOBYTE(caps.vDriverVersion)); | |
2454 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "product name :
%s", caps.szPname); | |
2455 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "dwFormats :
0x%x", caps.dwFormats); | |
2456 if (caps.dwFormats & WAVE_FORMAT_48S16) | |
2457 { | |
2458 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " 48kHz,stereo,16b
it : SUPPORTED"); | |
2459 } | |
2460 else | |
2461 { | |
2462 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " 48kHz,ster
eo,16bit : *NOT* SUPPORTED"); | |
2463 } | |
2464 if (caps.dwFormats & WAVE_FORMAT_48M16) | |
2465 { | |
2466 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, " 48kHz,mono,16bit
: SUPPORTED"); | |
2467 } | |
2468 else | |
2469 { | |
2470 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, " 48kHz,mono
,16bit : *NOT* SUPPORTED"); | |
2471 } | |
2472 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wChannels :
%u", caps.wChannels); | |
2473 } | |
2474 | |
2475 return 0; | |
2476 } | |
2477 | |
2478 // ---------------------------------------------------------------------------- | |
2479 // TraceSupportFlags | |
2480 // ---------------------------------------------------------------------------- | |
2481 | |
2482 void AudioDeviceWindowsWave::TraceSupportFlags(DWORD dwSupport) const | |
2483 { | |
2484 TCHAR buf[256]; | |
2485 | |
2486     StringCchPrintf(buf, 256, TEXT("support flags : 0x%x "), dwSupport); | |
2487 | |
2488 if (dwSupport & WAVECAPS_PITCH) | |
2489 { | |
2490 // supports pitch control | |
2491 StringCchCat(buf, 256, TEXT("(PITCH)")); | |
2492 } | |
2493 if (dwSupport & WAVECAPS_PLAYBACKRATE) | |
2494 { | |
2495 // supports playback rate control | |
2496 StringCchCat(buf, 256, TEXT("(PLAYBACKRATE)")); | |
2497 } | |
2498 if (dwSupport & WAVECAPS_VOLUME) | |
2499 { | |
2500 // supports volume control | |
2501 StringCchCat(buf, 256, TEXT("(VOLUME)")); | |
2502 } | |
2503 if (dwSupport & WAVECAPS_LRVOLUME) | |
2504 { | |
2505 // supports separate left and right volume control | |
2506 StringCchCat(buf, 256, TEXT("(LRVOLUME)")); | |
2507 } | |
2508 if (dwSupport & WAVECAPS_SYNC) | |
2509 { | |
2510 // the driver is synchronous and will block while playing a buffer | |
2511 StringCchCat(buf, 256, TEXT("(SYNC)")); | |
2512 } | |
2513 if (dwSupport & WAVECAPS_SAMPLEACCURATE) | |
2514 { | |
2515 // returns sample-accurate position information | |
2516 StringCchCat(buf, 256, TEXT("(SAMPLEACCURATE)")); | |
2517 } | |
2518 | |
2519 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%S", buf); | |
2520 } | |
2521 | |
2522 // ---------------------------------------------------------------------------- | |
2523 // TraceWaveInError | |
2524 // ---------------------------------------------------------------------------- | |
2525 | |
2526 void AudioDeviceWindowsWave::TraceWaveInError(MMRESULT error) const | |
2527 { | |
2528 TCHAR buf[MAXERRORLENGTH]; | |
2529 TCHAR msg[MAXERRORLENGTH]; | |
2530 | |
2531 StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: ")); | |
2532 waveInGetErrorText(error, msg, MAXERRORLENGTH); | |
2533 StringCchCat(buf, MAXERRORLENGTH, msg); | |
2534 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%S", buf); | |
2535 } | |
2536 | |
2537 // ---------------------------------------------------------------------------- | |
2538 // TraceWaveOutError | |
2539 // ---------------------------------------------------------------------------- | |
2540 | |
2541 void AudioDeviceWindowsWave::TraceWaveOutError(MMRESULT error) const | |
2542 { | |
2543 TCHAR buf[MAXERRORLENGTH]; | |
2544 TCHAR msg[MAXERRORLENGTH]; | |
2545 | |
2546 StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: ")); | |
2547 waveOutGetErrorText(error, msg, MAXERRORLENGTH); | |
2548 StringCchCat(buf, MAXERRORLENGTH, msg); | |
2549 WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%S", buf); | |
2550 } | |
2551 | |
2552 // ---------------------------------------------------------------------------- | |
2553 // PrepareStartPlayout | |
2554 // ---------------------------------------------------------------------------- | |
2555 | |
2556 int32_t AudioDeviceWindowsWave::PrepareStartPlayout() | |
2557 { | |
2558 | |
2559 CriticalSectionScoped lock(&_critSect); | |
2560 | |
2561 if (_hWaveOut == NULL) | |
2562 { | |
2563 return -1; | |
2564 } | |
2565 | |
2566     // A total of 30 ms of data (3 x 10 ms of silence) is immediately placed in the sound-card buffer | |
2567 // | |
2568 int8_t zeroVec[4*PLAY_BUF_SIZE_IN_SAMPLES]; // max allocation | |
2569 memset(zeroVec, 0, 4*PLAY_BUF_SIZE_IN_SAMPLES); | |
2570 | |
2571 { | |
2572 Write(zeroVec, PLAY_BUF_SIZE_IN_SAMPLES); | |
2573 Write(zeroVec, PLAY_BUF_SIZE_IN_SAMPLES); | |
2574 Write(zeroVec, PLAY_BUF_SIZE_IN_SAMPLES); | |
2575 } | |
2576 | |
2577 _playAcc = 0; | |
2578 _playWarning = 0; | |
2579 _playError = 0; | |
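    // Reset the state of the clock-drift detector that GetPlayoutBufferDelay()
    // uses to decide when to switch to the alternative playout-delay method.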
2580 _dc_diff_mean = 0; | |
2581 _dc_y_prev = 0; | |
2582 _dc_penalty_counter = 20; | |
2583 _dc_prevtime = 0; | |
2584 _dc_prevplay = 0; | |
2585 | |
2586 return 0; | |
2587 } | |
2588 | |
2589 // ---------------------------------------------------------------------------- | |
2590 // PrepareStartRecording | |
2591 // ---------------------------------------------------------------------------- | |
2592 | |
2593 int32_t AudioDeviceWindowsWave::PrepareStartRecording() | |
2594 { | |
2595 | |
2596 CriticalSectionScoped lock(&_critSect); | |
2597 | |
2598 if (_hWaveIn == NULL) | |
2599 { | |
2600 return -1; | |
2601 } | |
2602 | |
2603 _playAcc = 0; | |
2604 _recordedBytes = 0; | |
2605 _recPutBackDelay = REC_PUT_BACK_DELAY; | |
2606 | |
2607 MMRESULT res; | |
2608 MMTIME mmtime; | |
2609 mmtime.wType = TIME_SAMPLES; | |
2610 | |
2611 res = waveInGetPosition(_hWaveIn, &mmtime, sizeof(mmtime)); | |
2612 if (MMSYSERR_NOERROR != res) | |
2613 { | |
2614         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetPosition(TIME_SAMPLES) failed (err=%d)", res); | |
2615 TraceWaveInError(res); | |
2616 } | |
2617 | |
2618 _read_samples = mmtime.u.sample; | |
2619 _read_samples_old = _read_samples; | |
2620 _rec_samples_old = mmtime.u.sample; | |
2621 _wrapCounter = 0; | |
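    // _wrapCounter counts suspected wrap-arounds of the sound-card sample
    // counter; it is updated and evaluated in GetRecordingBufferDelay().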
2622 | |
2623 for (int n = 0; n < N_BUFFERS_IN; n++) | |
2624 { | |
2625 const uint8_t nBytesPerSample = 2*_recChannels; | |
2626 | |
2627 // set up the input wave header | |
2628         _waveHeaderIn[n].lpData          = reinterpret_cast<LPSTR>(&_recBuffer[n]); | |
2629         _waveHeaderIn[n].dwBufferLength  = nBytesPerSample * REC_BUF_SIZE_IN_SAMPLES; | |
2630 _waveHeaderIn[n].dwFlags = 0; | |
2631 _waveHeaderIn[n].dwBytesRecorded = 0; | |
2632 _waveHeaderIn[n].dwUser = 0; | |
2633 | |
2634 memset(_recBuffer[n], 0, nBytesPerSample * REC_BUF_SIZE_IN_SAMPLES); | |
2635 | |
2636 // prepare a buffer for waveform-audio input | |
2637 res = waveInPrepareHeader(_hWaveIn, &_waveHeaderIn[n], sizeof(WAVEHDR)); | |
2638 if (MMSYSERR_NOERROR != res) | |
2639 { | |
2640             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInPrepareHeader(%d) failed (err=%d)", n, res); | |
2641 TraceWaveInError(res); | |
2642 } | |
2643 | |
2644 // send an input buffer to the given waveform-audio input device | |
2645 res = waveInAddBuffer(_hWaveIn, &_waveHeaderIn[n], sizeof(WAVEHDR)); | |
2646 if (MMSYSERR_NOERROR != res) | |
2647 { | |
2648             WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInAddBuffer(%d) failed (err=%d)", n, res); | |
2649 TraceWaveInError(res); | |
2650 } | |
2651 } | |
2652 | |
2653 // start input on the given waveform-audio input device | |
2654 res = waveInStart(_hWaveIn); | |
2655 if (MMSYSERR_NOERROR != res) | |
2656 { | |
2657         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInStart() failed (err=%d)", res); | |
2658 TraceWaveInError(res); | |
2659 } | |
2660 | |
2661 return 0; | |
2662 } | |
2663 | |
2664 // ---------------------------------------------------------------------------- | |
2665 // GetPlayoutBufferDelay | |
2666 // ---------------------------------------------------------------------------- | |
2667 | |
2668 int32_t AudioDeviceWindowsWave::GetPlayoutBufferDelay(uint32_t& writtenSamples, uint32_t& playedSamples) | |
2669 { | |
2670 int i; | |
2671 int ms_Header; | |
2672 long playedDifference; | |
2673     int msecInPlayoutBuffer(0);   // #milliseconds of audio in the playout buffer | |
2674 | |
2675     const uint16_t nSamplesPerMs = (uint16_t)(N_PLAY_SAMPLES_PER_SEC/1000);  // default is 48000/1000 = 48 | |
2676 | |
2677 MMRESULT res; | |
2678 MMTIME mmtime; | |
2679 | |
2680 if (!_playing) | |
2681 { | |
2682 playedSamples = 0; | |
2683 return (0); | |
2684 } | |
2685 | |
2686 // Retrieve the current playback position. | |
2687 // | |
2688 mmtime.wType = TIME_SAMPLES; // number of waveform-audio samples | |
2689 res = waveOutGetPosition(_hWaveOut, &mmtime, sizeof(mmtime)); | |
2690 if (MMSYSERR_NOERROR != res) | |
2691 { | |
2692         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveOutGetPosition() failed (err=%d)", res); | |
2693 TraceWaveOutError(res); | |
2694 } | |
2695 | |
2696     writtenSamples = _writtenSamples;   // #samples written to the playout buffer | |
2697     playedSamples  = mmtime.u.sample;   // current playout position in the playout buffer | |
2698 | |
2699 // derive remaining amount (in ms) of data in the playout buffer | |
2700 msecInPlayoutBuffer = ((writtenSamples - playedSamples)/nSamplesPerMs); | |
2701 | |
2702 playedDifference = (long) (_playedSamplesOld - playedSamples); | |
2703 | |
2704 if (playedDifference > 64000) | |
2705 { | |
2706         // If the sound card's number-of-played-out-samples variable wraps around before | |
2707         // writtenSamples wraps around, this needs to be adjusted. This can happen on | |
2708         // sound cards that use less than 32 bits to keep track of the number of played-out | |
2709         // samples. To avoid being fooled by sound cards that sometimes produce false | |
2710         // output, we compare the old value minus the new value with a large value. This is | |
2711         // necessary because some sound cards produce an output like 153, 198, 175, 230, which | |
2712         // would trigger the wrap-around function if we didn't compare with a large value. | |
2713         // The value 64000 is chosen because 2^16=65536, so we allow wrap-around at 16 bits. | |
2714 | |
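        // Note: POW2(A) expands to (2 << (A - 1)), i.e. 2^A, so POW2(i) below is
        // the value at which an i-bit counter wraps around.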
2715 i = 31; | |
2716 while((_playedSamplesOld <= (unsigned long)POW2(i)) && (i > 14)) { | |
2717 i--; | |
2718 } | |
2719 | |
2720 if((i < 31) && (i > 14)) { | |
2721             // Avoid adjusting when there is 32-bit wrap-around, since that is | |
2722             // expected and handled separately below. | |
2723 // | |
2724             WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "msecleft() => wrap around occurred: %d bits used by sound card)", (i+1)); | |
2725 | |
2726 _writtenSamples = _writtenSamples - POW2(i + 1); | |
2727 writtenSamples = _writtenSamples; | |
2728             msecInPlayoutBuffer = ((writtenSamples - playedSamples)/nSamplesPerMs); | |
2729 } | |
2730 } | |
2731     else if ((_writtenSamplesOld > (unsigned long)POW2(31)) && (writtenSamples < 96000)) | |
2732 { | |
2733 // Wrap around as expected after having used all 32 bits. (But we still | |
2734 // test if the wrap around happened earlier which it should not) | |
2735 | |
2736 i = 31; | |
2737 while (_writtenSamplesOld <= (unsigned long)POW2(i)) { | |
2738 i--; | |
2739 } | |
2740 | |
2741         WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, " msecleft() (wrap around occurred after having used all 32 bits)"); | |
2742 | |
2743 _writtenSamplesOld = writtenSamples; | |
2744 _playedSamplesOld = playedSamples; | |
2745         msecInPlayoutBuffer = (int)((writtenSamples + POW2(i + 1) - playedSamples)/nSamplesPerMs); | |
2746 | |
2747 } | |
2748     else if ((writtenSamples < 96000) && (playedSamples > (unsigned long)POW2(31))) | |
2749 { | |
2750         // Wrap around has, as expected, happened for writtenSamples before | |
2751         // playedSamples, so we have to adjust for this until playedSamples | |
2752         // has also had its wrap around. | |
2753 | |
2754         WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, " msecleft() (wrap around occurred: correction of output is done)"); | |
2755 | |
2756 _writtenSamplesOld = writtenSamples; | |
2757 _playedSamplesOld = playedSamples; | |
2758         msecInPlayoutBuffer = (int)((writtenSamples + POW2(32) - playedSamples)/nSamplesPerMs); | |
2759 } | |
2760 | |
2761 _writtenSamplesOld = writtenSamples; | |
2762 _playedSamplesOld = playedSamples; | |
2763 | |
2764 | |
2765     // We use the following formula to track that playout works as it should: | |
2766     //   y = playedSamples/48 - timeGetTime(); | |
2767     // y represents the clock drift between the system clock and the sound-card clock and should be fairly stable. | |
2768     // When the exponential mean value of diff(y) moves away from zero, something is wrong. | |
2769     // The exponential formula will accept 1% clock drift but not more. | |
2770     // The driver error means that we will play too little audio and have a large negative clock drift. | |
2771     // We kick in our alternative method when the clock drift reaches 20%. | |
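    // In the code below the filter is implemented in fixed point as
    //   _dc_diff_mean = (990*_dc_diff_mean)/1000 + 10*diff;
    // i.e. an exponentially weighted average of diff(y) scaled by a factor of
    // 1000, so the -200 threshold corresponds to an average diff(y) of roughly
    // -0.2 ms per update.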
2772 | |
2773     int diff, y; | |
2774     unsigned int time = 0; | |
2775 | |
2776 // If we have other problems that causes playout glitches | |
2777 // we don't want to switch playout method. | |
2778 // Check if playout buffer is extremely low, or if we haven't been able to | |
2779     // execute our code in more than 40 ms | |
2780 | |
2781 time = timeGetTime(); | |
2782 | |
2783 if ((msecInPlayoutBuffer < 20) || (time - _dc_prevtime > 40)) | |
2784 { | |
2785 _dc_penalty_counter = 100; | |
2786 } | |
2787 | |
2788 if ((playedSamples != 0)) | |
2789 { | |
2790 y = playedSamples/48 - time; | |
2791 if ((_dc_y_prev != 0) && (_dc_penalty_counter == 0)) | |
2792 { | |
2793 diff = y - _dc_y_prev; | |
2794 _dc_diff_mean = (990*_dc_diff_mean)/1000 + 10*diff; | |
2795 } | |
2796 _dc_y_prev = y; | |
2797 } | |
2798 | |
2799 if (_dc_penalty_counter) | |
2800 { | |
2801 _dc_penalty_counter--; | |
2802 } | |
2803 | |
2804 if (_dc_diff_mean < -200) | |
2805 { | |
2806 // Always reset the filter | |
2807 _dc_diff_mean = 0; | |
2808 | |
2809 // Problem is detected. Switch delay method and set min buffer to 80. | |
2810 // Reset the filter and keep monitoring the filter output. | |
2811 // If issue is detected a second time, increase min buffer to 100. | |
2812 // If that does not help, we must modify this scheme further. | |
2813 | |
2814 _useHeader++; | |
2815 if (_useHeader == 1) | |
2816 { | |
2817 _minPlayBufDelay = 80; | |
2818 _playWarning = 1; // only warn first time | |
2819             WEBRTC_TRACE(kTraceInfo, kTraceUtility, -1, "Modification #1: _useHeader = %d, _minPlayBufDelay = %d", _useHeader, _minPlayBufDelay); | |
2820 } | |
2821 else if (_useHeader == 2) | |
2822 { | |
2823 _minPlayBufDelay = 100; // add some more safety | |
2824             WEBRTC_TRACE(kTraceInfo, kTraceUtility, -1, "Modification #2: _useHeader = %d, _minPlayBufDelay = %d", _useHeader, _minPlayBufDelay); | |
2825 } | |
2826 else | |
2827 { | |
2828 // This state should not be entered... (HA) | |
2829             WEBRTC_TRACE(kTraceWarning, kTraceUtility, -1, "further actions are required!"); | |
2830 } | |
2831 if (_playWarning == 1) | |
2832 { | |
2833             WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "pending playout warning exists"); | |
2834 } | |
2835 _playWarning = 1; // triggers callback from module process thread | |
2836         WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "kPlayoutWarning message posted: switching to alternative playout delay method"); | |
2837 } | |
2838 _dc_prevtime = time; | |
2839 _dc_prevplay = playedSamples; | |
2840 | |
2841 // Try a very rough method of looking at how many buffers are still playing | |
2842 ms_Header = 0; | |
2843 for (i = 0; i < N_BUFFERS_OUT; i++) { | |
2844 if ((_waveHeaderOut[i].dwFlags & WHDR_INQUEUE)!=0) { | |
2845 ms_Header += 10; | |
2846 } | |
2847 } | |
2848 | |
2849 if ((ms_Header-50) > msecInPlayoutBuffer) { | |
2850         // Test for cases when GetPosition appears to be screwed up (currently just log....) | |
2851 TCHAR infoStr[300]; | |
2852 if (_no_of_msecleft_warnings%20==0) | |
2853 { | |
2854             StringCchPrintf(infoStr, 300, TEXT("writtenSamples=%i, playedSamples=%i, msecInPlayoutBuffer=%i, ms_Header=%i"), writtenSamples, playedSamples, msecInPlayoutBuffer, ms_Header); | |
2855 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "%S", infoStr); | |
2856 } | |
2857 _no_of_msecleft_warnings++; | |
2858 } | |
2859 | |
2860 // If this is true we have had a problem with the playout | |
2861 if (_useHeader > 0) | |
2862 { | |
2863 return (ms_Header); | |
2864 } | |
2865 | |
2866 | |
2867 if (ms_Header < msecInPlayoutBuffer) | |
2868 { | |
2869 if (_no_of_msecleft_warnings % 100 == 0) | |
2870 { | |
2871 TCHAR str[300]; | |
2872             StringCchPrintf(str, 300, TEXT("_no_of_msecleft_warnings=%i, msecInPlayoutBuffer=%i ms_Header=%i (minBuffer=%i buffersize=%i writtenSamples=%i playedSamples=%i)"), | |
2873                 _no_of_msecleft_warnings, msecInPlayoutBuffer, ms_Header, _minPlayBufDelay, _playBufDelay, writtenSamples, playedSamples); | |
2874 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "%S", str); | |
2875 } | |
2876 _no_of_msecleft_warnings++; | |
2877         ms_Header -= 6;  // Round off as we only have 10ms resolution + Header info is usually slightly delayed compared to GetPosition | |
2878 | |
2879 if (ms_Header < 0) | |
2880 ms_Header = 0; | |
2881 | |
2882 return (ms_Header); | |
2883 } | |
2884 else | |
2885 { | |
2886 return (msecInPlayoutBuffer); | |
2887 } | |
2888 } | |
2889 | |
2890 // ---------------------------------------------------------------------------- | |
2891 // GetRecordingBufferDelay | |
2892 // ---------------------------------------------------------------------------- | |
2893 | |
2894 int32_t AudioDeviceWindowsWave::GetRecordingBufferDelay(uint32_t& readSamples, uint32_t& recSamples) | |
2895 { | |
2896 long recDifference; | |
2897 MMTIME mmtime; | |
2898 MMRESULT mmr; | |
2899 | |
2900     const uint16_t nSamplesPerMs = (uint16_t)(N_REC_SAMPLES_PER_SEC/1000);  // default is 48000/1000 = 48 | |
2901 | |
2902     // Retrieve the current input position of the given waveform-audio input device | |
2903 // | |
2904 mmtime.wType = TIME_SAMPLES; | |
2905 mmr = waveInGetPosition(_hWaveIn, &mmtime, sizeof(mmtime)); | |
2906 if (MMSYSERR_NOERROR != mmr) | |
2907 { | |
2908         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInGetPosition() failed (err=%d)", mmr); | |
2909 TraceWaveInError(mmr); | |
2910 } | |
2911 | |
2912     readSamples = _read_samples;    // updated for each full frame in RecProc() | |
2913     recSamples  = mmtime.u.sample;  // remaining time in input queue (recorded but not read yet) | |
2914 | |
2915 recDifference = (long) (_rec_samples_old - recSamples); | |
2916 | |
2917 if( recDifference > 64000) { | |
2918         WEBRTC_TRACE(kTraceDebug, kTraceUtility, -1, "WRAP 1 (recDifference =%d)", recDifference); | |
2919         // If the sound card's number-of-recorded-samples variable wraps around before | |
2920         // readSamples wraps around, this needs to be adjusted. This can happen on | |
2921         // sound cards that use less than 32 bits to keep track of the number of recorded | |
2922         // samples. To avoid being fooled by sound cards that sometimes produce false | |
2923         // output, we compare the old value minus the new value with a large value. This is | |
2924         // necessary because some sound cards produce an output like 153, 198, 175, 230, which | |
2925         // would trigger the wrap-around function if we didn't compare with a large value. | |
2926         // The value 64000 is chosen because 2^16=65536, so we allow wrap-around at 16 bits. | |
2927 // | |
2928 int i = 31; | |
2929 while((_rec_samples_old <= (unsigned long)POW2(i)) && (i > 14)) | |
2930 i--; | |
2931 | |
2932 if((i < 31) && (i > 14)) { | |
2933             // Avoid adjusting when there is 32-bit wrap-around, since that is | |
2934             // expected and handled separately below. | |
2935 // | |
2936 _read_samples = _read_samples - POW2(i + 1); | |
2937 readSamples = _read_samples; | |
2938 _wrapCounter++; | |
2939 } else { | |
2940             WEBRTC_TRACE(kTraceWarning, kTraceUtility, -1, "AEC (_rec_samples_old %d recSamples %d)", _rec_samples_old, recSamples); | |
2941 } | |
2942 } | |
2943 | |
2944 if((_wrapCounter>200)){ | |
2945 // Do nothing, handled later | |
2946 } | |
2947     else if((_rec_samples_old > (unsigned long)POW2(31)) && (recSamples < 96000)) { | |
2948         WEBRTC_TRACE(kTraceDebug, kTraceUtility, -1, "WRAP 2 (_rec_samples_old %d recSamples %d)", _rec_samples_old, recSamples); | |
2949 // Wrap around as expected after having used all 32 bits. | |
2950 _read_samples_old = readSamples; | |
2951 _rec_samples_old = recSamples; | |
2952 _wrapCounter++; | |
2953 return (int)((recSamples + POW2(32) - readSamples)/nSamplesPerMs); | |
2954 | |
2955 | |
2956 } else if((recSamples < 96000) && (readSamples > (unsigned long)POW2(31))) { | |
2957         WEBRTC_TRACE(kTraceDebug, kTraceUtility, -1, "WRAP 3 (readSamples %d recSamples %d)", readSamples, recSamples); | |
2958         // Wrap around has, as expected, happened for recSamples before | |
2959         // readSamples, so we have to adjust for this until readSamples | |
2960         // has also had its wrap around. | |
2961 _read_samples_old = readSamples; | |
2962 _rec_samples_old = recSamples; | |
2963 _wrapCounter++; | |
2964 return (int)((recSamples + POW2(32) - readSamples)/nSamplesPerMs); | |
2965 } | |
2966 | |
2967 _read_samples_old = _read_samples; | |
2968 _rec_samples_old = recSamples; | |
2969 int res=(((int)_rec_samples_old - (int)_read_samples_old)/nSamplesPerMs); | |
2970 | |
2971 if((res > 2000)||(res < 0)||(_wrapCounter>200)){ | |
2972 // Reset everything | |
2973         WEBRTC_TRACE(kTraceWarning, kTraceUtility, -1, "msec_read error (res %d wrapCounter %d)", res, _wrapCounter); | |
2974 MMTIME mmtime; | |
2975 mmtime.wType = TIME_SAMPLES; | |
2976 | |
2977 mmr=waveInGetPosition(_hWaveIn, &mmtime, sizeof(mmtime)); | |
2978 if (mmr != MMSYSERR_NOERROR) { | |
2979             WEBRTC_TRACE(kTraceWarning, kTraceUtility, -1, "waveInGetPosition failed (mmr=%d)", mmr); | |
2980 } | |
2981 _read_samples=mmtime.u.sample; | |
2982 _read_samples_old=_read_samples; | |
2983 _rec_samples_old=mmtime.u.sample; | |
2984 | |
2985 // Guess a decent value | |
2986 res = 20; | |
2987 } | |
2988 | |
2989 _wrapCounter = 0; | |
2990 return res; | |
2991 } | |
2992 | |
2993 // ============================================================================ | |
2994 // Thread Methods | |
2995 // ============================================================================ | |
2996 | |
2997 // ---------------------------------------------------------------------------- | |
2998 // ThreadFunc | |
2999 // ---------------------------------------------------------------------------- | |
3000 | |
3001 bool AudioDeviceWindowsWave::ThreadFunc(void* pThis) | |
3002 { | |
3003 return (static_cast<AudioDeviceWindowsWave*>(pThis)->ThreadProcess()); | |
3004 } | |
3005 | |
3006 // ---------------------------------------------------------------------------- | |
3007 // ThreadProcess | |
3008 // ---------------------------------------------------------------------------- | |
3009 | |
3010 bool AudioDeviceWindowsWave::ThreadProcess() | |
3011 { | |
3012 uint32_t time(0); | |
3013 uint32_t playDiff(0); | |
3014 uint32_t recDiff(0); | |
3015 | |
3016 LONGLONG playTime(0); | |
3017 LONGLONG recTime(0); | |
3018 | |
3019 switch (_timeEvent.Wait(1000)) | |
3020 { | |
3021 case kEventSignaled: | |
3022 break; | |
3023 case kEventError: | |
3024         WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "EventWrapper::Wait() failed => restarting timer"); | |
3025 _timeEvent.StopTimer(); | |
3026 _timeEvent.StartTimer(true, TIMER_PERIOD_MS); | |
3027 return true; | |
3028 case kEventTimeout: | |
3029 return true; | |
3030 } | |
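    // One pass below services both directions: pending start requests are
    // handled first, then playout and recording are driven based on how much
    // time has passed since they were last serviced.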
3031 | |
3032 time = rtc::TimeMillis(); | |
3033 | |
3034 if (_startPlay) | |
3035 { | |
3036 if (PrepareStartPlayout() == 0) | |
3037 { | |
3038 _prevTimerCheckTime = time; | |
3039 _prevPlayTime = time; | |
3040 _startPlay = false; | |
3041 _playing = true; | |
3042 _playStartEvent.Set(); | |
3043 } | |
3044 } | |
3045 | |
3046 if (_startRec) | |
3047 { | |
3048 if (PrepareStartRecording() == 0) | |
3049 { | |
3050 _prevTimerCheckTime = time; | |
3051 _prevRecTime = time; | |
3052 _prevRecByteCheckTime = time; | |
3053 _startRec = false; | |
3054 _recording = true; | |
3055 _recStartEvent.Set(); | |
3056 } | |
3057 } | |
3058 | |
3059 if (_playing) | |
3060 { | |
3061 playDiff = time - _prevPlayTime; | |
3062 } | |
3063 | |
3064 if (_recording) | |
3065 { | |
3066 recDiff = time - _prevRecTime; | |
3067 } | |
3068 | |
3069 if (_playing || _recording) | |
3070 { | |
3071 RestartTimerIfNeeded(time); | |
3072 } | |
3073 | |
3074     if (_playing && | |
3075         ((playDiff > (uint32_t)(_dTcheckPlayBufDelay - 1)) || | |
3076          (playDiff < 0))) | |
3077 { | |
3078 Lock(); | |
3079 if (_playing) | |
3080 { | |
3081 if (PlayProc(playTime) == -1) | |
3082 { | |
3083                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "PlayProc() failed"); | |
3084 } | |
3085 _prevPlayTime = time; | |
3086 if (playTime != 0) | |
3087 _playAcc += playTime; | |
3088 } | |
3089 UnLock(); | |
3090 } | |
3091 | |
3092 if (_playing && (playDiff > 12)) | |
3093 { | |
3094 // It has been a long time since we were able to play out, try to | |
3095 // compensate by calling PlayProc again. | |
3096 // | |
3097 Lock(); | |
3098 if (_playing) | |
3099 { | |
3100 if (PlayProc(playTime)) | |
3101 { | |
3102                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "PlayProc() failed"); | |
3103 } | |
3104 _prevPlayTime = time; | |
3105 if (playTime != 0) | |
3106 _playAcc += playTime; | |
3107 } | |
3108 UnLock(); | |
3109 } | |
3110 | |
3111     if (_recording && | |
3112         ((recDiff > REC_CHECK_TIME_PERIOD_MS) || | |
3113          (recDiff < 0))) | |
3114 { | |
3115 Lock(); | |
3116 if (_recording) | |
3117 { | |
3118 int32_t nRecordedBytes(0); | |
3119 uint16_t maxIter(10); | |
3120 | |
3121             // Deliver all available recorded buffers and update the CPU load measurement. | |
3122             // We use a while loop here to compensate for the fact that the multi-media timer | |
3123             // can sometimes enter a "bad state" after hibernation where the resolution is | |
3124             // reduced from ~1ms to ~10-15 ms. | |
3125 // | |
3126 while ((nRecordedBytes = RecProc(recTime)) > 0) | |
3127 { | |
3128 maxIter--; | |
3129 _recordedBytes += nRecordedBytes; | |
3130 if (recTime && _perfFreq.QuadPart) | |
3131 { | |
3132 // Measure the average CPU load: | |
3133                     // This is a simplified expression where an exponential filter is used: | |
3134 // _avgCPULoad = 0.99 * _avgCPULoad + 0.01 * newCPU, | |
3135 // newCPU = (recTime+playAcc)/f is time in seconds | |
3136 // newCPU / 0.01 is the fraction of a 10 ms period | |
3137                     //   The two 0.01 factors cancel each other. | |
3138 // NOTE - assumes 10ms audio buffers. | |
3139 // | |
3140                     _avgCPULoad = (float)(_avgCPULoad*.99 + (recTime+_playAcc)/(double)(_perfFreq.QuadPart)); | |
3141 _playAcc = 0; | |
3142 } | |
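                // Editor's note (illustrative only): worked example of the filter above.
                // Assuming a QueryPerformanceCounter frequency of 10,000,000 ticks/s and
                // (recTime + _playAcc) == 20,000 ticks, the callbacks consumed 2 ms of the
                // 10 ms period, i.e. newCPU = 0.2. The update adds
                //   20,000 / 10,000,000 = 0.002 = 0.01 * 0.2
                // which matches _avgCPULoad <- 0.99*_avgCPULoad + 0.01*newCPU.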
3143 if (maxIter == 0) | |
3144 { | |
3145                     // If we get this message often, our compensation scheme is not sufficient. | |
3146                     WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "failed to compensate for reduced MM-timer resolution"); | |
3147 } | |
3148 } | |
3149 | |
3150 if (nRecordedBytes == -1) | |
3151 { | |
3152                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "RecProc() failed"); | |
3153 } | |
3154 | |
3155 _prevRecTime = time; | |
3156 | |
3157             // Monitor the recording process and generate error/warning callbacks if needed | |
3158 MonitorRecording(time); | |
3159 } | |
3160 UnLock(); | |
3161 } | |
3162 | |
3163 if (!_recording) | |
3164 { | |
3165 _prevRecByteCheckTime = time; | |
3166 _avgCPULoad = 0; | |
3167 } | |
3168 | |
3169 return true; | |
3170 } | |
3171 | |
3172 // ---------------------------------------------------------------------------- | |
3173 // RecProc | |
3174 // ---------------------------------------------------------------------------- | |
3175 | |
3176 int32_t AudioDeviceWindowsWave::RecProc(LONGLONG& consumedTime) | |
3177 { | |
3178 MMRESULT res; | |
3179 uint32_t bufCount(0); | |
3180 uint32_t nBytesRecorded(0); | |
3181 | |
3182 consumedTime = 0; | |
3183 | |
3184 // count modulo N_BUFFERS_IN (0,1,2,...,(N_BUFFERS_IN-1),0,1,2,..) | |
3185 if (_recBufCount == N_BUFFERS_IN) | |
3186 { | |
3187 _recBufCount = 0; | |
3188 } | |
3189 | |
3190 bufCount = _recBufCount; | |
3191 | |
3192 // take mono/stereo mode into account when deriving size of a full buffer | |
3193 const uint16_t bytesPerSample = 2*_recChannels; | |
3194     const uint32_t fullBufferSizeInBytes = bytesPerSample * REC_BUF_SIZE_IN_SAMPLES; | |
3195 | |
3196 // read number of recorded bytes for the given input-buffer | |
3197 nBytesRecorded = _waveHeaderIn[bufCount].dwBytesRecorded; | |
3198 | |
3199 if (nBytesRecorded == fullBufferSizeInBytes || | |
3200 (nBytesRecorded > 0)) | |
3201 { | |
3202 int32_t msecOnPlaySide; | |
3203 int32_t msecOnRecordSide; | |
3204 uint32_t writtenSamples; | |
3205 uint32_t playedSamples; | |
3206 uint32_t readSamples, recSamples; | |
3207 bool send = true; | |
3208 | |
3209         uint32_t nSamplesRecorded = (nBytesRecorded/bytesPerSample); // divide by 2 or 4 depending on mono or stereo | |
3210 | |
3211 if (nBytesRecorded == fullBufferSizeInBytes) | |
3212 { | |
3213 _timesdwBytes = 0; | |
3214 } | |
3215 else | |
3216 { | |
3217 // Test if it is stuck on this buffer | |
3218 _timesdwBytes++; | |
3219 if (_timesdwBytes < 5) | |
3220 { | |
3221 // keep trying | |
3222 return (0); | |
3223 } | |
3224 else | |
3225 { | |
3226                 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id,"nBytesRecorded=%d => don't use", nBytesRecorded); | |
3227 _timesdwBytes = 0; | |
3228 send = false; | |
3229 } | |
3230 } | |
3231 | |
3232         // store the recorded buffer (no action will be taken if the #recorded samples is not a full buffer) | |
3233         _ptrAudioBuffer->SetRecordedBuffer(_waveHeaderIn[bufCount].lpData, nSamplesRecorded); | |
3234 | |
3235 // update #samples read | |
3236 _read_samples += nSamplesRecorded; | |
3237 | |
3238         // Check how large the playout and recording buffers are on the sound card. | |
3239 // This info is needed by the AEC. | |
3240 // | |
3241 msecOnPlaySide = GetPlayoutBufferDelay(writtenSamples, playedSamples); | |
3242 msecOnRecordSide = GetRecordingBufferDelay(readSamples, recSamples); | |
3243 | |
3244         // If we use the alternative playout delay method, skip the clock drift compensation | |
3245         // since it will be an unreliable estimate and might degrade AEC performance. | |
3246         int32_t drift = (_useHeader > 0) ? 0 : GetClockDrift(playedSamples, recSamples); | |
3247 | |
3248 _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide, drift); | |
3249 | |
3250 _ptrAudioBuffer->SetTypingStatus(KeyPressed()); | |
3251 | |
3252 // Store the play and rec delay values for video synchronization | |
3253 _sndCardPlayDelay = msecOnPlaySide; | |
3254 _sndCardRecDelay = msecOnRecordSide; | |
3255 | |
3256 LARGE_INTEGER t1={0},t2={0}; | |
3257 | |
3258 if (send) | |
3259 { | |
3260 QueryPerformanceCounter(&t1); | |
3261 | |
3262             // deliver recorded samples at specified sample rate, mic level etc. to the observer using callback | |
3263 UnLock(); | |
3264 _ptrAudioBuffer->DeliverRecordedData(); | |
3265 Lock(); | |
3266 | |
3267 QueryPerformanceCounter(&t2); | |
3268 | |
3269 if (InputSanityCheckAfterUnlockedPeriod() == -1) | |
3270 { | |
3271 // assert(false); | |
3272 return -1; | |
3273 } | |
3274 } | |
3275 | |
3276 if (_AGC) | |
3277 { | |
3278 uint32_t newMicLevel = _ptrAudioBuffer->NewMicLevel(); | |
3279 if (newMicLevel != 0) | |
3280 { | |
3281                 // The VQE will only deliver non-zero microphone levels when a change is needed. | |
3282                 WEBRTC_TRACE(kTraceStream, kTraceUtility, _id,"AGC change of volume: => new=%u", newMicLevel); | |
3283 | |
3284 // We store this outside of the audio buffer to avoid | |
3285 // having it overwritten by the getter thread. | |
3286 _newMicLevel = newMicLevel; | |
3287 SetEvent(_hSetCaptureVolumeEvent); | |
3288 } | |
3289 } | |
3290 | |
3291 // return utilized buffer to queue after specified delay (default is 4) | |
3292 if (_recDelayCount > (_recPutBackDelay-1)) | |
3293 { | |
3294             // delay buffer counter to compensate for "put-back-delay" | |
3295             bufCount = (bufCount + N_BUFFERS_IN - _recPutBackDelay) % N_BUFFERS_IN; | |
3296 | |
3297 // reset counter so we can make new detection | |
3298 _waveHeaderIn[bufCount].dwBytesRecorded = 0; | |
3299 | |
3300             // return the utilized wave-header after certain delay (given by _recPutBackDelay) | |
3301             res = waveInUnprepareHeader(_hWaveIn, &(_waveHeaderIn[bufCount]), sizeof(WAVEHDR)); | |
3302 if (MMSYSERR_NOERROR != res) | |
3303 { | |
3304                 WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "waveInUnprepareHeader(%d) failed (err=%d)", bufCount, res); | |
3305 TraceWaveInError(res); | |
3306 } | |
3307 | |
3308 // ensure that the utilized header can be used again | |
3309             res = waveInPrepareHeader(_hWaveIn, &(_waveHeaderIn[bufCount]), sizeof(WAVEHDR)); | |
3310 if (res != MMSYSERR_NOERROR) | |
3311 { | |
3312                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveInPrepareHeader(%d) failed (err=%d)", bufCount, res); | |
3313 TraceWaveInError(res); | |
3314 return -1; | |
3315 } | |
3316 | |
3317 // add the utilized buffer to the queue again | |
3318             res = waveInAddBuffer(_hWaveIn, &(_waveHeaderIn[bufCount]), sizeof(WAVEHDR)); | |
3319 if (res != MMSYSERR_NOERROR) | |
3320 { | |
3321                 WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveInAddBuffer(%d) failed (err=%d)", bufCount, res); | |
3322 TraceWaveInError(res); | |
3323 if (_recPutBackDelay < 50) | |
3324 { | |
3325 _recPutBackDelay++; | |
3326                     WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "_recPutBackDelay increased to %d", _recPutBackDelay); | |
3327 } | |
3328 else | |
3329 { | |
3330 if (_recError == 1) | |
3331 { | |
3332                         WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "pending recording error exists"); | |
3333 } | |
3334                     _recError = 1;  // triggers callback from module process thread | |
3335                     WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kRecordingError message posted: _recPutBackDelay=%u", _recPutBackDelay); | |
3336 } | |
3337 } | |
3338 } // if (_recDelayCount > (_recPutBackDelay-1)) | |
3339 | |
3340 if (_recDelayCount < (_recPutBackDelay+1)) | |
3341 { | |
3342 _recDelayCount++; | |
3343 } | |
3344 | |
3345         // increase main buffer count since one complete buffer has now been delivered | |
3346 _recBufCount++; | |
3347 | |
3348 if (send) { | |
3349 // Calculate processing time | |
3350 consumedTime = (int)(t2.QuadPart-t1.QuadPart); | |
3351 // handle wraps, time should not be higher than a second | |
3352 if ((consumedTime > _perfFreq.QuadPart) || (consumedTime < 0)) | |
3353 consumedTime = 0; | |
3354 } | |
3355 | |
3356 } // if ((nBytesRecorded == fullBufferSizeInBytes)) | |
3357 | |
3358 return nBytesRecorded; | |
3359 } | |
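// Editor's note (illustrative only): the "put-back delay" logic in RecProc()
// re-queues the wave header that was filled _recPutBackDelay buffers earlier
// rather than the one just consumed. With hypothetical values
// N_BUFFERS_IN == 200 and _recPutBackDelay == 4, consuming buffer index 2
// returns index
//
//   (2 + 200 - 4) % 200 == 198
//
// to the driver, so a small window of already-delivered buffers is always held
// back as a safety margin against short timer stalls.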
3360 | |
3361 // ---------------------------------------------------------------------------- | |
3362 // PlayProc | |
3363 // ---------------------------------------------------------------------------- | |
3364 | |
3365 int AudioDeviceWindowsWave::PlayProc(LONGLONG& consumedTime) | |
3366 { | |
3367 int32_t remTimeMS(0); | |
3368 int8_t playBuffer[4*PLAY_BUF_SIZE_IN_SAMPLES]; | |
3369 uint32_t writtenSamples(0); | |
3370 uint32_t playedSamples(0); | |
3371 | |
3372 LARGE_INTEGER t1; | |
3373 LARGE_INTEGER t2; | |
3374 | |
3375 consumedTime = 0; | |
3376 _waitCounter++; | |
3377 | |
3378     // Get number of ms of sound that remains in the sound card buffer for playback. | |
3379 // | |
3380 remTimeMS = GetPlayoutBufferDelay(writtenSamples, playedSamples); | |
3381 | |
3382 // The threshold can be adaptive or fixed. The adaptive scheme is updated | |
3383 // also for fixed mode but the updated threshold is not utilized. | |
3384 // | |
3385 const uint16_t thresholdMS = | |
3386         (_playBufType == AudioDeviceModule::kAdaptiveBufferSize) ? _playBufDelay : _playBufDelayFixed; | |
3387 | |
3388 if (remTimeMS < thresholdMS + 9) | |
3389 { | |
3390 _dTcheckPlayBufDelay = 5; | |
3391 | |
3392 if (remTimeMS == 0) | |
3393 { | |
3394             WEBRTC_TRACE(kTraceInfo, kTraceUtility, _id, "playout buffer is empty => we must adapt..."); | |
3395 if (_waitCounter > 30) | |
3396 { | |
3397 _erZeroCounter++; | |
3398 if (_erZeroCounter == 2) | |
3399 { | |
3400 _playBufDelay += 15; | |
3401 _minPlayBufDelay += 20; | |
3402 _waitCounter = 50; | |
3403                     WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "New playout states (er=0,erZero=2): minPlayBufDelay=%u, playBufDelay=%u", _minPlayBufDelay, _playBufDelay); | |
3404 } | |
3405 else if (_erZeroCounter == 3) | |
3406 { | |
3407 _erZeroCounter = 0; | |
3408 _playBufDelay += 30; | |
3409 _minPlayBufDelay += 25; | |
3410 _waitCounter = 0; | |
3411                     WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "New playout states (er=0, erZero=3): minPlayBufDelay=%u, playBufDelay=%u", _minPlayBufDelay, _playBufDelay); | |
3412 } | |
3413 else | |
3414 { | |
3415 _minPlayBufDelay += 10; | |
3416 _playBufDelay += 15; | |
3417 _waitCounter = 50; | |
3418                     WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "New playout states (er=0, erZero=1): minPlayBufDelay=%u, playBufDelay=%u", _minPlayBufDelay, _playBufDelay); | |
3419 } | |
3420 } | |
3421 } | |
3422 else if (remTimeMS < _minPlayBufDelay) | |
3423 { | |
3424             // If there is less than 25 ms of audio in the playout buffer, | |
3425             // increase the buffer-size limit value. _waitCounter prevents | |
3426             // _playBufDelay from being increased every time this function is called. | |
3427 | |
3428 if (_waitCounter > 30) | |
3429 { | |
3430 _playBufDelay += 10; | |
3431 if (_intro == 0) | |
3432 _waitCounter = 0; | |
3433                 WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Playout threshold is increased: playBufDelay=%u", _playBufDelay); | |
3434 } | |
3435 } | |
3436 else if (remTimeMS < thresholdMS - 9) | |
3437 { | |
3438 _erZeroCounter = 0; | |
3439 } | |
3440 else | |
3441 { | |
3442 _erZeroCounter = 0; | |
3443 _dTcheckPlayBufDelay = 10; | |
3444 } | |
3445 | |
3446 QueryPerformanceCounter(&t1); // measure time: START | |
3447 | |
3448 // Ask for new PCM data to be played out using the AudioDeviceBuffer. | |
3449         // Ensure that this callback is executed without taking the audio-thread lock. | |
3450 // | |
3451 UnLock(); | |
3452         uint32_t nSamples = _ptrAudioBuffer->RequestPlayoutData(PLAY_BUF_SIZE_IN_SAMPLES); | |
3453 Lock(); | |
3454 | |
3455 if (OutputSanityCheckAfterUnlockedPeriod() == -1) | |
3456 { | |
3457 // assert(false); | |
3458 return -1; | |
3459 } | |
3460 | |
3461 nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer); | |
3462 if (nSamples != PLAY_BUF_SIZE_IN_SAMPLES) | |
3463 { | |
3464             WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "invalid number of output samples(%d)", nSamples); | |
3465 } | |
3466 | |
3467 QueryPerformanceCounter(&t2); // measure time: STOP | |
3468 consumedTime = (int)(t2.QuadPart - t1.QuadPart); | |
3469 | |
3470 Write(playBuffer, PLAY_BUF_SIZE_IN_SAMPLES); | |
3471 | |
3472     } // if (remTimeMS < thresholdMS + 9) | |
3473     else if (thresholdMS + 9 < remTimeMS) | |
3474 { | |
3475 _erZeroCounter = 0; | |
3476 _dTcheckPlayBufDelay = 2; // check buffer more often | |
3477         WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Need to check playout buffer more often (dT=%u, remTimeMS=%u)", _dTcheckPlayBufDelay, remTimeMS); | |
3478 } | |
3479 | |
3480     // If the buffer size has been stable for 20 seconds, try to decrease the buffer size | |
3481 if (_waitCounter > 2000) | |
3482 { | |
3483 _intro = 0; | |
3484 _playBufDelay--; | |
3485 _waitCounter = 1990; | |
3486         WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Playout threshold is decreased: playBufDelay=%u", _playBufDelay); | |
3487 } | |
3488 | |
3489 // Limit the minimum sound card (playback) delay to adaptive minimum delay | |
3490 if (_playBufDelay < _minPlayBufDelay) | |
3491 { | |
3492 _playBufDelay = _minPlayBufDelay; | |
3493         WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Playout threshold is limited to %u", _minPlayBufDelay); | |
3494 } | |
3495 | |
3496 // Limit the maximum sound card (playback) delay to 150 ms | |
3497 if (_playBufDelay > 150) | |
3498 { | |
3499 _playBufDelay = 150; | |
3500         WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Playout threshold is limited to %d", _playBufDelay); | |
3501 } | |
3502 | |
3503     // Cap the minimum sound card (playback) delay at an upper limit of 65 ms. | |
3504 // Deactivated during "useHeader mode" (_useHeader > 0). | |
3505 if (_minPlayBufDelay > _MAX_minBuffer && | |
3506 (_useHeader == 0)) | |
3507 { | |
3508 _minPlayBufDelay = _MAX_minBuffer; | |
3509         WEBRTC_TRACE(kTraceDebug, kTraceUtility, _id, "Minimum playout threshold is limited to %d", _MAX_minBuffer); | |
3510 } | |
3511 | |
3512 return (0); | |
3513 } | |
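// Editor's note (illustrative only): a minimal sketch of the clamping that ends
// PlayProc(), assuming the limits used above (adaptive floor _minPlayBufDelay,
// hard 150 ms ceiling):
//
//   uint16_t ClampPlayoutThreshold(uint16_t delay, uint16_t minDelay) {  // hypothetical helper
//       if (delay < minDelay) delay = minDelay;  // never below the adaptive floor
//       if (delay > 150)      delay = 150;       // hard 150 ms ceiling
//       return delay;
//   }
//
// Underruns push both the floor and the threshold up in 10-30 ms steps, while
// roughly 20 seconds of stable playout lowers the threshold again 1 ms at a time.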
3514 | |
3515 // ---------------------------------------------------------------------------- | |
3516 // Write | |
3517 // ---------------------------------------------------------------------------- | |
3518 | |
3519 int32_t AudioDeviceWindowsWave::Write(int8_t* data, uint16_t nSamples) | |
3520 { | |
3521 if (_hWaveOut == NULL) | |
3522 { | |
3523 return -1; | |
3524 } | |
3525 | |
3526 if (_playIsInitialized) | |
3527 { | |
3528 MMRESULT res; | |
3529 | |
3530 const uint16_t bufCount(_playBufCount); | |
3531 | |
3532 // Place data in the memory associated with _waveHeaderOut[bufCount] | |
3533 // | |
3534 const int16_t nBytes = (2*_playChannels)*nSamples; | |
3535 memcpy(&_playBuffer[bufCount][0], &data[0], nBytes); | |
3536 | |
3537 // Send a data block to the given waveform-audio output device. | |
3538 // | |
3539 // When the buffer is finished, the WHDR_DONE bit is set in the dwFlags | |
3540 // member of the WAVEHDR structure. The buffer must be prepared with the | |
3541 // waveOutPrepareHeader function before it is passed to waveOutWrite. | |
3542 // Unless the device is paused by calling the waveOutPause function, | |
3543 // playback begins when the first data block is sent to the device. | |
3544 // | |
3545         res = waveOutWrite(_hWaveOut, &_waveHeaderOut[bufCount], sizeof(_waveHeaderOut[bufCount])); | |
3546 if (MMSYSERR_NOERROR != res) | |
3547 { | |
3548             WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "waveOutWrite(%d) failed (err=%d)", bufCount, res); | |
3549 TraceWaveOutError(res); | |
3550 | |
3551 _writeErrors++; | |
3552 if (_writeErrors > 10) | |
3553 { | |
3554 if (_playError == 1) | |
3555 { | |
3556                     WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "pending playout error exists"); | |
3557 } | |
3558 _playError = 1; // triggers callback from module process thread | |
3559                 WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kPlayoutError message posted: _writeErrors=%u", _writeErrors); | |
3560 } | |
3561 | |
3562 return -1; | |
3563 } | |
3564 | |
3565         _playBufCount = (_playBufCount+1) % N_BUFFERS_OUT;  // increase buffer counter modulo size of total buffer | |
3566         _writtenSamples += nSamples;                        // each sample is 2 or 4 bytes | |
3567 _writeErrors = 0; | |
3568 } | |
3569 | |
3570 return 0; | |
3571 } | |
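// Editor's note (illustrative only): the byte count passed to the memcpy above
// is nBytes = 2 * _playChannels * nSamples (16-bit PCM). For example, a 10 ms
// stereo block at a 48 kHz playout rate (an assumed rate, for illustration) is
//
//   nSamples = 480, nBytes = 2 * 2 * 480 = 1,920 bytes
//
// which must fit in _playBuffer[bufCount] and in the prepared WAVEHDR.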
3572 | |
3573 // ---------------------------------------------------------------------------- | |
3574 // GetClockDrift | |
3575 // ---------------------------------------------------------------------------- | |
3576 | |
3577 int32_t AudioDeviceWindowsWave::GetClockDrift(const uint32_t plSamp, const uint32_t rcSamp) | |
3578 { | |
3579 int drift = 0; | |
3580 unsigned int plSampDiff = 0, rcSampDiff = 0; | |
3581 | |
3582 if (plSamp >= _plSampOld) | |
3583 { | |
3584 plSampDiff = plSamp - _plSampOld; | |
3585 } | |
3586 else | |
3587 { | |
3588 // Wrap | |
3589 int i = 31; | |
3590 while(_plSampOld <= (unsigned int)POW2(i)) | |
3591 { | |
3592 i--; | |
3593 } | |
3594 | |
3595 // Add the amount remaining prior to wrapping | |
3596 plSampDiff = plSamp + POW2(i + 1) - _plSampOld; | |
3597 } | |
3598 | |
3599 if (rcSamp >= _rcSampOld) | |
3600 { | |
3601 rcSampDiff = rcSamp - _rcSampOld; | |
3602 } | |
3603 else | |
3604 { // Wrap | |
3605 int i = 31; | |
3606 while(_rcSampOld <= (unsigned int)POW2(i)) | |
3607 { | |
3608 i--; | |
3609 } | |
3610 | |
3611 rcSampDiff = rcSamp + POW2(i + 1) - _rcSampOld; | |
3612 } | |
3613 | |
3614 drift = plSampDiff - rcSampDiff; | |
3615 | |
3616 _plSampOld = plSamp; | |
3617 _rcSampOld = rcSamp; | |
3618 | |
3619 return drift; | |
3620 } | |
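// Editor's note (illustrative only): worked example of the wrap handling above,
// assuming the driver's sample counter happens to wrap at 2^16 = 65,536 (POW2(A)
// expands to 2^A). With _plSampOld = 60,000 and a new plSamp = 1,000:
//
//   the while loop stops at i = 15, since 60,000 > POW2(15) = 32,768
//   plSampDiff = 1,000 + POW2(16) - 60,000 = 1,000 + 65,536 - 60,000 = 6,536
//
// i.e. 6,536 samples were played between the two calls even though the raw
// counter went backwards. The recording side gets the same treatment before the
// two differences are subtracted to form the drift estimate.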
3621 | |
3622 // ---------------------------------------------------------------------------- | |
3623 // MonitorRecording | |
3624 // ---------------------------------------------------------------------------- | |
3625 | |
3626 int32_t AudioDeviceWindowsWave::MonitorRecording(const uint32_t time) | |
3627 { | |
3628 const uint16_t bytesPerSample = 2*_recChannels; | |
3629 const uint32_t nRecordedSamples = _recordedBytes/bytesPerSample; | |
3630 | |
3631 if (nRecordedSamples > 5*N_REC_SAMPLES_PER_SEC) | |
3632 { | |
3633 // 5 seconds of audio has been recorded... | |
3634 if ((time - _prevRecByteCheckTime) > 5700) | |
3635 { | |
3636             // ...and it was more than 5.7 seconds since we last did this check <=> | |
3637             // we have not been able to record 5 seconds of audio in 5.7 seconds, | |
3638 // hence a problem should be reported. | |
3639 // This problem can be related to USB overload. | |
3640 // | |
3641 if (_recWarning == 1) | |
3642 { | |
3643                 WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "pending recording warning exists"); | |
3644 } | |
3645 _recWarning = 1; // triggers callback from module process thread | |
3646             WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "kRecordingWarning message posted: time-_prevRecByteCheckTime=%d", time - _prevRecByteCheckTime); | |
3647 } | |
3648 | |
3649         _recordedBytes = 0;            // restart "check again when 5 seconds are recorded" | |
3650         _prevRecByteCheckTime = time;  // reset timer to measure time for recording of 5 seconds | |
3651 } | |
3652 | |
3653 if ((time - _prevRecByteCheckTime) > 8000) | |
3654 { | |
3655         // It has been more than 8 seconds since we were able to confirm that 5 seconds of | |
3656         // audio was recorded, hence we have not been able to record 5 seconds in | |
3657         // 8 seconds => the complete recording process is most likely dead. | |
3658 // | |
3659 if (_recError == 1) | |
3660 { | |
3661             WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, "pending recording error exists"); | |
3662 } | |
3663 _recError = 1; // triggers callback from module process thread | |
3664         WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kRecordingError message posted: time-_prevRecByteCheckTime=%d", time - _prevRecByteCheckTime); | |
3665 | |
3666 _prevRecByteCheckTime = time; | |
3667 } | |
3668 | |
3669 return 0; | |
3670 } | |
3671 | |
3672 // ---------------------------------------------------------------------------- | |
3673 // RestartTimerIfNeeded | |
3674 // | |
3675 // Restart the timer if needed (it can end up in a bad state after a hibernate). | |
3676 // ---------------------------------------------------------------------------- | |
3677 | |
3678 int32_t AudioDeviceWindowsWave::RestartTimerIfNeeded(const uint32_t time) | |
3679 { | |
3680 const uint32_t diffMS = time - _prevTimerCheckTime; | |
3681 _prevTimerCheckTime = time; | |
3682 | |
3683 if (diffMS > 7) | |
3684 { | |
3685 // one timer-issue detected... | |
3686 _timerFaults++; | |
3687 if (_timerFaults > 5 && _timerRestartAttempts < 2) | |
3688 { | |
3689             // Reinitialize the timer event if it fails to fire at least every 5 ms. | |
3690             // On some machines it helps and the timer starts working as it should again; | |
3691             // however, not on all machines (we have seen issues on e.g. IBM T60). | |
3692             // Therefore, the scheme below ensures that we do at most 2 attempts to restart the timer. | |
3693             // For the cases where restart does not do the trick, we compensate for the reduced | |
3694             // resolution on both the recording and playout sides. | |
3695             WEBRTC_TRACE(kTraceWarning, kTraceUtility, _id, " timer issue detected => timer is restarted"); | |
3696 _timeEvent.StopTimer(); | |
3697 _timeEvent.StartTimer(true, TIMER_PERIOD_MS); | |
3698             // make sure the timer gets time to start up and we don't kill/restart it several times in a row | |
3699 _timerFaults = -20; | |
3700 _timerRestartAttempts++; | |
3701 } | |
3702 } | |
3703 else | |
3704 { | |
3705 // restart timer-check scheme since we are OK | |
3706 _timerFaults = 0; | |
3707 _timerRestartAttempts = 0; | |
3708 } | |
3709 | |
3710 return 0; | |
3711 } | |
3712 | |
3713 | |
3714 bool AudioDeviceWindowsWave::KeyPressed() const { | |
3715 | |
3716 int key_down = 0; | |
3717 for (int key = VK_SPACE; key < VK_NUMLOCK; key++) { | |
3718 short res = GetAsyncKeyState(key); | |
3719 key_down |= res & 0x1; // Get the LSB | |
3720 } | |
3721 return (key_down > 0); | |
3722 } | |
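// Editor's note (illustrative only): GetAsyncKeyState() returns a SHORT whose
// most significant bit means "key is down right now" and whose least
// significant bit means "key was pressed since the previous call" (MSDN marks
// the latter flag as unreliable). KeyPressed() ORs that LSB over the
// VK_SPACE..VK_NUMLOCK range, so it reports typing activity since the previous
// poll rather than keys currently held down, e.g.:
//
//   short state = GetAsyncKeyState(VK_SPACE);
//   bool pressed_since_last_poll = (state & 0x1) != 0;
//   bool currently_down          = (state & 0x8000) != 0;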
3723 } // namespace webrtc | |