| Index: webrtc/modules/audio_device/ios/audio_device_ios.mm
|
| diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.mm b/webrtc/modules/audio_device/ios/audio_device_ios.mm
|
| index 47503a96d70546ef07b822627f6ee69548bdc379..496f584c11e174be96b8c9ba2d87302e01b9413e 100644
|
| --- a/webrtc/modules/audio_device/ios/audio_device_ios.mm
|
| +++ b/webrtc/modules/audio_device/ios/audio_device_ios.mm
|
| @@ -8,1905 +8,996 @@
|
| * be found in the AUTHORS file in the root of the source tree.
|
| */
|
|
|
| +#if !defined(__has_feature) || !__has_feature(objc_arc)
|
| +#error "This file requires ARC support."
|
| +#endif
|
| +
|
| #import <AVFoundation/AVFoundation.h>
|
| #import <Foundation/Foundation.h>
|
|
|
| #include "webrtc/modules/audio_device/ios/audio_device_ios.h"
|
| +#include "webrtc/modules/utility/interface/helpers_ios.h"
|
|
|
| +#include "webrtc/base/checks.h"
|
| +#include "webrtc/base/logging.h"
|
| #include "webrtc/system_wrappers/interface/trace.h"
|
|
|
| +#define TAG "AudioDeviceIOS::"
|
| +
|
| namespace webrtc {
|
| -AudioDeviceIOS::AudioDeviceIOS(const int32_t id)
|
| - :
|
| - _ptrAudioBuffer(NULL),
|
| - _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
|
| - _id(id),
|
| - _auVoiceProcessing(NULL),
|
| - _audioInterruptionObserver(NULL),
|
| - _initialized(false),
|
| - _isShutDown(false),
|
| - _recording(false),
|
| - _playing(false),
|
| - _recIsInitialized(false),
|
| - _playIsInitialized(false),
|
| - _recordingDeviceIsSpecified(false),
|
| - _playoutDeviceIsSpecified(false),
|
| - _micIsInitialized(false),
|
| - _speakerIsInitialized(false),
|
| - _AGC(false),
|
| - _adbSampFreq(0),
|
| - _recordingDelay(0),
|
| - _playoutDelay(0),
|
| - _playoutDelayMeasurementCounter(9999),
|
| - _recordingDelayHWAndOS(0),
|
| - _recordingDelayMeasurementCounter(9999),
|
| - _playWarning(0),
|
| - _playError(0),
|
| - _recWarning(0),
|
| - _recError(0),
|
| - _playoutBufferUsed(0),
|
| - _recordingCurrentSeq(0),
|
| - _recordingBufferTotalSize(0) {
|
| - WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
|
| - "%s created", __FUNCTION__);
|
| -
|
| - memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
|
| - memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
|
| - memset(_recordingLength, 0, sizeof(_recordingLength));
|
| - memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
|
| +
|
| +// TODO(henrika): use this method as base for querying the hardware sample
|
| +// rate, channel configuration and buffer size. Currently only added for
|
| +// debugging purposes.
|
| +static void LogAudioSessionParameters() {
|
| + NSLog(@"LogAudioSessionParameters");
|
| + @autoreleasepool {
|
| + // Initialize our audio session.
|
| + AVAudioSession* session = [AVAudioSession sharedInstance];
|
| + NSError* errorCode = nil;
|
| + // Set category to AVAudioSessionCategoryPlayAndRecord.
|
| + BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
|
| + error:&errorCode];
|
| + ios::CheckAndLogError(success, errorCode);
|
| + // Set mode to AVAudioSessionModeVoiceChat.
|
| + success = [session setMode:AVAudioSessionModeVoiceChat error:&errorCode];
|
| + ios::CheckAndLogError(success, errorCode);
|
| + // Activate the audio session.
|
| + success = [session setActive:YES error:&errorCode];
|
| + ios::CheckAndLogError(success, errorCode);
|
| +
|
| + // Log current properties.
|
| + NSLog(@"category: %@", [session category]);
|
| + AVAudioSessionCategoryOptions options = [session categoryOptions];
|
| + NSLog(@"category options: %lu", (unsigned long)options);
|
| + NSLog(@"mode: %@", [session mode]);
|
| + NSLog(@"sample rate: %0.0f", [session sampleRate]);
|
| + NSLog(@"output latency: %f", (double)[session outputLatency]);
|
| + NSLog(@"input latency: %f", (double)[session inputLatency]);
|
| + NSLog(@"buffer duration: %f", (double)[session IOBufferDuration]);
|
| + NSLog(@"#output channels: %ld", (long)[session outputNumberOfChannels]);
|
| + NSLog(@"#input channels: %ld", (long)[session inputNumberOfChannels]);
|
| +
|
| + // Deactivate the audio session.
|
| + success = [session setActive:NO error:&errorCode];
|
| + ios::CheckAndLogError(success, errorCode);
|
| + }
|
| +}
|
| +
|
| +AudioDeviceIOS::AudioDeviceIOS()
|
| + : audio_device_buffer_(nullptr),
|
| + _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
|
| + _auVoiceProcessing(nullptr),
|
| + _audioInterruptionObserver(nullptr),
|
| + _initialized(false),
|
| + _isShutDown(false),
|
| + _recording(false),
|
| + _playing(false),
|
| + _recIsInitialized(false),
|
| + _playIsInitialized(false),
|
| + _adbSampFreq(0),
|
| + _recordingDelay(0),
|
| + _playoutDelay(0),
|
| + _playoutDelayMeasurementCounter(9999),
|
| + _recordingDelayHWAndOS(0),
|
| + _recordingDelayMeasurementCounter(9999),
|
| + _playoutBufferUsed(0),
|
| + _recordingCurrentSeq(0),
|
| + _recordingBufferTotalSize(0) {
|
| + LOG(LS_INFO) << TAG << "ctor" << ios::GetThreadInfo()
|
| + << ios::GetCurrentThreadDescription();
|
| + memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
|
| + memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
|
| + memset(_recordingLength, 0, sizeof(_recordingLength));
|
| + memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
|
| +#if !defined(NDEBUG)
|
| + LogAudioSessionParameters();
|
| +#endif
|
| +  // TODO(henrika): these parameters are currently hard-coded to match the
|
| +  // existing implementation, which always uses 16 kHz as the preferred
|
| +  // sample rate. The goal is to improve this scheme and make it more
|
| +  // flexible. In addition, a better native buffer size should be derived;
|
| +  // 10 ms is used as the default here. We should also implement observers
|
| +  // so that we are notified of any changes to these parameters.
|
| + playout_parameters_.reset(16000, 1, 160);
|
| + record_parameters_.reset(16000, 1, 160);
|
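| +  // 160 frames per buffer corresponds to 10 ms at 16 kHz (16000 / 100).
|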
| }
|
|
|
| AudioDeviceIOS::~AudioDeviceIOS() {
|
| - WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
|
| - "%s destroyed", __FUNCTION__);
|
| -
|
| - Terminate();
|
| -
|
| - delete &_critSect;
|
| + LOG(LS_INFO) << TAG << "~dtor" << ios::GetThreadInfo();
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + Terminate();
|
| + delete &_critSect;
|
| }
|
|
|
| -
|
| -// ============================================================================
|
| -// API
|
| -// ============================================================================
|
| -
|
| void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - CriticalSectionScoped lock(&_critSect);
|
| -
|
| - _ptrAudioBuffer = audioBuffer;
|
| -
|
| - // inform the AudioBuffer about default settings for this implementation
|
| - _ptrAudioBuffer->SetRecordingSampleRate(ENGINE_REC_BUF_SIZE_IN_SAMPLES);
|
| - _ptrAudioBuffer->SetPlayoutSampleRate(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
|
| - _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
|
| - _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::ActiveAudioLayer(
|
| - AudioDeviceModule::AudioLayer& audioLayer) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| - audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
|
| - return 0;
|
| + LOG(LS_INFO) << TAG << "AttachAudioBuffer";
|
| + DCHECK(audioBuffer);
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + audio_device_buffer_ = audioBuffer;
|
| + // TODO(henrika): try to improve this section.
|
| + audioBuffer->SetPlayoutSampleRate(playout_parameters_.sample_rate());
|
| + audioBuffer->SetPlayoutChannels(playout_parameters_.channels());
|
| + audioBuffer->SetRecordingSampleRate(record_parameters_.sample_rate());
|
| + audioBuffer->SetRecordingChannels(record_parameters_.channels());
|
| }
|
|
|
| int32_t AudioDeviceIOS::Init() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - CriticalSectionScoped lock(&_critSect);
|
| -
|
| - if (_initialized) {
|
| - return 0;
|
| - }
|
| -
|
| - _isShutDown = false;
|
| -
|
| - // Create and start capture thread
|
| - if (!_captureWorkerThread) {
|
| - _captureWorkerThread = ThreadWrapper::CreateThread(
|
| - RunCapture, this, "CaptureWorkerThread");
|
| - bool res = _captureWorkerThread->Start();
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
|
| - _id, "CaptureWorkerThread started (res=%d)", res);
|
| - _captureWorkerThread->SetPriority(kRealtimePriority);
|
| - } else {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
|
| - _id, "Thread already created");
|
| - }
|
| - _playWarning = 0;
|
| - _playError = 0;
|
| - _recWarning = 0;
|
| - _recError = 0;
|
| -
|
| - _initialized = true;
|
| -
|
| - return 0;
|
| + LOG(LS_INFO) << TAG << "Init";
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + if (_initialized) {
|
| + return 0;
|
| + }
|
| + DCHECK(!_captureWorkerThread);
|
| + // Create and start the capture thread.
|
| + // TODO(henrika): do we need this thread?
|
| + _isShutDown = false;
|
| + _captureWorkerThread =
|
| + ThreadWrapper::CreateThread(RunCapture, this, "CaptureWorkerThread");
|
| + if (!_captureWorkerThread->Start()) {
|
| + LOG_F(LS_ERROR) << "Failed to start CaptureWorkerThread!";
|
| + return -1;
|
| + }
|
| + _captureWorkerThread->SetPriority(kRealtimePriority);
|
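| +  // Real-time priority helps the capture thread keep the 10 ms delivery
|
| +  // cadence towards the audio device buffer.
|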
| + _initialized = true;
|
| + return 0;
|
| }
|
|
|
| int32_t AudioDeviceIOS::Terminate() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - if (!_initialized) {
|
| - return 0;
|
| - }
|
| -
|
| -
|
| - // Stop capture thread
|
| - if (_captureWorkerThread) {
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
|
| - _id, "Stopping CaptureWorkerThread");
|
| - bool res = _captureWorkerThread->Stop();
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
|
| - _id, "CaptureWorkerThread stopped (res=%d)", res);
|
| - _captureWorkerThread.reset();
|
| + LOG(LS_INFO) << TAG << "Terminate";
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + if (!_initialized) {
|
| + return 0;
|
| + }
|
| + // Stop the capture thread.
|
| + if (_captureWorkerThread) {
|
| + if (!_captureWorkerThread->Stop()) {
|
| + LOG_F(LS_ERROR) << "Failed to stop CaptureWorkerThread!";
|
| + return -1;
|
| }
|
| -
|
| - // Shut down Audio Unit
|
| - ShutdownPlayOrRecord();
|
| -
|
| - _isShutDown = true;
|
| - _initialized = false;
|
| - _speakerIsInitialized = false;
|
| - _micIsInitialized = false;
|
| - _playoutDeviceIsSpecified = false;
|
| - _recordingDeviceIsSpecified = false;
|
| - return 0;
|
| + _captureWorkerThread.reset();
|
| + }
|
| + ShutdownPlayOrRecord();
|
| + _isShutDown = true;
|
| + _initialized = false;
|
| + return 0;
|
| }
|
|
|
| -bool AudioDeviceIOS::Initialized() const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| - return (_initialized);
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::InitSpeaker() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - CriticalSectionScoped lock(&_critSect);
|
| -
|
| - if (!_initialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
|
| - _id, " Not initialized");
|
| - return -1;
|
| - }
|
| -
|
| - if (_playing) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
|
| - _id, " Cannot init speaker when playing");
|
| - return -1;
|
| - }
|
| -
|
| - if (!_playoutDeviceIsSpecified) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
|
| - _id, " Playout device is not specified");
|
| - return -1;
|
| +int32_t AudioDeviceIOS::InitPlayout() {
|
| + LOG(LS_INFO) << TAG << "InitPlayout";
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + DCHECK(_initialized);
|
| + DCHECK(!_playIsInitialized);
|
| + DCHECK(!_playing);
|
| + if (!_recIsInitialized) {
|
| + if (InitPlayOrRecord() == -1) {
|
| + LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
|
| + return -1;
|
| }
|
| -
|
| - // Do nothing
|
| - _speakerIsInitialized = true;
|
| -
|
| - return 0;
|
| + }
|
| + _playIsInitialized = true;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::InitMicrophone() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - CriticalSectionScoped lock(&_critSect);
|
| -
|
| - if (!_initialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
|
| - _id, " Not initialized");
|
| - return -1;
|
| - }
|
| -
|
| - if (_recording) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
|
| - _id, " Cannot init mic when recording");
|
| - return -1;
|
| - }
|
| -
|
| - if (!_recordingDeviceIsSpecified) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
|
| - _id, " Recording device is not specified");
|
| - return -1;
|
| +int32_t AudioDeviceIOS::InitRecording() {
|
| + LOG(LS_INFO) << TAG << "InitPlayout";
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + DCHECK(_initialized);
|
| + DCHECK(!_recIsInitialized);
|
| + DCHECK(!_recording);
|
| + if (!_playIsInitialized) {
|
| + if (InitPlayOrRecord() == -1) {
|
| + LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
|
| + return -1;
|
| }
|
| -
|
| - // Do nothing
|
| -
|
| - _micIsInitialized = true;
|
| -
|
| - return 0;
|
| -}
|
| -
|
| -bool AudioDeviceIOS::SpeakerIsInitialized() const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| - return _speakerIsInitialized;
|
| -}
|
| -
|
| -bool AudioDeviceIOS::MicrophoneIsInitialized() const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| - return _micIsInitialized;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - available = false; // Speaker volume not supported on iOS
|
| -
|
| - return 0;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetSpeakerVolume(volume=%u)", volume);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -int32_t
|
| - AudioDeviceIOS::SetWaveOutVolume(uint16_t volumeLeft,
|
| - uint16_t volumeRight) {
|
| - WEBRTC_TRACE(
|
| - kTraceModuleCall,
|
| - kTraceAudioDevice,
|
| - _id,
|
| - "AudioDeviceIOS::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
|
| - volumeLeft, volumeRight);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| -
|
| - return -1;
|
| -}
|
| -
|
| -int32_t
|
| -AudioDeviceIOS::WaveOutVolume(uint16_t& /*volumeLeft*/,
|
| - uint16_t& /*volumeRight*/) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -int32_t
|
| - AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::MinSpeakerVolume(
|
| - uint32_t& minVolume) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -int32_t
|
| - AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - available = false; // Speaker mute not supported on iOS
|
| -
|
| - return 0;
|
| + }
|
| + _recIsInitialized = true;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| +int32_t AudioDeviceIOS::StartPlayout() {
|
| + LOG(LS_INFO) << TAG << "StartPlayout";
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + DCHECK(_playIsInitialized);
|
| + DCHECK(!_playing);
|
| +
|
| + CriticalSectionScoped lock(&_critSect);
|
| +
|
| + memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
|
| + _playoutBufferUsed = 0;
|
| + _playoutDelay = 0;
|
| +  // Seed the counter above the 100-callback threshold so that the first
|
| +  // call to UpdatePlayoutDelay() refreshes the delay immediately.
|
| + _playoutDelayMeasurementCounter = 9999;
|
| +
|
| + if (!_recording) {
|
| + OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
|
| + if (result != noErr) {
|
| + LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
|
| + return -1;
|
| + }
|
| + }
|
| + _playing = true;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - available = false; // Mic mute not supported on iOS
|
| -
|
| +int32_t AudioDeviceIOS::StopPlayout() {
|
| + LOG(LS_INFO) << TAG << "StopPlayout";
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + if (!_playIsInitialized || !_playing) {
|
| return 0;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| + }
|
|
|
| - available = false; // Mic boost not supported on iOS
|
| + CriticalSectionScoped lock(&_critSect);
|
|
|
| - return 0;
|
| + if (!_recording) {
|
| +    // Both playout and recording have stopped; shut down the device.
|
| + ShutdownPlayOrRecord();
|
| + }
|
| + _playIsInitialized = false;
|
| + _playing = false;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetMicrophoneBoost(enable=%u)", enable);
|
| -
|
| - if (!_micIsInitialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Microphone not initialized");
|
| - return -1;
|
| - }
|
| -
|
| - if (enable) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " SetMicrophoneBoost cannot be enabled on this platform");
|
| - return -1;
|
| +int32_t AudioDeviceIOS::StartRecording() {
|
| + LOG(LS_INFO) << TAG << "StartRecording";
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + DCHECK(_recIsInitialized);
|
| + DCHECK(!_recording);
|
| +
|
| + CriticalSectionScoped lock(&_critSect);
|
| +
|
| + memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
|
| + memset(_recordingLength, 0, sizeof(_recordingLength));
|
| + memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
|
| +
|
| + _recordingCurrentSeq = 0;
|
| + _recordingBufferTotalSize = 0;
|
| + _recordingDelay = 0;
|
| + _recordingDelayHWAndOS = 0;
|
| +  // Seed the counter above the 100-callback threshold so that the first
|
| +  // call to UpdateRecordingDelay() refreshes the delay immediately.
|
| + _recordingDelayMeasurementCounter = 9999;
|
| +
|
| + if (!_playing) {
|
| + OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
|
| + if (result != noErr) {
|
| + LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
|
| + return -1;
|
| }
|
| -
|
| - return 0;
|
| + }
|
| + _recording = true;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| - if (!_micIsInitialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Microphone not initialized");
|
| - return -1;
|
| - }
|
| -
|
| - enabled = false;
|
| -
|
| +int32_t AudioDeviceIOS::StopRecording() {
|
| + LOG(LS_INFO) << TAG << "StopRecording";
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + if (!_recIsInitialized || !_recording) {
|
| return 0;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| + }
|
|
|
| - available = false; // Stereo recording not supported on iOS
|
| + CriticalSectionScoped lock(&_critSect);
|
|
|
| - return 0;
|
| + if (!_playing) {
|
| +    // Both playout and recording have stopped; shut down the device.
|
| + ShutdownPlayOrRecord();
|
| + }
|
| + _recIsInitialized = false;
|
| + _recording = false;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetStereoRecording(enable=%u)", enable);
|
| -
|
| +// Change the default receiver playout route to speaker.
|
| +int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
|
| + LOG(LS_INFO) << TAG << "SetLoudspeakerStatus(" << enable << ")";
|
| +
|
| + AVAudioSession* session = [AVAudioSession sharedInstance];
|
| + NSString* category = session.category;
|
| + AVAudioSessionCategoryOptions options = session.categoryOptions;
|
| + // Respect old category options if category is
|
| + // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
|
| + // might not be valid for this category.
|
| + if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
|
| if (enable) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Stereo recording is not supported on this platform");
|
| - return -1;
|
| + options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
|
| + } else {
|
| + options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
|
| }
|
| - return 0;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - enabled = false;
|
| - return 0;
|
| + } else {
|
| + options = AVAudioSessionCategoryOptionDefaultToSpeaker;
|
| + }
|
| + NSError* error = nil;
|
| + BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
|
| + withOptions:options
|
| + error:&error];
|
| + ios::CheckAndLogError(success, error);
|
| + return (error == nil) ? 0 : -1;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - available = false; // Stereo playout not supported on iOS
|
| -
|
| - return 0;
|
| +int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const {
|
| + LOG(LS_INFO) << TAG << "GetLoudspeakerStatus";
|
| + AVAudioSession* session = [AVAudioSession sharedInstance];
|
| + AVAudioSessionCategoryOptions options = session.categoryOptions;
|
| + enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetStereoPlayout(enable=%u)", enable);
|
| -
|
| - if (enable) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Stereo playout is not supported on this platform");
|
| - return -1;
|
| - }
|
| - return 0;
|
| +int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
|
| + delayMS = _playoutDelay;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - enabled = false;
|
| - return 0;
|
| +int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
|
| + delayMS = _recordingDelay;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::SetAGC(bool enable) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetAGC(enable=%d)", enable);
|
| -
|
| - _AGC = enable;
|
| -
|
| - return 0;
|
| +int32_t AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
|
| + uint16_t& sizeMS) const {
|
| + type = AudioDeviceModule::kAdaptiveBufferSize;
|
| + sizeMS = _playoutDelay;
|
| + return 0;
|
| }
|
|
|
| -bool AudioDeviceIOS::AGC() const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - return _AGC;
|
| +int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
|
| + CHECK(playout_parameters_.is_valid());
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + *params = playout_parameters_;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| -
|
| - available = false; // Mic volume not supported on IOS
|
| -
|
| - return 0;
|
| +int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
|
| + CHECK(record_parameters_.is_valid());
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + *params = record_parameters_;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetMicrophoneVolume(volume=%u)", volume);
|
| +// ============================================================================
|
| +// Private Methods
|
| +// ============================================================================
|
|
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| +int32_t AudioDeviceIOS::InitPlayOrRecord() {
|
| + LOG_F(LS_INFO) << TAG << "InitPlayOrRecord";
|
| + DCHECK(!_auVoiceProcessing);
|
|
|
| -int32_t
|
| - AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| + OSStatus result = -1;
|
|
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| + // Create Voice Processing Audio Unit
|
| + AudioComponentDescription desc;
|
| + AudioComponent comp;
|
|
|
| -int32_t
|
| - AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| + desc.componentType = kAudioUnitType_Output;
|
| + desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
|
| + desc.componentManufacturer = kAudioUnitManufacturer_Apple;
|
| + desc.componentFlags = 0;
|
| + desc.componentFlagsMask = 0;
|
|
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| + comp = AudioComponentFindNext(nullptr, &desc);
|
| + if (nullptr == comp) {
|
| + LOG_F(LS_ERROR) << "Could not find audio component for Audio Unit";
|
| return -1;
|
| -}
|
| -
|
| -int32_t
|
| - AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| + }
|
|
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| + result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "Failed to create Audio Unit instance: " << result;
|
| return -1;
|
| -}
|
| -
|
| -int32_t
|
| - AudioDeviceIOS::MicrophoneVolumeStepSize(
|
| - uint16_t& stepSize) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| + }
|
| +
|
| + // TODO(henrika): I think we should set the preferred channel configuration
|
| + // in both directions as well to be safe.
|
| +
|
| + // Set preferred hardware sample rate to 16 kHz.
|
| + // TODO(henrika): improve this selection of sample rate. Why do we currently
|
| + // use a hard coded value? How can we fail and still continue?
|
| + NSError* error = nil;
|
| + AVAudioSession* session = [AVAudioSession sharedInstance];
|
| + Float64 preferredSampleRate(playout_parameters_.sample_rate());
|
| + [session setPreferredSampleRate:preferredSampleRate error:&error];
|
| + if (error != nil) {
|
| + const char* errorString = [[error localizedDescription] UTF8String];
|
| + LOG_F(LS_ERROR) << "setPreferredSampleRate failed: " << errorString;
|
| + }
|
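| +  // This is only a preference; the session may grant a different rate. The
|
| +  // actual hardware rate is read back via session.sampleRate further below.
|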
| +
|
| + error = nil;
|
| + // Make the setMode:error: and setCategory:error: calls only if necessary.
|
| + // Non-obviously, setting them to the value they already have will clear
|
| + // transient properties (such as PortOverride) that some other component may
|
| + // have set up.
|
| + if (session.mode != AVAudioSessionModeVoiceChat) {
|
| + [session setMode:AVAudioSessionModeVoiceChat error:&error];
|
| + if (error != nil) {
|
| + const char* errorString = [[error localizedDescription] UTF8String];
|
| + LOG_F(LS_ERROR) << "setMode failed: " << errorString;
|
| + }
|
| + }
|
|
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| + error = nil;
|
| + if (session.category != AVAudioSessionCategoryPlayAndRecord) {
|
| + [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
|
| + if (error != nil) {
|
| + const char* errorString = [[error localizedDescription] UTF8String];
|
| + LOG_F(LS_ERROR) << "setCategory failed: " << errorString;
|
| + }
|
| + }
|
| +
|
| + UInt32 enableIO = 1;
|
| + result = AudioUnitSetProperty(_auVoiceProcessing,
|
| + kAudioOutputUnitProperty_EnableIO,
|
| + kAudioUnitScope_Input,
|
| + 1, // input bus
|
| + &enableIO, sizeof(enableIO));
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "Failed to enable IO on input: " << result;
|
| + }
|
| +
|
| + result = AudioUnitSetProperty(_auVoiceProcessing,
|
| + kAudioOutputUnitProperty_EnableIO,
|
| + kAudioUnitScope_Output,
|
| + 0, // output bus
|
| + &enableIO, sizeof(enableIO));
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "Failed to enable IO on output: " << result;
|
| + }
|
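| +  // On the voice-processing IO unit, element 1 is the input (microphone)
|
| +  // bus and element 0 is the output (speaker) bus.
|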
| +
|
| +  // Disable AU buffer allocation for the recorder; we allocate our own.
|
| + // TODO(henrika): understand this part better.
|
| + UInt32 flag = 0;
|
| + result = AudioUnitSetProperty(_auVoiceProcessing,
|
| + kAudioUnitProperty_ShouldAllocateBuffer,
|
| + kAudioUnitScope_Output, 1, &flag, sizeof(flag));
|
| + if (0 != result) {
|
| + LOG_F(LS_WARNING) << "Failed to disable AU buffer allocation: " << result;
|
| + // Should work anyway
|
| + }
|
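| +  // Since AU allocation is disabled here, RecordProcessImpl() below
|
| +  // supplies its own AudioBufferList to AudioUnitRender().
|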
| +
|
| + // Set recording callback.
|
| + AURenderCallbackStruct auCbS;
|
| + memset(&auCbS, 0, sizeof(auCbS));
|
| + auCbS.inputProc = RecordProcess;
|
| + auCbS.inputProcRefCon = this;
|
| + result = AudioUnitSetProperty(
|
| + _auVoiceProcessing, kAudioOutputUnitProperty_SetInputCallback,
|
| + kAudioUnitScope_Global, 1, &auCbS, sizeof(auCbS));
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "Failed to set AU record callback: " << result;
|
| + }
|
| +
|
| + // Set playout callback.
|
| + memset(&auCbS, 0, sizeof(auCbS));
|
| + auCbS.inputProc = PlayoutProcess;
|
| + auCbS.inputProcRefCon = this;
|
| + result = AudioUnitSetProperty(
|
| + _auVoiceProcessing, kAudioUnitProperty_SetRenderCallback,
|
| + kAudioUnitScope_Global, 0, &auCbS, sizeof(auCbS));
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "Failed to set AU output callback: " << result;
|
| + }
|
| +
|
| + // Get stream format for out/0
|
| + AudioStreamBasicDescription playoutDesc;
|
| + UInt32 size = sizeof(playoutDesc);
|
| + result =
|
| + AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
|
| + kAudioUnitScope_Output, 0, &playoutDesc, &size);
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "Failed to get AU output stream format: " << result;
|
| + }
|
| +
|
| + playoutDesc.mSampleRate = preferredSampleRate;
|
| + LOG_F(LS_INFO) << "Audio Unit playout opened in sampling rate: "
|
| + << playoutDesc.mSampleRate;
|
| +
|
| +  // Store the sampling frequency to use with the Audio Device Buffer.
|
| + // todo: Add 48 kHz (increase buffer sizes). Other fs?
|
| + // TODO(henrika): Figure out if we really need this complex handling.
|
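| +  // Match the reported rate against narrow windows around the supported
|
| +  // nominal rates (44.1, 16 and 8 kHz).
|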
| + if ((playoutDesc.mSampleRate > 44090.0) &&
|
| + (playoutDesc.mSampleRate < 44110.0)) {
|
| + _adbSampFreq = 44100;
|
| + } else if ((playoutDesc.mSampleRate > 15990.0) &&
|
| + (playoutDesc.mSampleRate < 16010.0)) {
|
| + _adbSampFreq = 16000;
|
| + } else if ((playoutDesc.mSampleRate > 7990.0) &&
|
| + (playoutDesc.mSampleRate < 8010.0)) {
|
| + _adbSampFreq = 8000;
|
| + } else {
|
| + _adbSampFreq = 0;
|
| + FATAL() << "Invalid sample rate";
|
| + }
|
| +
|
| + // Set the audio device buffer sampling rates (use same for play and record).
|
| + // TODO(henrika): this is not a good place to set these things up.
|
| + DCHECK(audio_device_buffer_);
|
| + DCHECK_EQ(_adbSampFreq, playout_parameters_.sample_rate());
|
| + audio_device_buffer_->SetRecordingSampleRate(_adbSampFreq);
|
| + audio_device_buffer_->SetPlayoutSampleRate(_adbSampFreq);
|
| +
|
| + // Set stream format for out/0.
|
| + playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
|
| + kLinearPCMFormatFlagIsPacked |
|
| + kLinearPCMFormatFlagIsNonInterleaved;
|
| + playoutDesc.mBytesPerPacket = 2;
|
| + playoutDesc.mFramesPerPacket = 1;
|
| + playoutDesc.mBytesPerFrame = 2;
|
| + playoutDesc.mChannelsPerFrame = 1;
|
| + playoutDesc.mBitsPerChannel = 16;
|
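| +  // I.e. 16-bit mono linear PCM, where one frame equals one packet and
|
| +  // occupies two bytes.
|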
| + result =
|
| + AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
|
| + kAudioUnitScope_Input, 0, &playoutDesc, size);
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "Failed to set AU stream format for out/0";
|
| + }
|
| +
|
| + // Get stream format for in/1.
|
| + AudioStreamBasicDescription recordingDesc;
|
| + size = sizeof(recordingDesc);
|
| + result =
|
| + AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
|
| + kAudioUnitScope_Input, 1, &recordingDesc, &size);
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "Failed to get AU stream format for in/1";
|
| + }
|
| +
|
| + recordingDesc.mSampleRate = preferredSampleRate;
|
| + LOG_F(LS_INFO) << "Audio Unit recording opened in sampling rate: "
|
| + << recordingDesc.mSampleRate;
|
| +
|
| + // Set stream format for out/1 (use same sampling frequency as for in/1).
|
| + recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
|
| + kLinearPCMFormatFlagIsPacked |
|
| + kLinearPCMFormatFlagIsNonInterleaved;
|
| + recordingDesc.mBytesPerPacket = 2;
|
| + recordingDesc.mFramesPerPacket = 1;
|
| + recordingDesc.mBytesPerFrame = 2;
|
| + recordingDesc.mChannelsPerFrame = 1;
|
| + recordingDesc.mBitsPerChannel = 16;
|
| + result =
|
| + AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
|
| + kAudioUnitScope_Output, 1, &recordingDesc, size);
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "Failed to set AU stream format for out/1";
|
| + }
|
| +
|
| + // Initialize here already to be able to get/set stream properties.
|
| + result = AudioUnitInitialize(_auVoiceProcessing);
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "AudioUnitInitialize failed: " << result;
|
| + }
|
| +
|
| + // Get hardware sample rate for logging (see if we get what we asked for).
|
| + // TODO(henrika): what if we don't get what we ask for?
|
| + double sampleRate = session.sampleRate;
|
| + LOG_F(LS_INFO) << "Current HW sample rate is: " << sampleRate
|
| + << ", ADB sample rate is: " << _adbSampFreq;
|
| +
|
| + // Listen to audio interruptions.
|
| + // TODO(henrika): learn this area better.
|
| + NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
|
| + id observer = [center
|
| + addObserverForName:AVAudioSessionInterruptionNotification
|
| + object:nil
|
| + queue:[NSOperationQueue mainQueue]
|
| + usingBlock:^(NSNotification* notification) {
|
| + NSNumber* typeNumber =
|
| + [notification userInfo][AVAudioSessionInterruptionTypeKey];
|
| + AVAudioSessionInterruptionType type =
|
| + (AVAudioSessionInterruptionType)[typeNumber
|
| + unsignedIntegerValue];
|
| + switch (type) {
|
| + case AVAudioSessionInterruptionTypeBegan:
|
| +                      // At this point our audio session has been
|
| +                      // deactivated and the audio unit render callbacks
|
| +                      // no longer occur. Nothing to do.
|
| + break;
|
| + case AVAudioSessionInterruptionTypeEnded: {
|
| + NSError* error = nil;
|
| + AVAudioSession* session = [AVAudioSession sharedInstance];
|
| + [session setActive:YES error:&error];
|
| + if (error != nil) {
|
| +                            LOG_F(LS_ERROR) << "Failed to activate audio session";
|
| + }
|
| +                      // After an interruption, the audio unit render
|
| +                      // callbacks do not resume automatically, so we
|
| +                      // restart the unit manually here.
|
| + AudioOutputUnitStop(_auVoiceProcessing);
|
| + AudioOutputUnitStart(_auVoiceProcessing);
|
| + break;
|
| + }
|
| + }
|
| + }];
|
| +  // Increment the refcount on the observer via an ARC bridge cast. The
|
| +  // instance variable is a void* instead of an id because the header is
|
| +  // included in other pure C++ files.
|
| + _audioInterruptionObserver = (__bridge_retained void*)observer;
|
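| +  // The matching __bridge_transfer cast in ShutdownPlayOrRecord() hands
|
| +  // ownership back to ARC, which releases the observer.
|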
|
|
| -int16_t AudioDeviceIOS::PlayoutDevices() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| + // Activate audio session.
|
| + error = nil;
|
| + [session setActive:YES error:&error];
|
| + if (error != nil) {
|
| + LOG_F(LS_ERROR) << "Failed to active audio session";
|
| + }
|
|
|
| - return (int16_t)1;
|
| + return 0;
|
| }
|
|
|
| -int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetPlayoutDevice(index=%u)", index);
|
| +int32_t AudioDeviceIOS::ShutdownPlayOrRecord() {
|
| + LOG_F(LS_INFO) << TAG << "ShutdownPlayOrRecord";
|
|
|
| - if (_playIsInitialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Playout already initialized");
|
| - return -1;
|
| + if (_audioInterruptionObserver != nullptr) {
|
| + NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
|
| + // Transfer ownership of observer back to ARC, which will dealloc the
|
| + // observer once it exits this scope.
|
| + id observer = (__bridge_transfer id)_audioInterruptionObserver;
|
| + [center removeObserver:observer];
|
| + _audioInterruptionObserver = nullptr;
|
| + }
|
| +
|
| + // Close and delete AU.
|
| + OSStatus result = -1;
|
| + if (nullptr != _auVoiceProcessing) {
|
| + result = AudioOutputUnitStop(_auVoiceProcessing);
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
|
| }
|
| -
|
| - if (index !=0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " SetPlayoutDevice invalid index");
|
| - return -1;
|
| + result = AudioComponentInstanceDispose(_auVoiceProcessing);
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
|
| }
|
| - _playoutDeviceIsSpecified = true;
|
| + _auVoiceProcessing = nullptr;
|
| + }
|
|
|
| - return 0;
|
| + return 0;
|
| }
|
|
|
| -int32_t
|
| - AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - "WindowsDeviceType not supported");
|
| - return -1;
|
| -}
|
| +// ============================================================================
|
| +// Thread Methods
|
| +// ============================================================================
|
|
|
| -int32_t
|
| - AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
|
| - char name[kAdmMaxDeviceNameSize],
|
| - char guid[kAdmMaxGuidSize]) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::PlayoutDeviceName(index=%u)", index);
|
| +OSStatus AudioDeviceIOS::RecordProcess(
|
| + void* inRefCon,
|
| + AudioUnitRenderActionFlags* ioActionFlags,
|
| + const AudioTimeStamp* inTimeStamp,
|
| + UInt32 inBusNumber,
|
| + UInt32 inNumberFrames,
|
| + AudioBufferList* ioData) {
|
| + AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
|
| + return ptrThis->RecordProcessImpl(ioActionFlags, inTimeStamp, inBusNumber,
|
| + inNumberFrames);
|
| +}
|
| +
|
| +OSStatus AudioDeviceIOS::RecordProcessImpl(
|
| + AudioUnitRenderActionFlags* ioActionFlags,
|
| + const AudioTimeStamp* inTimeStamp,
|
| + uint32_t inBusNumber,
|
| + uint32_t inNumberFrames) {
|
| +  // Use a temporary buffer so that the recording buffers are not locked
|
| +  // longer than necessary.
|
| +  // todo: Make dataTmp a member variable with a static size that holds
|
| +  // the max possible number of frames?
|
| + int16_t* dataTmp = new int16_t[inNumberFrames];
|
| + memset(dataTmp, 0, 2 * inNumberFrames);
|
| +
|
| + AudioBufferList abList;
|
| + abList.mNumberBuffers = 1;
|
| + abList.mBuffers[0].mData = dataTmp;
|
| + abList.mBuffers[0].mDataByteSize = 2 * inNumberFrames; // 2 bytes/sample
|
| + abList.mBuffers[0].mNumberChannels = 1;
|
| +
|
| + // Get data from mic
|
| + OSStatus res = AudioUnitRender(_auVoiceProcessing, ioActionFlags, inTimeStamp,
|
| + inBusNumber, inNumberFrames, &abList);
|
| + if (res != 0) {
|
| + // TODO(henrika): improve error handling.
|
| + delete[] dataTmp;
|
| + return 0;
|
| + }
|
| +
|
| + if (_recording) {
|
| +    // Insert all data from the temp buffer into the recording buffers.
|
| +    // At any given time, zero or one buffer is partially full; all others
|
| +    // are full or empty. Full means filled with noSamp10ms samples.
|
| +
|
| + const unsigned int noSamp10ms = _adbSampFreq / 100;
|
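| +    // E.g. 160 samples per 10 ms block at 16 kHz.
|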
| + unsigned int dataPos = 0;
|
| + uint16_t bufPos = 0;
|
| + int16_t insertPos = -1;
|
| + unsigned int nCopy = 0; // Number of samples to copy
|
| +
|
| + while (dataPos < inNumberFrames) {
|
| +      // Loop over all recording buffers, or until we find the partially
|
| +      // full buffer. First choice is to insert into the partially full
|
| +      // buffer; second choice is to insert into an empty buffer.
|
| + bufPos = 0;
|
| + insertPos = -1;
|
| + nCopy = 0;
|
| + while (bufPos < N_REC_BUFFERS) {
|
| + if ((_recordingLength[bufPos] > 0) &&
|
| + (_recordingLength[bufPos] < noSamp10ms)) {
|
| + // Found the partially full buffer
|
| + insertPos = static_cast<int16_t>(bufPos);
|
| +          // No need to search further; quit the loop.
|
| + bufPos = N_REC_BUFFERS;
|
| + } else if ((-1 == insertPos) && (0 == _recordingLength[bufPos])) {
|
| + // Found an empty buffer
|
| + insertPos = static_cast<int16_t>(bufPos);
|
| + }
|
| + ++bufPos;
|
| + }
|
|
|
| - if (index != 0) {
|
| - return -1;
|
| - }
|
| - // return empty strings
|
| - memset(name, 0, kAdmMaxDeviceNameSize);
|
| - if (guid != NULL) {
|
| - memset(guid, 0, kAdmMaxGuidSize);
|
| + // Insert data into buffer
|
| + if (insertPos > -1) {
|
| +        // We found a non-full buffer; copy data into it.
|
| + unsigned int dataToCopy = inNumberFrames - dataPos;
|
| + unsigned int currentRecLen = _recordingLength[insertPos];
|
| + unsigned int roomInBuffer = noSamp10ms - currentRecLen;
|
| + nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);
|
| +
|
| + memcpy(&_recordingBuffer[insertPos][currentRecLen], &dataTmp[dataPos],
|
| + nCopy * sizeof(int16_t));
|
| + if (0 == currentRecLen) {
|
| + _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
|
| + ++_recordingCurrentSeq;
|
| + }
|
| + _recordingBufferTotalSize += nCopy;
|
| + // Has to be done last to avoid interrupt problems between threads.
|
| + _recordingLength[insertPos] += nCopy;
|
| + dataPos += nCopy;
|
| + } else {
|
| + // Didn't find a non-full buffer
|
| + // TODO(henrika): improve error handling
|
| + dataPos = inNumberFrames; // Don't try to insert more
|
| + }
|
| }
|
| + }
|
| + delete[] dataTmp;
|
| + return 0;
|
| +}
|
| +
|
| +OSStatus AudioDeviceIOS::PlayoutProcess(
|
| + void* inRefCon,
|
| + AudioUnitRenderActionFlags* ioActionFlags,
|
| + const AudioTimeStamp* inTimeStamp,
|
| + UInt32 inBusNumber,
|
| + UInt32 inNumberFrames,
|
| + AudioBufferList* ioData) {
|
| + AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
|
| + return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
|
| +}
|
| +
|
| +OSStatus AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
|
| + AudioBufferList* ioData) {
|
| + int16_t* data = static_cast<int16_t*>(ioData->mBuffers[0].mData);
|
| + unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
|
| + unsigned int dataSize = dataSizeBytes / 2; // Number of samples
|
| + CHECK_EQ(dataSize, inNumberFrames);
|
| + memset(data, 0, dataSizeBytes); // Start with empty buffer
|
| +
|
| + // Get playout data from Audio Device Buffer
|
| +
|
| + if (_playing) {
|
| + unsigned int noSamp10ms = _adbSampFreq / 100;
|
| + // todo: Member variable and allocate when samp freq is determined
|
| + int16_t* dataTmp = new int16_t[noSamp10ms];
|
| + memset(dataTmp, 0, 2 * noSamp10ms);
|
| + unsigned int dataPos = 0;
|
| + int noSamplesOut = 0;
|
| + unsigned int nCopy = 0;
|
| +
|
| + // First insert data from playout buffer if any
|
| + if (_playoutBufferUsed > 0) {
|
| + nCopy = (dataSize < _playoutBufferUsed) ? dataSize : _playoutBufferUsed;
|
| + DCHECK_EQ(nCopy, _playoutBufferUsed);
|
| + memcpy(data, _playoutBuffer, 2 * nCopy);
|
| + dataPos = nCopy;
|
| + memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
|
| + _playoutBufferUsed = 0;
|
| + }
|
| +
|
| + // Now get the rest from Audio Device Buffer.
|
| + while (dataPos < dataSize) {
|
| + // Update playout delay
|
| + UpdatePlayoutDelay();
|
| +
|
| + // Ask for new PCM data to be played out using the AudioDeviceBuffer
|
| + noSamplesOut = audio_device_buffer_->RequestPlayoutData(noSamp10ms);
|
| +
|
| + // Get data from Audio Device Buffer
|
| + noSamplesOut = audio_device_buffer_->GetPlayoutData(
|
| + reinterpret_cast<int8_t*>(dataTmp));
|
| + CHECK_EQ(noSamp10ms, (unsigned int)noSamplesOut);
|
| +
|
| + // Insert as much as fits in data buffer
|
| + nCopy =
|
| + (dataSize - dataPos) > noSamp10ms ? noSamp10ms : (dataSize - dataPos);
|
| + memcpy(&data[dataPos], dataTmp, 2 * nCopy);
|
| +
|
| + // Save rest in playout buffer if any
|
| + if (nCopy < noSamp10ms) {
|
| + memcpy(_playoutBuffer, &dataTmp[nCopy], 2 * (noSamp10ms - nCopy));
|
| + _playoutBufferUsed = noSamp10ms - nCopy;
|
| + }
|
|
|
| - return 0;
|
| -}
|
| -
|
| -int32_t
|
| - AudioDeviceIOS::RecordingDeviceName(uint16_t index,
|
| - char name[kAdmMaxDeviceNameSize],
|
| - char guid[kAdmMaxGuidSize]) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::RecordingDeviceName(index=%u)", index);
|
| -
|
| - if (index != 0) {
|
| - return -1;
|
| - }
|
| - // return empty strings
|
| - memset(name, 0, kAdmMaxDeviceNameSize);
|
| - if (guid != NULL) {
|
| - memset(guid, 0, kAdmMaxGuidSize);
|
| +      // Update the loop/index counter; if we copied fewer than noSamp10ms
|
| +      // samples we will exit the loop anyway.
|
| + dataPos += noSamp10ms;
|
| }
|
| -
|
| - return 0;
|
| + delete[] dataTmp;
|
| + }
|
| + return 0;
|
| }
|
|
|
| -int16_t AudioDeviceIOS::RecordingDevices() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| +// TODO(henrika): can either be removed or simplified.
|
| +void AudioDeviceIOS::UpdatePlayoutDelay() {
|
| + ++_playoutDelayMeasurementCounter;
|
|
|
| - return (int16_t)1;
|
| -}
|
| + if (_playoutDelayMeasurementCounter >= 100) {
|
| +    // Update the HW and OS delay once per second; it rarely changes.
|
|
|
| -int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetRecordingDevice(index=%u)", index);
|
| + // Since this is eventually rounded to integral ms, add 0.5ms
|
| + // here to get round-to-nearest-int behavior instead of
|
| + // truncation.
|
| + double totalDelaySeconds = 0.0005;
|
|
|
| - if (_recIsInitialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Recording already initialized");
|
| - return -1;
|
| + // HW output latency
|
| + AVAudioSession* session = [AVAudioSession sharedInstance];
|
| + double latency = session.outputLatency;
|
| + assert(latency >= 0);
|
| + totalDelaySeconds += latency;
|
| +
|
| + // HW buffer duration
|
| + double ioBufferDuration = session.IOBufferDuration;
|
| + assert(ioBufferDuration >= 0);
|
| + totalDelaySeconds += ioBufferDuration;
|
| +
|
| + // AU latency
|
| + Float64 f64(0);
|
| + UInt32 size = sizeof(f64);
|
| + OSStatus result =
|
| + AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency,
|
| + kAudioUnitScope_Global, 0, &f64, &size);
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "AU latency error: " << result;
|
| }
|
| + assert(f64 >= 0);
|
| + totalDelaySeconds += f64;
|
|
|
| - if (index !=0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " SetRecordingDevice invalid index");
|
| - return -1;
|
| - }
|
| + // To ms
|
| +    _playoutDelay = static_cast<uint32_t>(totalDelaySeconds * 1000);
|
|
|
| - _recordingDeviceIsSpecified = true;
|
| + // Reset counter
|
| + _playoutDelayMeasurementCounter = 0;
|
| + }
|
|
|
| - return 0;
|
| + // todo: Add playout buffer?
|
| }
|
|
|
| -int32_t
|
| - AudioDeviceIOS::SetRecordingDevice(
|
| - AudioDeviceModule::WindowsDeviceType) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "WindowsDeviceType not supported");
|
| - return -1;
|
| -}
|
| +void AudioDeviceIOS::UpdateRecordingDelay() {
|
| + ++_recordingDelayMeasurementCounter;
|
|
|
| -// ----------------------------------------------------------------------------
|
| -// SetLoudspeakerStatus
|
| -//
|
| -// Change the default receiver playout route to speaker.
|
| -//
|
| -// ----------------------------------------------------------------------------
|
| + if (_recordingDelayMeasurementCounter >= 100) {
|
| +    // Update the HW and OS delay once per second; it rarely changes.
|
|
|
| -int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetLoudspeakerStatus(enable=%d)", enable);
|
| + // Since this is eventually rounded to integral ms, add 0.5ms
|
| + // here to get round-to-nearest-int behavior instead of
|
| + // truncation.
|
| + double totalDelaySeconds = 0.0005;
|
|
|
| + // HW input latency
|
| AVAudioSession* session = [AVAudioSession sharedInstance];
|
| - NSString* category = session.category;
|
| - AVAudioSessionCategoryOptions options = session.categoryOptions;
|
| - // Respect old category options if category is
|
| - // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
|
| - // might not be valid for this category.
|
| - if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
|
| - if (enable) {
|
| - options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
|
| - } else {
|
| - options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
|
| - }
|
| - } else {
|
| - options = AVAudioSessionCategoryOptionDefaultToSpeaker;
|
| - }
|
| -
|
| - NSError* error = nil;
|
| - [session setCategory:AVAudioSessionCategoryPlayAndRecord
|
| - withOptions:options
|
| - error:&error];
|
| - if (error != nil) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "Error changing default output route ");
|
| - return -1;
|
| + double latency = session.inputLatency;
|
| + assert(latency >= 0);
|
| + totalDelaySeconds += latency;
|
| +
|
| + // HW buffer duration
|
| + double ioBufferDuration = session.IOBufferDuration;
|
| + assert(ioBufferDuration >= 0);
|
| + totalDelaySeconds += ioBufferDuration;
|
| +
|
| + // AU latency
|
| + Float64 f64(0);
|
| + UInt32 size = sizeof(f64);
|
| + OSStatus result =
|
| + AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency,
|
| + kAudioUnitScope_Global, 0, &f64, &size);
|
| + if (0 != result) {
|
| + LOG_F(LS_ERROR) << "AU latency error: " << result;
|
| }
|
| + assert(f64 >= 0);
|
| + totalDelaySeconds += f64;
|
|
|
| - return 0;
|
| -}
|
| + // To ms
|
| +    _recordingDelayHWAndOS = static_cast<uint32_t>(totalDelaySeconds * 1000);
|
|
|
| -int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool &enabled) const {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetLoudspeakerStatus(enabled=?)");
|
| + // Reset counter
|
| + _recordingDelayMeasurementCounter = 0;
|
| + }
|
|
|
| - AVAudioSession* session = [AVAudioSession sharedInstance];
|
| - AVAudioSessionCategoryOptions options = session.categoryOptions;
|
| - enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
|
| + _recordingDelay = _recordingDelayHWAndOS;
|
|
|
| - return 0;
|
| +  // ADB recording buffer size; updated on every call.
|
| +  // Don't count the next 10 ms block that is about to be sent; then
|
| +  // convert samples to ms.
|
| + const uint32_t noSamp10ms = _adbSampFreq / 100;
|
| + if (_recordingBufferTotalSize > noSamp10ms) {
|
| + _recordingDelay +=
|
| + (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
|
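| +    // E.g. 320 buffered samples at 16 kHz add (320 - 160) / 16 = 10 ms.
|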
| + }
|
| }
|
|
|
| -int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - available = false;
|
| -
|
| - // Try to initialize the playout side
|
| - int32_t res = InitPlayout();
|
| -
|
| - // Cancel effect of initialization
|
| - StopPlayout();
|
| -
|
| - if (res != -1) {
|
| - available = true;
|
| - }
|
| -
|
| - return 0;
|
| +bool AudioDeviceIOS::RunCapture(void* ptrThis) {
|
| + return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
|
| }
|
|
|
| -int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - available = false;
|
| -
|
| - // Try to initialize the recording side
|
| - int32_t res = InitRecording();
|
| +bool AudioDeviceIOS::CaptureWorkerThread() {
|
| + if (_recording) {
|
| + int bufPos = 0;
|
| + unsigned int lowestSeq = 0;
|
| + int lowestSeqBufPos = 0;
|
| + bool foundBuf = true;
|
| + const unsigned int noSamp10ms = _adbSampFreq / 100;
|
| +
|
| + while (foundBuf) {
|
| + // Check if we have any buffer with data to insert
|
| + // into the Audio Device Buffer,
|
| + // and find the one with the lowest seq number
|
| + foundBuf = false;
|
| + for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
|
| + if (noSamp10ms == _recordingLength[bufPos]) {
|
| + if (!foundBuf) {
|
| + lowestSeq = _recordingSeqNumber[bufPos];
|
| + lowestSeqBufPos = bufPos;
|
| + foundBuf = true;
|
| + } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
|
| + lowestSeq = _recordingSeqNumber[bufPos];
|
| + lowestSeqBufPos = bufPos;
|
| + }
|
| + }
|
| + }
|
|
|
| - // Cancel effect of initialization
|
| - StopRecording();
|
| + // Insert data into the Audio Device Buffer if found any
|
| + if (foundBuf) {
|
| + // Update recording delay
|
| + UpdateRecordingDelay();
|
|
|
| - if (res != -1) {
|
| - available = true;
|
| - }
|
| + // Set the recorded buffer
|
| + audio_device_buffer_->SetRecordedBuffer(
|
| + reinterpret_cast<int8_t*>(_recordingBuffer[lowestSeqBufPos]),
|
| + _recordingLength[lowestSeqBufPos]);
|
|
|
| - return 0;
|
| -}
|
| +      // Don't need to set the current mic level in the ADB since we only
|
| +      // support digital AGC, and besides we cannot get or set the iOS mic
|
| +      // level anyway.
|
|
|
| -int32_t AudioDeviceIOS::InitPlayout() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| + // Set VQE info, use clockdrift == 0
|
| + audio_device_buffer_->SetVQEData(_playoutDelay, _recordingDelay, 0);
|
|
|
| - CriticalSectionScoped lock(&_critSect);
|
| + // Deliver recorded samples at the specified sample rate, mic level,
|
| + // etc. to the observer using a callback.
|
| + audio_device_buffer_->DeliverRecordedData();
|
|
|
| - if (!_initialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Not initialized");
|
| - return -1;
|
| + // Make buffer available
|
| + _recordingSeqNumber[lowestSeqBufPos] = 0;
|
| + _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
|
| + // Must be done last to avoid interrupt problems between threads
|
| + _recordingLength[lowestSeqBufPos] = 0;
|
| + }
|
| }
|
| -
|
| - if (_playing) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Playout already started");
|
| - return -1;
|
| - }
|
| -
|
| - if (_playIsInitialized) {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Playout already initialized");
|
| - return 0;
|
| - }
|
| -
|
| - if (!_playoutDeviceIsSpecified) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Playout device is not specified");
|
| - return -1;
|
| - }
|
| -
|
| - // Initialize the speaker
|
| - if (InitSpeaker() == -1) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " InitSpeaker() failed");
|
| - }
|
| -
|
| - _playIsInitialized = true;
|
| -
|
| - if (!_recIsInitialized) {
|
| - // Audio init
|
| - if (InitPlayOrRecord() == -1) {
|
| - // todo: Handle error
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " InitPlayOrRecord() failed");
|
| - }
|
| - } else {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Recording already initialized - InitPlayOrRecord() not called");
|
| - }
|
| -
|
| - return 0;
|
| -}
|
| -
|
| -bool AudioDeviceIOS::PlayoutIsInitialized() const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| - return (_playIsInitialized);
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::InitRecording() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - CriticalSectionScoped lock(&_critSect);
|
| -
|
| - if (!_initialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Not initialized");
|
| - return -1;
|
| - }
|
| -
|
| - if (_recording) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Recording already started");
|
| - return -1;
|
| - }
|
| -
|
| - if (_recIsInitialized) {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Recording already initialized");
|
| - return 0;
|
| - }
|
| -
|
| - if (!_recordingDeviceIsSpecified) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Recording device is not specified");
|
| - return -1;
|
| - }
|
| -
|
| - // Initialize the microphone
|
| - if (InitMicrophone() == -1) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " InitMicrophone() failed");
|
| - }
|
| -
|
| - _recIsInitialized = true;
|
| -
|
| - if (!_playIsInitialized) {
|
| - // Audio init
|
| - if (InitPlayOrRecord() == -1) {
|
| - // todo: Handle error
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " InitPlayOrRecord() failed");
|
| - }
|
| - } else {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Playout already initialized - InitPlayOrRecord() " \
|
| - "not called");
|
| - }
|
| -
|
| - return 0;
|
| -}
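
Taken together, the deleted InitPlayout() and InitRecording() share one lazy backend setup: whichever side comes first calls InitPlayOrRecord(), and the second side skips it because the audio unit already exists. The control flow, stripped to its shape (illustrative names):

    // Sketch: two front ends sharing one lazily initialized backend.
    struct SharedInit {
      bool playInited = false;
      bool recInited = false;

      void InitPlayout() {
        playInited = true;
        if (!recInited) InitBackend();  // first one in creates the audio unit
      }
      void InitRecording() {
        recInited = true;
        if (!playInited) InitBackend();
      }
      void InitBackend() { /* InitPlayOrRecord() */ }
    };

The guard tests the *other* side's flag because each function sets its own flag before deciding whether the backend still needs to be brought up.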
|
| -
|
| -bool AudioDeviceIOS::RecordingIsInitialized() const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| - return (_recIsInitialized);
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::StartRecording() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - CriticalSectionScoped lock(&_critSect);
|
| -
|
| - if (!_recIsInitialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Recording not initialized");
|
| - return -1;
|
| - }
|
| -
|
| - if (_recording) {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Recording already started");
|
| - return 0;
|
| - }
|
| -
|
| - // Reset recording buffer
|
| - memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
|
| - memset(_recordingLength, 0, sizeof(_recordingLength));
|
| - memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
|
| - _recordingCurrentSeq = 0;
|
| - _recordingBufferTotalSize = 0;
|
| - _recordingDelay = 0;
|
| - _recordingDelayHWAndOS = 0;
|
| - // Make sure first call to update delay function will update delay
|
| - _recordingDelayMeasurementCounter = 9999;
|
| - _recWarning = 0;
|
| - _recError = 0;
|
| -
|
| - if (!_playing) {
|
| - // Start Audio Unit
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
|
| - " Starting Audio Unit");
|
| - OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
|
| - " Error starting Audio Unit (result=%d)", result);
|
| - return -1;
|
| - }
|
| - }
|
| -
|
| - _recording = true;
|
| -
|
| - return 0;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::StopRecording() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - CriticalSectionScoped lock(&_critSect);
|
| -
|
| - if (!_recIsInitialized) {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Recording is not initialized");
|
| - return 0;
|
| - }
|
| -
|
| - _recording = false;
|
| -
|
| - if (!_playing) {
|
| - // Both playout and recording have stopped; shut down the device.
|
| - ShutdownPlayOrRecord();
|
| - }
|
| -
|
| - _recIsInitialized = false;
|
| - _micIsInitialized = false;
|
| -
|
| - return 0;
|
| -}
|
| -
|
| -bool AudioDeviceIOS::Recording() const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| - return (_recording);
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::StartPlayout() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - // This lock is (among other things) needed to avoid concurrency issues
|
| - // with capture thread
|
| - // shutting down Audio Unit
|
| - CriticalSectionScoped lock(&_critSect);
|
| -
|
| - if (!_playIsInitialized) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Playout not initialized");
|
| - return -1;
|
| - }
|
| -
|
| - if (_playing) {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Playing already started");
|
| - return 0;
|
| - }
|
| -
|
| - // Reset playout buffer
|
| - memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
|
| - _playoutBufferUsed = 0;
|
| - _playoutDelay = 0;
|
| - // Make sure first call to update delay function will update delay
|
| - _playoutDelayMeasurementCounter = 9999;
|
| - _playWarning = 0;
|
| - _playError = 0;
|
| -
|
| - if (!_recording) {
|
| - // Start Audio Unit
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
|
| - " Starting Audio Unit");
|
| - OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
|
| - " Error starting Audio Unit (result=%d)", result);
|
| - return -1;
|
| - }
|
| - }
|
| -
|
| - _playing = true;
|
| -
|
| - return 0;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::StopPlayout() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - CriticalSectionScoped lock(&_critSect);
|
| -
|
| - if (!_playIsInitialized) {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Playout is not initialized");
|
| - return 0;
|
| - }
|
| -
|
| - _playing = false;
|
| -
|
| - if (!_recording) {
|
| - // Both playout and recording have stopped; signal shutdown of the device.
|
| - ShutdownPlayOrRecord();
|
| - }
|
| -
|
| - _playIsInitialized = false;
|
| - _speakerIsInitialized = false;
|
| -
|
| - return 0;
|
| -}
|
| -
|
| -bool AudioDeviceIOS::Playing() const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "%s", __FUNCTION__);
|
| - return (_playing);
|
| -}
|
| -
|
| -// ----------------------------------------------------------------------------
|
| -// ResetAudioDevice
|
| -//
|
| - // Disable playout and recording, signal the capture thread to shut down,
|
| - // and restore the enable states after shutdown to their current values.
|
| - // In the capture thread the audio device is shut down, then started again.
|
| -// ----------------------------------------------------------------------------
|
| -int32_t AudioDeviceIOS::ResetAudioDevice() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - CriticalSectionScoped lock(&_critSect);
|
| -
|
| - if (!_playIsInitialized && !_recIsInitialized) {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Playout or recording not initialized, doing nothing");
|
| - return 0; // Nothing to reset
|
| - }
|
| -
|
| - // Store the states we have before stopping to restart below
|
| - bool initPlay = _playIsInitialized;
|
| - bool play = _playing;
|
| - bool initRec = _recIsInitialized;
|
| - bool rec = _recording;
|
| -
|
| - int res(0);
|
| -
|
| - // Stop playout and recording
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
|
| - " Stopping playout and recording");
|
| - res += StopPlayout();
|
| - res += StopRecording();
|
| -
|
| - // Restart
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
|
| - " Restarting playout and recording (%d, %d, %d, %d)",
|
| - initPlay, play, initRec, rec);
|
| - if (initPlay) res += InitPlayout();
|
| - if (initRec) res += InitRecording();
|
| - if (play) res += StartPlayout();
|
| - if (rec) res += StartRecording();
|
| -
|
| - if (0 != res) {
|
| - // Logging is done in init/start/stop calls above
|
| - return -1;
|
| - }
|
| -
|
| - return 0;
|
| -}
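
ResetAudioDevice() is a snapshot / stop / restore cycle: remember which of the four states were active, stop both directions (which also tears the audio unit down), then rebuild exactly that state. As a standalone shape (the Stub functions are stand-ins, not real APIs):

    #include <cstdint>

    // Hypothetical stand-ins for StopPlayout/StopRecording/Init*/Start*.
    static int32_t StopBothStub() { return 0; }
    static int32_t InitStub() { return 0; }
    static int32_t StartStub() { return 0; }

    // Sketch: restore exactly the playout/recording state captured up front.
    int32_t ResetSketch(bool playInited, bool playing,
                        bool recInited, bool recording) {
      int32_t res = StopBothStub();       // stops both sides, shuts down AU
      if (playInited) res += InitStub();  // InitPlayout()
      if (recInited) res += InitStub();   // InitRecording()
      if (playing) res += StartStub();    // StartPlayout()
      if (recording) res += StartStub();  // StartRecording()
      return (res == 0) ? 0 : -1;         // errors are logged by the callees
    }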
|
| -
|
| -int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
|
| - delayMS = _playoutDelay;
|
| - return 0;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
|
| - delayMS = _recordingDelay;
|
| - return 0;
|
| -}
|
| -
|
| -int32_t
|
| - AudioDeviceIOS::SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
|
| - uint16_t sizeMS) {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
|
| - "AudioDeviceIOS::SetPlayoutBuffer(type=%u, sizeMS=%u)",
|
| - type, sizeMS);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -int32_t
|
| - AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
|
| - uint16_t& sizeMS) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - type = AudioDeviceModule::kAdaptiveBufferSize;
|
| -
|
| - sizeMS = _playoutDelay;
|
| -
|
| - return 0;
|
| -}
|
| -
|
| -int32_t AudioDeviceIOS::CPULoad(uint16_t& /*load*/) const {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " API call not supported on this platform");
|
| - return -1;
|
| -}
|
| -
|
| -bool AudioDeviceIOS::PlayoutWarning() const {
|
| - return (_playWarning > 0);
|
| -}
|
| -
|
| -bool AudioDeviceIOS::PlayoutError() const {
|
| - return (_playError > 0);
|
| -}
|
| -
|
| -bool AudioDeviceIOS::RecordingWarning() const {
|
| - return (_recWarning > 0);
|
| -}
|
| -
|
| -bool AudioDeviceIOS::RecordingError() const {
|
| - return (_recError > 0);
|
| -}
|
| -
|
| -void AudioDeviceIOS::ClearPlayoutWarning() {
|
| - _playWarning = 0;
|
| -}
|
| -
|
| -void AudioDeviceIOS::ClearPlayoutError() {
|
| - _playError = 0;
|
| -}
|
| -
|
| -void AudioDeviceIOS::ClearRecordingWarning() {
|
| - _recWarning = 0;
|
| -}
|
| -
|
| -void AudioDeviceIOS::ClearRecordingError() {
|
| - _recError = 0;
|
| -}
|
| -
|
| -// ============================================================================
|
| -// Private Methods
|
| -// ============================================================================
|
| -
|
| -int32_t AudioDeviceIOS::InitPlayOrRecord() {
|
| - WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - OSStatus result = -1;
|
| -
|
| - // Check if already initialized
|
| - if (NULL != _auVoiceProcessing) {
|
| - // We have already initialized before and created the audio unit;
|
| - // check that it still exists.
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Already initialized");
|
| - // todo: Call AudioUnitReset() here and empty all buffers?
|
| - return 0;
|
| - }
|
| -
|
| - // Create Voice Processing Audio Unit
|
| - AudioComponentDescription desc;
|
| - AudioComponent comp;
|
| -
|
| - desc.componentType = kAudioUnitType_Output;
|
| - desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
|
| - desc.componentManufacturer = kAudioUnitManufacturer_Apple;
|
| - desc.componentFlags = 0;
|
| - desc.componentFlagsMask = 0;
|
| -
|
| - comp = AudioComponentFindNext(NULL, &desc);
|
| - if (NULL == comp) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not find audio component for Audio Unit");
|
| - return -1;
|
| - }
|
| -
|
| - result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not create Audio Unit instance (result=%d)",
|
| - result);
|
| - return -1;
|
| - }
|
| -
|
| - // Set preferred hardware sample rate to 16 kHz
|
| - NSError* error = nil;
|
| - AVAudioSession* session = [AVAudioSession sharedInstance];
|
| - Float64 preferredSampleRate(16000.0);
|
| - [session setPreferredSampleRate:preferredSampleRate
|
| - error:&error];
|
| - if (error != nil) {
|
| - const char* errorString = [[error localizedDescription] UTF8String];
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "Could not set preferred sample rate: %s", errorString);
|
| - }
|
| - error = nil;
|
| - // Make the setMode:error: and setCategory:error: calls only if necessary.
|
| - // Non-obviously, setting them to the value they already have will clear
|
| - // transient properties (such as PortOverride) that some other component may
|
| - // have set up.
|
| - if (session.mode != AVAudioSessionModeVoiceChat) {
|
| - [session setMode:AVAudioSessionModeVoiceChat error:&error];
|
| - if (error != nil) {
|
| - const char* errorString = [[error localizedDescription] UTF8String];
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "Could not set mode: %s", errorString);
|
| - }
|
| - }
|
| - error = nil;
|
| - if (session.category != AVAudioSessionCategoryPlayAndRecord) {
|
| - [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
|
| - if (error != nil) {
|
| - const char* errorString = [[error localizedDescription] UTF8String];
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - "Could not set category: %s", errorString);
|
| - }
|
| - }
|
| -
|
| - //////////////////////
|
| - // Setup Voice Processing Audio Unit
|
| -
|
| - // Note: For the Voice Processing AU, element 0 is the output bus and
|
| - // element 1 is the input bus; for global scope the element is
|
| - // irrelevant (always use element 0).
|
| -
|
| - // Enable IO on both elements
|
| -
|
| - // todo: Below we just log and continue upon error. We might want
|
| - // to close AU and return error for some cases.
|
| - // todo: Log info about setup.
|
| -
|
| - UInt32 enableIO = 1;
|
| - result = AudioUnitSetProperty(_auVoiceProcessing,
|
| - kAudioOutputUnitProperty_EnableIO,
|
| - kAudioUnitScope_Input,
|
| - 1, // input bus
|
| - &enableIO,
|
| - sizeof(enableIO));
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not enable IO on input (result=%d)", result);
|
| - }
|
| -
|
| - result = AudioUnitSetProperty(_auVoiceProcessing,
|
| - kAudioOutputUnitProperty_EnableIO,
|
| - kAudioUnitScope_Output,
|
| - 0, // output bus
|
| - &enableIO,
|
| - sizeof(enableIO));
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not enable IO on output (result=%d)", result);
|
| - }
|
| -
|
| - // Disable AU buffer allocation for the recorder, we allocate our own
|
| - UInt32 flag = 0;
|
| - result = AudioUnitSetProperty(
|
| - _auVoiceProcessing, kAudioUnitProperty_ShouldAllocateBuffer,
|
| - kAudioUnitScope_Output, 1, &flag, sizeof(flag));
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Could not disable AU buffer allocation (result=%d)",
|
| - result);
|
| - // Should work anyway
|
| - }
|
| -
|
| - // Set recording callback
|
| - AURenderCallbackStruct auCbS;
|
| - memset(&auCbS, 0, sizeof(auCbS));
|
| - auCbS.inputProc = RecordProcess;
|
| - auCbS.inputProcRefCon = this;
|
| - result = AudioUnitSetProperty(_auVoiceProcessing,
|
| - kAudioOutputUnitProperty_SetInputCallback,
|
| - kAudioUnitScope_Global, 1,
|
| - &auCbS, sizeof(auCbS));
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not set record callback for Audio Unit (result=%d)",
|
| - result);
|
| - }
|
| -
|
| - // Set playout callback
|
| - memset(&auCbS, 0, sizeof(auCbS));
|
| - auCbS.inputProc = PlayoutProcess;
|
| - auCbS.inputProcRefCon = this;
|
| - result = AudioUnitSetProperty(_auVoiceProcessing,
|
| - kAudioUnitProperty_SetRenderCallback,
|
| - kAudioUnitScope_Global, 0,
|
| - &auCbS, sizeof(auCbS));
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not set play callback for Audio Unit (result=%d)",
|
| - result);
|
| - }
|
| -
|
| - // Get stream format for out/0
|
| - AudioStreamBasicDescription playoutDesc;
|
| - UInt32 size = sizeof(playoutDesc);
|
| - result = AudioUnitGetProperty(_auVoiceProcessing,
|
| - kAudioUnitProperty_StreamFormat,
|
| - kAudioUnitScope_Output, 0, &playoutDesc,
|
| - &size);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not get stream format Audio Unit out/0 (result=%d)",
|
| - result);
|
| - }
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Audio Unit playout opened in sampling rate %f",
|
| - playoutDesc.mSampleRate);
|
| -
|
| - playoutDesc.mSampleRate = preferredSampleRate;
|
| -
|
| - // Store the sampling frequency to use towards the Audio Device Buffer
|
| - // todo: Add 48 kHz (increase buffer sizes). Other fs?
|
| - if ((playoutDesc.mSampleRate > 44090.0)
|
| - && (playoutDesc.mSampleRate < 44110.0)) {
|
| - _adbSampFreq = 44100;
|
| - } else if ((playoutDesc.mSampleRate > 15990.0)
|
| - && (playoutDesc.mSampleRate < 16010.0)) {
|
| - _adbSampFreq = 16000;
|
| - } else if ((playoutDesc.mSampleRate > 7990.0)
|
| - && (playoutDesc.mSampleRate < 8010.0)) {
|
| - _adbSampFreq = 8000;
|
| - } else {
|
| - _adbSampFreq = 0;
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Audio Unit out/0 opened in unknown sampling rate (%f)",
|
| - playoutDesc.mSampleRate);
|
| - // todo: We should bail out here.
|
| - }
|
| -
|
| - // Set the audio device buffer sampling rate,
|
| - // we assume we get the same for play and record
|
| - if (_ptrAudioBuffer->SetRecordingSampleRate(_adbSampFreq) < 0) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not set audio device buffer recording sampling rate (%d)",
|
| - _adbSampFreq);
|
| - }
|
| -
|
| - if (_ptrAudioBuffer->SetPlayoutSampleRate(_adbSampFreq) < 0) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not set audio device buffer playout sampling rate (%d)",
|
| - _adbSampFreq);
|
| - }
|
| -
|
| - // Set stream format for in/0 (use same sampling frequency as for out/0)
|
| - playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
|
| - | kLinearPCMFormatFlagIsPacked
|
| - | kLinearPCMFormatFlagIsNonInterleaved;
|
| - playoutDesc.mBytesPerPacket = 2;
|
| - playoutDesc.mFramesPerPacket = 1;
|
| - playoutDesc.mBytesPerFrame = 2;
|
| - playoutDesc.mChannelsPerFrame = 1;
|
| - playoutDesc.mBitsPerChannel = 16;
|
| - result = AudioUnitSetProperty(_auVoiceProcessing,
|
| - kAudioUnitProperty_StreamFormat,
|
| - kAudioUnitScope_Input, 0, &playoutDesc, size);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not set stream format Audio Unit in/0 (result=%d)",
|
| - result);
|
| - }
|
| -
|
| - // Get stream format for in/1
|
| - AudioStreamBasicDescription recordingDesc;
|
| - size = sizeof(recordingDesc);
|
| - result = AudioUnitGetProperty(_auVoiceProcessing,
|
| - kAudioUnitProperty_StreamFormat,
|
| - kAudioUnitScope_Input, 1, &recordingDesc,
|
| - &size);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not get stream format Audio Unit in/1 (result=%d)",
|
| - result);
|
| - }
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
|
| - " Audio Unit recording opened in sampling rate %f",
|
| - recordingDesc.mSampleRate);
|
| -
|
| - recordingDesc.mSampleRate = preferredSampleRate;
|
| -
|
| - // Set stream format for out/1 (use same sampling frequency as for in/1)
|
| - recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
|
| - | kLinearPCMFormatFlagIsPacked
|
| - | kLinearPCMFormatFlagIsNonInterleaved;
|
| -
|
| - recordingDesc.mBytesPerPacket = 2;
|
| - recordingDesc.mFramesPerPacket = 1;
|
| - recordingDesc.mBytesPerFrame = 2;
|
| - recordingDesc.mChannelsPerFrame = 1;
|
| - recordingDesc.mBitsPerChannel = 16;
|
| - result = AudioUnitSetProperty(_auVoiceProcessing,
|
| - kAudioUnitProperty_StreamFormat,
|
| - kAudioUnitScope_Output, 1, &recordingDesc,
|
| - size);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not set stream format Audio Unit out/1 (result=%d)",
|
| - result);
|
| - }
|
| -
|
| - // Initialize here already to be able to get/set stream properties.
|
| - result = AudioUnitInitialize(_auVoiceProcessing);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - " Could not init Audio Unit (result=%d)", result);
|
| - }
|
| -
|
| - // Get hardware sample rate for logging (see if we get what we asked for)
|
| - double sampleRate = session.sampleRate;
|
| - WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
|
| - " Current HW sample rate is %f, ADB sample rate is %d",
|
| - sampleRate, _adbSampFreq);
|
| -
|
| - // Listen to audio interruptions.
|
| - NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
|
| - id observer =
|
| - [center addObserverForName:AVAudioSessionInterruptionNotification
|
| - object:nil
|
| - queue:[NSOperationQueue mainQueue]
|
| - usingBlock:^(NSNotification* notification) {
|
| - NSNumber* typeNumber =
|
| - [notification userInfo][AVAudioSessionInterruptionTypeKey];
|
| - AVAudioSessionInterruptionType type =
|
| - (AVAudioSessionInterruptionType)[typeNumber unsignedIntegerValue];
|
| - switch (type) {
|
| - case AVAudioSessionInterruptionTypeBegan:
|
| - // At this point our audio session has been deactivated and the
|
| - // audio unit render callbacks no longer occur. Nothing to do.
|
| - break;
|
| - case AVAudioSessionInterruptionTypeEnded: {
|
| - NSError* error = nil;
|
| - AVAudioSession* session = [AVAudioSession sharedInstance];
|
| - [session setActive:YES
|
| - error:&error];
|
| - if (error != nil) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - "Error activating audio session");
|
| - }
|
| - // Post interruption the audio unit render callbacks don't
|
| - // automatically continue, so we restart the unit manually here.
|
| - AudioOutputUnitStop(_auVoiceProcessing);
|
| - AudioOutputUnitStart(_auVoiceProcessing);
|
| - break;
|
| - }
|
| - }
|
| - }];
|
| - // Increment refcount on observer using ARC bridge. Instance variable is a
|
| - // void* instead of an id because header is included in other pure C++
|
| - // files.
|
| - _audioInterruptionObserver = (__bridge_retained void*)observer;
|
| -
|
| - // Activate audio session.
|
| - error = nil;
|
| - [session setActive:YES
|
| - error:&error];
|
| - if (error != nil) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - "Error activating audio session");
|
| - }
|
| -
|
| - return 0;
|
| -}
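
Most of the deleted InitPlayOrRecord() is boilerplate around one idea: instantiate a kAudioUnitSubType_VoiceProcessingIO unit and enable I/O on input element 1 and output element 0. A minimal sketch of that core, with logging removed and errors returned early (not a drop-in replacement):

    #include <AudioUnit/AudioUnit.h>

    // Sketch: create a Voice Processing I/O unit. Element 1 is the mic
    // side, element 0 the speaker side; global scope always uses element 0.
    static OSStatus CreateVoiceProcessingIO(AudioUnit* unit) {
      AudioComponentDescription desc = {0};
      desc.componentType = kAudioUnitType_Output;
      desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
      desc.componentManufacturer = kAudioUnitManufacturer_Apple;
      AudioComponent comp = AudioComponentFindNext(NULL, &desc);
      if (comp == NULL) return -1;
      OSStatus result = AudioComponentInstanceNew(comp, unit);
      if (result != noErr) return result;
      UInt32 enable = 1;
      // Recording: input scope of the input element (bus 1).
      result = AudioUnitSetProperty(*unit, kAudioOutputUnitProperty_EnableIO,
                                    kAudioUnitScope_Input, 1, &enable,
                                    sizeof(enable));
      if (result != noErr) return result;
      // Playback: output scope of the output element (bus 0).
      return AudioUnitSetProperty(*unit, kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Output, 0, &enable,
                                  sizeof(enable));
    }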
|
| -
|
| -int32_t AudioDeviceIOS::ShutdownPlayOrRecord() {
|
| - WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
|
| -
|
| - if (_audioInterruptionObserver != NULL) {
|
| - NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
|
| - // Transfer ownership of observer back to ARC, which will dealloc the
|
| - // observer once it exits this scope.
|
| - id observer = (__bridge_transfer id)_audioInterruptionObserver;
|
| - [center removeObserver:observer];
|
| - _audioInterruptionObserver = NULL;
|
| - }
|
| -
|
| - // Close and delete AU
|
| - OSStatus result = -1;
|
| - if (NULL != _auVoiceProcessing) {
|
| - result = AudioOutputUnitStop(_auVoiceProcessing);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Error stopping Audio Unit (result=%d)", result);
|
| - }
|
| - result = AudioComponentInstanceDispose(_auVoiceProcessing);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Error disposing Audio Unit (result=%d)", result);
|
| - }
|
| - _auVoiceProcessing = NULL;
|
| - }
|
| -
|
| - return 0;
|
| -}
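
The __bridge_transfer in ShutdownPlayOrRecord() mirrors the __bridge_retained in InitPlayOrRecord(): because the header stores the observer as a void* (so it can be included from pure C++ files), ownership has to be moved across the ARC boundary by hand. The two halves of the pattern, in isolation (requires ARC):

    #import <Foundation/Foundation.h>

    // Sketch: keep an ARC-managed object alive via a void* instance variable.
    static void* StoreObserver(id observer) {
      // +1 retain; ARC stops tracking the object from here on.
      return (__bridge_retained void*)observer;
    }

    static void ReleaseObserver(void* stored) {
      // Ownership moves back to ARC; the object is released when 'observer'
      // goes out of scope.
      id observer = (__bridge_transfer id)stored;
      [[NSNotificationCenter defaultCenter] removeObserver:observer];
    }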
|
| -
|
| -// ============================================================================
|
| -// Thread Methods
|
| -// ============================================================================
|
| -
|
| -OSStatus
|
| - AudioDeviceIOS::RecordProcess(void *inRefCon,
|
| - AudioUnitRenderActionFlags *ioActionFlags,
|
| - const AudioTimeStamp *inTimeStamp,
|
| - UInt32 inBusNumber,
|
| - UInt32 inNumberFrames,
|
| - AudioBufferList *ioData) {
|
| - AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
|
| -
|
| - return ptrThis->RecordProcessImpl(ioActionFlags,
|
| - inTimeStamp,
|
| - inBusNumber,
|
| - inNumberFrames);
|
| -}
|
| -
|
| -
|
| -OSStatus
|
| - AudioDeviceIOS::RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
|
| - const AudioTimeStamp *inTimeStamp,
|
| - uint32_t inBusNumber,
|
| - uint32_t inNumberFrames) {
|
| - // Set up some basics. Use a temp buffer so we don't lock up the
|
| - // recording buffer longer than necessary.
|
| - // todo: Make dataTmp a member variable with static size that holds
|
| - // max possible frames?
|
| - int16_t* dataTmp = new int16_t[inNumberFrames];
|
| - memset(dataTmp, 0, 2*inNumberFrames);
|
| -
|
| - AudioBufferList abList;
|
| - abList.mNumberBuffers = 1;
|
| - abList.mBuffers[0].mData = dataTmp;
|
| - abList.mBuffers[0].mDataByteSize = 2*inNumberFrames; // 2 bytes/sample
|
| - abList.mBuffers[0].mNumberChannels = 1;
|
| -
|
| - // Get data from mic
|
| - OSStatus res = AudioUnitRender(_auVoiceProcessing,
|
| - ioActionFlags, inTimeStamp,
|
| - inBusNumber, inNumberFrames, &abList);
|
| - if (res != 0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Error getting rec data, error = %d", res);
|
| -
|
| - if (_recWarning > 0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Pending rec warning exists");
|
| - }
|
| - _recWarning = 1;
|
| -
|
| - delete [] dataTmp;
|
| - return 0;
|
| - }
|
| -
|
| - if (_recording) {
|
| - // Insert all data in temp buffer into recording buffers
|
| - // There is zero or one buffer partially full at any given time,
|
| - // all others are full or empty
|
| - // Full means filled with noSamp10ms samples.
|
| -
|
| - const unsigned int noSamp10ms = _adbSampFreq / 100;
|
| - unsigned int dataPos = 0;
|
| - uint16_t bufPos = 0;
|
| - int16_t insertPos = -1;
|
| - unsigned int nCopy = 0; // Number of samples to copy
|
| -
|
| - while (dataPos < inNumberFrames) {
|
| - // Loop over all recording buffers or
|
| - // until we find the partially full buffer
|
| - // First choice is to insert into partially full buffer,
|
| - // second choice is to insert into empty buffer
|
| - bufPos = 0;
|
| - insertPos = -1;
|
| - nCopy = 0;
|
| - while (bufPos < N_REC_BUFFERS) {
|
| - if ((_recordingLength[bufPos] > 0)
|
| - && (_recordingLength[bufPos] < noSamp10ms)) {
|
| - // Found the partially full buffer
|
| - insertPos = static_cast<int16_t>(bufPos);
|
| - // Don't need to search more, quit loop
|
| - bufPos = N_REC_BUFFERS;
|
| - } else if ((-1 == insertPos)
|
| - && (0 == _recordingLength[bufPos])) {
|
| - // Found an empty buffer
|
| - insertPos = static_cast<int16_t>(bufPos);
|
| - }
|
| - ++bufPos;
|
| - }
|
| -
|
| - // Insert data into buffer
|
| - if (insertPos > -1) {
|
| - // We found a non-full buffer, copy data to it
|
| - unsigned int dataToCopy = inNumberFrames - dataPos;
|
| - unsigned int currentRecLen = _recordingLength[insertPos];
|
| - unsigned int roomInBuffer = noSamp10ms - currentRecLen;
|
| - nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);
|
| -
|
| - memcpy(&_recordingBuffer[insertPos][currentRecLen],
|
| - &dataTmp[dataPos], nCopy*sizeof(int16_t));
|
| - if (0 == currentRecLen) {
|
| - _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
|
| - ++_recordingCurrentSeq;
|
| - }
|
| - _recordingBufferTotalSize += nCopy;
|
| - // Has to be done last to avoid interrupt problems
|
| - // between threads
|
| - _recordingLength[insertPos] += nCopy;
|
| - dataPos += nCopy;
|
| - } else {
|
| - // Didn't find a non-full buffer
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Could not insert into recording buffer");
|
| - if (_recWarning > 0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Pending rec warning exists");
|
| - }
|
| - _recWarning = 1;
|
| - dataPos = inNumberFrames; // Don't try to insert more
|
| - }
|
| - }
|
| - }
|
| -
|
| - delete [] dataTmp;
|
| -
|
| - return 0;
|
| -}
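
The insertion strategy in RecordProcessImpl() keeps the pool invariant that at most one buffer is partially filled: incoming samples go to that buffer first, otherwise to an empty one, and a sequence number is stamped when a buffer starts filling so the consumer can drain blocks in order. The selection step on its own (illustrative signature):

    // Sketch: choose the buffer RecordProcessImpl() would write into next.
    // Returns -1 when every buffer is full and samples must be dropped.
    int PickInsertPos(const unsigned int* lengths, int numBuffers,
                      unsigned int samplesPer10ms) {
      int insertPos = -1;
      for (int i = 0; i < numBuffers; ++i) {
        if (lengths[i] > 0 && lengths[i] < samplesPer10ms)
          return i;  // the partially filled buffer always wins
        if (insertPos == -1 && lengths[i] == 0)
          insertPos = i;  // otherwise remember the first empty buffer
      }
      return insertPos;
    }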
|
| -
|
| -OSStatus
|
| - AudioDeviceIOS::PlayoutProcess(void *inRefCon,
|
| - AudioUnitRenderActionFlags *ioActionFlags,
|
| - const AudioTimeStamp *inTimeStamp,
|
| - UInt32 inBusNumber,
|
| - UInt32 inNumberFrames,
|
| - AudioBufferList *ioData) {
|
| - AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
|
| -
|
| - return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
|
| -}
|
| -
|
| -OSStatus
|
| - AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
|
| - AudioBufferList *ioData) {
|
| - // Set up some basics.
|
| -// assert(sizeof(short) == 2); // Assumption for implementation
|
| -
|
| - int16_t* data =
|
| - static_cast<int16_t*>(ioData->mBuffers[0].mData);
|
| - unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
|
| - unsigned int dataSize = dataSizeBytes/2; // Number of samples
|
| - if (dataSize != inNumberFrames) { // Should always be the same
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - "dataSize (%u) != inNumberFrames (%u)",
|
| - dataSize, (unsigned int)inNumberFrames);
|
| - if (_playWarning > 0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Pending play warning exists");
|
| - }
|
| - _playWarning = 1;
|
| - }
|
| - memset(data, 0, dataSizeBytes); // Start with empty buffer
|
| -
|
| -
|
| - // Get playout data from Audio Device Buffer
|
| -
|
| - if (_playing) {
|
| - unsigned int noSamp10ms = _adbSampFreq / 100;
|
| - // todo: Member variable and allocate when samp freq is determined
|
| - int16_t* dataTmp = new int16_t[noSamp10ms];
|
| - memset(dataTmp, 0, 2*noSamp10ms);
|
| - unsigned int dataPos = 0;
|
| - int noSamplesOut = 0;
|
| - unsigned int nCopy = 0;
|
| -
|
| - // First insert data from playout buffer if any
|
| - if (_playoutBufferUsed > 0) {
|
| - nCopy = (dataSize < _playoutBufferUsed) ?
|
| - dataSize : _playoutBufferUsed;
|
| - if (nCopy != _playoutBufferUsed) {
|
| - // todo: If dataSize < _playoutBufferUsed
|
| - // (should normally never be)
|
| - // we must move the remaining data
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - "nCopy (%u) != _playoutBufferUsed (%u)",
|
| - nCopy, _playoutBufferUsed);
|
| - if (_playWarning > 0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Pending play warning exists");
|
| - }
|
| - _playWarning = 1;
|
| - }
|
| - memcpy(data, _playoutBuffer, 2*nCopy);
|
| - dataPos = nCopy;
|
| - memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
|
| - _playoutBufferUsed = 0;
|
| - }
|
| -
|
| - // Now get the rest from Audio Device Buffer
|
| - while (dataPos < dataSize) {
|
| - // Update playout delay
|
| - UpdatePlayoutDelay();
|
| -
|
| - // Ask for new PCM data to be played out using the AudioDeviceBuffer
|
| - noSamplesOut = _ptrAudioBuffer->RequestPlayoutData(noSamp10ms);
|
| -
|
| - // Get data from Audio Device Buffer
|
| - noSamplesOut =
|
| - _ptrAudioBuffer->GetPlayoutData(
|
| - reinterpret_cast<int8_t*>(dataTmp));
|
| - // Cast OK since only equality comparison
|
| - if (noSamp10ms != (unsigned int)noSamplesOut) {
|
| - // Should never happen
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - "noSamp10ms (%u) != noSamplesOut (%d)",
|
| - noSamp10ms, noSamplesOut);
|
| -
|
| - if (_playWarning > 0) {
|
| - WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
|
| - " Pending play warning exists");
|
| - }
|
| - _playWarning = 1;
|
| - }
|
| -
|
| - // Insert as much as fits in data buffer
|
| - nCopy = (dataSize-dataPos) > noSamp10ms ?
|
| - noSamp10ms : (dataSize-dataPos);
|
| - memcpy(&data[dataPos], dataTmp, 2*nCopy);
|
| -
|
| - // Save rest in playout buffer if any
|
| - if (nCopy < noSamp10ms) {
|
| - memcpy(_playoutBuffer, &dataTmp[nCopy], 2*(noSamp10ms-nCopy));
|
| - _playoutBufferUsed = noSamp10ms - nCopy;
|
| - }
|
| -
|
| - // Update loop/index counter, if we copied less than noSamp10ms
|
| - // samples we shall quit loop anyway
|
| - dataPos += noSamp10ms;
|
| - }
|
| -
|
| - delete [] dataTmp;
|
| - }
|
| -
|
| - return 0;
|
| -}
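
PlayoutProcessImpl() bridges a granularity mismatch: the audio unit asks for an arbitrary frame count while the ADB hands out fixed 10 ms blocks, so any remainder is parked for the next callback. A reduced sketch of that chunking, assuming the leftover never exceeds the request (the real code only warns in that case) and with GetBlock standing in for the RequestPlayoutData/GetPlayoutData pair:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>

    // Sketch: fill 'out' (outLen samples) from 10 ms blocks, carrying the
    // remainder over in carry/carryLen between callbacks.
    void FillFromBlocks(int16_t* out, unsigned outLen, int16_t* carry,
                        unsigned* carryLen, unsigned samplesPer10ms,
                        void (*GetBlock)(int16_t*)) {
      unsigned pos = *carryLen;  // drain last callback's leftover first
      memcpy(out, carry, pos * sizeof(int16_t));
      *carryLen = 0;
      while (pos < outLen) {
        int16_t block[480];      // large enough for 10 ms at 48 kHz
        GetBlock(block);         // always delivers samplesPer10ms samples
        unsigned n = std::min(samplesPer10ms, outLen - pos);
        memcpy(out + pos, block, n * sizeof(int16_t));
        if (n < samplesPer10ms) {  // park the tail for the next callback
          memcpy(carry, block + n, (samplesPer10ms - n) * sizeof(int16_t));
          *carryLen = samplesPer10ms - n;
        }
        pos += n;
      }
    }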
|
| -
|
| -void AudioDeviceIOS::UpdatePlayoutDelay() {
|
| - ++_playoutDelayMeasurementCounter;
|
| -
|
| - if (_playoutDelayMeasurementCounter >= 100) {
|
| - // Update HW and OS delay every second, unlikely to change
|
| -
|
| - // Since this is eventually rounded to integral ms, add 0.5ms
|
| - // here to get round-to-nearest-int behavior instead of
|
| - // truncation.
|
| - double totalDelaySeconds = 0.0005;
|
| -
|
| - // HW output latency
|
| - AVAudioSession* session = [AVAudioSession sharedInstance];
|
| - double latency = session.outputLatency;
|
| - assert(latency >= 0);
|
| - totalDelaySeconds += latency;
|
| -
|
| - // HW buffer duration
|
| - double ioBufferDuration = session.IOBufferDuration;
|
| - assert(ioBufferDuration >= 0);
|
| - totalDelaySeconds += ioBufferDuration;
|
| -
|
| - // AU latency
|
| - Float64 f64(0);
|
| - UInt32 size = sizeof(f64);
|
| - OSStatus result = AudioUnitGetProperty(
|
| - _auVoiceProcessing, kAudioUnitProperty_Latency,
|
| - kAudioUnitScope_Global, 0, &f64, &size);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "error AU latency (result=%d)", result);
|
| - }
|
| - assert(f64 >= 0);
|
| - totalDelaySeconds += f64;
|
| -
|
| - // To ms
|
| - _playoutDelay = static_cast<uint32_t>(totalDelaySeconds / 1000);
|
| -
|
| - // Reset counter
|
| - _playoutDelayMeasurementCounter = 0;
|
| - }
|
| -
|
| - // todo: Add playout buffer?
|
| -}
|
| -
|
| -void AudioDeviceIOS::UpdateRecordingDelay() {
|
| - ++_recordingDelayMeasurementCounter;
|
| -
|
| - if (_recordingDelayMeasurementCounter >= 100) {
|
| - // Update HW and OS delay every second, unlikely to change
|
| -
|
| - // Since this is eventually rounded to integral ms, add 0.5ms
|
| - // here to get round-to-nearest-int behavior instead of
|
| - // truncation.
|
| - double totalDelaySeconds = 0.0005;
|
| -
|
| - // HW input latency
|
| - AVAudioSession* session = [AVAudioSession sharedInstance];
|
| - double latency = session.inputLatency;
|
| - assert(latency >= 0);
|
| - totalDelaySeconds += latency;
|
| -
|
| - // HW buffer duration
|
| - double ioBufferDuration = session.IOBufferDuration;
|
| - assert(ioBufferDuration >= 0);
|
| - totalDelaySeconds += ioBufferDuration;
|
| -
|
| - // AU latency
|
| - Float64 f64(0);
|
| - UInt32 size = sizeof(f64);
|
| - OSStatus result = AudioUnitGetProperty(
|
| - _auVoiceProcessing, kAudioUnitProperty_Latency,
|
| - kAudioUnitScope_Global, 0, &f64, &size);
|
| - if (0 != result) {
|
| - WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
|
| - "error AU latency (result=%d)", result);
|
| - }
|
| - assert(f64 >= 0);
|
| - totalDelaySeconds += f64;
|
| -
|
| - // To ms
|
| - _recordingDelayHWAndOS =
|
| - static_cast<uint32_t>(totalDelaySeconds / 1000);
|
| -
|
| - // Reset counter
|
| - _recordingDelayMeasurementCounter = 0;
|
| - }
|
| -
|
| - _recordingDelay = _recordingDelayHWAndOS;
|
| -
|
| - // ADB recording buffer size, update every time
|
| - // Don't count the one next 10 ms to be sent, then convert samples => ms
|
| - const uint32_t noSamp10ms = _adbSampFreq / 100;
|
| - if (_recordingBufferTotalSize > noSamp10ms) {
|
| - _recordingDelay +=
|
| - (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
|
| - }
|
| -}
|
| -
|
| -bool AudioDeviceIOS::RunCapture(void* ptrThis) {
|
| - return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
|
| -}
|
| -
|
| -bool AudioDeviceIOS::CaptureWorkerThread() {
|
| - if (_recording) {
|
| - int bufPos = 0;
|
| - unsigned int lowestSeq = 0;
|
| - int lowestSeqBufPos = 0;
|
| - bool foundBuf = true;
|
| - const unsigned int noSamp10ms = _adbSampFreq / 100;
|
| -
|
| - while (foundBuf) {
|
| - // Check if we have any buffer with data to insert
|
| - // into the Audio Device Buffer,
|
| - // and find the one with the lowest seq number
|
| - foundBuf = false;
|
| - for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
|
| - if (noSamp10ms == _recordingLength[bufPos]) {
|
| - if (!foundBuf) {
|
| - lowestSeq = _recordingSeqNumber[bufPos];
|
| - lowestSeqBufPos = bufPos;
|
| - foundBuf = true;
|
| - } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
|
| - lowestSeq = _recordingSeqNumber[bufPos];
|
| - lowestSeqBufPos = bufPos;
|
| - }
|
| - }
|
| - } // for
|
| -
|
| - // Insert data into the Audio Device Buffer if found any
|
| - if (foundBuf) {
|
| - // Update recording delay
|
| - UpdateRecordingDelay();
|
| -
|
| - // Set the recorded buffer
|
| - _ptrAudioBuffer->SetRecordedBuffer(
|
| - reinterpret_cast<int8_t*>(
|
| - _recordingBuffer[lowestSeqBufPos]),
|
| - _recordingLength[lowestSeqBufPos]);
|
| -
|
| - // Don't need to set the current mic level in ADB since we only
|
| - // support digital AGC,
|
| - // and besides we cannot get or set the IOS mic level anyway.
|
| -
|
| - // Set VQE info, use clockdrift == 0
|
| - _ptrAudioBuffer->SetVQEData(_playoutDelay, _recordingDelay, 0);
|
| -
|
| - // Deliver recorded samples at specified sample rate, mic level
|
| - // etc. to the observer using callback
|
| - _ptrAudioBuffer->DeliverRecordedData();
|
| -
|
| - // Make buffer available
|
| - _recordingSeqNumber[lowestSeqBufPos] = 0;
|
| - _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
|
| - // Must be done last to avoid interrupt problems between threads
|
| - _recordingLength[lowestSeqBufPos] = 0;
|
| - }
|
| - } // while (foundBuf)
|
| - } // if (_recording)
|
| -
|
| - {
|
| - // Normal case
|
| - // Sleep thread (5ms) to let other threads get to work
|
| - // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio
|
| - // Device Buffer?
|
| - timespec t;
|
| - t.tv_sec = 0;
|
| - t.tv_nsec = 5*1000*1000;
|
| - nanosleep(&t, NULL);
|
| - }
|
| -
|
| - return true;
|
| + }
|
| +
|
| + {
|
| + // Normal case
|
| + // Sleep the thread (5 ms) to let other threads get to work.
|
| + // todo: Is 5 ms optimal? Sleep for a shorter time if data was
|
| + // inserted into the Audio Device Buffer?
|
| + timespec t;
|
| + t.tv_sec = 0;
|
| + t.tv_nsec = 5 * 1000 * 1000;
|
| + nanosleep(&t, nullptr);
|
| + }
|
| + return true;
|
| }
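
The worker thread and the render callback cooperate lock-free on the buffer pool: the callback fills buffers and sets lengths last, while the worker drains full buffers in sequence-number order and clears lengths last; the 5 ms nanosleep simply paces the polling between passes. The selection pass, isolated (illustrative signature):

    // Sketch: find the full 10 ms buffer with the lowest sequence number,
    // as each pass of CaptureWorkerThread() does. Returns -1 if none is
    // ready for delivery.
    int NextBufferToDeliver(const unsigned int* lengths,
                            const unsigned int* seq, int numBuffers,
                            unsigned int samplesPer10ms) {
      int best = -1;
      for (int i = 0; i < numBuffers; ++i) {
        if (lengths[i] == samplesPer10ms && (best == -1 || seq[i] < seq[best]))
          best = i;
      }
      return best;
    }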
|
|
|
| } // namespace webrtc
|
|
|