Chromium Code Reviews

Unified Diff: webrtc/modules/audio_device/ios/audio_device_ios.mm

Issue 1206783002: Cleanup of iOS AudioDevice implementation (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Rebased Created 5 years, 5 months ago
Index: webrtc/modules/audio_device/ios/audio_device_ios.mm
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.mm b/webrtc/modules/audio_device/ios/audio_device_ios.mm
index 47503a96d70546ef07b822627f6ee69548bdc379..cb15032544f4ed6aca753556f95e6a1b72c35f49 100644
--- a/webrtc/modules/audio_device/ios/audio_device_ios.mm
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.mm
@@ -8,1905 +8,1046 @@
* be found in the AUTHORS file in the root of the source tree.
*/
+#if !defined(__has_feature) || !__has_feature(objc_arc)
+#error "This file requires ARC support."
+#endif
+
#import <AVFoundation/AVFoundation.h>
#import <Foundation/Foundation.h>
#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
+#include "webrtc/modules/utility/interface/helpers_ios.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
#include "webrtc/system_wrappers/interface/trace.h"
namespace webrtc {
-AudioDeviceIOS::AudioDeviceIOS(const int32_t id)
- :
- _ptrAudioBuffer(NULL),
- _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
- _id(id),
- _auVoiceProcessing(NULL),
- _audioInterruptionObserver(NULL),
- _initialized(false),
- _isShutDown(false),
- _recording(false),
- _playing(false),
- _recIsInitialized(false),
- _playIsInitialized(false),
- _recordingDeviceIsSpecified(false),
- _playoutDeviceIsSpecified(false),
- _micIsInitialized(false),
- _speakerIsInitialized(false),
- _AGC(false),
- _adbSampFreq(0),
- _recordingDelay(0),
- _playoutDelay(0),
- _playoutDelayMeasurementCounter(9999),
- _recordingDelayHWAndOS(0),
- _recordingDelayMeasurementCounter(9999),
- _playWarning(0),
- _playError(0),
- _recWarning(0),
- _recError(0),
- _playoutBufferUsed(0),
- _recordingCurrentSeq(0),
- _recordingBufferTotalSize(0) {
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
- "%s created", __FUNCTION__);
-
- memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
- memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
- memset(_recordingLength, 0, sizeof(_recordingLength));
- memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
-}
-
-AudioDeviceIOS::~AudioDeviceIOS() {
- WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
- "%s destroyed", __FUNCTION__);
-
- Terminate();
-
- delete &_critSect;
-}
-
-
-// ============================================================================
-// API
-// ============================================================================
-
-void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- _ptrAudioBuffer = audioBuffer;
-
- // inform the AudioBuffer about default settings for this implementation
- _ptrAudioBuffer->SetRecordingSampleRate(ENGINE_REC_BUF_SIZE_IN_SAMPLES);
- _ptrAudioBuffer->SetPlayoutSampleRate(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);
- _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS);
- _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS);
-}
-
-int32_t AudioDeviceIOS::ActiveAudioLayer(
- AudioDeviceModule::AudioLayer& audioLayer) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
- return 0;
-}
-
-int32_t AudioDeviceIOS::Init() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (_initialized) {
- return 0;
- }
-
- _isShutDown = false;
-
- // Create and start capture thread
- if (!_captureWorkerThread) {
- _captureWorkerThread = ThreadWrapper::CreateThread(
- RunCapture, this, "CaptureWorkerThread");
- bool res = _captureWorkerThread->Start();
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
- _id, "CaptureWorkerThread started (res=%d)", res);
- _captureWorkerThread->SetPriority(kRealtimePriority);
- } else {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
- _id, "Thread already created");
- }
- _playWarning = 0;
- _playError = 0;
- _recWarning = 0;
- _recError = 0;
-
- _initialized = true;
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::Terminate() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- if (!_initialized) {
- return 0;
- }
-
-
- // Stop capture thread
- if (_captureWorkerThread) {
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
- _id, "Stopping CaptureWorkerThread");
- bool res = _captureWorkerThread->Stop();
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice,
- _id, "CaptureWorkerThread stopped (res=%d)", res);
- _captureWorkerThread.reset();
- }
-
- // Shut down Audio Unit
- ShutdownPlayOrRecord();
-
- _isShutDown = true;
- _initialized = false;
- _speakerIsInitialized = false;
- _micIsInitialized = false;
- _playoutDeviceIsSpecified = false;
- _recordingDeviceIsSpecified = false;
- return 0;
-}
-
-bool AudioDeviceIOS::Initialized() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- return (_initialized);
-}
-
-int32_t AudioDeviceIOS::InitSpeaker() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- CriticalSectionScoped lock(&_critSect);
+#define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
- if (!_initialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Not initialized");
- return -1;
- }
-
- if (_playing) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Cannot init speaker when playing");
- return -1;
- }
-
- if (!_playoutDeviceIsSpecified) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Playout device is not specified");
- return -1;
- }
-
- // Do nothing
- _speakerIsInitialized = true;
+using ios::CheckAndLogError;
- return 0;
+#if !defined(NDEBUG)
+static void LogDeviceInfo() {
+ LOG(LS_INFO) << "LogDeviceInfo";
+ @autoreleasepool {
+ LOG(LS_INFO) << " system name: " << ios::GetSystemName();
+ LOG(LS_INFO) << " system version: " << ios::GetSystemVersion();
+ LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
+ LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
+ }
}
+#endif
-int32_t AudioDeviceIOS::InitMicrophone() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_initialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Not initialized");
- return -1;
- }
-
- if (_recording) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Cannot init mic when recording");
- return -1;
+static void ActivateAudioSession(AVAudioSession* session, bool activate) {
+ LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
+ @autoreleasepool {
+ NSError* error = nil;
+ BOOL success = NO;
+ if (!activate) {
+ // Deactivate the audio session.
+ success = [session setActive:NO error:&error];
+ DCHECK(CheckAndLogError(success, error));
+ return;
+ }
+ // Activate an audio session and set category and mode. Only make changes
+ // if needed since setting them to the value they already have will clear
+ // transient properties (such as PortOverride) that some other component
+ // has set up.
+ if (session.category != AVAudioSessionCategoryPlayAndRecord) {
+ error = nil;
+ success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
+ error:&error];
+ DCHECK(CheckAndLogError(success, error));
}
-
- if (!_recordingDeviceIsSpecified) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
- _id, " Recording device is not specified");
- return -1;
+ if (session.mode != AVAudioSessionModeVoiceChat) {
+ error = nil;
+ success = [session setMode:AVAudioSessionModeVoiceChat error:&error];
+ DCHECK(CheckAndLogError(success, error));
}
-
- // Do nothing
-
- _micIsInitialized = true;
-
- return 0;
-}
-
-bool AudioDeviceIOS::SpeakerIsInitialized() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- return _speakerIsInitialized;
-}
-
-bool AudioDeviceIOS::MicrophoneIsInitialized() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- return _micIsInitialized;
-}
-
-int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Speaker volume not supported on iOS
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetSpeakerVolume(volume=%u)", volume);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::SetWaveOutVolume(uint16_t volumeLeft,
- uint16_t volumeRight) {
- WEBRTC_TRACE(
- kTraceModuleCall,
- kTraceAudioDevice,
- _id,
- "AudioDeviceIOS::SetWaveOutVolume(volumeLeft=%u, volumeRight=%u)",
- volumeLeft, volumeRight);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
-
- return -1;
-}
-
-int32_t
-AudioDeviceIOS::WaveOutVolume(uint16_t& /*volumeLeft*/,
- uint16_t& /*volumeRight*/) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::MinSpeakerVolume(
- uint32_t& minVolume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::SpeakerVolumeStepSize(uint16_t& stepSize) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Speaker mute not supported on iOS
-
- return 0;
+ error = nil;
+ success = [session setActive:YES error:&error];
+ DCHECK(CheckAndLogError(success, error));
+ // Ensure that category and mode are actually activated.
+ DCHECK(
+ [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord]);
+ DCHECK([session.mode isEqualToString:AVAudioSessionModeVoiceChat]);
+ }
+}
+
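ios::CheckAndLogError() comes from helpers_ios.h, which is not part of this diff. Based on how it is called above, a plausible sketch of that helper (names and message format are assumptions, not taken from the real header) is:

    // Sketch only; the real implementation lives in helpers_ios.h.
    namespace webrtc {
    namespace ios {
    inline bool CheckAndLogError(BOOL success, NSError* error) {
      if (!success) {
        NSString* msg =
            error ? [error localizedDescription] : @"(no NSError provided)";
        LOG(LS_ERROR) << "Audio session error: " << [msg UTF8String];
        return false;
      }
      return true;
    }
    }  // namespace ios
    }  // namespace webrtc

Returning the success flag lets call sites wrap the helper in DCHECK(), as ActivateAudioSession() does above.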
+// Query hardware characteristics, such as input and output latency, input and
+// output channel count, hardware sample rate, hardware volume setting, and
+// whether audio input is available. To obtain meaningful values for hardware
+// characteristics, the audio session must be initialized and active before we
+// query the values.
+// TODO(henrika): Note that these characteristics can change at runtime. For
+// instance, input sample rate may change when a user plugs in a headset.
+static void GetHardwareAudioParameters(AudioParameters* playout_parameters,
+ AudioParameters* record_parameters) {
+ LOG(LS_INFO) << "GetHardwareAudioParameters";
+ @autoreleasepool {
+ // Implicit initialization happens when we obtain a reference to the
+ // AVAudioSession object.
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ // Always get values when the audio session is active.
+ ActivateAudioSession(session, true);
+ CHECK(session.isInputAvailable) << "No input path is available!";
+ // Get current hardware parameters.
+ double sample_rate = (double)session.sampleRate;
+ double io_buffer_duration = (double)session.IOBufferDuration;
+ int output_channels = (int)session.outputNumberOfChannels;
+ int input_channels = (int)session.inputNumberOfChannels;
+ int frames_per_buffer =
+ static_cast<int>(sample_rate * io_buffer_duration + 0.5);
+ // Copy hardware parameters to output parameters.
+ playout_parameters->reset(sample_rate, output_channels, frames_per_buffer);
+ record_parameters->reset(sample_rate, input_channels, frames_per_buffer);
+ // Add logging for debugging purposes.
+ LOG(LS_INFO) << " sample rate: " << sample_rate;
+ LOG(LS_INFO) << " IO buffer duration: " << io_buffer_duration;
+ LOG(LS_INFO) << " frames_per_buffer: " << frames_per_buffer;
+ LOG(LS_INFO) << " output channels: " << output_channels;
+ LOG(LS_INFO) << " input channels: " << input_channels;
+ LOG(LS_INFO) << " output latency: " << (double)session.outputLatency;
+ LOG(LS_INFO) << " input latency: " << (double)session.inputLatency;
+ // Don't keep the audio session active. Instead, deactivate when needed.
+ ActivateAudioSession(session, false);
+ // TODO(henrika): to be extra safe, we can do more here. E.g., set
+ // preferred values for sample rate, channels etc., re-activate an audio
+ // session and verify the actual values again. Then we know for sure that
+ // the current values will in fact be correct. Or, we can skip all this
+ // and check the settings when audio is started. Probably better.
+ }
+}
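To make the frames_per_buffer arithmetic concrete, here is a worked example with hypothetical hardware values (a 48 kHz session rate and the 16 ms I/O buffer mentioned later in this change):

    // Hypothetical values; the real ones come from AVAudioSession.
    double sample_rate = 48000.0;       // hardware sample rate in Hz
    double io_buffer_duration = 0.016;  // 16 ms I/O buffer
    // 48000 * 0.016 + 0.5 = 768.5, truncated to 768 frames per buffer.
    int frames_per_buffer =
        static_cast<int>(sample_rate * io_buffer_duration + 0.5);

The + 0.5 term gives round-to-nearest behavior before the truncating cast.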
+
+AudioDeviceIOS::AudioDeviceIOS()
+ : audio_device_buffer_(nullptr),
+ _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
+ _auVoiceProcessing(nullptr),
+ _audioInterruptionObserver(nullptr),
+ _initialized(false),
+ _isShutDown(false),
+ _recording(false),
+ _playing(false),
+ _recIsInitialized(false),
+ _playIsInitialized(false),
+ _adbSampFreq(0),
+ _recordingDelay(0),
+ _playoutDelay(0),
+ _playoutDelayMeasurementCounter(9999),
+ _recordingDelayHWAndOS(0),
+ _recordingDelayMeasurementCounter(9999),
+ _playoutBufferUsed(0),
+ _recordingCurrentSeq(0),
+ _recordingBufferTotalSize(0) {
+ LOGI() << "ctor" << ios::GetCurrentThreadDescription();
+ memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
+ memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
+ memset(_recordingLength, 0, sizeof(_recordingLength));
+ memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
}
-int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
+AudioDeviceIOS::~AudioDeviceIOS() {
+ LOGI() << "~dtor";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ Terminate();
+ delete &_critSect;
}
-int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
+void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ LOGI() << "AttachAudioBuffer";
+ DCHECK(audioBuffer);
+ DCHECK(thread_checker_.CalledOnValidThread());
+ audio_device_buffer_ = audioBuffer;
}
-int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Mic mute not supported on iOS
-
+int32_t AudioDeviceIOS::Init() {
+ LOGI() << "Init";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (_initialized) {
return 0;
-}
-
-int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
+ }
+#if !defined(NDEBUG)
+ LogDeviceInfo();
+#endif
+ // Query hardware audio parameters and cache the results. These parameters
+ // will be used as preferred values later when streaming starts.
+ // Note that I override these "optimal" values below since I don't want to
+ // modify the existing behavior yet.
+ GetHardwareAudioParameters(&playout_parameters_, &record_parameters_);
+ // TODO(henrika): these parameters are currently hard coded to match the
+ // existing implementation where we always use 16kHz as preferred sample
+ // rate and mono only. Goal is to improve this scheme and make it more
+ // flexible. In addition, a better native buffer size shall be derived.
+ // Using 10ms as default here (only used by unit test so far).
+ // We should also implement observers for notification of any change in
+ // these parameters.
+ playout_parameters_.reset(16000, 1, 160);
+ record_parameters_.reset(16000, 1, 160);
+
+ // AttachAudioBuffer() is called at construction by the main class but check
+ // just in case.
+ DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
+ // Inform the audio device buffer (ADB) about the new audio format.
+ // TODO(henrika): try to improve this section.
+ audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
+ audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
+ audio_device_buffer_->SetRecordingSampleRate(
+ record_parameters_.sample_rate());
+ audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
+
+ DCHECK(!_captureWorkerThread);
+ // Create and start the capture thread.
+ // TODO(henrika): do we need this thread?
+ _isShutDown = false;
+ _captureWorkerThread =
+ ThreadWrapper::CreateThread(RunCapture, this, "CaptureWorkerThread");
+ if (!_captureWorkerThread->Start()) {
+ LOG_F(LS_ERROR) << "Failed to start CaptureWorkerThread!";
return -1;
+ }
+ _captureWorkerThread->SetPriority(kRealtimePriority);
+ _initialized = true;
+ return 0;
}
-int32_t AudioDeviceIOS::MicrophoneBoostIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Mic boost not supported on iOS
-
+int32_t AudioDeviceIOS::Terminate() {
+ LOGI() << "Terminate";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!_initialized) {
return 0;
+ }
+ // Stop the capture thread.
+ if (_captureWorkerThread) {
+ if (!_captureWorkerThread->Stop()) {
+ LOG_F(LS_ERROR) << "Failed to stop CaptureWorkerThread!";
+ return -1;
+ }
+ _captureWorkerThread.reset();
+ }
+ ShutdownPlayOrRecord();
+ _isShutDown = true;
+ _initialized = false;
+ return 0;
}
-int32_t AudioDeviceIOS::SetMicrophoneBoost(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetMicrophoneBoost(enable=%u)", enable);
-
- if (!_micIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Microphone not initialized");
- return -1;
+int32_t AudioDeviceIOS::InitPlayout() {
+ LOGI() << "InitPlayout";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(_initialized);
+ DCHECK(!_playIsInitialized);
+ DCHECK(!_playing);
+ if (!_recIsInitialized) {
+ if (InitPlayOrRecord() == -1) {
+ LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
+ return -1;
}
+ }
+ _playIsInitialized = true;
+ return 0;
+}
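For context, a sketch of the call order the owning audio device module is expected to drive (buffer ownership details here are assumptions for illustration):

    AudioDeviceBuffer audio_buffer;  // owned by the ADM in practice
    AudioDeviceIOS audio_device;
    audio_device.AttachAudioBuffer(&audio_buffer);
    audio_device.Init();          // caches hardware parameters, starts thread
    audio_device.InitPlayout();   // calls InitPlayOrRecord() when needed
    audio_device.StartPlayout();  // starts the Voice Processing Audio Unit
    // ... streaming ...
    audio_device.StopPlayout();   // shuts the AU down once recording is off too
    audio_device.Terminate();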
- if (enable) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " SetMicrophoneBoost cannot be enabled on this platform");
- return -1;
+int32_t AudioDeviceIOS::InitRecording() {
+ LOGI() << "InitRecording";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(_initialized);
+ DCHECK(!_recIsInitialized);
+ DCHECK(!_recording);
+ if (!_playIsInitialized) {
+ if (InitPlayOrRecord() == -1) {
+ LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
+ return -1;
}
-
- return 0;
+ }
+ _recIsInitialized = true;
+ return 0;
}
-int32_t AudioDeviceIOS::MicrophoneBoost(bool& enabled) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- if (!_micIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Microphone not initialized");
- return -1;
+int32_t AudioDeviceIOS::StartPlayout() {
+ LOGI() << "StartPlayout";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(_playIsInitialized);
+ DCHECK(!_playing);
+
+ CriticalSectionScoped lock(&_critSect);
+
+ memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
+ _playoutBufferUsed = 0;
+ _playoutDelay = 0;
+ // Make sure first call to update delay function will update delay
+ _playoutDelayMeasurementCounter = 9999;
+
+ if (!_recording) {
+ OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
+ if (result != noErr) {
+ LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
+ return -1;
}
-
- enabled = false;
-
- return 0;
+ }
+ _playing = true;
+ return 0;
}
-int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
+int32_t AudioDeviceIOS::StopPlayout() {
+ LOGI() << "StopPlayout";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!_playIsInitialized || !_playing) {
+ return 0;
+ }
- available = false; // Stereo recording not supported on iOS
+ CriticalSectionScoped lock(&_critSect);
- return 0;
+ if (!_recording) {
+ // Both playout and recording have stopped; shut down the device.
+ ShutdownPlayOrRecord();
+ }
+ _playIsInitialized = false;
+ _playing = false;
+ return 0;
}
-int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetStereoRecording(enable=%u)", enable);
-
- if (enable) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Stereo recording is not supported on this platform");
- return -1;
+int32_t AudioDeviceIOS::StartRecording() {
+ LOGI() << "StartRecording";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(_recIsInitialized);
+ DCHECK(!_recording);
+
+ CriticalSectionScoped lock(&_critSect);
+
+ memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
+ memset(_recordingLength, 0, sizeof(_recordingLength));
+ memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
+
+ _recordingCurrentSeq = 0;
+ _recordingBufferTotalSize = 0;
+ _recordingDelay = 0;
+ _recordingDelayHWAndOS = 0;
+ // Make sure first call to update delay function will update delay
+ _recordingDelayMeasurementCounter = 9999;
+
+ if (!_playing) {
+ OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
+ if (result != noErr) {
+ LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
+ return -1;
}
- return 0;
+ }
+ _recording = true;
+ return 0;
}
-int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- enabled = false;
+int32_t AudioDeviceIOS::StopRecording() {
+ LOGI() << "StopRecording";
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!_recIsInitialized || !_recording) {
return 0;
-}
+ }
-int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
+ CriticalSectionScoped lock(&_critSect);
- available = false; // Stereo playout not supported on iOS
-
- return 0;
+ if (!_playing) {
+ // Both playout and recording have stopped; shut down the device.
+ ShutdownPlayOrRecord();
+ }
+ _recIsInitialized = false;
+ _recording = false;
+ return 0;
}
-int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetStereoPlayout(enable=%u)", enable);
-
+// Change the default receiver playout route to speaker.
+int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
+ LOGI() << "SetLoudspeakerStatus(" << enable << ")";
+
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ NSString* category = session.category;
+ AVAudioSessionCategoryOptions options = session.categoryOptions;
+ // Respect old category options if category is
+ // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
+ // might not be valid for this category.
+ if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
if (enable) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Stereo playout is not supported on this platform");
- return -1;
+ options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
+ } else {
+ options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
}
- return 0;
+ } else {
+ options = AVAudioSessionCategoryOptionDefaultToSpeaker;
+ }
+ NSError* error = nil;
+ BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
+ withOptions:options
+ error:&error];
+ ios::CheckAndLogError(success, error);
+ return (error == nil) ? 0 : -1;
}
-int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- enabled = false;
- return 0;
+int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const {
+ LOGI() << "GetLoudspeakerStatus";
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ AVAudioSessionCategoryOptions options = session.categoryOptions;
+ enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
+ return 0;
}
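A short usage sketch for the pair of methods above, assuming an already initialized device:

    // 'audio_device' is assumed to be an initialized AudioDeviceIOS instance.
    bool speaker_on = false;
    audio_device->SetLoudspeakerStatus(true);        // route to built-in speaker
    audio_device->GetLoudspeakerStatus(speaker_on);  // speaker_on is now true
    audio_device->SetLoudspeakerStatus(false);       // back to the receiver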
-int32_t AudioDeviceIOS::SetAGC(bool enable) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetAGC(enable=%d)", enable);
-
- _AGC = enable;
-
- return 0;
+int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
+ delayMS = _playoutDelay;
+ return 0;
}
-bool AudioDeviceIOS::AGC() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- return _AGC;
+int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
+ delayMS = _recordingDelay;
+ return 0;
}
-int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
-
- available = false; // Mic volume not supported on IOS
-
- return 0;
+int32_t AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const {
+ type = AudioDeviceModule::kAdaptiveBufferSize;
+ sizeMS = _playoutDelay;
+ return 0;
}
-int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetMicrophoneVolume(volume=%u)", volume);
+int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
+ CHECK(playout_parameters_.is_valid());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ *params = playout_parameters_;
+ return 0;
+}
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
+int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
+ CHECK(record_parameters_.is_valid());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ *params = record_parameters_;
+ return 0;
}
-int32_t
- AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
+// ============================================================================
+// Private Methods
+// ============================================================================
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
+int32_t AudioDeviceIOS::InitPlayOrRecord() {
+ LOGI() << "InitPlayOrRecord";
+ DCHECK(!_auVoiceProcessing);
-int32_t
- AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
+ OSStatus result = -1;
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
+ // Create Voice Processing Audio Unit
+ AudioComponentDescription desc;
+ AudioComponent comp;
-int32_t
- AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
+ desc.componentType = kAudioUnitType_Output;
+ desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
+ desc.componentManufacturer = kAudioUnitManufacturer_Apple;
+ desc.componentFlags = 0;
+ desc.componentFlagsMask = 0;
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
+ comp = AudioComponentFindNext(nullptr, &desc);
+ if (nullptr == comp) {
+ LOG_F(LS_ERROR) << "Could not find audio component for Audio Unit";
return -1;
-}
-
-int32_t
- AudioDeviceIOS::MicrophoneVolumeStepSize(
- uint16_t& stepSize) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
+ }
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
+ result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "Failed to create Audio Unit instance: " << result;
return -1;
-}
+ }
+
+ // TODO(henrika): I think we should set the preferred channel configuration
+ // in both directions as well to be safe.
+
+ // Set preferred hardware sample rate to 16 kHz.
+ // TODO(henrika): improve this selection of sample rate. Why do we currently
+ // use a hard coded value? How can we fail and still continue?
+ NSError* error = nil;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ Float64 preferredSampleRate(playout_parameters_.sample_rate());
+ [session setPreferredSampleRate:preferredSampleRate error:&error];
+ if (error != nil) {
+ const char* errorString = [[error localizedDescription] UTF8String];
+ LOG_F(LS_ERROR) << "setPreferredSampleRate failed: " << errorString;
+ }
+
+ // TODO(henrika): we can reduce latency by setting the IOBufferDuration
+ // here. Default size for 16kHz is 0.016 sec or 16 msec on an iPhone 6.
+
+ // Activate the audio session.
+ ActivateAudioSession(session, true);
+
+ UInt32 enableIO = 1;
+ result = AudioUnitSetProperty(_auVoiceProcessing,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Input,
+ 1, // input bus
+ &enableIO, sizeof(enableIO));
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "Failed to enable IO on input: " << result;
+ }
+
+ result = AudioUnitSetProperty(_auVoiceProcessing,
+ kAudioOutputUnitProperty_EnableIO,
+ kAudioUnitScope_Output,
+ 0, // output bus
+ &enableIO, sizeof(enableIO));
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "Failed to enable IO on output: " << result;
+ }
+
+ // Disable AU buffer allocation for the recorder; we allocate our own.
+ // TODO(henrika): understand this part better.
+ UInt32 flag = 0;
+ result = AudioUnitSetProperty(_auVoiceProcessing,
+ kAudioUnitProperty_ShouldAllocateBuffer,
+ kAudioUnitScope_Output, 1, &flag, sizeof(flag));
+ if (0 != result) {
+ LOG_F(LS_WARNING) << "Failed to disable AU buffer allocation: " << result;
+ // Should work anyway
+ }
+
+ // Set recording callback.
+ AURenderCallbackStruct auCbS;
+ memset(&auCbS, 0, sizeof(auCbS));
+ auCbS.inputProc = RecordProcess;
+ auCbS.inputProcRefCon = this;
+ result = AudioUnitSetProperty(
+ _auVoiceProcessing, kAudioOutputUnitProperty_SetInputCallback,
+ kAudioUnitScope_Global, 1, &auCbS, sizeof(auCbS));
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "Failed to set AU record callback: " << result;
+ }
+
+ // Set playout callback.
+ memset(&auCbS, 0, sizeof(auCbS));
+ auCbS.inputProc = PlayoutProcess;
+ auCbS.inputProcRefCon = this;
+ result = AudioUnitSetProperty(
+ _auVoiceProcessing, kAudioUnitProperty_SetRenderCallback,
+ kAudioUnitScope_Global, 0, &auCbS, sizeof(auCbS));
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "Failed to set AU output callback: " << result;
+ }
+
+ // Get stream format for out/0
+ AudioStreamBasicDescription playoutDesc;
+ UInt32 size = sizeof(playoutDesc);
+ result =
+ AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output, 0, &playoutDesc, &size);
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "Failed to get AU output stream format: " << result;
+ }
+
+ playoutDesc.mSampleRate = preferredSampleRate;
+ LOG(LS_INFO) << "Audio Unit playout opened in sampling rate: "
+ << playoutDesc.mSampleRate;
+
+ // Store the sampling frequency to use with the Audio Device Buffer.
+ // todo: Add 48 kHz (increase buffer sizes). Other fs?
+ // TODO(henrika): Figure out if we really need this complex handling.
+ if ((playoutDesc.mSampleRate > 44090.0) &&
+ (playoutDesc.mSampleRate < 44110.0)) {
+ _adbSampFreq = 44100;
+ } else if ((playoutDesc.mSampleRate > 15990.0) &&
+ (playoutDesc.mSampleRate < 16010.0)) {
+ _adbSampFreq = 16000;
+ } else if ((playoutDesc.mSampleRate > 7990.0) &&
+ (playoutDesc.mSampleRate < 8010.0)) {
+ _adbSampFreq = 8000;
+ } else {
+ _adbSampFreq = 0;
+ FATAL() << "Invalid sample rate";
+ }
+
+ // Set the audio device buffer sampling rates (use same for play and record).
+ // TODO(henrika): this is not a good place to set these things up.
+ DCHECK(audio_device_buffer_);
+ DCHECK_EQ(_adbSampFreq, playout_parameters_.sample_rate());
+ audio_device_buffer_->SetRecordingSampleRate(_adbSampFreq);
+ audio_device_buffer_->SetPlayoutSampleRate(_adbSampFreq);
+
+ // Set stream format for out/0.
+ playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
+ kLinearPCMFormatFlagIsPacked |
+ kLinearPCMFormatFlagIsNonInterleaved;
+ playoutDesc.mBytesPerPacket = 2;
+ playoutDesc.mFramesPerPacket = 1;
+ playoutDesc.mBytesPerFrame = 2;
+ playoutDesc.mChannelsPerFrame = 1;
+ playoutDesc.mBitsPerChannel = 16;
+ result =
+ AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input, 0, &playoutDesc, size);
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "Failed to set AU stream format for out/0";
+ }
+
+ // Get stream format for in/1.
+ AudioStreamBasicDescription recordingDesc;
+ size = sizeof(recordingDesc);
+ result =
+ AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Input, 1, &recordingDesc, &size);
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "Failed to get AU stream format for in/1";
+ }
+
+ recordingDesc.mSampleRate = preferredSampleRate;
+ LOG(LS_INFO) << "Audio Unit recording opened in sampling rate: "
+ << recordingDesc.mSampleRate;
+
+ // Set stream format for out/1 (use same sampling frequency as for in/1).
+ recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
+ kLinearPCMFormatFlagIsPacked |
+ kLinearPCMFormatFlagIsNonInterleaved;
+ recordingDesc.mBytesPerPacket = 2;
+ recordingDesc.mFramesPerPacket = 1;
+ recordingDesc.mBytesPerFrame = 2;
+ recordingDesc.mChannelsPerFrame = 1;
+ recordingDesc.mBitsPerChannel = 16;
+ result =
+ AudioUnitSetProperty(_auVoiceProcessing, kAudioUnitProperty_StreamFormat,
+ kAudioUnitScope_Output, 1, &recordingDesc, size);
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "Failed to set AU stream format for out/1";
+ }
+
+ // Initialize here already to be able to get/set stream properties.
+ result = AudioUnitInitialize(_auVoiceProcessing);
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "AudioUnitInitialize failed: " << result;
+ }
+
+ // Get hardware sample rate for logging (see if we get what we asked for).
+ // TODO(henrika): what if we don't get what we ask for?
+ double sampleRate = session.sampleRate;
+ LOG(LS_INFO) << "Current HW sample rate is: " << sampleRate
+ << ", ADB sample rate is: " << _adbSampFreq;
+ LOG(LS_INFO) << "Current HW IO buffer size is: " <<
+ [session IOBufferDuration];
+
+ // Listen to audio interruptions.
+ // TODO(henrika): learn this area better.
+ NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
+ id observer = [center
+ addObserverForName:AVAudioSessionInterruptionNotification
+ object:nil
+ queue:[NSOperationQueue mainQueue]
+ usingBlock:^(NSNotification* notification) {
+ NSNumber* typeNumber =
+ [notification userInfo][AVAudioSessionInterruptionTypeKey];
+ AVAudioSessionInterruptionType type =
+ (AVAudioSessionInterruptionType)[typeNumber
+ unsignedIntegerValue];
+ switch (type) {
+ case AVAudioSessionInterruptionTypeBegan:
+ // At this point our audio session has been deactivated and the
+ // audio unit render callbacks no longer occur. Nothing to do.
+ break;
+ case AVAudioSessionInterruptionTypeEnded: {
+ NSError* error = nil;
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ [session setActive:YES error:&error];
+ if (error != nil) {
+ LOG_F(LS_ERROR) << "Failed to activate audio session";
+ }
+ // Post interruption the audio unit render callbacks don't
+ // automatically continue, so we restart the unit manually
+ // here.
+ AudioOutputUnitStop(_auVoiceProcessing);
+ AudioOutputUnitStart(_auVoiceProcessing);
+ break;
+ }
+ }
+ }];
+ // Increment refcount on observer using ARC bridge. The instance variable
+ // is a void* instead of an id because the header is included in other
+ // pure C++ files.
+ _audioInterruptionObserver = (__bridge_retained void*)observer;
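The retained cast above and the matching transfer cast in ShutdownPlayOrRecord() form a pair. In isolation, the pattern looks like this:

    // Minimal sketch of the __bridge_retained / __bridge_transfer pair.
    id observer = [[NSObject alloc] init];          // ARC-managed object
    void* raw = (__bridge_retained void*)observer;  // +1; ARC stops tracking
    observer = nil;                                 // object lives on via raw
    id back = (__bridge_transfer id)raw;            // ARC resumes ownership
    raw = nullptr;                                  // released when back dies

Keeping the instance variable as a void* means audio_device_ios.h stays compilable as pure C++.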
-int16_t AudioDeviceIOS::PlayoutDevices() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
+ // Deactivate the audio session.
+ ActivateAudioSession(session, false);
- return (int16_t)1;
+ return 0;
}
-int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetPlayoutDevice(index=%u)", index);
+int32_t AudioDeviceIOS::ShutdownPlayOrRecord() {
+ LOGI() << "ShutdownPlayOrRecord";
- if (_playIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Playout already initialized");
- return -1;
+ if (_audioInterruptionObserver != nullptr) {
+ NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
+ // Transfer ownership of observer back to ARC, which will dealloc the
+ // observer once it exits this scope.
+ id observer = (__bridge_transfer id)_audioInterruptionObserver;
+ [center removeObserver:observer];
+ _audioInterruptionObserver = nullptr;
+ }
+
+ // Close and delete AU.
+ OSStatus result = -1;
+ if (nullptr != _auVoiceProcessing) {
+ result = AudioOutputUnitStop(_auVoiceProcessing);
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
}
-
- if (index !=0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " SetPlayoutDevice invalid index");
- return -1;
+ result = AudioComponentInstanceDispose(_auVoiceProcessing);
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
}
- _playoutDeviceIsSpecified = true;
-
- return 0;
-}
+ _auVoiceProcessing = nullptr;
+ }
-int32_t
- AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "WindowsDeviceType not supported");
- return -1;
+ return 0;
}
-int32_t
- AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::PlayoutDeviceName(index=%u)", index);
-
- if (index != 0) {
- return -1;
- }
- // return empty strings
- memset(name, 0, kAdmMaxDeviceNameSize);
- if (guid != NULL) {
- memset(guid, 0, kAdmMaxGuidSize);
- }
+// ============================================================================
+// Thread Methods
+// ============================================================================
+OSStatus AudioDeviceIOS::RecordProcess(
+ void* inRefCon,
+ AudioUnitRenderActionFlags* ioActionFlags,
+ const AudioTimeStamp* inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList* ioData) {
+ AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
+ return ptrThis->RecordProcessImpl(ioActionFlags, inTimeStamp, inBusNumber,
+ inNumberFrames);
+}
+
+OSStatus AudioDeviceIOS::RecordProcessImpl(
+ AudioUnitRenderActionFlags* ioActionFlags,
+ const AudioTimeStamp* inTimeStamp,
+ uint32_t inBusNumber,
+ uint32_t inNumberFrames) {
+ // Use a temporary buffer so the recording buffers are not locked up
+ // longer than necessary.
+ // todo: Make dataTmp a member variable with static size that holds
+ // max possible frames?
+ int16_t* dataTmp = new int16_t[inNumberFrames];
+ memset(dataTmp, 0, 2 * inNumberFrames);
+
+ AudioBufferList abList;
+ abList.mNumberBuffers = 1;
+ abList.mBuffers[0].mData = dataTmp;
+ abList.mBuffers[0].mDataByteSize = 2 * inNumberFrames; // 2 bytes/sample
+ abList.mBuffers[0].mNumberChannels = 1;
+
+ // Get data from mic
+ OSStatus res = AudioUnitRender(_auVoiceProcessing, ioActionFlags, inTimeStamp,
+ inBusNumber, inNumberFrames, &abList);
+ if (res != 0) {
+ // TODO(henrika): improve error handling.
+ delete[] dataTmp;
return 0;
-}
-
-int32_t
- AudioDeviceIOS::RecordingDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::RecordingDeviceName(index=%u)", index);
+ }
+
+ if (_recording) {
+ // Insert all data in the temp buffer into the recording buffers.
+ // At any given time there is zero or one partially full buffer; all
+ // others are full or empty. Full means filled with noSamp10ms samples.
+
+ const unsigned int noSamp10ms = _adbSampFreq / 100;
+ unsigned int dataPos = 0;
+ uint16_t bufPos = 0;
+ int16_t insertPos = -1;
+ unsigned int nCopy = 0; // Number of samples to copy
+
+ while (dataPos < inNumberFrames) {
+ // Loop over all recording buffers, or stop early once we find the
+ // partially full buffer. First choice is to insert into the partially
+ // full buffer; second choice is an empty buffer.
+ bufPos = 0;
+ insertPos = -1;
+ nCopy = 0;
+ while (bufPos < N_REC_BUFFERS) {
+ if ((_recordingLength[bufPos] > 0) &&
+ (_recordingLength[bufPos] < noSamp10ms)) {
+ // Found the partially full buffer
+ insertPos = static_cast<int16_t>(bufPos);
+ // Don't need to search more, quit loop
+ bufPos = N_REC_BUFFERS;
+ } else if ((-1 == insertPos) && (0 == _recordingLength[bufPos])) {
+ // Found an empty buffer
+ insertPos = static_cast<int16_t>(bufPos);
+ }
+ ++bufPos;
+ }
- if (index != 0) {
- return -1;
- }
- // return empty strings
- memset(name, 0, kAdmMaxDeviceNameSize);
- if (guid != NULL) {
- memset(guid, 0, kAdmMaxGuidSize);
+ // Insert data into buffer
+ if (insertPos > -1) {
+ // We found a non-full buffer, copy data to it
+ unsigned int dataToCopy = inNumberFrames - dataPos;
+ unsigned int currentRecLen = _recordingLength[insertPos];
+ unsigned int roomInBuffer = noSamp10ms - currentRecLen;
+ nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);
+
+ memcpy(&_recordingBuffer[insertPos][currentRecLen], &dataTmp[dataPos],
+ nCopy * sizeof(int16_t));
+ if (0 == currentRecLen) {
+ _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
+ ++_recordingCurrentSeq;
+ }
+ _recordingBufferTotalSize += nCopy;
+ // Has to be done last to avoid interrupt problems between threads.
+ _recordingLength[insertPos] += nCopy;
+ dataPos += nCopy;
+ } else {
+ // Didn't find a non-full buffer
+ // TODO(henrika): improve error handling
+ dataPos = inNumberFrames; // Don't try to insert more
+ }
}
+ }
+ delete[] dataTmp;
+ return 0;
+}
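With the 16 kHz ADB rate fixed in Init(), the slot bookkeeping above works out as:

    const int kAdbSampleRateHz = 16000;  // set via playout_parameters_ in Init()
    const unsigned int noSamp10ms = kAdbSampleRateHz / 100;  // 160 samples
    // _recordingLength[i] == 160     -> slot full, ready for delivery
    // 0 < _recordingLength[i] < 160  -> the single partially filled slot,
    //                                   topped up first by incoming frames
    // _recordingLength[i] == 0       -> empty slot, second choice for inserts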
+
+OSStatus AudioDeviceIOS::PlayoutProcess(
+ void* inRefCon,
+ AudioUnitRenderActionFlags* ioActionFlags,
+ const AudioTimeStamp* inTimeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList* ioData) {
+ AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
+ return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
+}
+
+OSStatus AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
+ AudioBufferList* ioData) {
+ int16_t* data = static_cast<int16_t*>(ioData->mBuffers[0].mData);
+ unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
+ unsigned int dataSize = dataSizeBytes / 2; // Number of samples
+ CHECK_EQ(dataSize, inNumberFrames);
+ memset(data, 0, dataSizeBytes); // Start with empty buffer
+
+ // Get playout data from Audio Device Buffer
+
+ if (_playing) {
+ unsigned int noSamp10ms = _adbSampFreq / 100;
+ // todo: Member variable and allocate when samp freq is determined
+ int16_t* dataTmp = new int16_t[noSamp10ms];
+ memset(dataTmp, 0, 2 * noSamp10ms);
+ unsigned int dataPos = 0;
+ int noSamplesOut = 0;
+ unsigned int nCopy = 0;
+
+ // First insert data from playout buffer if any
+ if (_playoutBufferUsed > 0) {
+ nCopy = (dataSize < _playoutBufferUsed) ? dataSize : _playoutBufferUsed;
+ DCHECK_EQ(nCopy, _playoutBufferUsed);
+ memcpy(data, _playoutBuffer, 2 * nCopy);
+ dataPos = nCopy;
+ memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
+ _playoutBufferUsed = 0;
+ }
+
+ // Now get the rest from Audio Device Buffer.
+ while (dataPos < dataSize) {
+ // Update playout delay
+ UpdatePlayoutDelay();
+
+ // Ask for new PCM data to be played out using the AudioDeviceBuffer
+ noSamplesOut = audio_device_buffer_->RequestPlayoutData(noSamp10ms);
+
+ // Get data from Audio Device Buffer
+ noSamplesOut = audio_device_buffer_->GetPlayoutData(
+ reinterpret_cast<int8_t*>(dataTmp));
+ CHECK_EQ(noSamp10ms, (unsigned int)noSamplesOut);
+
+ // Insert as much as fits in data buffer
+ nCopy =
+ (dataSize - dataPos) > noSamp10ms ? noSamp10ms : (dataSize - dataPos);
+ memcpy(&data[dataPos], dataTmp, 2 * nCopy);
+
+ // Save rest in playout buffer if any
+ if (nCopy < noSamp10ms) {
+ memcpy(_playoutBuffer, &dataTmp[nCopy], 2 * (noSamp10ms - nCopy));
+ _playoutBufferUsed = noSamp10ms - nCopy;
+ }
- return 0;
+ // Update the loop/index counter; if we copied fewer than noSamp10ms
+ // samples, the loop exits anyway.
+ dataPos += noSamp10ms;
+ }
+ delete[] dataTmp;
+ }
+ return 0;
}
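A concrete pass through the loop above, with hypothetical numbers:

    // inNumberFrames = 256 at 16 kHz (noSamp10ms = 160), and 64 leftover
    // samples sitting in _playoutBuffer from the previous callback:
    //   1) copy the 64 leftover samples          -> dataPos = 64
    //   2) fetch 160 samples, copy all of them   -> dataPos = 224
    //   3) fetch 160 samples, copy 32, stash the
    //      remaining 128 in _playoutBuffer       -> dataPos = 384, loop ends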
-int16_t AudioDeviceIOS::RecordingDevices() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
+// TODO(henrika): can either be removed or simplified.
+void AudioDeviceIOS::UpdatePlayoutDelay() {
+ ++_playoutDelayMeasurementCounter;
- return (int16_t)1;
-}
+ if (_playoutDelayMeasurementCounter >= 100) {
+ // Update HW and OS delay every second, unlikely to change
-int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetRecordingDevice(index=%u)", index);
+ // Since this is eventually rounded to integral ms, add 0.5ms
+ // here to get round-to-nearest-int behavior instead of
+ // truncation.
+ double totalDelaySeconds = 0.0005;
- if (_recIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Recording already initialized");
- return -1;
+ // HW output latency
+ AVAudioSession* session = [AVAudioSession sharedInstance];
+ double latency = session.outputLatency;
+ assert(latency >= 0);
+ totalDelaySeconds += latency;
+
+ // HW buffer duration
+ double ioBufferDuration = session.IOBufferDuration;
+ assert(ioBufferDuration >= 0);
+ totalDelaySeconds += ioBufferDuration;
+
+ // AU latency
+ Float64 f64(0);
+ UInt32 size = sizeof(f64);
+ OSStatus result =
+ AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency,
+ kAudioUnitScope_Global, 0, &f64, &size);
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "AU latency error: " << result;
}
+ assert(f64 >= 0);
+ totalDelaySeconds += f64;
- if (index !=0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " SetRecordingDevice invalid index");
- return -1;
- }
+ // To ms
+ _playoutDelay = static_cast<uint32_t>(totalDelaySeconds * 1000);
- _recordingDeviceIsSpecified = true;
+ // Reset counter
+ _playoutDelayMeasurementCounter = 0;
+ }
- return 0;
+ // todo: Add playout buffer?
}
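Plugging hypothetical hardware numbers into the steps above shows the scale of the result:

    // outputLatency = 0.012 s, IOBufferDuration = 0.016 s, AU latency = 0.005 s
    //   totalDelaySeconds = 0.0005 + 0.012 + 0.016 + 0.005 = 0.0335
    //   _playoutDelay     = (uint32_t)(0.0335 * 1000)      = 33 ms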
-int32_t
- AudioDeviceIOS::SetRecordingDevice(
- AudioDeviceModule::WindowsDeviceType) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "WindowsDeviceType not supported");
- return -1;
-}
+void AudioDeviceIOS::UpdateRecordingDelay() {
+ ++_recordingDelayMeasurementCounter;
-// ----------------------------------------------------------------------------
-// SetLoudspeakerStatus
-//
-// Change the default receiver playout route to speaker.
-//
-// ----------------------------------------------------------------------------
+ if (_recordingDelayMeasurementCounter >= 100) {
+ // Update HW and OS delay every second, unlikely to change
-int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetLoudspeakerStatus(enable=%d)", enable);
+ // Since this is eventually rounded to integral ms, add 0.5ms
+ // here to get round-to-nearest-int behavior instead of
+ // truncation.
+ double totalDelaySeconds = 0.0005;
+ // HW input latency
AVAudioSession* session = [AVAudioSession sharedInstance];
- NSString* category = session.category;
- AVAudioSessionCategoryOptions options = session.categoryOptions;
- // Respect old category options if category is
- // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
- // might not be valid for this category.
- if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
- if (enable) {
- options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
- } else {
- options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
- }
- } else {
- options = AVAudioSessionCategoryOptionDefaultToSpeaker;
- }
-
- NSError* error = nil;
- [session setCategory:AVAudioSessionCategoryPlayAndRecord
- withOptions:options
- error:&error];
- if (error != nil) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "Error changing default output route ");
- return -1;
+ double latency = session.inputLatency;
+ assert(latency >= 0);
+ totalDelaySeconds += latency;
+
+ // HW buffer duration
+ double ioBufferDuration = session.IOBufferDuration;
+ assert(ioBufferDuration >= 0);
+ totalDelaySeconds += ioBufferDuration;
+
+ // AU latency
+ Float64 f64(0);
+ UInt32 size = sizeof(f64);
+ OSStatus result =
+ AudioUnitGetProperty(_auVoiceProcessing, kAudioUnitProperty_Latency,
+ kAudioUnitScope_Global, 0, &f64, &size);
+ if (0 != result) {
+ LOG_F(LS_ERROR) << "AU latency error: " << result;
}
+ assert(f64 >= 0);
+ totalDelaySeconds += f64;
- return 0;
-}
+ // To ms
+ _recordingDelayHWAndOS = static_cast<uint32_t>(totalDelaySeconds * 1000);
-int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool &enabled) const {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetLoudspeakerStatus(enabled=?)");
+ // Reset counter
+ _recordingDelayMeasurementCounter = 0;
+ }
- AVAudioSession* session = [AVAudioSession sharedInstance];
- AVAudioSessionCategoryOptions options = session.categoryOptions;
- enabled = options & AVAudioSessionCategoryOptionDefaultToSpeaker;
+ _recordingDelay = _recordingDelayHWAndOS;
- return 0;
+ // ADB recording buffer size; updated on every call.
+ // Don't count the next 10 ms block that is about to be delivered, then
+ // convert samples => ms.
+ const uint32_t noSamp10ms = _adbSampFreq / 100;
+ if (_recordingBufferTotalSize > noSamp10ms) {
+ _recordingDelay +=
+ (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
+ }
}
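The backlog term at the end converts queued samples to milliseconds; for example:

    // Hypothetical backlog: _adbSampFreq = 16000 and 480 samples queued.
    //   noSamp10ms  = 16000 / 100 = 160
    //   extra delay = (480 - 160) / (16000 / 1000) = 320 / 16 = 20 ms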
-int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- available = false;
-
- // Try to initialize the playout side
- int32_t res = InitPlayout();
-
- // Cancel effect of initialization
- StopPlayout();
-
- if (res != -1) {
- available = true;
- }
-
- return 0;
+bool AudioDeviceIOS::RunCapture(void* ptrThis) {
+ return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
}
-int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
+bool AudioDeviceIOS::CaptureWorkerThread() {
+ if (_recording) {
+ int bufPos = 0;
+ unsigned int lowestSeq = 0;
+ int lowestSeqBufPos = 0;
+ bool foundBuf = true;
+ const unsigned int noSamp10ms = _adbSampFreq / 100;
+
+ while (foundBuf) {
+ // Check if we have any buffer with data to insert
+ // into the Audio Device Buffer,
+ // and find the one with the lowest seq number
+ foundBuf = false;
+ for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
+ if (noSamp10ms == _recordingLength[bufPos]) {
+ if (!foundBuf) {
+ lowestSeq = _recordingSeqNumber[bufPos];
+ lowestSeqBufPos = bufPos;
+ foundBuf = true;
+ } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
+ lowestSeq = _recordingSeqNumber[bufPos];
+ lowestSeqBufPos = bufPos;
+ }
+ }
+ }
- available = false;
+ // Insert data into the Audio Device Buffer if any was found
+ if (foundBuf) {
+ // Update recording delay
+ UpdateRecordingDelay();
- // Try to initialize the recording side
- int32_t res = InitRecording();
+ // Set the recorded buffer
+ audio_device_buffer_->SetRecordedBuffer(
+ reinterpret_cast<int8_t*>(_recordingBuffer[lowestSeqBufPos]),
+ _recordingLength[lowestSeqBufPos]);
- // Cancel effect of initialization
- StopRecording();
+ // Don't need to set the current mic level in ADB since we only
+ // support digital AGC, and besides we cannot get or set the iOS
+ // mic level anyway.
- if (res != -1) {
- available = true;
- }
+ // Set VQE info, use clockdrift == 0
+ audio_device_buffer_->SetVQEData(_playoutDelay, _recordingDelay, 0);
- return 0;
-}
+ // Deliver recorded samples at specified sample rate, mic level
+ // etc. to the observer using callback
+ audio_device_buffer_->DeliverRecordedData();
-int32_t AudioDeviceIOS::InitPlayout() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_initialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, " Not initialized");
- return -1;
- }
-
- if (_playing) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Playout already started");
- return -1;
- }
-
- if (_playIsInitialized) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Playout already initialized");
- return 0;
- }
-
- if (!_playoutDeviceIsSpecified) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Playout device is not specified");
- return -1;
- }
-
- // Initialize the speaker
- if (InitSpeaker() == -1) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " InitSpeaker() failed");
- }
-
- _playIsInitialized = true;
-
- if (!_recIsInitialized) {
- // Audio init
- if (InitPlayOrRecord() == -1) {
- // todo: Handle error
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " InitPlayOrRecord() failed");
- }
- } else {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Recording already initialized - InitPlayOrRecord() not called");
- }
-
- return 0;
-}
-
-bool AudioDeviceIOS::PlayoutIsInitialized() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- return (_playIsInitialized);
-}
-
-int32_t AudioDeviceIOS::InitRecording() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_initialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Not initialized");
- return -1;
- }
-
- if (_recording) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Recording already started");
- return -1;
- }
-
- if (_recIsInitialized) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Recording already initialized");
- return 0;
- }
-
- if (!_recordingDeviceIsSpecified) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Recording device is not specified");
- return -1;
- }
-
- // Initialize the microphone
- if (InitMicrophone() == -1) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " InitMicrophone() failed");
- }
-
- _recIsInitialized = true;
-
- if (!_playIsInitialized) {
- // Audio init
- if (InitPlayOrRecord() == -1) {
- // todo: Handle error
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " InitPlayOrRecord() failed");
- }
- } else {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Playout already initialized - InitPlayOrRecord() " \
- "not called");
- }
-
- return 0;
-}
-
-bool AudioDeviceIOS::RecordingIsInitialized() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- return (_recIsInitialized);
-}
-
-int32_t AudioDeviceIOS::StartRecording() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_recIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Recording not initialized");
- return -1;
- }
-
- if (_recording) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Recording already started");
- return 0;
- }
-
- // Reset recording buffer
- memset(_recordingBuffer, 0, sizeof(_recordingBuffer));
- memset(_recordingLength, 0, sizeof(_recordingLength));
- memset(_recordingSeqNumber, 0, sizeof(_recordingSeqNumber));
- _recordingCurrentSeq = 0;
- _recordingBufferTotalSize = 0;
- _recordingDelay = 0;
- _recordingDelayHWAndOS = 0;
- // Make sure first call to update delay function will update delay
- _recordingDelayMeasurementCounter = 9999;
- _recWarning = 0;
- _recError = 0;
-
- if (!_playing) {
- // Start Audio Unit
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Starting Audio Unit");
- OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- " Error starting Audio Unit (result=%d)", result);
- return -1;
- }
- }
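-
- // Note: playout and recording share one voice-processing audio unit;
- // the guard above avoids starting it twice.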
-
- _recording = true;
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::StopRecording() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_recIsInitialized) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Recording is not initialized");
- return 0;
- }
-
- _recording = false;
-
- if (!_playing) {
- // Both playout and recording have stopped; shut down the device.
- ShutdownPlayOrRecord();
- }
-
- _recIsInitialized = false;
- _micIsInitialized = false;
-
- return 0;
-}
-
-bool AudioDeviceIOS::Recording() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
- return (_recording);
-}
-
-int32_t AudioDeviceIOS::StartPlayout() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- // This lock is (among other things) needed to avoid concurrency issues
- // with the capture thread shutting down the Audio Unit.
- CriticalSectionScoped lock(&_critSect);
-
- if (!_playIsInitialized) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Playout not initialized");
- return -1;
- }
-
- if (_playing) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Playing already started");
- return 0;
- }
-
- // Reset playout buffer
- memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
- _playoutBufferUsed = 0;
- _playoutDelay = 0;
- // Make sure first call to update delay function will update delay
- _playoutDelayMeasurementCounter = 9999;
- _playWarning = 0;
- _playError = 0;
-
- if (!_recording) {
- // Start Audio Unit
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Starting Audio Unit");
- OSStatus result = AudioOutputUnitStart(_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
- " Error starting Audio Unit (result=%d)", result);
- return -1;
- }
- }
-
- _playing = true;
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::StopPlayout() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_playIsInitialized) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Playout is not initialized");
- return 0;
- }
-
- _playing = false;
-
- if (!_recording) {
- // Both playout and recording have stopped; shut down the device.
- ShutdownPlayOrRecord();
- }
-
- _playIsInitialized = false;
- _speakerIsInitialized = false;
-
- return 0;
-}
-
-bool AudioDeviceIOS::Playing() const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "%s", __FUNCTION__);
- return (_playing);
-}
-
-// ----------------------------------------------------------------------------
-// ResetAudioDevice
-//
-// Disable playout and recording, signal the capture thread to shut down,
-// and restore the enable states afterwards to their current values.
-// In the capture thread the audio device is shut down and then started
-// again.
-// ----------------------------------------------------------------------------
-int32_t AudioDeviceIOS::ResetAudioDevice() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- CriticalSectionScoped lock(&_critSect);
-
- if (!_playIsInitialized && !_recIsInitialized) {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Playout or recording not initialized, doing nothing");
- return 0; // Nothing to reset
- }
-
- // Store the states we have before stopping to restart below
- bool initPlay = _playIsInitialized;
- bool play = _playing;
- bool initRec = _recIsInitialized;
- bool rec = _recording;
-
- int res(0);
-
- // Stop playout and recording
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Stopping playout and recording");
- res += StopPlayout();
- res += StopRecording();
-
- // Restart
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Restarting playout and recording (%d, %d, %d, %d)",
- initPlay, play, initRec, rec);
- if (initPlay) res += InitPlayout();
- if (initRec) res += InitRecording();
- if (play) res += StartPlayout();
- if (rec) res += StartRecording();
-
- if (0 != res) {
- // Logging is done in init/start/stop calls above
- return -1;
- }
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
- delayMS = _playoutDelay;
- return 0;
-}
-
-int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
- delayMS = _recordingDelay;
- return 0;
-}
-
-int32_t
- AudioDeviceIOS::SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
- uint16_t sizeMS) {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
- "AudioDeviceIOS::SetPlayoutBuffer(type=%u, sizeMS=%u)",
- type, sizeMS);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-int32_t
- AudioDeviceIOS::PlayoutBuffer(AudioDeviceModule::BufferType& type,
- uint16_t& sizeMS) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- type = AudioDeviceModule::kAdaptiveBufferSize;
-
- sizeMS = _playoutDelay;
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::CPULoad(uint16_t& /*load*/) const {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " API call not supported on this platform");
- return -1;
-}
-
-bool AudioDeviceIOS::PlayoutWarning() const {
- return (_playWarning > 0);
-}
-
-bool AudioDeviceIOS::PlayoutError() const {
- return (_playError > 0);
-}
-
-bool AudioDeviceIOS::RecordingWarning() const {
- return (_recWarning > 0);
-}
-
-bool AudioDeviceIOS::RecordingError() const {
- return (_recError > 0);
-}
-
-void AudioDeviceIOS::ClearPlayoutWarning() {
- _playWarning = 0;
-}
-
-void AudioDeviceIOS::ClearPlayoutError() {
- _playError = 0;
-}
-
-void AudioDeviceIOS::ClearRecordingWarning() {
- _recWarning = 0;
-}
-
-void AudioDeviceIOS::ClearRecordingError() {
- _recError = 0;
-}
-
-// ============================================================================
-// Private Methods
-// ============================================================================
-
-int32_t AudioDeviceIOS::InitPlayOrRecord() {
- WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- OSStatus result = -1;
-
- // Check if already initialized
- if (NULL != _auVoiceProcessing) {
- // We have already initialized and created the audio unit;
- // nothing more to do here.
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Already initialized");
- // todo: Call AudioUnitReset() here and empty all buffers?
- return 0;
- }
-
- // Create Voice Processing Audio Unit
- AudioComponentDescription desc;
- AudioComponent comp;
-
- desc.componentType = kAudioUnitType_Output;
- desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
- desc.componentManufacturer = kAudioUnitManufacturer_Apple;
- desc.componentFlags = 0;
- desc.componentFlagsMask = 0;
-
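- // Note: kAudioUnitSubType_VoiceProcessingIO differs from plain RemoteIO
- // in that it adds echo cancellation and other voice processing (such as
- // AGC) on the microphone path.
-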
- comp = AudioComponentFindNext(NULL, &desc);
- if (NULL == comp) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not find audio component for Audio Unit");
- return -1;
- }
-
- result = AudioComponentInstanceNew(comp, &_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not create Audio Unit instance (result=%d)",
- result);
- return -1;
- }
-
- // Set preferred hardware sample rate to 16 kHz
- NSError* error = nil;
- AVAudioSession* session = [AVAudioSession sharedInstance];
- Float64 preferredSampleRate(16000.0);
- [session setPreferredSampleRate:preferredSampleRate
- error:&error];
- if (error != nil) {
- const char* errorString = [[error localizedDescription] UTF8String];
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Could not set preferred sample rate: %s", errorString);
- }
- error = nil;
- // Make the setMode:error: and setCategory:error: calls only if necessary.
- // Non-obviously, setting them to the value they already have will clear
- // transient properties (such as PortOverride) that some other component may
- // have set up.
- if (session.mode != AVAudioSessionModeVoiceChat) {
- [session setMode:AVAudioSessionModeVoiceChat error:&error];
- if (error != nil) {
- const char* errorString = [[error localizedDescription] UTF8String];
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Could not set mode: %s", errorString);
+ // Make buffer available
+ _recordingSeqNumber[lowestSeqBufPos] = 0;
+ _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
+ // Must be done last to avoid interrupt problems between threads
+ _recordingLength[lowestSeqBufPos] = 0;
}
}
- error = nil;
- if (session.category != AVAudioSessionCategoryPlayAndRecord) {
- [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
- if (error != nil) {
- const char* errorString = [[error localizedDescription] UTF8String];
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- "Could not set category: %s", errorString);
- }
- }
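-
- // Note: the PlayAndRecord category enables simultaneous input and
- // output, which the voice-processing unit requires.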
-
- //////////////////////
- // Setup Voice Processing Audio Unit
-
- // Note: For the Voice Processing AU, element 0 is the output bus and
- //       element 1 is the input bus; for global scope the element is
- //       irrelevant (always use element 0).
-
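- // A rough sketch of the unit's bus layout:
- //
- //                    ---------------------------
- //  mic ---> in/1 --->|                         |---> out/1 ---> app
- //                    |   VoiceProcessingIO AU  |     (RecordProcess)
- //  app ---> in/0 --->|                         |---> out/0 ---> speaker
- // (PlayoutProcess)   ---------------------------
- //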
- // Enable IO on both elements
-
- // todo: Below we just log and continue upon error. We might want
- // to close AU and return error for some cases.
- // todo: Log info about setup.
-
- UInt32 enableIO = 1;
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Input,
- 1, // input bus
- &enableIO,
- sizeof(enableIO));
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not enable IO on input (result=%d)", result);
- }
-
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioOutputUnitProperty_EnableIO,
- kAudioUnitScope_Output,
- 0, // output bus
- &enableIO,
- sizeof(enableIO));
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not enable IO on output (result=%d)", result);
- }
-
- // Disable AU buffer allocation for the recorder, we allocate our own
- UInt32 flag = 0;
- result = AudioUnitSetProperty(
- _auVoiceProcessing, kAudioUnitProperty_ShouldAllocateBuffer,
- kAudioUnitScope_Output, 1, &flag, sizeof(flag));
- if (0 != result) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Could not disable AU buffer allocation (result=%d)",
- result);
- // Should work anyway
- }
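-
- // Note: with AU buffer allocation disabled, AudioUnitRender() in
- // RecordProcessImpl() must be handed an AudioBufferList to fill
- // (dataTmp there) instead of using an AU-owned buffer.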
-
- // Set recording callback
- AURenderCallbackStruct auCbS;
- memset(&auCbS, 0, sizeof(auCbS));
- auCbS.inputProc = RecordProcess;
- auCbS.inputProcRefCon = this;
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioOutputUnitProperty_SetInputCallback,
- kAudioUnitScope_Global, 1,
- &auCbS, sizeof(auCbS));
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set record callback for Audio Unit (result=%d)",
- result);
- }
-
- // Set playout callback
- memset(&auCbS, 0, sizeof(auCbS));
- auCbS.inputProc = PlayoutProcess;
- auCbS.inputProcRefCon = this;
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioUnitProperty_SetRenderCallback,
- kAudioUnitScope_Global, 0,
- &auCbS, sizeof(auCbS));
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set play callback for Audio Unit (result=%d)",
- result);
- }
-
- // Get stream format for out/0
- AudioStreamBasicDescription playoutDesc;
- UInt32 size = sizeof(playoutDesc);
- result = AudioUnitGetProperty(_auVoiceProcessing,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output, 0, &playoutDesc,
- &size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not get stream format Audio Unit out/0 (result=%d)",
- result);
- }
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Audio Unit playout opened in sampling rate %f",
- playoutDesc.mSampleRate);
-
- playoutDesc.mSampleRate = preferredSampleRate;
-
- // Store the sampling frequency to use towards the Audio Device Buffer
- // todo: Add 48 kHz (increase buffer sizes). Other fs?
- if ((playoutDesc.mSampleRate > 44090.0)
- && (playoutDesc.mSampleRate < 44110.0)) {
- _adbSampFreq = 44100;
- } else if ((playoutDesc.mSampleRate > 15990.0)
- && (playoutDesc.mSampleRate < 16010.0)) {
- _adbSampFreq = 16000;
- } else if ((playoutDesc.mSampleRate > 7990.0)
- && (playoutDesc.mSampleRate < 8010.0)) {
- _adbSampFreq = 8000;
- } else {
- _adbSampFreq = 0;
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Audio Unit out/0 opened in unknown sampling rate (%f)",
- playoutDesc.mSampleRate);
- // todo: We should bail out here.
- }
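- // The +/-10 Hz windows above allow for floating-point rounding in the
- // sample rate reported by the hardware.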
-
- // Set the audio device buffer sampling rate;
- // we assume the same rate is used for playout and recording.
- if (_ptrAudioBuffer->SetRecordingSampleRate(_adbSampFreq) < 0) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set audio device buffer recording sampling rate (%d)",
- _adbSampFreq);
- }
-
- if (_ptrAudioBuffer->SetPlayoutSampleRate(_adbSampFreq) < 0) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set audio device buffer playout sampling rate (%d)",
- _adbSampFreq);
- }
-
- // Set stream format for in/0 (use same sampling frequency as for out/0)
- playoutDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
- | kLinearPCMFormatFlagIsPacked
- | kLinearPCMFormatFlagIsNonInterleaved;
- playoutDesc.mBytesPerPacket = 2;
- playoutDesc.mFramesPerPacket = 1;
- playoutDesc.mBytesPerFrame = 2;
- playoutDesc.mChannelsPerFrame = 1;
- playoutDesc.mBitsPerChannel = 16;
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input, 0, &playoutDesc, size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set stream format Audio Unit in/0 (result=%d)",
- result);
- }
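-
- // The ASBD above describes packed 16-bit mono linear PCM: one frame is
- // one sample per channel, so mBytesPerFrame = mChannelsPerFrame *
- // (mBitsPerChannel / 8) = 1 * 2 bytes.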
-
- // Get stream format for in/1
- AudioStreamBasicDescription recordingDesc;
- size = sizeof(recordingDesc);
- result = AudioUnitGetProperty(_auVoiceProcessing,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Input, 1, &recordingDesc,
- &size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not get stream format Audio Unit in/1 (result=%d)",
- result);
- }
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
- " Audio Unit recording opened in sampling rate %f",
- recordingDesc.mSampleRate);
-
- recordingDesc.mSampleRate = preferredSampleRate;
-
- // Set stream format for out/1 (use same sampling frequency as for in/1)
- recordingDesc.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger
- | kLinearPCMFormatFlagIsPacked
- | kLinearPCMFormatFlagIsNonInterleaved;
-
- recordingDesc.mBytesPerPacket = 2;
- recordingDesc.mFramesPerPacket = 1;
- recordingDesc.mBytesPerFrame = 2;
- recordingDesc.mChannelsPerFrame = 1;
- recordingDesc.mBitsPerChannel = 16;
- result = AudioUnitSetProperty(_auVoiceProcessing,
- kAudioUnitProperty_StreamFormat,
- kAudioUnitScope_Output, 1, &recordingDesc,
- size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not set stream format Audio Unit out/1 (result=%d)",
- result);
- }
-
- // Initialize here already to be able to get/set stream properties.
- result = AudioUnitInitialize(_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- " Could not init Audio Unit (result=%d)", result);
- }
-
- // Get hardware sample rate for logging (see if we get what we asked for)
- double sampleRate = session.sampleRate;
- WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
- " Current HW sample rate is %f, ADB sample rate is %d",
- sampleRate, _adbSampFreq);
-
- // Listen to audio interruptions.
- NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
- id observer =
- [center addObserverForName:AVAudioSessionInterruptionNotification
- object:nil
- queue:[NSOperationQueue mainQueue]
- usingBlock:^(NSNotification* notification) {
- NSNumber* typeNumber =
- [notification userInfo][AVAudioSessionInterruptionTypeKey];
- AVAudioSessionInterruptionType type =
- (AVAudioSessionInterruptionType)[typeNumber unsignedIntegerValue];
- switch (type) {
- case AVAudioSessionInterruptionTypeBegan:
- // At this point our audio session has been deactivated and the
- // audio unit render callbacks no longer occur. Nothing to do.
- break;
- case AVAudioSessionInterruptionTypeEnded: {
- NSError* error = nil;
- AVAudioSession* session = [AVAudioSession sharedInstance];
- [session setActive:YES
- error:&error];
- if (error != nil) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "Error activating audio session");
- }
- // Post interruption the audio unit render callbacks don't
- // automatically continue, so we restart the unit manually here.
- AudioOutputUnitStop(_auVoiceProcessing);
- AudioOutputUnitStart(_auVoiceProcessing);
- break;
- }
- }
- }];
- // Increment refcount on observer using ARC bridge. Instance variable is a
- // void* instead of an id because header is included in other pure C++
- // files.
- _audioInterruptionObserver = (__bridge_retained void*)observer;
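- // The matching __bridge_transfer in ShutdownPlayOrRecord() hands
- // ownership back to ARC, which releases the observer.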
-
- // Activate audio session.
- error = nil;
- [session setActive:YES
- error:&error];
- if (error != nil) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "Error activating audio session");
- }
-
- return 0;
-}
-
-int32_t AudioDeviceIOS::ShutdownPlayOrRecord() {
- WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
-
- if (_audioInterruptionObserver != NULL) {
- NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
- // Transfer ownership of observer back to ARC, which will dealloc the
- // observer once it exits this scope.
- id observer = (__bridge_transfer id)_audioInterruptionObserver;
- [center removeObserver:observer];
- _audioInterruptionObserver = NULL;
- }
-
- // Close and delete AU
- OSStatus result = -1;
- if (NULL != _auVoiceProcessing) {
- result = AudioOutputUnitStop(_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Error stopping Audio Unit (result=%d)", result);
- }
- result = AudioComponentInstanceDispose(_auVoiceProcessing);
- if (0 != result) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Error disposing Audio Unit (result=%d)", result);
- }
- _auVoiceProcessing = NULL;
- }
-
- return 0;
-}
-
-// ============================================================================
-// Thread Methods
-// ============================================================================
-
-OSStatus
- AudioDeviceIOS::RecordProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData) {
- AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
-
- return ptrThis->RecordProcessImpl(ioActionFlags,
- inTimeStamp,
- inBusNumber,
- inNumberFrames);
-}
-
-
-OSStatus
- AudioDeviceIOS::RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- uint32_t inBusNumber,
- uint32_t inNumberFrames) {
- // Render into a temporary buffer so the recording buffers are not
- // locked longer than necessary.
- // todo: Make dataTmp a member variable with static size that holds
- //       max possible frames?
- int16_t* dataTmp = new int16_t[inNumberFrames];
- memset(dataTmp, 0, 2*inNumberFrames);
-
- AudioBufferList abList;
- abList.mNumberBuffers = 1;
- abList.mBuffers[0].mData = dataTmp;
- abList.mBuffers[0].mDataByteSize = 2*inNumberFrames; // 2 bytes/sample
- abList.mBuffers[0].mNumberChannels = 1;
-
- // Get data from mic
- OSStatus res = AudioUnitRender(_auVoiceProcessing,
- ioActionFlags, inTimeStamp,
- inBusNumber, inNumberFrames, &abList);
- if (res != 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Error getting rec data, error = %d", res);
-
- if (_recWarning > 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Pending rec warning exists");
- }
- _recWarning = 1;
-
- delete [] dataTmp;
- return 0;
- }
-
- if (_recording) {
- // Insert all data from the temp buffer into the recording buffers.
- // At any given time there is at most one partially full buffer;
- // all others are either full (holding noSamp10ms samples) or empty.
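- // Example: at _adbSampFreq == 16000, noSamp10ms == 160. A callback
- // delivering 256 frames tops up the partially full buffer (if any)
- // and then starts filling an empty one, so complete 10 ms blocks
- // become available to the capture thread in sequence order.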
-
- const unsigned int noSamp10ms = _adbSampFreq / 100;
- unsigned int dataPos = 0;
- uint16_t bufPos = 0;
- int16_t insertPos = -1;
- unsigned int nCopy = 0; // Number of samples to copy
-
- while (dataPos < inNumberFrames) {
- // Loop over all recording buffers, stopping early at the partially
- // full buffer if one exists. First choice is to insert into the
- // partially full buffer; second choice is an empty buffer.
- bufPos = 0;
- insertPos = -1;
- nCopy = 0;
- while (bufPos < N_REC_BUFFERS) {
- if ((_recordingLength[bufPos] > 0)
- && (_recordingLength[bufPos] < noSamp10ms)) {
- // Found the partially full buffer
- insertPos = static_cast<int16_t>(bufPos);
- // Don't need to search more, quit loop
- bufPos = N_REC_BUFFERS;
- } else if ((-1 == insertPos)
- && (0 == _recordingLength[bufPos])) {
- // Found an empty buffer
- insertPos = static_cast<int16_t>(bufPos);
- }
- ++bufPos;
- }
-
- // Insert data into buffer
- if (insertPos > -1) {
- // We found a non-full buffer, copy data to it
- unsigned int dataToCopy = inNumberFrames - dataPos;
- unsigned int currentRecLen = _recordingLength[insertPos];
- unsigned int roomInBuffer = noSamp10ms - currentRecLen;
- nCopy = (dataToCopy < roomInBuffer ? dataToCopy : roomInBuffer);
-
- memcpy(&_recordingBuffer[insertPos][currentRecLen],
- &dataTmp[dataPos], nCopy*sizeof(int16_t));
- if (0 == currentRecLen) {
- _recordingSeqNumber[insertPos] = _recordingCurrentSeq;
- ++_recordingCurrentSeq;
- }
- _recordingBufferTotalSize += nCopy;
- // Has to be done last to avoid interrupt problems
- // between threads
- _recordingLength[insertPos] += nCopy;
- dataPos += nCopy;
- } else {
- // Didn't find a non-full buffer
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Could not insert into recording buffer");
- if (_recWarning > 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Pending rec warning exists");
- }
- _recWarning = 1;
- dataPos = inNumberFrames; // Don't try to insert more
- }
- }
- }
-
- delete [] dataTmp;
-
- return 0;
-}
-
-OSStatus
- AudioDeviceIOS::PlayoutProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *inTimeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData) {
- AudioDeviceIOS* ptrThis = static_cast<AudioDeviceIOS*>(inRefCon);
-
- return ptrThis->PlayoutProcessImpl(inNumberFrames, ioData);
-}
-
-OSStatus
- AudioDeviceIOS::PlayoutProcessImpl(uint32_t inNumberFrames,
- AudioBufferList *ioData) {
- // The implementation assumes 16-bit (2 bytes per sample) linear PCM.
-
- int16_t* data =
- static_cast<int16_t*>(ioData->mBuffers[0].mData);
- unsigned int dataSizeBytes = ioData->mBuffers[0].mDataByteSize;
- unsigned int dataSize = dataSizeBytes/2; // Number of samples
- if (dataSize != inNumberFrames) { // Should always be the same
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "dataSize (%u) != inNumberFrames (%u)",
- dataSize, (unsigned int)inNumberFrames);
- if (_playWarning > 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Pending play warning exists");
- }
- _playWarning = 1;
- }
- memset(data, 0, dataSizeBytes); // Start with empty buffer
-
-
- // Get playout data from Audio Device Buffer
-
- if (_playing) {
- unsigned int noSamp10ms = _adbSampFreq / 100;
- // todo: Member variable and allocate when samp freq is determined
- int16_t* dataTmp = new int16_t[noSamp10ms];
- memset(dataTmp, 0, 2*noSamp10ms);
- unsigned int dataPos = 0;
- int noSamplesOut = 0;
- unsigned int nCopy = 0;
-
- // First insert data from playout buffer if any
- if (_playoutBufferUsed > 0) {
- nCopy = (dataSize < _playoutBufferUsed) ?
- dataSize : _playoutBufferUsed;
- if (nCopy != _playoutBufferUsed) {
- // todo: If dataSize < _playoutBufferUsed
- //       (which should normally never happen)
- //       we must move the remaining data.
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "nCopy (%u) != _playoutBufferUsed (%u)",
- nCopy, _playoutBufferUsed);
- if (_playWarning > 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Pending play warning exists");
- }
- _playWarning = 1;
- }
- memcpy(data, _playoutBuffer, 2*nCopy);
- dataPos = nCopy;
- memset(_playoutBuffer, 0, sizeof(_playoutBuffer));
- _playoutBufferUsed = 0;
- }
-
- // Now get the rest from Audio Device Buffer
- while (dataPos < dataSize) {
- // Update playout delay
- UpdatePlayoutDelay();
-
- // Ask for new PCM data to be played out using the AudioDeviceBuffer
- noSamplesOut = _ptrAudioBuffer->RequestPlayoutData(noSamp10ms);
-
- // Get data from Audio Device Buffer
- noSamplesOut =
- _ptrAudioBuffer->GetPlayoutData(
- reinterpret_cast<int8_t*>(dataTmp));
- // Cast OK since only equality comparison
- if (noSamp10ms != (unsigned int)noSamplesOut) {
- // Should never happen
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "noSamp10ms (%u) != noSamplesOut (%d)",
- noSamp10ms, noSamplesOut);
-
- if (_playWarning > 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- " Pending play warning exists");
- }
- _playWarning = 1;
- }
-
- // Insert as much as fits in data buffer
- nCopy = (dataSize-dataPos) > noSamp10ms ?
- noSamp10ms : (dataSize-dataPos);
- memcpy(&data[dataPos], dataTmp, 2*nCopy);
-
- // Save rest in playout buffer if any
- if (nCopy < noSamp10ms) {
- memcpy(_playoutBuffer, &dataTmp[nCopy], 2*(noSamp10ms-nCopy));
- _playoutBufferUsed = noSamp10ms - nCopy;
- }
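- // Example: with dataSize == 128 and noSamp10ms == 160, one 10 ms
- // block is fetched, 128 samples go to the output and the remaining
- // 32 are parked in _playoutBuffer for the next render callback.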
-
- // Update the loop counter; if we copied fewer than noSamp10ms
- // samples we exit the loop anyway.
- dataPos += noSamp10ms;
- }
-
- delete [] dataTmp;
- }
-
- return 0;
-}
-
-void AudioDeviceIOS::UpdatePlayoutDelay() {
- ++_playoutDelayMeasurementCounter;
-
- if (_playoutDelayMeasurementCounter >= 100) {
- // Update HW and OS delay every second, unlikely to change
-
- // Since this is eventually rounded to integral ms, add 0.5ms
- // here to get round-to-nearest-int behavior instead of
- // truncation.
- double totalDelaySeconds = 0.0005;
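- // Example: a true delay of 12.6 ms becomes 13.1 ms and truncates
- // to 13; without the 0.5 ms bias it would truncate to 12.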
-
- // HW output latency
- AVAudioSession* session = [AVAudioSession sharedInstance];
- double latency = session.outputLatency;
- assert(latency >= 0);
- totalDelaySeconds += latency;
-
- // HW buffer duration
- double ioBufferDuration = session.IOBufferDuration;
- assert(ioBufferDuration >= 0);
- totalDelaySeconds += ioBufferDuration;
-
- // AU latency
- Float64 f64(0);
- UInt32 size = sizeof(f64);
- OSStatus result = AudioUnitGetProperty(
- _auVoiceProcessing, kAudioUnitProperty_Latency,
- kAudioUnitScope_Global, 0, &f64, &size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error AU latency (result=%d)", result);
- }
- assert(f64 >= 0);
- totalDelaySeconds += f64;
-
- // Convert seconds to ms (multiply, not divide, by 1000).
- _playoutDelay = static_cast<uint32_t>(totalDelaySeconds * 1000);
-
- // Reset counter
- _playoutDelayMeasurementCounter = 0;
- }
-
- // todo: Add playout buffer?
-}
-
-void AudioDeviceIOS::UpdateRecordingDelay() {
- ++_recordingDelayMeasurementCounter;
-
- if (_recordingDelayMeasurementCounter >= 100) {
- // Update HW and OS delay every second, unlikely to change
-
- // Since this is eventually rounded to integral ms, add 0.5ms
- // here to get round-to-nearest-int behavior instead of
- // truncation.
- double totalDelaySeconds = 0.0005;
-
- // HW input latency
- AVAudioSession* session = [AVAudioSession sharedInstance];
- double latency = session.inputLatency;
- assert(latency >= 0);
- totalDelaySeconds += latency;
-
- // HW buffer duration
- double ioBufferDuration = session.IOBufferDuration;
- assert(ioBufferDuration >= 0);
- totalDelaySeconds += ioBufferDuration;
-
- // AU latency
- Float64 f64(0);
- UInt32 size = sizeof(f64);
- OSStatus result = AudioUnitGetProperty(
- _auVoiceProcessing, kAudioUnitProperty_Latency,
- kAudioUnitScope_Global, 0, &f64, &size);
- if (0 != result) {
- WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
- "error AU latency (result=%d)", result);
- }
- assert(f64 >= 0);
- totalDelaySeconds += f64;
-
- // Convert seconds to ms (multiply, not divide, by 1000).
- _recordingDelayHWAndOS =
- static_cast<uint32_t>(totalDelaySeconds * 1000);
-
- // Reset counter
- _recordingDelayMeasurementCounter = 0;
- }
-
- _recordingDelay = _recordingDelayHWAndOS;
-
- // ADB recording buffer size, updated on every call.
- // Don't count the next 10 ms block about to be sent; convert the
- // remaining samples to ms.
- const uint32_t noSamp10ms = _adbSampFreq / 100;
- if (_recordingBufferTotalSize > noSamp10ms) {
- _recordingDelay +=
- (_recordingBufferTotalSize - noSamp10ms) / (_adbSampFreq / 1000);
- }
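- // Example: at 16 kHz with 480 samples buffered, (480 - 160) / 16
- // adds 20 ms on top of the HW and OS delay.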
-}
-
-bool AudioDeviceIOS::RunCapture(void* ptrThis) {
- return static_cast<AudioDeviceIOS*>(ptrThis)->CaptureWorkerThread();
-}
-
-bool AudioDeviceIOS::CaptureWorkerThread() {
- if (_recording) {
- int bufPos = 0;
- unsigned int lowestSeq = 0;
- int lowestSeqBufPos = 0;
- bool foundBuf = true;
- const unsigned int noSamp10ms = _adbSampFreq / 100;
-
- while (foundBuf) {
- // Check if we have any buffer with data to insert
- // into the Audio Device Buffer,
- // and find the one with the lowest seq number
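- // The sequence numbers assigned in RecordProcessImpl() preserve
- // capture order even when several buffers fill between wakeups of
- // this worker thread.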
- foundBuf = false;
- for (bufPos = 0; bufPos < N_REC_BUFFERS; ++bufPos) {
- if (noSamp10ms == _recordingLength[bufPos]) {
- if (!foundBuf) {
- lowestSeq = _recordingSeqNumber[bufPos];
- lowestSeqBufPos = bufPos;
- foundBuf = true;
- } else if (_recordingSeqNumber[bufPos] < lowestSeq) {
- lowestSeq = _recordingSeqNumber[bufPos];
- lowestSeqBufPos = bufPos;
- }
- }
- } // for
-
- // Insert data into the Audio Device Buffer if found any
- if (foundBuf) {
- // Update recording delay
- UpdateRecordingDelay();
-
- // Set the recorded buffer
- _ptrAudioBuffer->SetRecordedBuffer(
- reinterpret_cast<int8_t*>(
- _recordingBuffer[lowestSeqBufPos]),
- _recordingLength[lowestSeqBufPos]);
-
- // No need to set the current mic level in the ADB since we only
- // support digital AGC, and we cannot get or set the iOS mic level
- // anyway.
-
- // Set VQE info, use clockdrift == 0
- _ptrAudioBuffer->SetVQEData(_playoutDelay, _recordingDelay, 0);
-
- // Deliver recorded samples at specified sample rate, mic level
- // etc. to the observer using callback
- _ptrAudioBuffer->DeliverRecordedData();
-
- // Make buffer available
- _recordingSeqNumber[lowestSeqBufPos] = 0;
- _recordingBufferTotalSize -= _recordingLength[lowestSeqBufPos];
- // Must be done last to avoid interrupt problems between threads
- _recordingLength[lowestSeqBufPos] = 0;
- }
- } // while (foundBuf)
- } // if (_recording)
-
- {
- // Normal case
- // Sleep thread (5ms) to let other threads get to work
- // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio
- // Device Buffer?
- timespec t;
- t.tv_sec = 0;
- t.tv_nsec = 5*1000*1000;
- nanosleep(&t, NULL);
- }
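-
- // Note: 5 ms is half the 10 ms block size, so the thread normally
- // wakes at least once per completed recording buffer.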
-
- return true;
+ }
+
+ {
+ // Normal case
+ // Sleep thread (5ms) to let other threads get to work
+ // todo: Is 5 ms optimal? Sleep shorter if inserted into the Audio
+ // Device Buffer?
+ timespec t;
+ t.tv_sec = 0;
+ t.tv_nsec = 5 * 1000 * 1000;
+ nanosleep(&t, nullptr);
+ }
+ return true;
}
} // namespace webrtc
