Chromium Code Reviews

Unified Diff: webrtc/modules/audio_device/ios/audio_device_ios.h

Issue 1254883002: Refactor the AudioDevice for iOS and improve the performance and stability (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Improved error handling and added support for BT headsets (created 5 years, 3 months ago)
Index: webrtc/modules/audio_device/ios/audio_device_ios.h
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.h b/webrtc/modules/audio_device/ios/audio_device_ios.h
index 8b211325853c7c2148c7f271f2e8da6d6be5d478..6fa2d4a77fa66fa0ec443f83b38342a7d97a23cc 100644
--- a/webrtc/modules/audio_device/ios/audio_device_ios.h
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.h
@@ -8,26 +8,32 @@
* be found in the AUTHORS file in the root of the source tree.
*/
-#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
-#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
#include <AudioUnit/AudioUnit.h>
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_wrapper.h"
namespace webrtc {
-const uint32_t N_REC_SAMPLES_PER_SEC = 44100;
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100;
-
-const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100);
-const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
-
-// Number of 10 ms recording blocks in recording buffer
-const uint16_t N_REC_BUFFERS = 20;
+class FineAudioBuffer;
+
+// Implements full duplex 16-bit mono PCM audio support for iOS using a
+// Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit
+// supports audio echo cancellation. It also adds automatic gain control,
+// adjustment of voice-processing quality and muting.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All supported public methods must also be called on the same thread.
+// A thread checker will DCHECK if any supported method is called on an invalid
+// thread.
+//
+// Recorded audio will be delivered on a real-time internal I/O thread in the
+// audio unit. The audio unit will also ask for audio data to play out on this
+// same thread.
class AudioDeviceIOS : public AudioDeviceGeneric {
public:
AudioDeviceIOS();
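
The class comment above pins every supported public method to the construction thread. A minimal self-contained sketch of that contract, using std::thread::id in place of the rtc::ThreadChecker the real code relies on (the class and method names below are hypothetical, not part of this patch):

#include <cassert>
#include <cstdint>
#include <thread>

class ThreadAffineAudioDevice {
 public:
  ThreadAffineAudioDevice() : owner_(std::this_thread::get_id()) {}

  int32_t StartPlayout() {
    // Counterpart of the DCHECK performed via the thread checker in the real
    // implementation: every supported public method must run on the thread
    // the instance was created on.
    assert(std::this_thread::get_id() == owner_);
    return 0;
  }

 private:
  const std::thread::id owner_;  // Thread the instance was created on.
};
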
@@ -56,23 +62,28 @@ class AudioDeviceIOS : public AudioDeviceGeneric {
int32_t SetLoudspeakerStatus(bool enable) override;
int32_t GetLoudspeakerStatus(bool& enabled) const override;
- // TODO(henrika): investigate if we can reduce the complexity here.
- // Do we even need delay estimates?
+ // These methods return hard-coded delay values and not dynamic delay
+ // estimates. The reason is that iOS supports a built-in AEC and the WebRTC
+ // AEC will always be disabled in the Libjingle layer to avoid running two
+ // AEC implementations at the same time. It also saves resources to avoid
+ // updating these delay values continuously.
+ // TODO(henrika): it would be possible to mark these two methods as not
+ // implemented since they are only called for A/V-sync purposes today and
+ // A/V-sync is not supported on iOS. However, we avoid adding error messages
+ // to the log by using these dummy implementations instead.
int32_t PlayoutDelay(uint16_t& delayMS) const override;
int32_t RecordingDelay(uint16_t& delayMS) const override;
- int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
- uint16_t& sizeMS) const override;
-
- // These methods are unique for the iOS implementation.
-
// Native audio parameters stored during construction.
+ // These methods are unique for the iOS implementation.
int GetPlayoutAudioParameters(AudioParameters* params) const override;
int GetRecordAudioParameters(AudioParameters* params) const override;
- // These methods are currently not implemented on iOS.
- // See audio_device_not_implemented_ios.mm for dummy implementations.
+ // These methods are currently not fully implemented on iOS; see
+ // audio_device_not_implemented.cc for trivial implementations.
+ int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
+ uint16_t& sizeMS) const override;
int32_t ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const;
int32_t ResetAudioDevice() override;
int32_t PlayoutIsAvailable(bool& available) override;
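
Given the rationale above for hard-coded delays, the two overrides can simply hand back fixed values. A sketch under that assumption; the constant name and its value are illustrative only and are not taken from this patch:

#include <cstdint>

namespace {
// Hypothetical fixed estimate in milliseconds; the real value is defined by
// the implementation file, not by this sketch.
const uint16_t kFixedPlayoutDelayEstimate = 30;
}  // namespace

int32_t PlayoutDelaySketch(uint16_t& delayMS) {
  delayMS = kFixedPlayoutDelayEstimate;  // No dynamic measurement needed.
  return 0;                              // Success.
}
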
@@ -140,97 +151,132 @@ class AudioDeviceIOS : public AudioDeviceGeneric {
void ClearRecordingError() override{};
private:
- // TODO(henrika): try to remove these.
- void Lock() {
- _critSect.Enter();
- }
-
- void UnLock() {
- _critSect.Leave();
- }
-
- // Init and shutdown
- int32_t InitPlayOrRecord();
- int32_t ShutdownPlayOrRecord();
-
- void UpdateRecordingDelay();
- void UpdatePlayoutDelay();
-
- static OSStatus RecordProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- UInt32 inBusNumber,
- UInt32 inNumberFrames,
- AudioBufferList *ioData);
-
- static OSStatus PlayoutProcess(void *inRefCon,
- AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
+ // Uses current |_playoutParameters| and |_recordParameters| to inform the
+ // audio device buffer (ADB) about our internal audio parameters.
+ void UpdateAudioDeviceBuffer();
+
+ // Since the preferred audio parameters are only hints to the OS, the actual
+ // values may differ once the AVAudioSession has been activated. This method
+ // asks for the current hardware parameters and takes action if they differ
+ // from what we asked for initially. It also defines |_playoutParameters|
+ // and |_recordParameters|.
+ void SetupAudioBuffersForActiveAudioSession();
+
+ // Creates a Voice-Processing I/O unit and configures it for full-duplex
+ // audio. The stream format is selected to avoid internal resampling and to
+ // match WebRTC's 10 ms callback rate as closely as possible. This method
+ // also initializes the created audio unit.
+ bool SetupAndInitializeVoiceProcessingAudioUnit();
+
+ // Activates our audio session, creates and initializes the voice-processing
+ // audio unit and verifies that we got the preferred native audio parameters.
+ bool InitPlayOrRecord();
+
+ // Closes and deletes the voice-processing I/O unit.
+ bool ShutdownPlayOrRecord();
+
+ // Callback function called on a real-time priority I/O thread from the audio
+ // unit. This method is used to signal that recorded audio is available.
+ static OSStatus RecordedDataIsAvailable(
+ void* inRefCon,
+ AudioUnitRenderActionFlags* ioActionFlags,
+ const AudioTimeStamp* timeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames,
+ AudioBufferList* ioData);
+ OSStatus OnRecordedDataIsAvailable(AudioUnitRenderActionFlags* ioActionFlags,
+ const AudioTimeStamp* timeStamp,
+ UInt32 inBusNumber,
+ UInt32 inNumberFrames);
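
The static callback declared above is the usual Core Audio trampoline: the void* context registered with the audio unit is cast back to the instance, which then pulls the recorded samples itself with AudioUnitRender(). A sketch of that pattern; the class name and members below are hypothetical and the real patch forwards to OnRecordedDataIsAvailable():

#include <AudioUnit/AudioUnit.h>

class AudioDeviceSketch {
 public:
  static OSStatus RecordedDataIsAvailable(void* inRefCon,
                                          AudioUnitRenderActionFlags* flags,
                                          const AudioTimeStamp* timeStamp,
                                          UInt32 busNumber,
                                          UInt32 numFrames,
                                          AudioBufferList* /*ioData*/) {
    // |inRefCon| was registered as |this| when the callback was installed.
    auto* self = static_cast<AudioDeviceSketch*>(inRefCon);
    // On the input bus the callback must pull the recorded samples itself.
    return AudioUnitRender(self->io_unit_, flags, timeStamp, busNumber,
                           numFrames, &self->record_buffer_list_);
  }

 private:
  AudioUnit io_unit_ = nullptr;           // Voice-processing I/O unit.
  AudioBufferList record_buffer_list_ = {};  // Destination for rendered data.
};
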
+
+ // Callback function called on a real-time priority I/O thread from the audio
+ // unit. This method is used to provide audio samples to the audio unit.
+ static OSStatus GetPlayoutData(void* inRefCon,
+ AudioUnitRenderActionFlags* ioActionFlags,
+ const AudioTimeStamp* timeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
- AudioBufferList *ioData);
-
- OSStatus RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
- const AudioTimeStamp *timeStamp,
- uint32_t inBusNumber,
- uint32_t inNumberFrames);
-
- OSStatus PlayoutProcessImpl(uint32_t inNumberFrames,
- AudioBufferList *ioData);
-
- static bool RunCapture(void* ptrThis);
- bool CaptureWorkerThread();
+ AudioBufferList* ioData);
+ OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* ioActionFlags,
+ UInt32 inNumberFrames,
+ AudioBufferList* ioData);
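
OnGetPlayoutData() fills the buffers handed to it by the audio unit. A sketch of what such a render callback typically does with |ioData| for the 16-bit mono format described above; the real implementation copies decoded audio from the FineAudioBuffer, whereas this self-contained version just writes silence:

#include <AudioUnit/AudioUnit.h>
#include <cstring>

static OSStatus GetPlayoutDataSketch(void* /*inRefCon*/,
                                     AudioUnitRenderActionFlags* /*flags*/,
                                     const AudioTimeStamp* /*timeStamp*/,
                                     UInt32 /*busNumber*/,
                                     UInt32 numFrames,
                                     AudioBufferList* ioData) {
  // One mono buffer of 16-bit samples: numFrames * sizeof(SInt16) bytes.
  AudioBuffer& buffer = ioData->mBuffers[0];
  const UInt32 bytes = numFrames * sizeof(SInt16);
  // Real code would copy |bytes| of decoded audio into the buffer here;
  // zeros simply produce silence.
  std::memset(buffer.mData, 0, bytes);
  buffer.mDataByteSize = bytes;
  return noErr;
}
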
private:
- rtc::ThreadChecker thread_checker_;
+ // Ensures that methods are called from the thread on which this object is
+ // created.
+ rtc::ThreadChecker _threadChecker;
// Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
// AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
// The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
// and therefore outlives this object.
- AudioDeviceBuffer* audio_device_buffer_;
-
- CriticalSectionWrapper& _critSect;
-
- AudioParameters playout_parameters_;
- AudioParameters record_parameters_;
-
- rtc::scoped_ptr<ThreadWrapper> _captureWorkerThread;
-
- AudioUnit _auVoiceProcessing;
- void* _audioInterruptionObserver;
-
+ AudioDeviceBuffer* _audioDeviceBuffer;
+
+ // Contains audio parameters (sample rate, #channels, buffer size etc.) for
+ // the playout and recording sides. These structures are set in two steps:
+ // first, the native sample rate and #channels are defined in Init(). Next,
+ // the audio session is activated and we verify that the preferred parameters
+ // were granted by the OS. At this stage it is also possible to add a third
+ // component to the parameters: the native I/O buffer duration.
+ // A CHECK will be hit if, for some reason, we fail to open an audio session
+ // using the specified parameters.
+ AudioParameters _playoutParameters;
+ AudioParameters _recordParameters;
+
+ // The Voice-Processing I/O unit has the same characteristics as the
+ // Remote I/O unit (supports full duplex low-latency audio input and output)
+ // and adds AEC for two-way duplex communication. It also adds AGC,
+ // adjustment of voice-processing quality, and muting. Hence, ideal for
+ // VoIP applications.
+ AudioUnit _vpioUnit;
+
+ // FineAudioBuffer takes an AudioDeviceBuffer, which delivers audio data
+ // in chunks of 10 ms. It then allows this data to be pulled at a finer or
+ // coarser granularity. That is, by interacting with this class instead of
+ // directly with the AudioDeviceBuffer, one can ask for any number of audio
+ // samples. It also supports a similar scheme for the recording side.
+ // Example: the native buffer size can be 128 audio frames at a 16 kHz sample
+ // rate. WebRTC will provide 160 audio frames per 10 ms, but iOS asks for 128
+ // in each callback (one every 8 ms). This class can then ask for 128, and the
+ // FineAudioBuffer will ask WebRTC for new data only when needed and also
+ // cache non-utilized audio between callbacks. On the recording side, iOS
+ // can provide audio data frames of size 128, and these are accumulated until
+ // enough data to supply one 10 ms call exists. This 10 ms chunk is then sent
+ // to WebRTC and the remaining part is stored.
+ rtc::scoped_ptr<FineAudioBuffer> _fineAudioBuffer;
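
The 128-versus-160-frame bookkeeping described above can be illustrated with a short self-contained sketch; it uses a plain std::vector rather than the real FineAudioBuffer API:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const size_t kFramesPerCallback = 128;  // Native I/O buffer size.
  const size_t kFramesPer10ms = 160;      // 16 kHz * 0.010 s.
  std::vector<int16_t> accumulated;       // Cache carried between callbacks.
  for (int callback = 0; callback < 5; ++callback) {
    // Pretend the audio unit just recorded 128 frames of silence.
    accumulated.insert(accumulated.end(), kFramesPerCallback, 0);
    // Deliver complete 10 ms chunks to WebRTC; keep the remainder cached.
    while (accumulated.size() >= kFramesPer10ms) {
      std::printf("deliver 10 ms chunk, %zu frames left over\n",
                  accumulated.size() - kFramesPer10ms);
      accumulated.erase(accumulated.begin(),
                        accumulated.begin() + kFramesPer10ms);
    }
  }
  return 0;
}
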
+
+ // Extra audio buffer to be used by the playout side for rendering audio.
+ // The buffer size is given by FineAudioBuffer::RequiredBufferSizeBytes().
+ rtc::scoped_ptr<SInt8[]> _playoutAudioBuffer;
+
+ // Provides a mechanism for encapsulating one or more buffers of audio data.
+ // Only used on the recording side.
+ AudioBufferList _audioRecordBufferList;
+
+ // Temporary storage for recorded data. AudioUnitRender() renders into this
+ // array as soon as a frame of the desired buffer size has been recorded.
+ rtc::scoped_ptr<SInt8[]> _recordAudioBuffer;
+
+ // Set to 1 when recording is active and 0 otherwise.
+ volatile int _recording;
+
+ // Set to 1 when playout is active and 0 otherwise.
+ volatile int _playing;
+
+ // Set to true after successful call to Init(), false otherwise.
bool _initialized;
- bool _isShutDown;
- bool _recording;
- bool _playing;
- bool _recIsInitialized;
- bool _playIsInitialized;
-
- // The sampling rate to use with Audio Device Buffer
- int _adbSampFreq;
-
- // Delay calculation
- uint32_t _recordingDelay;
- uint32_t _playoutDelay;
- uint32_t _playoutDelayMeasurementCounter;
- uint32_t _recordingDelayHWAndOS;
- uint32_t _recordingDelayMeasurementCounter;
- // Playout buffer, needed for 44.0 / 44.1 kHz mismatch
- int16_t _playoutBuffer[ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
- uint32_t _playoutBufferUsed; // How much is filled
+ // Set to true after successful call to InitRecording(), false otherwise.
+ bool _recIsInitialized;
- // Recording buffers
- int16_t _recordingBuffer[N_REC_BUFFERS][ENGINE_REC_BUF_SIZE_IN_SAMPLES];
- uint32_t _recordingLength[N_REC_BUFFERS];
- uint32_t _recordingSeqNumber[N_REC_BUFFERS];
- uint32_t _recordingCurrentSeq;
+ // Set to true after successful call to InitPlayout(), false otherwise.
+ bool _playIsInitialized;
- // Current total size all data in buffers, used for delay estimate
- uint32_t _recordingBufferTotalSize;
+ // Audio interruption observer instance.
+ void* _audioInterruptionObserver;
};
} // namespace webrtc
-#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
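
For reference, the Core Audio steps behind a method like SetupAndInitializeVoiceProcessingAudioUnit() usually look as follows. This is a sketch of standard AudioUnit usage (bus 1 for input, bus 0 for output), not the patch's actual implementation, and it omits the stream-format setup and error handling the real code needs:

#include <AudioUnit/AudioUnit.h>

static OSStatus InputCallbackSketch(void*, AudioUnitRenderActionFlags*,
                                    const AudioTimeStamp*, UInt32, UInt32,
                                    AudioBufferList*) {
  return noErr;  // Real code would call AudioUnitRender() here.
}

bool CreateVoiceProcessingUnit(AudioUnit* out_unit) {
  AudioComponentDescription desc = {};
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;  // Adds AEC/AGC.
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  AudioComponent comp = AudioComponentFindNext(nullptr, &desc);
  if (!comp) return false;
  if (AudioComponentInstanceNew(comp, out_unit) != noErr) return false;

  // Enable input on bus 1 (output on bus 0 is enabled by default).
  UInt32 enable = 1;
  AudioUnitSetProperty(*out_unit, kAudioOutputUnitProperty_EnableIO,
                       kAudioUnitScope_Input, 1, &enable, sizeof(enable));

  // Register the recording callback; a playout render callback would be
  // installed the same way with kAudioUnitProperty_SetRenderCallback on bus 0.
  AURenderCallbackStruct cb = {InputCallbackSketch, nullptr};
  AudioUnitSetProperty(*out_unit, kAudioOutputUnitProperty_SetInputCallback,
                       kAudioUnitScope_Global, 1, &cb, sizeof(cb));

  return AudioUnitInitialize(*out_unit) == noErr;
}
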
