Index: webrtc/modules/audio_device/ios/audio_device_ios.h |
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.h b/webrtc/modules/audio_device/ios/audio_device_ios.h |
index 8b211325853c7c2148c7f271f2e8da6d6be5d478..d1f526591aa2b880af9cb1af6dee6b312c7e5381 100644 |
--- a/webrtc/modules/audio_device/ios/audio_device_ios.h |
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.h |
@@ -8,26 +8,32 @@ |
* be found in the AUTHORS file in the root of the source tree. |
*/ |
-#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H |
-#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H |
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ |
+#define WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ |
#include <AudioUnit/AudioUnit.h> |
+#include "webrtc/base/scoped_ptr.h" |
#include "webrtc/base/thread_checker.h" |
#include "webrtc/modules/audio_device/audio_device_generic.h" |
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h" |
-#include "webrtc/system_wrappers/interface/thread_wrapper.h" |
namespace webrtc { |
-const uint32_t N_REC_SAMPLES_PER_SEC = 44100; |
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100; |
- |
-const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100); |
-const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100); |
- |
-// Number of 10 ms recording blocks in recording buffer |
-const uint16_t N_REC_BUFFERS = 20; |
+class FineAudioBuffer; |
+ |
+// Implements full duplex 16-bit mono PCM audio support for iOS using a |
+// Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit |
+// supports audio echo cancellation. It also adds automatic gain control, |
+// adjustment of voice-processing quality and muting. |
+// |
+// An instance must be created and destroyed on one and the same thread. |
+// All supported public methods must also be called on the same thread. |
+// A thread checker will DCHECK if any supported method is called on an invalid |
+// thread. |
+// |
+// Recorded audio will be delivered on a real-time internal I/O thread in the |
+// audio unit. The audio unit will also ask for audio data to play out on this |
+// same thread. |
class AudioDeviceIOS : public AudioDeviceGeneric { |
public: |
AudioDeviceIOS(); |
@@ -56,23 +62,23 @@ class AudioDeviceIOS : public AudioDeviceGeneric { |
int32_t SetLoudspeakerStatus(bool enable) override; |
int32_t GetLoudspeakerStatus(bool& enabled) const override; |
- // TODO(henrika): investigate if we can reduce the complexity here. |
- // Do we even need delay estimates? |
+ // Currently using hard-coded delay values and not dynamic delay estimates. |
+ // Given that a built-in AEC is available on iOS, it saves resources to |
+ // return fixed (reasonable) values here instead. Any AEC in WebRTC will then |
tkchin_webrtc
2015/09/01 20:54:50
ooc, what happens if two aec passes occur? Does it
henrika_webrtc
2015/09/03 13:44:41
Extended the comments. Today we always disable all
|
+ // work as well. |
int32_t PlayoutDelay(uint16_t& delayMS) const override; |
int32_t RecordingDelay(uint16_t& delayMS) const override; |
- int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type, |
- uint16_t& sizeMS) const override; |
- |
- // These methods are unique for the iOS implementation. |
- |
// Native audio parameters stored during construction. |
+ // These methods are unique for the iOS implementation. |
int GetPlayoutAudioParameters(AudioParameters* params) const override; |
int GetRecordAudioParameters(AudioParameters* params) const override; |
- // These methods are currently not implemented on iOS. |
- // See audio_device_not_implemented_ios.mm for dummy implementations. |
+ // These methods are currently not fully implemented on iOS: |
+ // See audio_device_not_implemented.cc for trivial implementations. |
+ int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type, |
+ uint16_t& sizeMS) const override; |
int32_t ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const; |
int32_t ResetAudioDevice() override; |
int32_t PlayoutIsAvailable(bool& available) override; |
@@ -140,97 +146,127 @@ class AudioDeviceIOS : public AudioDeviceGeneric { |
void ClearRecordingError() override{}; |
private: |
- // TODO(henrika): try to remove these. |
- void Lock() { |
- _critSect.Enter(); |
- } |
- |
- void UnLock() { |
- _critSect.Leave(); |
- } |
- |
- // Init and shutdown |
- int32_t InitPlayOrRecord(); |
- int32_t ShutdownPlayOrRecord(); |
- |
- void UpdateRecordingDelay(); |
- void UpdatePlayoutDelay(); |
- |
- static OSStatus RecordProcess(void *inRefCon, |
- AudioUnitRenderActionFlags *ioActionFlags, |
- const AudioTimeStamp *timeStamp, |
- UInt32 inBusNumber, |
- UInt32 inNumberFrames, |
- AudioBufferList *ioData); |
- |
- static OSStatus PlayoutProcess(void *inRefCon, |
- AudioUnitRenderActionFlags *ioActionFlags, |
- const AudioTimeStamp *timeStamp, |
+ // Uses current |_playoutParameters| and |_recordParameters| to inform the |
+ // audio device buffer (ADB) about our internal audio parameters. |
+ void UpdateAudioDeviceBuffer(); |
+ |
+ // Since the preferred audio parameters are only hints to the OS, the actual |
+ // values may be different once the AVAudioSession has been activated. |
+ // This method asks for the current hardware parameters and takes actions |
+ // if they should differ from what we have asked for initially. |
+ void VerifyAudioParametersForActiveAudioSession(); |
+ |
+ // Creates a Voice-Processing I/O unit and configures it for full-duplex |
+ // audio. The stream format is chosen to avoid internal resampling |
+ // and to match the 10ms callback rate for WebRTC as well as possible. |
+ // This method also initializes the created audio unit. |
+ void SetupAndInitializeVoiceProcessingAudioUnit(); |
+ |
+ // Activates our audio session, creates and initializes the voice-processing |
+ // audio unit and verifies that we got the preferred native audio parameters. |
+ bool InitPlayOrRecord(); |
+ |
+ // Closes and deletes the voice-processing I/O unit. |
+ bool ShutdownPlayOrRecord(); |
+ |
+ // Callback function called on a real-time priority I/O thread from the audio |
+ // unit. This method is used to signal that recorded audio is available. |
+ static OSStatus RecordedDataIsAvailable( |
tkchin_webrtc
2015/09/01 20:54:50
nit: Style guide is to use C++ syntax/style in C++
henrika_webrtc
2015/09/03 13:44:41
ooh nooo, I was not sure if one could use objc not
tkchin_webrtc
2015/09/04 05:29:16
I'm ok with changing old stuff separately not part
henrika_webrtc
2015/09/04 09:51:18
Agree. And thanks for letting me do this in a sepa
|
+ void* inRefCon, |
+ AudioUnitRenderActionFlags* ioActionFlags, |
+ const AudioTimeStamp* timeStamp, |
+ UInt32 inBusNumber, |
+ UInt32 inNumberFrames, |
+ AudioBufferList* ioData); |
+ OSStatus OnRecordedDataIsAvailable(AudioUnitRenderActionFlags* ioActionFlags, |
+ const AudioTimeStamp* timeStamp, |
+ UInt32 inBusNumber, |
+ UInt32 inNumberFrames); |
+ |
+ // Callback function called on a real-time priority I/O thread from the audio |
+ // unit. This method is used to provide audio samples to the audio unit. |
+ static OSStatus GetPlayoutData(void* inRefCon, |
+ AudioUnitRenderActionFlags* ioActionFlags, |
+ const AudioTimeStamp* timeStamp, |
UInt32 inBusNumber, |
UInt32 inNumberFrames, |
- AudioBufferList *ioData); |
- |
- OSStatus RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags, |
- const AudioTimeStamp *timeStamp, |
- uint32_t inBusNumber, |
- uint32_t inNumberFrames); |
- |
- OSStatus PlayoutProcessImpl(uint32_t inNumberFrames, |
- AudioBufferList *ioData); |
- |
- static bool RunCapture(void* ptrThis); |
- bool CaptureWorkerThread(); |
+ AudioBufferList* ioData); |
+ OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* ioActionFlags, |
+ UInt32 inNumberFrames, |
+ AudioBufferList* ioData); |
private: |
- rtc::ThreadChecker thread_checker_; |
+ // Ensures that methods are called from the same thread as this object is |
+ // created on. |
+ rtc::ThreadChecker _threadChecker; |
tkchin_webrtc
2015/09/01 20:54:50
nit: Style guide is to use C++ syntax/style in C++
henrika_webrtc
2015/09/03 13:44:41
Acknowledged.
|
// Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the |
// AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create(). |
// The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance |
// and therefore outlives this object. |
- AudioDeviceBuffer* audio_device_buffer_; |
- |
- CriticalSectionWrapper& _critSect; |
- |
- AudioParameters playout_parameters_; |
- AudioParameters record_parameters_; |
- |
- rtc::scoped_ptr<ThreadWrapper> _captureWorkerThread; |
- |
- AudioUnit _auVoiceProcessing; |
- void* _audioInterruptionObserver; |
- |
+ AudioDeviceBuffer* _audioDeviceBuffer; |
+ |
+ // Contains audio parameters (sample rate, #channels, buffer size etc.) for |
+ // the playout and recording sides. These structures are set in two steps: |
+ // first, native sample rate and #channels are defined in Init(). Next, the |
+ // audio session is activated and we verify that the preferred parameters |
+ // were granted by the OS. At this stage it is also possible to add a third |
+ // component to the parameters; the native I/O buffer duration. |
+ // A CHECK will be hit if we for some reason fail to open an audio session |
+ // using the specified parameters. |
+ AudioParameters _playoutParameters; |
+ AudioParameters _recordParameters; |
+ |
+ // The Voice-Processing I/O unit has the same characteristics as the |
+ // Remote I/O unit (supports full duplex low-latency audio input and output) |
+ // and adds AEC for two-way duplex communication. It also adds AGC, |
+ // adjustment of voice-processing quality, and muting. Hence, ideal for |
+ // VoIP applications. |
+ AudioUnit _vpioUnit; |
+ |
+ // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data |
+ // in chunks of 10ms. It then allows for this data to be pulled in |
+ // a finer or coarser granularity. I.e. interacting with this class instead |
+ // of directly with the AudioDeviceBuffer one can ask for any number of |
+ // audio data samples. |
+ // Example: native buffer size can be 128 audio frames at 16kHz sample rate. |
+ // WebRTC will provide 480 audio frames per 10ms but iOS asks for 128 |
+ // in each callback (one every 8ms). This class can then ask for 128 and the |
+ // FineAudioBuffer will ask WebRTC for new data only when needed and also |
+ // cache non-utilized audio between callbacks. |
tkchin_webrtc
2015/09/01 20:54:50
Thanks for this comment - it was helpful.
tkchin_webrtc
2015/09/01 20:54:50
nit: cache non-utilized
henrika_webrtc
2015/09/03 13:44:41
Done.
henrika_webrtc
2015/09/03 13:44:41
Thanks. Added comments for the recording side as w
|
+ rtc::scoped_ptr<FineAudioBuffer> _fineAudioBuffer; |
+ |
+ // Extra audio buffer to be used by the playout side for rendering audio. |
+ // The buffer size is given by FineAudioBuffer::RequiredBufferSizeBytes(). |
+ rtc::scoped_ptr<SInt8[]> _playoutAudioBuffer; |
+ |
+ // Provides a mechanism for encapsulating one or more buffers of audio data. |
+ // Only used on the recording side. |
+ AudioBufferList _audioRecordBufferList; |
+ |
+ // Temporary storage for recorded data. AudioUnitRender() renders into this |
+ // array as soon as a frame of the desired buffer size has been recorded. |
+ rtc::scoped_ptr<SInt8[]> _recordAudioBuffer; |
+ |
+ // Set to 1 when recording is active and 0 otherwise. |
+ volatile int _recording; |
+ |
+ // Set to 1 when playout is active and 0 otherwise. |
+ volatile int _playing; |
+ |
+ // Set to true after successful call to Init(), false otherwise. |
bool _initialized; |
- bool _isShutDown; |
- bool _recording; |
- bool _playing; |
- bool _recIsInitialized; |
- bool _playIsInitialized; |
- |
- // The sampling rate to use with Audio Device Buffer |
- int _adbSampFreq; |
- |
- // Delay calculation |
- uint32_t _recordingDelay; |
- uint32_t _playoutDelay; |
- uint32_t _playoutDelayMeasurementCounter; |
- uint32_t _recordingDelayHWAndOS; |
- uint32_t _recordingDelayMeasurementCounter; |
- // Playout buffer, needed for 44.0 / 44.1 kHz mismatch |
- int16_t _playoutBuffer[ENGINE_PLAY_BUF_SIZE_IN_SAMPLES]; |
- uint32_t _playoutBufferUsed; // How much is filled |
+ // Set to true after successful call to InitRecording(), false otherwise. |
+ bool _recIsInitialized; |
- // Recording buffers |
- int16_t _recordingBuffer[N_REC_BUFFERS][ENGINE_REC_BUF_SIZE_IN_SAMPLES]; |
- uint32_t _recordingLength[N_REC_BUFFERS]; |
- uint32_t _recordingSeqNumber[N_REC_BUFFERS]; |
- uint32_t _recordingCurrentSeq; |
+ // Set to true after successful call to InitPlayout(), false otherwise. |
+ bool _playIsInitialized; |
- // Current total size all data in buffers, used for delay estimate |
- uint32_t _recordingBufferTotalSize; |
+ // Audio interruption observer instance. |
+ void* _audioInterruptionObserver; |
}; |
} // namespace webrtc |
-#endif // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H |
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ |