Chromium Code Reviews

Unified Diff: webrtc/modules/audio_device/ios/audio_device_ios.h

Issue 1254883002: Refactor the AudioDevice for iOS and improve the performance and stability (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Rebased and cleaned up Created 5 years, 3 months ago

 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

-#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
-#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_

 #include <AudioUnit/AudioUnit.h>

+#include "webrtc/base/scoped_ptr.h"
 #include "webrtc/base/thread_checker.h"
 #include "webrtc/modules/audio_device/audio_device_generic.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_wrapper.h"

 namespace webrtc {
-const uint32_t N_REC_SAMPLES_PER_SEC = 44100;
-const uint32_t N_PLAY_SAMPLES_PER_SEC = 44100;

-const uint32_t ENGINE_REC_BUF_SIZE_IN_SAMPLES = (N_REC_SAMPLES_PER_SEC / 100);
-const uint32_t ENGINE_PLAY_BUF_SIZE_IN_SAMPLES = (N_PLAY_SAMPLES_PER_SEC / 100);
+class FineAudioBuffer;

-// Number of 10 ms recording blocks in recording buffer
-const uint16_t N_REC_BUFFERS = 20;
-
+// Implements full duplex 16-bit mono PCM audio support for iOS using a
+// Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit
+// supports audio echo cancellation. It also adds automatic gain control,
+// adjustment of voice-processing quality and muting.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All supported public methods must also be called on the same thread.
+// A thread checker will DCHECK if any supported method is called on an invalid
+// thread.
+//
+// Recorded audio will be delivered on a real-time internal I/O thread in the
+// audio unit. The audio unit will also ask for audio data to play out on this
+// same thread.
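
The threading contract above implies that every supported public method starts by checking |_threadChecker|. A minimal illustration only, assuming the DCHECK macro from webrtc/base/checks.h; the actual method bodies live in audio_device_ios.mm and will differ:

    int32_t AudioDeviceIOS::Init() {
      // Guard against calls from any thread other than the construction thread.
      DCHECK(_threadChecker.CalledOnValidThread());
      if (_initialized) {
        return 0;
      }
      // ... one-time initialization (query native audio parameters, etc.) ...
      _initialized = true;
      return 0;
    }
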
 class AudioDeviceIOS : public AudioDeviceGeneric {
  public:
   AudioDeviceIOS();
   ~AudioDeviceIOS();

   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override;

   int32_t Init() override;
   int32_t Terminate() override;
   bool Initialized() const override { return _initialized; }

   int32_t InitPlayout() override;
   bool PlayoutIsInitialized() const override { return _playIsInitialized; }

   int32_t InitRecording() override;
   bool RecordingIsInitialized() const override { return _recIsInitialized; }

   int32_t StartPlayout() override;
   int32_t StopPlayout() override;
   bool Playing() const override { return _playing; }

   int32_t StartRecording() override;
   int32_t StopRecording() override;
   bool Recording() const override { return _recording; }

   int32_t SetLoudspeakerStatus(bool enable) override;
   int32_t GetLoudspeakerStatus(bool& enabled) const override;

-  // TODO(henrika): investigate if we can reduce the complexity here.
-  // Do we even need delay estimates?
+  // These methods return hard-coded delay values and not dynamic delay
+  // estimates. The reason is that iOS supports a built-in AEC and the WebRTC
+  // AEC will always be disabled in the Libjingle layer to avoid running two
+  // AEC implementations at the same time. It also saves resources to avoid
+  // updating these delay values continuously.
+  // TODO(henrika): it would be possible to mark these two methods as not
+  // implemented since they are only called for A/V-sync purposes today and
+  // A/V-sync is not supported on iOS. However, we avoid adding error messages
+  // to the log by using these dummy implementations instead.
   int32_t PlayoutDelay(uint16_t& delayMS) const override;
   int32_t RecordingDelay(uint16_t& delayMS) const override;

-  int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
-                        uint16_t& sizeMS) const override;
-
+  // Native audio parameters stored during construction.
   // These methods are unique for the iOS implementation.
-
-  // Native audio parameters stored during construction.
   int GetPlayoutAudioParameters(AudioParameters* params) const override;
   int GetRecordAudioParameters(AudioParameters* params) const override;

-  // These methods are currently not implemented on iOS.
-  // See audio_device_not_implemented_ios.mm for dummy implementations.
+  // These methods are currently not fully implemented on iOS:

+  // See audio_device_not_implemented.cc for trivial implementations.
+  int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type,
+                        uint16_t& sizeMS) const override;
   int32_t ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const;
   int32_t ResetAudioDevice() override;
   int32_t PlayoutIsAvailable(bool& available) override;
   int32_t RecordingIsAvailable(bool& available) override;
   int32_t SetAGC(bool enable) override;
   bool AGC() const override;
   int16_t PlayoutDevices() override;
   int16_t RecordingDevices() override;
   int32_t PlayoutDeviceName(uint16_t index,
                             char name[kAdmMaxDeviceNameSize],
 (...skipping 47 matching lines...)
   bool PlayoutWarning() const override;
   bool PlayoutError() const override;
   bool RecordingWarning() const override;
   bool RecordingError() const override;
   void ClearPlayoutWarning() override{};
   void ClearPlayoutError() override{};
   void ClearRecordingWarning() override{};
   void ClearRecordingError() override{};

  private:
-  // TODO(henrika): try to remove these.
-  void Lock() {
-    _critSect.Enter();
-  }
+  // Uses current |_playoutParameters| and |_recordParameters| to inform the
+  // audio device buffer (ADB) about our internal audio parameters.
+  void UpdateAudioDeviceBuffer();

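UpdateAudioDeviceBuffer() presumably boils down to forwarding the stored parameters to the attached AudioDeviceBuffer. A minimal sketch, assuming AudioDeviceBuffer keeps its SetPlayoutSampleRate()/SetPlayoutChannels()/SetRecordingSampleRate()/SetRecordingChannels() setters and that AudioParameters exposes sample_rate() and channels(); the actual definition is in audio_device_ios.mm:

    void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
      // Inform the audio device buffer (ADB) about the audio format we use.
      _audioDeviceBuffer->SetPlayoutSampleRate(_playoutParameters.sample_rate());
      _audioDeviceBuffer->SetPlayoutChannels(_playoutParameters.channels());
      _audioDeviceBuffer->SetRecordingSampleRate(_recordParameters.sample_rate());
      _audioDeviceBuffer->SetRecordingChannels(_recordParameters.channels());
    }
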
-  void UnLock() {
-    _critSect.Leave();
-  }
+  // Since the preferred audio parameters are only hints to the OS, the actual
+  // values may be different once the AVAudioSession has been activated.
+  // This method asks for the current hardware parameters and takes action
+  // if they differ from what we asked for initially. It also defines
+  // |_playoutParameters| and |_recordParameters|.
+  void SetupAudioBuffersForActiveAudioSession();

-  // Init and shutdown
-  int32_t InitPlayOrRecord();
-  int32_t ShutdownPlayOrRecord();
+  // Creates a Voice-Processing I/O unit and configures it for full-duplex
+  // audio. The stream format is selected to avoid internal resampling and to
+  // match the 10ms callback rate for WebRTC as well as possible.
+  // This method also initializes the created audio unit.
+  bool SetupAndInitializeVoiceProcessingAudioUnit();
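
Creating and initializing a Voice-Processing I/O unit follows a well-known Core Audio recipe. The sketch below only outlines the steps the comment describes (find the VP I/O component, enable input on bus 1, pick a 16-bit mono PCM stream format, register the render callbacks declared further down, initialize); it is an assumption-laden outline using |_playoutParameters| as a stand-in for the session sample rate, not the implementation in audio_device_ios.mm, and it omits error handling:

    bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
      // Describe the Voice-Processing I/O unit (Remote I/O plus AEC/AGC).
      AudioComponentDescription desc = {0};
      desc.componentType = kAudioUnitType_Output;
      desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
      desc.componentManufacturer = kAudioUnitManufacturer_Apple;
      AudioComponent comp = AudioComponentFindNext(nullptr, &desc);
      if (AudioComponentInstanceNew(comp, &_vpioUnit) != noErr)
        return false;

      // Enable input on the input bus (1). Output on bus 0 is on by default.
      UInt32 enable = 1;
      AudioUnitSetProperty(_vpioUnit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Input, 1, &enable, sizeof(enable));

      // 16-bit mono PCM at the native sample rate to avoid internal resampling.
      AudioStreamBasicDescription format = {0};
      format.mSampleRate = _playoutParameters.sample_rate();
      format.mFormatID = kAudioFormatLinearPCM;
      format.mFormatFlags =
          kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
      format.mBytesPerPacket = 2;
      format.mFramesPerPacket = 1;
      format.mBytesPerFrame = 2;
      format.mChannelsPerFrame = 1;
      format.mBitsPerChannel = 16;
      // Output scope of bus 1: what the microphone delivers to us.
      AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Output, 1, &format, sizeof(format));
      // Input scope of bus 0: what we feed to the loudspeaker.
      AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &format, sizeof(format));

      // Register the static callbacks declared further down in this header.
      AURenderCallbackStruct inputCallback = {RecordedDataIsAvailable, this};
      AudioUnitSetProperty(_vpioUnit, kAudioOutputUnitProperty_SetInputCallback,
                           kAudioUnitScope_Global, 1, &inputCallback,
                           sizeof(inputCallback));
      AURenderCallbackStruct renderCallback = {GetPlayoutData, this};
      AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_SetRenderCallback,
                           kAudioUnitScope_Input, 0, &renderCallback,
                           sizeof(renderCallback));

      return AudioUnitInitialize(_vpioUnit) == noErr;
    }
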

-  void UpdateRecordingDelay();
-  void UpdatePlayoutDelay();
+  // Activates our audio session, creates and initializes the voice-processing
+  // audio unit and verifies that we got the preferred native audio parameters.
+  bool InitPlayOrRecord();

-  static OSStatus RecordProcess(void *inRefCon,
-                                AudioUnitRenderActionFlags *ioActionFlags,
-                                const AudioTimeStamp *timeStamp,
-                                UInt32 inBusNumber,
-                                UInt32 inNumberFrames,
-                                AudioBufferList *ioData);
-
-  static OSStatus PlayoutProcess(void *inRefCon,
-                                 AudioUnitRenderActionFlags *ioActionFlags,
-                                 const AudioTimeStamp *timeStamp,
-                                 UInt32 inBusNumber,
-                                 UInt32 inNumberFrames,
-                                 AudioBufferList *ioData);
-
-  OSStatus RecordProcessImpl(AudioUnitRenderActionFlags *ioActionFlags,
-                             const AudioTimeStamp *timeStamp,
-                             uint32_t inBusNumber,
-                             uint32_t inNumberFrames);
-
-  OSStatus PlayoutProcessImpl(uint32_t inNumberFrames,
-                              AudioBufferList *ioData);
-
-  static bool RunCapture(void* ptrThis);
-  bool CaptureWorkerThread();
+  // Closes and deletes the voice-processing I/O unit.
+  bool ShutdownPlayOrRecord();
+
+  // Callback function called on a real-time priority I/O thread from the audio
+  // unit. This method is used to signal that recorded audio is available.
+  static OSStatus RecordedDataIsAvailable(
+      void* inRefCon,
+      AudioUnitRenderActionFlags* ioActionFlags,
+      const AudioTimeStamp* timeStamp,
+      UInt32 inBusNumber,
+      UInt32 inNumberFrames,
+      AudioBufferList* ioData);
+  OSStatus OnRecordedDataIsAvailable(AudioUnitRenderActionFlags* ioActionFlags,
+                                     const AudioTimeStamp* timeStamp,
+                                     UInt32 inBusNumber,
+                                     UInt32 inNumberFrames);
+
+  // Callback function called on a real-time priority I/O thread from the audio
+  // unit. This method is used to provide audio samples to the audio unit.
+  static OSStatus GetPlayoutData(void* inRefCon,
+                                 AudioUnitRenderActionFlags* ioActionFlags,
+                                 const AudioTimeStamp* timeStamp,
+                                 UInt32 inBusNumber,
+                                 UInt32 inNumberFrames,
+                                 AudioBufferList* ioData);
+  OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* ioActionFlags,
+                            UInt32 inNumberFrames,
+                            AudioBufferList* ioData);
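
The static/instance pairs above follow the usual Core Audio callback pattern: the static function handed to the audio unit gets the object back through |inRefCon| and forwards to the member function. A hedged sketch of that dispatch (the real definitions are in audio_device_ios.mm):

    OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
        void* inRefCon,
        AudioUnitRenderActionFlags* ioActionFlags,
        const AudioTimeStamp* timeStamp,
        UInt32 inBusNumber,
        UInt32 inNumberFrames,
        AudioBufferList* ioData) {
      // |inRefCon| is the |this| pointer stored in the AURenderCallbackStruct
      // when the input callback was registered.
      AudioDeviceIOS* audio_device = static_cast<AudioDeviceIOS*>(inRefCon);
      return audio_device->OnRecordedDataIsAvailable(
          ioActionFlags, timeStamp, inBusNumber, inNumberFrames);
    }

    OSStatus AudioDeviceIOS::GetPlayoutData(
        void* inRefCon,
        AudioUnitRenderActionFlags* ioActionFlags,
        const AudioTimeStamp* timeStamp,
        UInt32 inBusNumber,
        UInt32 inNumberFrames,
        AudioBufferList* ioData) {
      AudioDeviceIOS* audio_device = static_cast<AudioDeviceIOS*>(inRefCon);
      return audio_device->OnGetPlayoutData(ioActionFlags, inNumberFrames,
                                            ioData);
    }
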

  private:
-  rtc::ThreadChecker thread_checker_;
+  // Ensures that methods are called from the same thread as this object is
+  // created on.
+  rtc::ThreadChecker _threadChecker;

   // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
   // AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
   // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
   // and therefore outlives this object.
-  AudioDeviceBuffer* audio_device_buffer_;
+  AudioDeviceBuffer* _audioDeviceBuffer;

-  CriticalSectionWrapper& _critSect;
+  // Contains audio parameters (sample rate, #channels, buffer size etc.) for
+  // the playout and recording sides. These structures are set in two steps:
+  // first, native sample rate and #channels are defined in Init(). Next, the
+  // audio session is activated and we verify that the preferred parameters
+  // were granted by the OS. At this stage it is also possible to add a third
+  // component to the parameters: the native I/O buffer duration.
+  // A CHECK will be hit if we for some reason fail to open an audio session
+  // using the specified parameters.
+  AudioParameters _playoutParameters;
+  AudioParameters _recordParameters;

-  AudioParameters playout_parameters_;
-  AudioParameters record_parameters_;
+  // The Voice-Processing I/O unit has the same characteristics as the
+  // Remote I/O unit (supports full duplex low-latency audio input and output)
+  // and adds AEC for two-way duplex communication. It also adds AGC,
+  // adjustment of voice-processing quality, and muting. Hence, it is ideal
+  // for VoIP applications.
+  AudioUnit _vpioUnit;

-  rtc::scoped_ptr<ThreadWrapper> _captureWorkerThread;
+  // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+  // in chunks of 10ms. It then allows for this data to be pulled at
+  // a finer or coarser granularity. I.e. interacting with this class instead
+  // of directly with the AudioDeviceBuffer one can ask for any number of
+  // audio data samples. It also supports a similar scheme for the recording
+  // side.
+  // Example: native buffer size can be 128 audio frames at 16kHz sample rate.
+  // WebRTC will provide 160 audio frames per 10ms but iOS asks for 128
+  // in each callback (one every 8ms). This class can then ask for 128 and the
+  // FineAudioBuffer will ask WebRTC for new data only when needed and also
+  // cache non-utilized audio between callbacks. On the recording side, iOS
+  // can provide audio data frames of size 128 and these are accumulated until
+  // enough data to supply one 10ms call exists. This 10ms chunk is then sent
+  // to WebRTC and the remaining part is stored.
+  rtc::scoped_ptr<FineAudioBuffer> _fineAudioBuffer;
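
To make the 16 kHz example concrete: WebRTC exchanges audio in 10ms blocks of 160 frames, while the audio unit works in 128-frame buffers (one every 8ms), so leftover frames must be cached between callbacks. The hypothetical accumulator below illustrates only the recording-side bookkeeping; it is not the FineAudioBuffer interface:

    #include <cstdint>
    #include <vector>

    class TenMsAccumulator {
     public:
      explicit TenMsAccumulator(int sample_rate_hz)
          : frames_per_10ms_(sample_rate_hz / 100) {}  // 16000 Hz -> 160 frames.

      // Called from the I/O callback with e.g. 128 frames every 8 ms. Returns
      // true each time a full 10 ms block is available in |out|.
      bool Append(const int16_t* frames, size_t num_frames,
                  std::vector<int16_t>* out) {
        cache_.insert(cache_.end(), frames, frames + num_frames);
        if (cache_.size() < frames_per_10ms_)
          return false;
        out->assign(cache_.begin(), cache_.begin() + frames_per_10ms_);
        cache_.erase(cache_.begin(), cache_.begin() + frames_per_10ms_);
        return true;
      }

     private:
      const size_t frames_per_10ms_;
      std::vector<int16_t> cache_;  // Leftover frames between callbacks.
    };
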

-  AudioUnit _auVoiceProcessing;
-  void* _audioInterruptionObserver;
+  // Extra audio buffer to be used by the playout side for rendering audio.
+  // The buffer size is given by FineAudioBuffer::RequiredBufferSizeBytes().
+  rtc::scoped_ptr<SInt8[]> _playoutAudioBuffer;

+  // Provides a mechanism for encapsulating one or more buffers of audio data.
+  // Only used on the recording side.
+  AudioBufferList _audioRecordBufferList;
+
+  // Temporary storage for recorded data. AudioUnitRender() renders into this
+  // array as soon as a frame of the desired buffer size has been recorded.
+  rtc::scoped_ptr<SInt8[]> _recordAudioBuffer;
+
+  // Set to 1 when recording is active and 0 otherwise.
+  volatile int _recording;
+
+  // Set to 1 when playout is active and 0 otherwise.
+  volatile int _playing;
+
+  // Set to true after successful call to Init(), false otherwise.
   bool _initialized;
-  bool _isShutDown;
-  bool _recording;
-  bool _playing;
+
+  // Set to true after successful call to InitRecording(), false otherwise.
   bool _recIsInitialized;
+
+  // Set to true after successful call to InitPlayout(), false otherwise.
   bool _playIsInitialized;

-  // The sampling rate to use with Audio Device Buffer
-  int _adbSampFreq;
-
-  // Delay calculation
-  uint32_t _recordingDelay;
-  uint32_t _playoutDelay;
-  uint32_t _playoutDelayMeasurementCounter;
-  uint32_t _recordingDelayHWAndOS;
-  uint32_t _recordingDelayMeasurementCounter;
-
-  // Playout buffer, needed for 44.0 / 44.1 kHz mismatch
-  int16_t _playoutBuffer[ENGINE_PLAY_BUF_SIZE_IN_SAMPLES];
-  uint32_t _playoutBufferUsed;  // How much is filled
-
-  // Recording buffers
-  int16_t _recordingBuffer[N_REC_BUFFERS][ENGINE_REC_BUF_SIZE_IN_SAMPLES];
-  uint32_t _recordingLength[N_REC_BUFFERS];
-  uint32_t _recordingSeqNumber[N_REC_BUFFERS];
-  uint32_t _recordingCurrentSeq;
-
-  // Current total size all data in buffers, used for delay estimate
-  uint32_t _recordingBufferTotalSize;
+
+  // Audio interruption observer instance.
+  void* _audioInterruptionObserver;
 };

 } // namespace webrtc

-#endif  // WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IOS_H
+#endif  // WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_
