OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ | 11 #ifndef WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ |
12 #define WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ | 12 #define WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ |
13 | 13 |
14 #include <memory> | 14 #include <memory> |
15 | 15 |
16 #include <AudioUnit/AudioUnit.h> | |
17 | |
18 #include "webrtc/base/asyncinvoker.h" | 16 #include "webrtc/base/asyncinvoker.h" |
19 #include "webrtc/base/objc/RTCMacros.h" | 17 #include "webrtc/base/objc/RTCMacros.h" |
20 #include "webrtc/base/thread.h" | 18 #include "webrtc/base/thread.h" |
21 #include "webrtc/base/thread_checker.h" | 19 #include "webrtc/base/thread_checker.h" |
22 #include "webrtc/modules/audio_device/audio_device_generic.h" | 20 #include "webrtc/modules/audio_device/audio_device_generic.h" |
23 #include "webrtc/modules/audio_device/ios/audio_session_observer.h" | 21 #include "webrtc/modules/audio_device/ios/audio_session_observer.h" |
| 22 #include "webrtc/modules/audio_device/ios/voice_processing_audio_unit.h" |
24 | 23 |
25 RTC_FWD_DECL_OBJC_CLASS(RTCAudioSessionDelegateAdapter); | 24 RTC_FWD_DECL_OBJC_CLASS(RTCAudioSessionDelegateAdapter); |
26 | 25 |
27 namespace webrtc { | 26 namespace webrtc { |
28 | 27 |
29 class FineAudioBuffer; | 28 class FineAudioBuffer; |
30 | 29 |
31 // Implements full duplex 16-bit mono PCM audio support for iOS using a | 30 // Implements full duplex 16-bit mono PCM audio support for iOS using a |
32 // Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit | 31 // Voice-Processing (VP) I/O audio unit in Core Audio. The VP I/O audio unit |
33 // supports audio echo cancellation. It also adds automatic gain control, | 32 // supports audio echo cancellation. It also adds automatic gain control, |
34 // adjustment of voice-processing quality and muting. | 33 // adjustment of voice-processing quality and muting. |
35 // | 34 // |
36 // An instance must be created and destroyed on one and the same thread. | 35 // An instance must be created and destroyed on one and the same thread. |
37 // All supported public methods must also be called on the same thread. | 36 // All supported public methods must also be called on the same thread. |
38 // A thread checker will RTC_DCHECK if any supported method is called on an | 37 // A thread checker will RTC_DCHECK if any supported method is called on an |
39 // invalid thread. | 38 // invalid thread. |
40 // | 39 // |
41 // Recorded audio will be delivered on a real-time internal I/O thread in the | 40 // Recorded audio will be delivered on a real-time internal I/O thread in the |
42 // audio unit. The audio unit will also ask for audio data to play out on this | 41 // audio unit. The audio unit will also ask for audio data to play out on this |
43 // same thread. | 42 // same thread. |
44 class AudioDeviceIOS : public AudioDeviceGeneric, | 43 class AudioDeviceIOS : public AudioDeviceGeneric, |
45 public AudioSessionObserver { | 44 public AudioSessionObserver, |
| 45 public VoiceProcessingAudioUnitObserver { |
46 public: | 46 public: |
47 AudioDeviceIOS(); | 47 AudioDeviceIOS(); |
48 ~AudioDeviceIOS(); | 48 ~AudioDeviceIOS(); |
49 | 49 |
50 void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; | 50 void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; |
51 | 51 |
52 int32_t Init() override; | 52 int32_t Init() override; |
53 int32_t Terminate() override; | 53 int32_t Terminate() override; |
54 bool Initialized() const override { return initialized_; } | 54 bool Initialized() const override { return initialized_; } |
55 | 55 |
(...skipping 100 matching lines...)
156 void ClearPlayoutWarning() override {} | 156 void ClearPlayoutWarning() override {} |
157 void ClearPlayoutError() override {} | 157 void ClearPlayoutError() override {} |
158 void ClearRecordingWarning() override {} | 158 void ClearRecordingWarning() override {} |
159 void ClearRecordingError() override {} | 159 void ClearRecordingError() override {} |
160 | 160 |
161 // AudioSessionObserver methods. May be called from any thread. | 161 // AudioSessionObserver methods. May be called from any thread. |
162 void OnInterruptionBegin() override; | 162 void OnInterruptionBegin() override; |
163 void OnInterruptionEnd() override; | 163 void OnInterruptionEnd() override; |
164 void OnValidRouteChange() override; | 164 void OnValidRouteChange() override; |
165 | 165 |
| 166 // VoiceProcessingAudioUnitObserver methods. |
| 167 OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags, |
| 168 const AudioTimeStamp* time_stamp, |
| 169 UInt32 bus_number, |
| 170 UInt32 num_frames, |
| 171 AudioBufferList* io_data) override; |
| 172 OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* flags, |
| 173 const AudioTimeStamp* time_stamp, |
| 174 UInt32 bus_number, |
| 175 UInt32 num_frames, |
| 176 AudioBufferList* io_data) override; |
| 177 |
166 private: | 178 private: |
167 // Called by the relevant AudioSessionObserver methods on |thread_|. | 179 // Called by the relevant AudioSessionObserver methods on |thread_|. |
168 void HandleInterruptionBegin(); | 180 void HandleInterruptionBegin(); |
169 void HandleInterruptionEnd(); | 181 void HandleInterruptionEnd(); |
170 void HandleValidRouteChange(); | 182 void HandleValidRouteChange(); |
171 | 183 |
172 // Uses current |playout_parameters_| and |record_parameters_| to inform the | 184 // Uses current |playout_parameters_| and |record_parameters_| to inform the |
173 // audio device buffer (ADB) about our internal audio parameters. | 185 // audio device buffer (ADB) about our internal audio parameters. |
174 void UpdateAudioDeviceBuffer(); | 186 void UpdateAudioDeviceBuffer(); |
175 | 187 |
176 // Since the preferred audio parameters are only hints to the OS, the actual | 188 // Since the preferred audio parameters are only hints to the OS, the actual |
177 // values may be different once the AVAudioSession has been activated. | 189 // values may be different once the AVAudioSession has been activated. |
178 // This method asks for the current hardware parameters and takes actions | 190 // This method asks for the current hardware parameters and takes actions |
179 // if they should differ from what we have asked for initially. It also | 191 // if they should differ from what we have asked for initially. It also |
180 // defines |playout_parameters_| and |record_parameters_|. | 192 // defines |playout_parameters_| and |record_parameters_|. |
181 void SetupAudioBuffersForActiveAudioSession(); | 193 void SetupAudioBuffersForActiveAudioSession(); |
182 | 194 |
183 // Creates a Voice-Processing I/O unit and configures it for full-duplex | 195 // Creates the audio unit. |
184 // audio. The stream format is chosen to avoid internal resampling | 196 bool CreateAudioUnit(); |
185 // and to match the 10ms callback rate for WebRTC as well as possible. | |
186 // This method also initializes the created audio unit. | |
187 bool SetupAndInitializeVoiceProcessingAudioUnit(); | |
188 | 197 |
189 // Restarts active audio streams using a new sample rate. Required when e.g. | 198 // Restarts active audio streams using a new sample rate. Required when e.g. |
190 // a BT headset is enabled or disabled. | 199 // a BT headset is enabled or disabled. |
191 bool RestartAudioUnitWithNewFormat(float sample_rate); | 200 bool RestartAudioUnit(float sample_rate); |
192 | 201 |
193 // Activates our audio session, creates and initializes the voice-processing | 202 // Activates our audio session, creates and initializes the voice-processing |
194 // audio unit and verifies that we got the preferred native audio parameters. | 203 // audio unit and verifies that we got the preferred native audio parameters. |
195 bool InitPlayOrRecord(); | 204 bool InitPlayOrRecord(); |
196 | 205 |
197 // Closes and deletes the voice-processing I/O unit. | 206 // Closes and deletes the voice-processing I/O unit. |
198 void ShutdownPlayOrRecord(); | 207 void ShutdownPlayOrRecord(); |
199 | 208 |
200 // Helper method for destroying the existing audio unit. | |
201 void DisposeAudioUnit(); | |
202 | |
203 // Callback function called on a real-time priority I/O thread from the audio | |
204 // unit. This method is used to signal that recorded audio is available. | |
205 static OSStatus RecordedDataIsAvailable( | |
206 void* in_ref_con, | |
207 AudioUnitRenderActionFlags* io_action_flags, | |
208 const AudioTimeStamp* time_stamp, | |
209 UInt32 in_bus_number, | |
210 UInt32 in_number_frames, | |
211 AudioBufferList* io_data); | |
212 OSStatus OnRecordedDataIsAvailable( | |
213 AudioUnitRenderActionFlags* io_action_flags, | |
214 const AudioTimeStamp* time_stamp, | |
215 UInt32 in_bus_number, | |
216 UInt32 in_number_frames); | |
217 | |
218 // Callback function called on a real-time priority I/O thread from the audio | |
219 // unit. This method is used to provide audio samples to the audio unit. | |
220 static OSStatus GetPlayoutData(void* in_ref_con, | |
221 AudioUnitRenderActionFlags* io_action_flags, | |
222 const AudioTimeStamp* time_stamp, | |
223 UInt32 in_bus_number, | |
224 UInt32 in_number_frames, | |
225 AudioBufferList* io_data); | |
226 OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags, | |
227 UInt32 in_number_frames, | |
228 AudioBufferList* io_data); | |
229 | |
230 // Ensures that methods are called from the same thread as this object is | 209 // Ensures that methods are called from the same thread as this object is |
231 // created on. | 210 // created on. |
232 rtc::ThreadChecker thread_checker_; | 211 rtc::ThreadChecker thread_checker_; |
233 // Thread that this object is created on. | 212 // Thread that this object is created on. |
234 rtc::Thread* thread_; | 213 rtc::Thread* thread_; |
235 // Invoker used to execute methods on thread_. | 214 // Invoker used to execute methods on thread_. |
236 std::unique_ptr<rtc::AsyncInvoker> async_invoker_; | 215 std::unique_ptr<rtc::AsyncInvoker> async_invoker_; |
237 | 216 |
238 // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the | 217 // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the |
239 // AudioDeviceModuleImpl class and attached by AudioDeviceModuleImpl::Create(). | 218 // AudioDeviceModuleImpl class and attached by AudioDeviceModuleImpl::Create(). |
240 // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance | 219 // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance |
241 // and therefore outlives this object. | 220 // and therefore outlives this object. |
242 AudioDeviceBuffer* audio_device_buffer_; | 221 AudioDeviceBuffer* audio_device_buffer_; |
243 | 222 |
244 // Contains audio parameters (sample rate, #channels, buffer size etc.) for | 223 // Contains audio parameters (sample rate, #channels, buffer size etc.) for |
245 // the playout and recording sides. These structures are set in two steps: | 224 // the playout and recording sides. These structures are set in two steps: |
246 // first, native sample rate and #channels are defined in Init(). Next, the | 225 // first, native sample rate and #channels are defined in Init(). Next, the |
247 // audio session is activated and we verify that the preferred parameters | 226 // audio session is activated and we verify that the preferred parameters |
248 // were granted by the OS. At this stage it is also possible to add a third | 227 // were granted by the OS. At this stage it is also possible to add a third |
249 // component to the parameters: the native I/O buffer duration. | 228 // component to the parameters: the native I/O buffer duration. |
250 // An RTC_CHECK will be hit if we for some reason fail to open an audio session | 229 // An RTC_CHECK will be hit if we for some reason fail to open an audio session |
251 // using the specified parameters. | 230 // using the specified parameters. |
252 AudioParameters playout_parameters_; | 231 AudioParameters playout_parameters_; |
253 AudioParameters record_parameters_; | 232 AudioParameters record_parameters_; |
254 | 233 |
255 // The Voice-Processing I/O unit has the same characteristics as the | 234 // The AudioUnit used to play and record audio. |
256 // Remote I/O unit (supports full duplex low-latency audio input and output) | 235 std::unique_ptr<VoiceProcessingAudioUnit> audio_unit_; |
257 // and adds AEC for two-way duplex communication. It also adds AGC, | |
258 // adjustment of voice-processing quality, and muting. Hence, ideal for | |
259 // VoIP applications. | |
260 AudioUnit vpio_unit_; | |
261 | 236 |
262 // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data | 237 // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data |
263 // in chunks of 10ms. It then allows for this data to be pulled in | 238 // in chunks of 10ms. It then allows for this data to be pulled in |
264 // a finer or coarser granularity. I.e., by interacting with this class | 239 // a finer or coarser granularity. I.e., by interacting with this class |
265 // instead of directly with the AudioDeviceBuffer, one can ask for any number | 240 // instead of directly with the AudioDeviceBuffer, one can ask for any number |
266 // of audio data samples. It also supports a similar scheme for the recording | 241 // of audio data samples. It also supports a similar scheme for the recording |
267 // side. | 242 // side. |
268 // Example: native buffer size can be 128 audio frames at 16kHz sample rate. | 243 // Example: native buffer size can be 128 audio frames at 16kHz sample rate. |
269 // WebRTC will provide 160 audio frames per 10ms but iOS asks for 128 | 244 // WebRTC will provide 160 audio frames per 10ms but iOS asks for 128 |
270 // in each callback (one every 8ms). This class can then ask for 128 and the | 245 // in each callback (one every 8ms). This class can then ask for 128 and the |
271 // FineAudioBuffer will ask WebRTC for new data only when needed and also | 246 // FineAudioBuffer will ask WebRTC for new data only when needed and also |
272 // cache non-utilized audio between callbacks. On the recording side, iOS | 247 // cache non-utilized audio between callbacks. On the recording side, iOS |
273 // can provide audio data frames of size 128 and these are accumulated until | 248 // can provide audio data frames of size 128 and these are accumulated until |
274 // enough data to supply one 10ms call exists. This 10ms chunk is then sent | 249 // enough data to supply one 10ms call exists. This 10ms chunk is then sent |
275 // to WebRTC and the remaining part is stored. | 250 // to WebRTC and the remaining part is stored. |
276 std::unique_ptr<FineAudioBuffer> fine_audio_buffer_; | 251 std::unique_ptr<FineAudioBuffer> fine_audio_buffer_; |
277 | 252 |
278 // Extra audio buffer to be used by the playout side for rendering audio. | 253 // Extra audio buffer to be used by the playout side for rendering audio. |
279 // The buffer size is given by FineAudioBuffer::RequiredBufferSizeBytes(). | 254 // The buffer size is given by FineAudioBuffer::RequiredBufferSizeBytes(). |
280 std::unique_ptr<SInt8[]> playout_audio_buffer_; | 255 std::unique_ptr<int8_t[]> playout_audio_buffer_; |
281 | 256 |
282 // Provides a mechanism for encapsulating one or more buffers of audio data. | 257 // Provides a mechanism for encapsulating one or more buffers of audio data. |
283 // Only used on the recording side. | 258 // Only used on the recording side. |
284 AudioBufferList audio_record_buffer_list_; | 259 AudioBufferList audio_record_buffer_list_; |
285 | 260 |
286 // Temporary storage for recorded data. AudioUnitRender() renders into this | 261 // Temporary storage for recorded data. AudioUnitRender() renders into this |
287 // array as soon as a frame of the desired buffer size has been recorded. | 262 // array as soon as a frame of the desired buffer size has been recorded. |
288 std::unique_ptr<SInt8[]> record_audio_buffer_; | 263 std::unique_ptr<int8_t[]> record_audio_buffer_; |
289 | 264 |
290 // Set to 1 when recording is active and 0 otherwise. | 265 // Set to 1 when recording is active and 0 otherwise. |
291 volatile int recording_; | 266 volatile int recording_; |
292 | 267 |
293 // Set to 1 when playout is active and 0 otherwise. | 268 // Set to 1 when playout is active and 0 otherwise. |
294 volatile int playing_; | 269 volatile int playing_; |
295 | 270 |
296 // Set to true after successful call to Init(), false otherwise. | 271 // Set to true after successful call to Init(), false otherwise. |
297 bool initialized_; | 272 bool initialized_; |
298 | 273 |
299 // Set to true after successful call to InitRecording(), false otherwise. | 274 // Set to true after successful call to InitRecording(), false otherwise. |
300 bool rec_is_initialized_; | 275 bool rec_is_initialized_; |
301 | 276 |
302 // Set to true after successful call to InitPlayout(), false otherwise. | 277 // Set to true after successful call to InitPlayout(), false otherwise. |
303 bool play_is_initialized_; | 278 bool play_is_initialized_; |
304 | 279 |
305 // Set to true if audio session is interrupted, false otherwise. | 280 // Set to true if audio session is interrupted, false otherwise. |
306 bool is_interrupted_; | 281 bool is_interrupted_; |
307 | 282 |
308 // Audio interruption observer instance. | 283 // Audio interruption observer instance. |
309 RTCAudioSessionDelegateAdapter* audio_session_observer_; | 284 RTCAudioSessionDelegateAdapter* audio_session_observer_; |
310 | 285 |
311 // Contains the audio data format specification for a stream of audio. | 286 // Contains the audio data format specification for a stream of audio. |
312 AudioStreamBasicDescription application_format_; | 287 AudioStreamBasicDescription application_format_; |
313 }; | 288 }; |
314 | 289 |
315 } // namespace webrtc | 290 } // namespace webrtc |
316 | 291 |
317 #endif // WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ | 292 #endif // WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ |
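
For context: the VoiceProcessingAudioUnitObserver interface implemented above is declared in voice_processing_audio_unit.h, which this change includes but which is not part of this diff. Judging from the overridden methods in this header, the interface presumably looks roughly like the sketch below (an assumption for reference, not the actual declaration). It replaces the static RecordedDataIsAvailable()/GetPlayoutData() C callbacks removed in this change, so the audio unit wrapper can call the observer directly instead of routing through a void* in_ref_con.

#include <AudioUnit/AudioUnit.h>  // OSStatus, AudioBufferList, AudioTimeStamp, UInt32.

// Hypothetical sketch only; the real declaration lives in
// webrtc/modules/audio_device/ios/voice_processing_audio_unit.h.
class VoiceProcessingAudioUnitObserver {
 public:
  // Invoked on the real-time I/O thread when recorded audio is available.
  virtual OSStatus OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                         const AudioTimeStamp* time_stamp,
                                         UInt32 bus_number,
                                         UInt32 num_frames,
                                         AudioBufferList* io_data) = 0;
  // Invoked on the real-time I/O thread when the unit needs playout samples.
  virtual OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
                                    const AudioTimeStamp* time_stamp,
                                    UInt32 bus_number,
                                    UInt32 num_frames,
                                    AudioBufferList* io_data) = 0;

 protected:
  virtual ~VoiceProcessingAudioUnitObserver() {}
};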
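
A quick worked example of the FineAudioBuffer arithmetic described in the comment above, using the numbers from that comment (illustrative only; real values come from the active AVAudioSession):

// Illustrative constants mirroring the FineAudioBuffer comment above.
constexpr int kSampleRateHz = 16000;
constexpr int kWebRtcFramesPer10ms = kSampleRateHz / 100;  // 160 frames per 10 ms chunk.
constexpr int kNativeFramesPerCallback = 128;              // 128 / 16000 s = one callback every 8 ms.
static_assert(kWebRtcFramesPer10ms == 160, "10 ms at 16 kHz is 160 frames");

// Playout: a 160-frame chunk is pulled from the AudioDeviceBuffer only when the
// cached remainder cannot cover the next 128-frame callback; the leftover
// (160 - 128 = 32 frames after the first callback) is kept for later callbacks.
// Recording: 128-frame callbacks are accumulated until at least 160 frames
// exist; one 10 ms chunk is then delivered to WebRTC and the rest is cached.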