OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 25 matching lines...) | |
36 // same thread. | 36 // same thread. |
37 class AudioDeviceIOS : public AudioDeviceGeneric { | 37 class AudioDeviceIOS : public AudioDeviceGeneric { |
38 public: | 38 public: |
39 AudioDeviceIOS(); | 39 AudioDeviceIOS(); |
40 ~AudioDeviceIOS(); | 40 ~AudioDeviceIOS(); |
41 | 41 |
42 void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; | 42 void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) override; |
43 | 43 |
44 int32_t Init() override; | 44 int32_t Init() override; |
45 int32_t Terminate() override; | 45 int32_t Terminate() override; |
46 bool Initialized() const override { return _initialized; } | 46 bool Initialized() const override { return initialized_; } |
47 | 47 |
48 int32_t InitPlayout() override; | 48 int32_t InitPlayout() override; |
49 bool PlayoutIsInitialized() const override { return _playIsInitialized; } | 49 bool PlayoutIsInitialized() const override { return play_is_initialized_; } |
50 | 50 |
51 int32_t InitRecording() override; | 51 int32_t InitRecording() override; |
52 bool RecordingIsInitialized() const override { return _recIsInitialized; } | 52 bool RecordingIsInitialized() const override { return rec_is_initialized_; } |
53 | 53 |
54 int32_t StartPlayout() override; | 54 int32_t StartPlayout() override; |
55 int32_t StopPlayout() override; | 55 int32_t StopPlayout() override; |
56 bool Playing() const override { return _playing; } | 56 bool Playing() const override { return playing_; } |
57 | 57 |
58 int32_t StartRecording() override; | 58 int32_t StartRecording() override; |
59 int32_t StopRecording() override; | 59 int32_t StopRecording() override; |
60 bool Recording() const override { return _recording; } | 60 bool Recording() const override { return recording_; } |
61 | 61 |
62 int32_t SetLoudspeakerStatus(bool enable) override; | 62 int32_t SetLoudspeakerStatus(bool enable) override; |
63 int32_t GetLoudspeakerStatus(bool& enabled) const override; | 63 int32_t GetLoudspeakerStatus(bool& enabled) const override; |
64 | 64 |
65 // These methods return hard-coded delay values and not dynamic delay | 65 // These methods return hard-coded delay values and not dynamic delay |
66 // estimates. The reason is that iOS supports a built-in AEC and the WebRTC | 66 // estimates. The reason is that iOS supports a built-in AEC and the WebRTC |
67 // AEC will always be disabled in the Libjingle layer to avoid running two | 67 // AEC will always be disabled in the Libjingle layer to avoid running two |
68 // AEC implementations at the same time. It also saves resources to avoid | 68 // AEC implementations at the same time. It also saves resources to avoid |
69 // updating these delay values continuously. | 69 // updating these delay values continuously. |
70 // TODO(henrika): it would be possible to mark these two methods as not | 70 // TODO(henrika): it would be possible to mark these two methods as not |
(...skipping 67 matching lines...) | |
138 int32_t StereoRecordingIsAvailable(bool& available) override; | 138 int32_t StereoRecordingIsAvailable(bool& available) override; |
139 int32_t SetStereoRecording(bool enable) override; | 139 int32_t SetStereoRecording(bool enable) override; |
140 int32_t StereoRecording(bool& enabled) const override; | 140 int32_t StereoRecording(bool& enabled) const override; |
141 int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type, | 141 int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type, |
142 uint16_t sizeMS) override; | 142 uint16_t sizeMS) override; |
143 int32_t CPULoad(uint16_t& load) const override; | 143 int32_t CPULoad(uint16_t& load) const override; |
144 bool PlayoutWarning() const override; | 144 bool PlayoutWarning() const override; |
145 bool PlayoutError() const override; | 145 bool PlayoutError() const override; |
146 bool RecordingWarning() const override; | 146 bool RecordingWarning() const override; |
147 bool RecordingError() const override; | 147 bool RecordingError() const override; |
148 void ClearPlayoutWarning() override{}; | 148 void ClearPlayoutWarning() override {}; |
tommi 2015/10/01 10:52:51: remove semicolons
henrika_webrtc 2015/10/01 11:01:14: Thanks ;-)
149 void ClearPlayoutError() override{}; | 149 void ClearPlayoutError() override {}; |
150 void ClearRecordingWarning() override{}; | 150 void ClearRecordingWarning() override {}; |
151 void ClearRecordingError() override{}; | 151 void ClearRecordingError() override {}; |
152 | 152 |
153 private: | 153 private: |
154 // Uses current |_playoutParameters| and |_recordParameters| to inform the | 154 // Uses current |playout_parameters_| and |record_parameters_| to inform the |
155 // audio device buffer (ADB) about our internal audio parameters. | 155 // audio device buffer (ADB) about our internal audio parameters. |
156 void UpdateAudioDeviceBuffer(); | 156 void UpdateAudioDeviceBuffer(); |
157 | 157 |
158 // Since the preferred audio parameters are only hints to the OS, the actual | 158 // Since the preferred audio parameters are only hints to the OS, the actual |
159 // values may be different once the AVAudioSession has been activated. | 159 // values may be different once the AVAudioSession has been activated. |
160 // This method asks for the current hardware parameters and takes actions | 160 // This method asks for the current hardware parameters and takes actions |
161 // if they should differ from what we have asked for initially. It also | 161 // if they should differ from what we have asked for initially. It also |
162 // defines |_playoutParameters| and |_recordParameters|. | 162 // defines |playout_parameters_| and |record_parameters_|. |
163 void SetupAudioBuffersForActiveAudioSession(); | 163 void SetupAudioBuffersForActiveAudioSession(); |
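Editor's note: the re-negotiation described above boils down to re-deriving the per-callback buffer size from what the session actually granted. A minimal sketch of that idea follows; HwParams and QueryActiveAudioSession() are hypothetical stand-ins for the AVAudioSession queries done in the real Objective-C++ implementation and are not part of this CL.

```cpp
// Editor's sketch: re-derive audio parameters from the *active* session.
#include <cmath>
#include <cstddef>

struct HwParams {
  double sample_rate;         // e.g. 16000.0 or 44100.0
  double io_buffer_duration;  // seconds, e.g. 0.008
  size_t channels;            // typically 1 for voice processing
};

HwParams QueryActiveAudioSession() {
  // Stub values for illustration; the real code reads them from the
  // now-active AVAudioSession.
  return {16000.0, 0.008, 1};
}

void SetupAudioBuffersForActiveAudioSessionSketch() {
  const HwParams hw = QueryActiveAudioSession();
  // The preferred values were only hints, so compute the real number of
  // frames delivered per callback from what the OS actually granted.
  const size_t frames_per_buffer =
      static_cast<size_t>(std::round(hw.sample_rate * hw.io_buffer_duration));
  // playout_parameters_ / record_parameters_ would now be redefined with
  // hw.sample_rate, hw.channels and frames_per_buffer, after which
  // UpdateAudioDeviceBuffer() propagates them to the ADB.
  (void)frames_per_buffer;
}
```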
164 | 164 |
165 // Creates a Voice-Processing I/O unit and configures it for full-duplex | 165 // Creates a Voice-Processing I/O unit and configures it for full-duplex |
166 // audio. The stream format is selected to avoid internal resampling | 166 // audio. The stream format is selected to avoid internal resampling |
167 // and to match the 10ms callback rate for WebRTC as well as possible. | 167 // and to match the 10ms callback rate for WebRTC as well as possible. |
168 // This method also initializes the created audio unit. | 168 // This method also initializes the created audio unit. |
169 bool SetupAndInitializeVoiceProcessingAudioUnit(); | 169 bool SetupAndInitializeVoiceProcessingAudioUnit(); |
170 | 170 |
171 // Activates our audio session, creates and initilizes the voice-processing | 171 // Activates our audio session, creates and initializes the voice-processing |
172 // audio unit and verifies that we got the preferred native audio parameters. | 172 // audio unit and verifies that we got the preferred native audio parameters. |
173 bool InitPlayOrRecord(); | 173 bool InitPlayOrRecord(); |
174 | 174 |
175 // Closes and deletes the voice-processing I/O unit. | 175 // Closes and deletes the voice-processing I/O unit. |
176 bool ShutdownPlayOrRecord(); | 176 bool ShutdownPlayOrRecord(); |
177 | 177 |
178 // Callback function called on a real-time priority I/O thread from the audio | 178 // Callback function called on a real-time priority I/O thread from the audio |
179 // unit. This method is used to signal that recorded audio is available. | 179 // unit. This method is used to signal that recorded audio is available. |
180 static OSStatus RecordedDataIsAvailable( | 180 static OSStatus RecordedDataIsAvailable( |
181 void* inRefCon, | 181 void* in_ref_con, |
182 AudioUnitRenderActionFlags* ioActionFlags, | 182 AudioUnitRenderActionFlags* io_action_flags, |
183 const AudioTimeStamp* timeStamp, | 183 const AudioTimeStamp* time_stamp, |
184 UInt32 inBusNumber, | 184 UInt32 in_bus_number, |
185 UInt32 inNumberFrames, | 185 UInt32 in_number_frames, |
186 AudioBufferList* ioData); | 186 AudioBufferList* io_data); |
187 OSStatus OnRecordedDataIsAvailable(AudioUnitRenderActionFlags* ioActionFlags, | 187 OSStatus OnRecordedDataIsAvailable( |
188 const AudioTimeStamp* timeStamp, | 188 AudioUnitRenderActionFlags* io_action_flags, |
189 UInt32 inBusNumber, | 189 const AudioTimeStamp* time_stamp, |
190 UInt32 inNumberFrames); | 190 UInt32 in_bus_number, |
191 UInt32 in_number_frames); | |
191 | 192 |
192 // Callback function called on a real-time priority I/O thread from the audio | 193 // Callback function called on a real-time priority I/O thread from the audio |
193 // unit. This method is used to provide audio samples to the audio unit. | 194 // unit. This method is used to provide audio samples to the audio unit. |
194 static OSStatus GetPlayoutData(void* inRefCon, | 195 static OSStatus GetPlayoutData(void* in_ref_con, |
195 AudioUnitRenderActionFlags* ioActionFlags, | 196 AudioUnitRenderActionFlags* io_action_flags, |
196 const AudioTimeStamp* timeStamp, | 197 const AudioTimeStamp* time_stamp, |
197 UInt32 inBusNumber, | 198 UInt32 in_bus_number, |
198 UInt32 inNumberFrames, | 199 UInt32 in_number_frames, |
199 AudioBufferList* ioData); | 200 AudioBufferList* io_data); |
200 OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* ioActionFlags, | 201 OSStatus OnGetPlayoutData(AudioUnitRenderActionFlags* io_action_flags, |
201 UInt32 inNumberFrames, | 202 UInt32 in_number_frames, |
202 AudioBufferList* ioData); | 203 AudioBufferList* io_data); |
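Editor's note: the static/member pairs above follow the usual Core Audio trampoline pattern: the C callback receives the object through the void* user-data argument and forwards to a member function. A hedged sketch with a hypothetical class (not this CL's code):

```cpp
// Editor's sketch: recovering |this| from the void* user-data argument.
#include <AudioUnit/AudioUnit.h>

class CallbackOwnerSketch {
 public:
  static OSStatus InputCallback(void* in_ref_con,
                                AudioUnitRenderActionFlags* io_action_flags,
                                const AudioTimeStamp* time_stamp,
                                UInt32 in_bus_number,
                                UInt32 in_number_frames,
                                AudioBufferList* /* io_data */) {
    // |in_ref_con| was set to |this| when the input callback was installed
    // (e.g. via kAudioOutputUnitProperty_SetInputCallback).
    auto* self = static_cast<CallbackOwnerSketch*>(in_ref_con);
    return self->OnInput(io_action_flags, time_stamp, in_bus_number,
                         in_number_frames);
  }

 private:
  OSStatus OnInput(AudioUnitRenderActionFlags* /* flags */,
                   const AudioTimeStamp* /* time_stamp */,
                   UInt32 /* bus */,
                   UInt32 /* frames */) {
    // Member state (fine audio buffer, record buffer list, ...) is safe to
    // touch here; the real implementation would call AudioUnitRender() and
    // feed the recorded samples onwards.
    return noErr;
  }
};
```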
203 | 204 |
204 private: | 205 private: |
205 // Ensures that methods are called from the same thread as this object is | 206 // Ensures that methods are called from the same thread as this object is |
206 // created on. | 207 // created on. |
207 rtc::ThreadChecker _threadChecker; | 208 rtc::ThreadChecker thread_checker_; |
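Editor's note: a minimal sketch of how such a thread checker is typically consulted (hypothetical class; RTC_DCHECK and rtc::ThreadChecker::CalledOnValidThread() are the webrtc/base facilities of this era):

```cpp
// Editor's sketch: assert the calling thread at the top of thread-bound
// methods.
#include "webrtc/base/checks.h"
#include "webrtc/base/thread_checker.h"

class ThreadBoundSketch {
 public:
  void DoSomething() {
    // Hits a DCHECK if called on a different thread than the one this
    // object (and its ThreadChecker) was created on.
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    // ... safe to touch members that are only used on the creation thread.
  }

 private:
  rtc::ThreadChecker thread_checker_;
};
```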
208 | 209 |
209 // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the | 210 // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the |
210 // AudioDeviceModuleImpl class and created in AudioDeviceModuleImpl::Create(). | 211 // AudioDeviceModuleImpl class and created in AudioDeviceModuleImpl::Create(). |
211 // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance | 212 // The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance |
212 // and therefore outlives this object. | 213 // and therefore outlives this object. |
213 AudioDeviceBuffer* _audioDeviceBuffer; | 214 AudioDeviceBuffer* audio_device_buffer_; |
214 | 215 |
215 // Contains audio parameters (sample rate, #channels, buffer size etc.) for | 216 // Contains audio parameters (sample rate, #channels, buffer size etc.) for |
216 // the playout and recording sides. These structures are set in two steps: | 217 // the playout and recording sides. These structures are set in two steps: |
217 // first, native sample rate and #channels are defined in Init(). Next, the | 218 // first, native sample rate and #channels are defined in Init(). Next, the |
218 // audio session is activated and we verify that the preferred parameters | 219 // audio session is activated and we verify that the preferred parameters |
219 // were granted by the OS. At this stage it is also possible to add a third | 220 // were granted by the OS. At this stage it is also possible to add a third |
220 // component to the parameters; the native I/O buffer duration. | 221 // component to the parameters; the native I/O buffer duration. |
221 // A RTC_CHECK will be hit if we for some reason fail to open an audio session | 222 // A RTC_CHECK will be hit if we for some reason fail to open an audio session |
222 // using the specified parameters. | 223 // using the specified parameters. |
223 AudioParameters _playoutParameters; | 224 AudioParameters playout_parameters_; |
224 AudioParameters _recordParameters; | 225 AudioParameters record_parameters_; |
225 | 226 |
226 // The Voice-Processing I/O unit has the same characteristics as the | 227 // The Voice-Processing I/O unit has the same characteristics as the |
227 // Remote I/O unit (supports full duplex low-latency audio input and output) | 228 // Remote I/O unit (supports full duplex low-latency audio input and output) |
228 // and adds AEC for two-way duplex communication. It also adds AGC, | 229 // and adds AEC for two-way duplex communication. It also adds AGC, |
229 // adjustment of voice-processing quality, and muting. Hence, ideal for | 230 // adjustment of voice-processing quality, and muting. Hence, ideal for |
230 // VoIP applications. | 231 // VoIP applications. |
231 AudioUnit _vpioUnit; | 232 AudioUnit vpio_unit_; |
232 | 233 |
233 // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data | 234 // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data |
234 // in chunks of 10ms. It then allows for this data to be pulled in | 235 // in chunks of 10ms. It then allows for this data to be pulled in |
235 // a finer or coarser granularity. I.e. interacting with this class instead | 236 // a finer or coarser granularity. I.e. interacting with this class instead |
236 // of directly with the AudioDeviceBuffer one can ask for any number of | 237 // of directly with the AudioDeviceBuffer one can ask for any number of |
237 // audio data samples. It also supports a similar scheme for the recording | 238 // audio data samples. It also supports a similar scheme for the recording |
238 // side. | 239 // side. |
239 // Example: native buffer size can be 128 audio frames at 16kHz sample rate. | 240 // Example: native buffer size can be 128 audio frames at 16kHz sample rate. |
240 // WebRTC will provide 160 audio frames per 10ms but iOS asks for 128 | 241 // WebRTC will provide 160 audio frames per 10ms but iOS asks for 128 |
241 // in each callback (one every 8ms). This class can then ask for 128 and the | 242 // in each callback (one every 8ms). This class can then ask for 128 and the |
242 // FineAudioBuffer will ask WebRTC for new data only when needed and also | 243 // FineAudioBuffer will ask WebRTC for new data only when needed and also |
243 // cache non-utilized audio between callbacks. On the recording side, iOS | 244 // cache non-utilized audio between callbacks. On the recording side, iOS |
244 // can provide audio data frames of size 128 and these are accumulated until | 245 // can provide audio data frames of size 128 and these are accumulated until |
245 // enough data to supply one 10ms call exists. This 10ms chunk is then sent | 246 // enough data to supply one 10ms call exists. This 10ms chunk is then sent |
246 // to WebRTC and the remaining part is stored. | 247 // to WebRTC and the remaining part is stored. |
247 rtc::scoped_ptr<FineAudioBuffer> _fineAudioBuffer; | 248 rtc::scoped_ptr<FineAudioBuffer> fine_audio_buffer_; |
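Editor's note: the recording-side behaviour described above is, in essence, a size adapter between the native callback size and WebRTC's fixed 10 ms blocks. A minimal sketch of that idea under the same assumptions; TenMsAccumulatorSketch is illustrative only and is not the FineAudioBuffer interface.

```cpp
// Editor's sketch: accumulate native-sized chunks (e.g. 128 frames) and
// deliver fixed 10 ms blocks (e.g. 160 frames at 16 kHz) to a consumer.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

class TenMsAccumulatorSketch {
 public:
  TenMsAccumulatorSketch(size_t frames_per_10ms,
                         std::function<void(const int16_t*, size_t)> sink)
      : frames_per_10ms_(frames_per_10ms), sink_(std::move(sink)) {}

  // Called with whatever the audio unit delivered (any number of frames).
  void Push(const int16_t* data, size_t frames) {
    cache_.insert(cache_.end(), data, data + frames);
    // Hand out complete 10 ms blocks; keep the remainder for the next call.
    while (cache_.size() >= frames_per_10ms_) {
      sink_(cache_.data(), frames_per_10ms_);
      cache_.erase(cache_.begin(),
                   cache_.begin() +
                       static_cast<std::ptrdiff_t>(frames_per_10ms_));
    }
  }

 private:
  const size_t frames_per_10ms_;
  const std::function<void(const int16_t*, size_t)> sink_;
  std::vector<int16_t> cache_;
};
```

On the playout side the adapter works in the opposite direction: it only asks WebRTC for a new 10 ms block when the cached samples cannot cover the number of frames the audio unit requests.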
248 | 249 |
249 // Extra audio buffer to be used by the playout side for rendering audio. | 250 // Extra audio buffer to be used by the playout side for rendering audio. |
250 // The buffer size is given by FineAudioBuffer::RequiredBufferSizeBytes(). | 251 // The buffer size is given by FineAudioBuffer::RequiredBufferSizeBytes(). |
251 rtc::scoped_ptr<SInt8[]> _playoutAudioBuffer; | 252 rtc::scoped_ptr<SInt8[]> playout_audio_buffer_; |
252 | 253 |
253 // Provides a mechanism for encapsulating one or more buffers of audio data. | 254 // Provides a mechanism for encapsulating one or more buffers of audio data. |
254 // Only used on the recording side. | 255 // Only used on the recording side. |
255 AudioBufferList _audioRecordBufferList; | 256 AudioBufferList audio_record_buffer_list_; |
256 | 257 |
257 // Temporary storage for recorded data. AudioUnitRender() renders into this | 258 // Temporary storage for recorded data. AudioUnitRender() renders into this |
258 // array as soon as a frame of the desired buffer size has been recorded. | 259 // array as soon as a frame of the desired buffer size has been recorded. |
259 rtc::scoped_ptr<SInt8[]> _recordAudioBuffer; | 260 rtc::scoped_ptr<SInt8[]> record_audio_buffer_; |
260 | 261 |
261 // Set to 1 when recording is active and 0 otherwise. | 262 // Set to 1 when recording is active and 0 otherwise. |
262 volatile int _recording; | 263 volatile int recording_; |
263 | 264 |
264 // Set to 1 when playout is active and 0 otherwise. | 265 // Set to 1 when playout is active and 0 otherwise. |
265 volatile int _playing; | 266 volatile int playing_; |
266 | 267 |
267 // Set to true after successful call to Init(), false otherwise. | 268 // Set to true after successful call to Init(), false otherwise. |
268 bool _initialized; | 269 bool initialized_; |
269 | 270 |
270 // Set to true after successful call to InitRecording(), false otherwise. | 271 // Set to true after successful call to InitRecording(), false otherwise. |
271 bool _recIsInitialized; | 272 bool rec_is_initialized_; |
272 | 273 |
273 // Set to true after successful call to InitPlayout(), false otherwise. | 274 // Set to true after successful call to InitPlayout(), false otherwise. |
274 bool _playIsInitialized; | 275 bool play_is_initialized_; |
275 | 276 |
276 // Audio interruption observer instance. | 277 // Audio interruption observer instance. |
277 void* _audioInterruptionObserver; | 278 void* audio_interruption_observer_; |
278 }; | 279 }; |
279 | 280 |
280 } // namespace webrtc | 281 } // namespace webrtc |
281 | 282 |
282 #endif // WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ | 283 #endif // WEBRTC_MODULES_AUDIO_DEVICE_IOS_AUDIO_DEVICE_IOS_H_ |