Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_device/ios/audio_device_ios.mm

Issue 1379583002: Objective-C++ style guide changes for iOS ADM (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: nit (created 5 years, 2 months ago)
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 159 matching lines...)
170 @autoreleasepool { 170 @autoreleasepool {
171 LOG(LS_INFO) << " system name: " << ios::GetSystemName(); 171 LOG(LS_INFO) << " system name: " << ios::GetSystemName();
172 LOG(LS_INFO) << " system version: " << ios::GetSystemVersion(); 172 LOG(LS_INFO) << " system version: " << ios::GetSystemVersion();
173 LOG(LS_INFO) << " device type: " << ios::GetDeviceType(); 173 LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
174 LOG(LS_INFO) << " device name: " << ios::GetDeviceName(); 174 LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
175 } 175 }
176 } 176 }
177 #endif // !defined(NDEBUG) 177 #endif // !defined(NDEBUG)
178 178
179 AudioDeviceIOS::AudioDeviceIOS() 179 AudioDeviceIOS::AudioDeviceIOS()
180 : _audioDeviceBuffer(nullptr), 180 : audio_device_buffer_(nullptr),
181 _vpioUnit(nullptr), 181 vpio_unit_(nullptr),
182 _recording(0), 182 recording_(0),
183 _playing(0), 183 playing_(0),
184 _initialized(false), 184 initialized_(false),
185 _recIsInitialized(false), 185 rec_is_initialized_(false),
186 _playIsInitialized(false), 186 play_is_initialized_(false),
187 _audioInterruptionObserver(nullptr) { 187 audio_interruption_observer_(nullptr) {
188 LOGI() << "ctor" << ios::GetCurrentThreadDescription(); 188 LOGI() << "ctor" << ios::GetCurrentThreadDescription();
189 } 189 }
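
The constructor hunk above is representative of the whole CL: instance variables drop the Objective-C leading underscore and camelCase in favor of Google C++ style snake_case with a trailing underscore. A minimal, compilable illustration of the convention (NamingExample is not part of the patch):

// Illustration only: the renaming pattern applied throughout this file.
class NamingExample {
 private:
  // Before this CL: void* _audioInterruptionObserver;
  void* audio_interruption_observer_ = nullptr;  // after this CL
};
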
190 190
191 AudioDeviceIOS::~AudioDeviceIOS() { 191 AudioDeviceIOS::~AudioDeviceIOS() {
192 LOGI() << "~dtor"; 192 LOGI() << "~dtor";
193 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 193 RTC_DCHECK(thread_checker_.CalledOnValidThread());
194 Terminate(); 194 Terminate();
195 } 195 }
196 196
197 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { 197 void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
198 LOGI() << "AttachAudioBuffer"; 198 LOGI() << "AttachAudioBuffer";
199 RTC_DCHECK(audioBuffer); 199 RTC_DCHECK(audioBuffer);
200 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 200 RTC_DCHECK(thread_checker_.CalledOnValidThread());
201 _audioDeviceBuffer = audioBuffer; 201 audio_device_buffer_ = audioBuffer;
202 } 202 }
203 203
204 int32_t AudioDeviceIOS::Init() { 204 int32_t AudioDeviceIOS::Init() {
205 LOGI() << "Init"; 205 LOGI() << "Init";
206 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 206 RTC_DCHECK(thread_checker_.CalledOnValidThread());
207 if (_initialized) { 207 if (initialized_) {
208 return 0; 208 return 0;
209 } 209 }
210 #if !defined(NDEBUG) 210 #if !defined(NDEBUG)
211 LogDeviceInfo(); 211 LogDeviceInfo();
212 #endif 212 #endif
213 // Store the preferred sample rate and preferred number of channels already 213 // Store the preferred sample rate and preferred number of channels already
214 // here. They have not been set and confirmed yet since ActivateAudioSession() 214 // here. They have not been set and confirmed yet since ActivateAudioSession()
215 // is not called until audio is about to start. However, it makes sense to 215 // is not called until audio is about to start. However, it makes sense to
216 // store the parameters now and then verify at a later stage. 216 // store the parameters now and then verify at a later stage.
217 _playoutParameters.reset(kPreferredSampleRate, kPreferredNumberOfChannels); 217 playout_parameters_.reset(kPreferredSampleRate, kPreferredNumberOfChannels);
218 _recordParameters.reset(kPreferredSampleRate, kPreferredNumberOfChannels); 218 record_parameters_.reset(kPreferredSampleRate, kPreferredNumberOfChannels);
219 // Ensure that the audio device buffer (ADB) knows about the internal audio 219 // Ensure that the audio device buffer (ADB) knows about the internal audio
220 // parameters. Note that, even if we are unable to get a mono audio session, 220 // parameters. Note that, even if we are unable to get a mono audio session,
221 // we will always tell the I/O audio unit to do a channel format conversion 221 // we will always tell the I/O audio unit to do a channel format conversion
222 // to guarantee mono on the "input side" of the audio unit. 222 // to guarantee mono on the "input side" of the audio unit.
223 UpdateAudioDeviceBuffer(); 223 UpdateAudioDeviceBuffer();
224 _initialized = true; 224 initialized_ = true;
225 return 0; 225 return 0;
226 } 226 }
227 227
228 int32_t AudioDeviceIOS::Terminate() { 228 int32_t AudioDeviceIOS::Terminate() {
229 LOGI() << "Terminate"; 229 LOGI() << "Terminate";
230 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 230 RTC_DCHECK(thread_checker_.CalledOnValidThread());
231 if (!_initialized) { 231 if (!initialized_) {
232 return 0; 232 return 0;
233 } 233 }
234 ShutdownPlayOrRecord(); 234 ShutdownPlayOrRecord();
235 _initialized = false; 235 initialized_ = false;
236 return 0; 236 return 0;
237 } 237 }
238 238
239 int32_t AudioDeviceIOS::InitPlayout() { 239 int32_t AudioDeviceIOS::InitPlayout() {
240 LOGI() << "InitPlayout"; 240 LOGI() << "InitPlayout";
241 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 241 RTC_DCHECK(thread_checker_.CalledOnValidThread());
242 RTC_DCHECK(_initialized); 242 RTC_DCHECK(initialized_);
243 RTC_DCHECK(!_playIsInitialized); 243 RTC_DCHECK(!play_is_initialized_);
244 RTC_DCHECK(!_playing); 244 RTC_DCHECK(!playing_);
245 if (!_recIsInitialized) { 245 if (!rec_is_initialized_) {
246 if (!InitPlayOrRecord()) { 246 if (!InitPlayOrRecord()) {
247 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; 247 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
248 return -1; 248 return -1;
249 } 249 }
250 } 250 }
251 _playIsInitialized = true; 251 play_is_initialized_ = true;
252 return 0; 252 return 0;
253 } 253 }
254 254
255 int32_t AudioDeviceIOS::InitRecording() { 255 int32_t AudioDeviceIOS::InitRecording() {
256 LOGI() << "InitRecording"; 256 LOGI() << "InitRecording";
257 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 257 RTC_DCHECK(thread_checker_.CalledOnValidThread());
258 RTC_DCHECK(_initialized); 258 RTC_DCHECK(initialized_);
259 RTC_DCHECK(!_recIsInitialized); 259 RTC_DCHECK(!rec_is_initialized_);
260 RTC_DCHECK(!_recording); 260 RTC_DCHECK(!recording_);
261 if (!_playIsInitialized) { 261 if (!play_is_initialized_) {
262 if (!InitPlayOrRecord()) { 262 if (!InitPlayOrRecord()) {
263 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!"; 263 LOG_F(LS_ERROR) << "InitPlayOrRecord failed!";
264 return -1; 264 return -1;
265 } 265 }
266 } 266 }
267 _recIsInitialized = true; 267 rec_is_initialized_ = true;
268 return 0; 268 return 0;
269 } 269 }
270 270
271 int32_t AudioDeviceIOS::StartPlayout() { 271 int32_t AudioDeviceIOS::StartPlayout() {
272 LOGI() << "StartPlayout"; 272 LOGI() << "StartPlayout";
273 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 273 RTC_DCHECK(thread_checker_.CalledOnValidThread());
274 RTC_DCHECK(_playIsInitialized); 274 RTC_DCHECK(play_is_initialized_);
275 RTC_DCHECK(!_playing); 275 RTC_DCHECK(!playing_);
276 _fineAudioBuffer->ResetPlayout(); 276 fine_audio_buffer_->ResetPlayout();
277 if (!_recording) { 277 if (!recording_) {
278 OSStatus result = AudioOutputUnitStart(_vpioUnit); 278 OSStatus result = AudioOutputUnitStart(vpio_unit_);
279 if (result != noErr) { 279 if (result != noErr) {
280 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; 280 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
281 return -1; 281 return -1;
282 } 282 }
283 } 283 }
284 rtc::AtomicOps::ReleaseStore(&_playing, 1); 284 rtc::AtomicOps::ReleaseStore(&playing_, 1);
285 return 0; 285 return 0;
286 } 286 }
287 287
288 int32_t AudioDeviceIOS::StopPlayout() { 288 int32_t AudioDeviceIOS::StopPlayout() {
289 LOGI() << "StopPlayout"; 289 LOGI() << "StopPlayout";
290 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 290 RTC_DCHECK(thread_checker_.CalledOnValidThread());
291 if (!_playIsInitialized || !_playing) { 291 if (!play_is_initialized_ || !playing_) {
292 return 0; 292 return 0;
293 } 293 }
294 if (!_recording) { 294 if (!recording_) {
295 ShutdownPlayOrRecord(); 295 ShutdownPlayOrRecord();
296 } 296 }
297 _playIsInitialized = false; 297 play_is_initialized_ = false;
298 rtc::AtomicOps::ReleaseStore(&_playing, 0); 298 rtc::AtomicOps::ReleaseStore(&playing_, 0);
299 return 0; 299 return 0;
300 } 300 }
301 301
302 int32_t AudioDeviceIOS::StartRecording() { 302 int32_t AudioDeviceIOS::StartRecording() {
303 LOGI() << "StartRecording"; 303 LOGI() << "StartRecording";
304 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 304 RTC_DCHECK(thread_checker_.CalledOnValidThread());
305 RTC_DCHECK(_recIsInitialized); 305 RTC_DCHECK(rec_is_initialized_);
306 RTC_DCHECK(!_recording); 306 RTC_DCHECK(!recording_);
307 _fineAudioBuffer->ResetRecord(); 307 fine_audio_buffer_->ResetRecord();
308 if (!_playing) { 308 if (!playing_) {
309 OSStatus result = AudioOutputUnitStart(_vpioUnit); 309 OSStatus result = AudioOutputUnitStart(vpio_unit_);
310 if (result != noErr) { 310 if (result != noErr) {
311 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; 311 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
312 return -1; 312 return -1;
313 } 313 }
314 } 314 }
315 rtc::AtomicOps::ReleaseStore(&_recording, 1); 315 rtc::AtomicOps::ReleaseStore(&recording_, 1);
316 return 0; 316 return 0;
317 } 317 }
318 318
319 int32_t AudioDeviceIOS::StopRecording() { 319 int32_t AudioDeviceIOS::StopRecording() {
320 LOGI() << "StopRecording"; 320 LOGI() << "StopRecording";
321 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 321 RTC_DCHECK(thread_checker_.CalledOnValidThread());
322 if (!_recIsInitialized || !_recording) { 322 if (!rec_is_initialized_ || !recording_) {
323 return 0; 323 return 0;
324 } 324 }
325 if (!_playing) { 325 if (!playing_) {
326 ShutdownPlayOrRecord(); 326 ShutdownPlayOrRecord();
327 } 327 }
328 _recIsInitialized = false; 328 rec_is_initialized_ = false;
329 rtc::AtomicOps::ReleaseStore(&_recording, 0); 329 rtc::AtomicOps::ReleaseStore(&recording_, 0);
330 return 0; 330 return 0;
331 } 331 }
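
The start/stop methods above only flip plain integer flags on the thread checked by thread_checker_; the real-time audio callbacks later read them lock-free with AcquireLoad. A small sketch of that release/acquire flag pattern, assuming the atomicops header path from this era of the tree (g_playing_example and the helper names are illustrative):

#include "webrtc/base/atomicops.h"

// Control thread: publish the new state last, so ReleaseStore makes all
// earlier setup writes visible before the flag reads as 1.
static volatile int g_playing_example = 0;

void StartPlayoutExample() {
  // ... start the audio unit here ...
  rtc::AtomicOps::ReleaseStore(&g_playing_example, 1);
}

// Real-time audio thread: AcquireLoad pairs with the ReleaseStore above.
bool IsPlayingExample() {
  return rtc::AtomicOps::AcquireLoad(&g_playing_example) != 0;
}
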
332 332
333 // Change the default receiver playout route to speaker. 333 // Change the default receiver playout route to speaker.
334 int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) { 334 int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
335 LOGI() << "SetLoudspeakerStatus(" << enable << ")"; 335 LOGI() << "SetLoudspeakerStatus(" << enable << ")";
336 336
337 AVAudioSession* session = [AVAudioSession sharedInstance]; 337 AVAudioSession* session = [AVAudioSession sharedInstance];
338 NSString* category = session.category; 338 NSString* category = session.category;
339 AVAudioSessionCategoryOptions options = session.categoryOptions; 339 AVAudioSessionCategoryOptions options = session.categoryOptions;
(...skipping 30 matching lines...)
370 return 0; 370 return 0;
371 } 371 }
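
The body of SetLoudspeakerStatus is elided above. The usual AVAudioSession approach is to add or remove the DefaultToSpeaker category option and reapply the category, roughly as in this hypothetical helper (not necessarily the elided code):

#import <AVFoundation/AVFoundation.h>

// Sketch: route playout to the loudspeaker (or back to the receiver) by
// rewriting the category options on the shared audio session.
static BOOL SetSpeakerEnabledExample(BOOL enable) {
  AVAudioSession* session = [AVAudioSession sharedInstance];
  AVAudioSessionCategoryOptions options = session.categoryOptions;
  if (enable) {
    options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
  } else {
    options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
  }
  NSError* error = nil;
  [session setCategory:AVAudioSessionCategoryPlayAndRecord
           withOptions:options
                 error:&error];
  return error == nil;
}
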
372 372
373 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const { 373 int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
374 delayMS = kFixedRecordDelayEstimate; 374 delayMS = kFixedRecordDelayEstimate;
375 return 0; 375 return 0;
376 } 376 }
377 377
378 int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const { 378 int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
379 LOGI() << "GetPlayoutAudioParameters"; 379 LOGI() << "GetPlayoutAudioParameters";
380 RTC_DCHECK(_playoutParameters.is_valid()); 380 RTC_DCHECK(playout_parameters_.is_valid());
381 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 381 RTC_DCHECK(thread_checker_.CalledOnValidThread());
382 *params = _playoutParameters; 382 *params = playout_parameters_;
383 return 0; 383 return 0;
384 } 384 }
385 385
386 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const { 386 int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
387 LOGI() << "GetRecordAudioParameters"; 387 LOGI() << "GetRecordAudioParameters";
388 RTC_DCHECK(_recordParameters.is_valid()); 388 RTC_DCHECK(record_parameters_.is_valid());
389 RTC_DCHECK(_threadChecker.CalledOnValidThread()); 389 RTC_DCHECK(thread_checker_.CalledOnValidThread());
390 *params = _recordParameters; 390 *params = record_parameters_;
391 return 0; 391 return 0;
392 } 392 }
393 393
394 void AudioDeviceIOS::UpdateAudioDeviceBuffer() { 394 void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
395 LOGI() << "UpdateAudioDevicebuffer"; 395 LOGI() << "UpdateAudioDevicebuffer";
396 // AttachAudioBuffer() is called at construction by the main class but check 396 // AttachAudioBuffer() is called at construction by the main class but check
397 // just in case. 397 // just in case.
398 RTC_DCHECK(_audioDeviceBuffer) << "AttachAudioBuffer must be called first"; 398 RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
399 // Inform the audio device buffer (ADB) about the new audio format. 399 // Inform the audio device buffer (ADB) about the new audio format.
400 _audioDeviceBuffer->SetPlayoutSampleRate(_playoutParameters.sample_rate()); 400 audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
401 _audioDeviceBuffer->SetPlayoutChannels(_playoutParameters.channels()); 401 audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
402 _audioDeviceBuffer->SetRecordingSampleRate(_recordParameters.sample_rate()); 402 audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
403 _audioDeviceBuffer->SetRecordingChannels(_recordParameters.channels()); 404 audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
404 } 405 }
405 406
406 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() { 407 void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
407 LOGI() << "SetupAudioBuffersForActiveAudioSession"; 408 LOGI() << "SetupAudioBuffersForActiveAudioSession";
408 AVAudioSession* session = [AVAudioSession sharedInstance]; 409 AVAudioSession* session = [AVAudioSession sharedInstance];
409 // Verify the current values once the audio session has been activated. 410 // Verify the current values once the audio session has been activated.
410 LOG(LS_INFO) << " sample rate: " << session.sampleRate; 411 LOG(LS_INFO) << " sample rate: " << session.sampleRate;
411 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration; 412 LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration;
412 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels; 413 LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels;
413 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels; 414 LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels;
414 LOG(LS_INFO) << " output latency: " << session.outputLatency; 415 LOG(LS_INFO) << " output latency: " << session.outputLatency;
415 LOG(LS_INFO) << " input latency: " << session.inputLatency; 416 LOG(LS_INFO) << " input latency: " << session.inputLatency;
416 // Log a warning message for the case when we are unable to set the preferred 417 // Log a warning message for the case when we are unable to set the preferred
417 // hardware sample rate but continue and use the non-ideal sample rate after 418 // hardware sample rate but continue and use the non-ideal sample rate after
418 // reinitializing the audio parameters. 419 // reinitializing the audio parameters.
419 if (session.sampleRate != _playoutParameters.sample_rate()) { 420 if (session.sampleRate != playout_parameters_.sample_rate()) {
420 LOG(LS_WARNING) 421 LOG(LS_WARNING)
421 << "Failed to enable an audio session with the preferred sample rate!"; 422 << "Failed to enable an audio session with the preferred sample rate!";
422 } 423 }
423 424
424 // At this stage, we also know the exact IO buffer duration and can add 425 // At this stage, we also know the exact IO buffer duration and can add
425 // that info to the existing audio parameters where it is converted into 426 // that info to the existing audio parameters where it is converted into
426 // number of audio frames. 427 // number of audio frames.
427 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz. 428 // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
428 // Hence, 128 is the size we expect to see in upcoming render callbacks. 429 // Hence, 128 is the size we expect to see in upcoming render callbacks.
429 _playoutParameters.reset(session.sampleRate, _playoutParameters.channels(), 430 playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(),
430 session.IOBufferDuration); 431 session.IOBufferDuration);
431 RTC_DCHECK(_playoutParameters.is_complete()); 432 RTC_DCHECK(playout_parameters_.is_complete());
432 _recordParameters.reset(session.sampleRate, _recordParameters.channels(), 433 record_parameters_.reset(session.sampleRate, record_parameters_.channels(),
433 session.IOBufferDuration); 434 session.IOBufferDuration);
434 RTC_DCHECK(_recordParameters.is_complete()); 435 RTC_DCHECK(record_parameters_.is_complete());
435 LOG(LS_INFO) << " frames per I/O buffer: " 436 LOG(LS_INFO) << " frames per I/O buffer: "
436 << _playoutParameters.frames_per_buffer(); 437 << playout_parameters_.frames_per_buffer();
437 LOG(LS_INFO) << " bytes per I/O buffer: " 438 LOG(LS_INFO) << " bytes per I/O buffer: "
438 << _playoutParameters.GetBytesPerBuffer(); 439 << playout_parameters_.GetBytesPerBuffer();
439 RTC_DCHECK_EQ(_playoutParameters.GetBytesPerBuffer(), 440 RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(),
440 _recordParameters.GetBytesPerBuffer()); 441 record_parameters_.GetBytesPerBuffer());
441 442
442 // Update the ADB parameters since the sample rate might have changed. 443 // Update the ADB parameters since the sample rate might have changed.
443 UpdateAudioDeviceBuffer(); 444 UpdateAudioDeviceBuffer();
444 445
445 // Create a modified audio buffer class which allows us to ask for, 446 // Create a modified audio buffer class which allows us to ask for,
446 // or deliver, any number of samples (and not only multiple of 10ms) to match 447 // or deliver, any number of samples (and not only multiple of 10ms) to match
447 // the native audio unit buffer size. 448 // the native audio unit buffer size.
448 RTC_DCHECK(_audioDeviceBuffer); 449 RTC_DCHECK(audio_device_buffer_);
449 _fineAudioBuffer.reset(new FineAudioBuffer( 450 fine_audio_buffer_.reset(new FineAudioBuffer(
450 _audioDeviceBuffer, _playoutParameters.GetBytesPerBuffer(), 451 audio_device_buffer_, playout_parameters_.GetBytesPerBuffer(),
451 _playoutParameters.sample_rate())); 452 playout_parameters_.sample_rate()));
452 453
453 // The extra/temporary playoutbuffer must be of this size to avoid 454 // The extra/temporary playoutbuffer must be of this size to avoid
454 // unnecessary memcpy while caching data between successive callbacks. 455 // unnecessary memcpy while caching data between successive callbacks.
455 const int requiredPlayoutBufferSize = 456 const int required_playout_buffer_size =
456 _fineAudioBuffer->RequiredPlayoutBufferSizeBytes(); 457 fine_audio_buffer_->RequiredPlayoutBufferSizeBytes();
457 LOG(LS_INFO) << " required playout buffer size: " 458 LOG(LS_INFO) << " required playout buffer size: "
458 << requiredPlayoutBufferSize; 459 << required_playout_buffer_size;
459 _playoutAudioBuffer.reset(new SInt8[requiredPlayoutBufferSize]); 460 playout_audio_buffer_.reset(new SInt8[required_playout_buffer_size]);
460 461
461 // Allocate AudioBuffers to be used as storage for the received audio. 462 // Allocate AudioBuffers to be used as storage for the received audio.
462 // The AudioBufferList structure works as a placeholder for the 463 // The AudioBufferList structure works as a placeholder for the
463 // AudioBuffer structure, which holds a pointer to the actual data buffer 464 // AudioBuffer structure, which holds a pointer to the actual data buffer
464 // in |_recordAudioBuffer|. Recorded audio will be rendered into this memory 465 // in |record_audio_buffer_|. Recorded audio will be rendered into this memory
465 // at each input callback when calling AudioUnitRender(). 466 // at each input callback when calling AudioUnitRender().
466 const int dataByteSize = _recordParameters.GetBytesPerBuffer(); 467 const int data_byte_size = record_parameters_.GetBytesPerBuffer();
467 _recordAudioBuffer.reset(new SInt8[dataByteSize]); 468 record_audio_buffer_.reset(new SInt8[data_byte_size]);
468 _audioRecordBufferList.mNumberBuffers = 1; 469 audio_record_buffer_list_.mNumberBuffers = 1;
469 AudioBuffer* audioBuffer = &_audioRecordBufferList.mBuffers[0]; 470 AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0];
470 audioBuffer->mNumberChannels = _recordParameters.channels(); 471 audio_buffer->mNumberChannels = record_parameters_.channels();
471 audioBuffer->mDataByteSize = dataByteSize; 472 audio_buffer->mDataByteSize = data_byte_size;
472 audioBuffer->mData = _recordAudioBuffer.get(); 473 audio_buffer->mData = record_audio_buffer_.get();
473 } 474 }
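
The last block above wires a single-entry AudioBufferList to memory the class owns, so that AudioUnitRender() in the input callback writes directly into it; note also that frames per buffer is simply sample rate times I/O buffer duration (16000 Hz x 0.008 s = 128 frames). A standalone sketch of that buffer wiring (function and parameter names are illustrative):

#import <AudioToolbox/AudioToolbox.h>
#include <memory>

// Sketch: wrap an app-owned byte buffer in a one-entry AudioBufferList so a
// later AudioUnitRender() call can fill it with recorded 16-bit mono PCM.
void SetupRecordBufferExample(std::unique_ptr<SInt8[]>* storage,
                              AudioBufferList* buffer_list,
                              int bytes_per_buffer) {
  storage->reset(new SInt8[bytes_per_buffer]);
  buffer_list->mNumberBuffers = 1;
  AudioBuffer* buffer = &buffer_list->mBuffers[0];
  buffer->mNumberChannels = 1;  // mono, matching kPreferredNumberOfChannels
  buffer->mDataByteSize = bytes_per_buffer;
  buffer->mData = storage->get();
}
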
474 475
475 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() { 476 bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
476 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit"; 477 LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit";
477 RTC_DCHECK(!_vpioUnit); 478 RTC_DCHECK(!vpio_unit_);
478 // Create an audio component description to identify the Voice-Processing 479 // Create an audio component description to identify the Voice-Processing
479 // I/O audio unit. 480 // I/O audio unit.
480 AudioComponentDescription vpioUnitDescription; 481 AudioComponentDescription vpio_unit_description;
481 vpioUnitDescription.componentType = kAudioUnitType_Output; 482 vpio_unit_description.componentType = kAudioUnitType_Output;
482 vpioUnitDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO; 483 vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
483 vpioUnitDescription.componentManufacturer = kAudioUnitManufacturer_Apple; 484 vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple;
484 vpioUnitDescription.componentFlags = 0; 485 vpio_unit_description.componentFlags = 0;
485 vpioUnitDescription.componentFlagsMask = 0; 486 vpio_unit_description.componentFlagsMask = 0;
486 // Obtain an audio unit instance given the description. 487 // Obtain an audio unit instance given the description.
487 AudioComponent foundVpioUnitRef = 488 AudioComponent found_vpio_unit_ref =
488 AudioComponentFindNext(nullptr, &vpioUnitDescription); 489 AudioComponentFindNext(nullptr, &vpio_unit_description);
489 490
490 // Create a Voice-Processing IO audio unit. 491 // Create a Voice-Processing IO audio unit.
491 LOG_AND_RETURN_IF_ERROR( 492 LOG_AND_RETURN_IF_ERROR(
492 AudioComponentInstanceNew(foundVpioUnitRef, &_vpioUnit), 493 AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_),
493 "Failed to create a VoiceProcessingIO audio unit"); 494 "Failed to create a VoiceProcessingIO audio unit");
494 495
495 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable 496 // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable
496 // input on the input scope of the input element. 497 // input on the input scope of the input element.
497 AudioUnitElement inputBus = 1; 498 AudioUnitElement input_bus = 1;
498 UInt32 enableInput = 1; 499 UInt32 enable_input = 1;
499 LOG_AND_RETURN_IF_ERROR( 500 LOG_AND_RETURN_IF_ERROR(
500 AudioUnitSetProperty(_vpioUnit, kAudioOutputUnitProperty_EnableIO, 501 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
501 kAudioUnitScope_Input, inputBus, &enableInput, 502 kAudioUnitScope_Input, input_bus, &enable_input,
502 sizeof(enableInput)), 503 sizeof(enable_input)),
503 "Failed to enable input on input scope of input element"); 504 "Failed to enable input on input scope of input element");
504 505
505 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable 506 // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable
506 // output on the output scope of the output element. 507 // output on the output scope of the output element.
507 AudioUnitElement outputBus = 0; 508 AudioUnitElement output_bus = 0;
508 UInt32 enableOutput = 1; 509 UInt32 enable_output = 1;
509 LOG_AND_RETURN_IF_ERROR( 510 LOG_AND_RETURN_IF_ERROR(
510 AudioUnitSetProperty(_vpioUnit, kAudioOutputUnitProperty_EnableIO, 511 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
511 kAudioUnitScope_Output, outputBus, &enableOutput, 512 kAudioUnitScope_Output, output_bus, &enable_output,
512 sizeof(enableOutput)), 513 sizeof(enable_output)),
513 "Failed to enable output on output scope of output element"); 514 "Failed to enable output on output scope of output element");
514 515
515 // Set the application formats for input and output: 516 // Set the application formats for input and output:
516 // - use same format in both directions 517 // - use same format in both directions
517 // - avoid resampling in the I/O unit by using the hardware sample rate 518 // - avoid resampling in the I/O unit by using the hardware sample rate
518 // - linear PCM => noncompressed audio data format with one frame per packet 519 // - linear PCM => noncompressed audio data format with one frame per packet
519 // - no need to specify interleaving since only mono is supported 520 // - no need to specify interleaving since only mono is supported
520 AudioStreamBasicDescription applicationFormat = {0}; 521 AudioStreamBasicDescription application_format = {0};
521 UInt32 size = sizeof(applicationFormat); 522 UInt32 size = sizeof(application_format);
522 RTC_DCHECK_EQ(_playoutParameters.sample_rate(), 523 RTC_DCHECK_EQ(playout_parameters_.sample_rate(),
523 _recordParameters.sample_rate()); 524 record_parameters_.sample_rate());
524 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels); 525 RTC_DCHECK_EQ(1, kPreferredNumberOfChannels);
525 applicationFormat.mSampleRate = _playoutParameters.sample_rate(); 526 application_format.mSampleRate = playout_parameters_.sample_rate();
526 applicationFormat.mFormatID = kAudioFormatLinearPCM; 527 application_format.mFormatID = kAudioFormatLinearPCM;
527 applicationFormat.mFormatFlags = 528 application_format.mFormatFlags =
528 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked; 529 kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
529 applicationFormat.mBytesPerPacket = kBytesPerSample; 530 application_format.mBytesPerPacket = kBytesPerSample;
530 applicationFormat.mFramesPerPacket = 1; // uncompressed 531 application_format.mFramesPerPacket = 1; // uncompressed
531 applicationFormat.mBytesPerFrame = kBytesPerSample; 532 application_format.mBytesPerFrame = kBytesPerSample;
532 applicationFormat.mChannelsPerFrame = kPreferredNumberOfChannels; 533 application_format.mChannelsPerFrame = kPreferredNumberOfChannels;
533 applicationFormat.mBitsPerChannel = 8 * kBytesPerSample; 534 application_format.mBitsPerChannel = 8 * kBytesPerSample;
534 #if !defined(NDEBUG) 535 #if !defined(NDEBUG)
535 LogABSD(applicationFormat); 536 LogABSD(application_format);
536 #endif 537 #endif
537 538
538 // Set the application format on the output scope of the input element/bus. 539 // Set the application format on the output scope of the input element/bus.
539 LOG_AND_RETURN_IF_ERROR( 540 LOG_AND_RETURN_IF_ERROR(
540 AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_StreamFormat, 541 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
541 kAudioUnitScope_Output, inputBus, &applicationFormat, 542 kAudioUnitScope_Output, input_bus,
542 size), 543 &application_format, size),
543 "Failed to set application format on output scope of input element"); 544 "Failed to set application format on output scope of input element");
544 545
545 // Set the application format on the input scope of the output element/bus. 546 // Set the application format on the input scope of the output element/bus.
546 LOG_AND_RETURN_IF_ERROR( 547 LOG_AND_RETURN_IF_ERROR(
547 AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_StreamFormat, 548 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
548 kAudioUnitScope_Input, outputBus, &applicationFormat, 549 kAudioUnitScope_Input, output_bus,
549 size), 550 &application_format, size),
550 "Failed to set application format on input scope of output element"); 551 "Failed to set application format on input scope of output element");
551 552
552 // Specify the callback function that provides audio samples to the audio 553 // Specify the callback function that provides audio samples to the audio
553 // unit. 554 // unit.
554 AURenderCallbackStruct renderCallback; 555 AURenderCallbackStruct render_callback;
555 renderCallback.inputProc = GetPlayoutData; 556 render_callback.inputProc = GetPlayoutData;
556 renderCallback.inputProcRefCon = this; 557 render_callback.inputProcRefCon = this;
557 LOG_AND_RETURN_IF_ERROR( 558 LOG_AND_RETURN_IF_ERROR(
558 AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_SetRenderCallback, 559 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_SetRenderCallback,
559 kAudioUnitScope_Input, outputBus, &renderCallback, 560 kAudioUnitScope_Input, output_bus, &render_callback,
560 sizeof(renderCallback)), 561 sizeof(render_callback)),
561 "Failed to specify the render callback on the output element"); 562 "Failed to specify the render callback on the output element");
562 563
563 // Disable AU buffer allocation for the recorder, we allocate our own. 564 // Disable AU buffer allocation for the recorder, we allocate our own.
564 // TODO(henrika): not sure that it actually saves resource to make this call. 565 // TODO(henrika): not sure that it actually saves resource to make this call.
565 UInt32 flag = 0; 566 UInt32 flag = 0;
566 LOG_AND_RETURN_IF_ERROR( 567 LOG_AND_RETURN_IF_ERROR(
567 AudioUnitSetProperty(_vpioUnit, kAudioUnitProperty_ShouldAllocateBuffer, 568 AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
568 kAudioUnitScope_Output, inputBus, &flag, 569 kAudioUnitScope_Output, input_bus, &flag,
569 sizeof(flag)), 570 sizeof(flag)),
570 "Failed to disable buffer allocation on the input element"); 571 "Failed to disable buffer allocation on the input element");
571 572
572 // Specify the callback to be called by the I/O thread to us when input audio 573 // Specify the callback to be called by the I/O thread to us when input audio
573 // is available. The recorded samples can then be obtained by calling the 574 // is available. The recorded samples can then be obtained by calling the
574 // AudioUnitRender() method. 575 // AudioUnitRender() method.
575 AURenderCallbackStruct inputCallback; 576 AURenderCallbackStruct input_callback;
576 inputCallback.inputProc = RecordedDataIsAvailable; 577 input_callback.inputProc = RecordedDataIsAvailable;
577 inputCallback.inputProcRefCon = this; 578 input_callback.inputProcRefCon = this;
578 LOG_AND_RETURN_IF_ERROR( 579 LOG_AND_RETURN_IF_ERROR(
579 AudioUnitSetProperty(_vpioUnit, kAudioOutputUnitProperty_SetInputCallback, 580 AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_SetInputCallback,
580 kAudioUnitScope_Global, inputBus, &inputCallback, 582 kAudioUnitScope_Global, input_bus, &input_callback,
581 sizeof(inputCallback)), 583 sizeof(input_callback)),
582 "Failed to specify the input callback on the input element"); 584 "Failed to specify the input callback on the input element");
583 585
584 // Initialize the Voice-Processing I/O unit instance. 586 // Initialize the Voice-Processing I/O unit instance.
585 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(_vpioUnit), 587 LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
586 "Failed to initialize the Voice-Processing I/O unit"); 588 "Failed to initialize the Voice-Processing I/O unit");
587 return true; 589 return true;
588 } 590 }
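
For orientation, the essential shape of the setup above is: find the VoiceProcessingIO component, instantiate it, enable I/O on input bus 1 (microphone) and output bus 0 (speaker), then initialize. A condensed standalone sketch with plain bool error handling instead of the LOG_AND_RETURN_IF_ERROR macro, and with the stream-format and callback steps omitted:

#import <AudioToolbox/AudioToolbox.h>

// Sketch: create a Voice-Processing I/O unit and enable both directions.
// Bus 1 connects to the microphone, bus 0 to the speaker.
bool CreateVoiceProcessingIOExample(AudioUnit* out_unit) {
  AudioComponentDescription desc = {0};
  desc.componentType = kAudioUnitType_Output;
  desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
  desc.componentManufacturer = kAudioUnitManufacturer_Apple;
  AudioComponent comp = AudioComponentFindNext(nullptr, &desc);
  if (!comp || AudioComponentInstanceNew(comp, out_unit) != noErr) {
    return false;
  }
  UInt32 enable = 1;
  if (AudioUnitSetProperty(*out_unit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Input, 1, &enable,
                           sizeof(enable)) != noErr ||
      AudioUnitSetProperty(*out_unit, kAudioOutputUnitProperty_EnableIO,
                           kAudioUnitScope_Output, 0, &enable,
                           sizeof(enable)) != noErr) {
    return false;
  }
  return AudioUnitInitialize(*out_unit) == noErr;
}
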
589 591
590 bool AudioDeviceIOS::InitPlayOrRecord() { 592 bool AudioDeviceIOS::InitPlayOrRecord() {
591 LOGI() << "InitPlayOrRecord"; 593 LOGI() << "InitPlayOrRecord";
592 AVAudioSession* session = [AVAudioSession sharedInstance]; 594 AVAudioSession* session = [AVAudioSession sharedInstance];
593 // Activate the audio session and ask for a set of preferred audio parameters. 595 // Activate the audio session and ask for a set of preferred audio parameters.
594 ActivateAudioSession(session, true); 596 ActivateAudioSession(session, true);
595 597
(...skipping 14 matching lines...)
610 queue:[NSOperationQueue mainQueue] 612 queue:[NSOperationQueue mainQueue]
611 usingBlock:^(NSNotification* notification) { 613 usingBlock:^(NSNotification* notification) {
612 NSNumber* typeNumber = 614 NSNumber* typeNumber =
613 [notification userInfo][AVAudioSessionInterruptionTypeKey]; 615 [notification userInfo][AVAudioSessionInterruptionTypeKey];
614 AVAudioSessionInterruptionType type = 616 AVAudioSessionInterruptionType type =
615 (AVAudioSessionInterruptionType)[typeNumber 617 (AVAudioSessionInterruptionType)[typeNumber
616 unsignedIntegerValue]; 618 unsignedIntegerValue];
617 switch (type) { 619 switch (type) {
618 case AVAudioSessionInterruptionTypeBegan: 620 case AVAudioSessionInterruptionTypeBegan:
619 // At this point our audio session has been deactivated and 621 // At this point our audio session has been deactivated and
620 // the audio unit render callbacks no longer occur. 622 // the audio unit render callbacks no longer occur.
621 // Nothing to do. 623 // Nothing to do.
623 break; 624 break;
624 case AVAudioSessionInterruptionTypeEnded: { 625 case AVAudioSessionInterruptionTypeEnded: {
625 NSError* error = nil; 626 NSError* error = nil;
626 AVAudioSession* session = [AVAudioSession sharedInstance]; 627 AVAudioSession* session = [AVAudioSession sharedInstance];
627 [session setActive:YES error:&error]; 628 [session setActive:YES error:&error];
628 if (error != nil) { 629 if (error != nil) {
629 LOG_F(LS_ERROR) << "Failed to active audio session"; 630 LOG_F(LS_ERROR) << "Failed to active audio session";
630 } 631 }
631 // Post interruption the audio unit render callbacks don't 632 // Post interruption the audio unit render callbacks don't
632 // automatically continue, so we restart the unit manually 633 // automatically continue, so we restart the unit manually
633 // here. 634 // here.
634 AudioOutputUnitStop(_vpioUnit); 635 AudioOutputUnitStop(vpio_unit_);
635 AudioOutputUnitStart(_vpioUnit); 636 AudioOutputUnitStart(vpio_unit_);
636 break; 637 break;
637 } 638 }
638 } 639 }
639 }]; 640 }];
640 // Increment refcount on observer using ARC bridge. Instance variable is a 641 // Increment refcount on observer using ARC bridge. Instance variable is a
641 // void* instead of an id because header is included in other pure C++ 642 // void* instead of an id because header is included in other pure C++
642 // files. 643 // files.
643 _audioInterruptionObserver = (__bridge_retained void*)observer; 644 audio_interruption_observer_ = (__bridge_retained void*)observer;
644 return true; 645 return true;
645 } 646 }
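
The __bridge_retained cast above is the standard way to keep an ARC-managed observer alive behind a void* member that a pure C++ header can declare; ShutdownPlayOrRecord() below undoes it with __bridge_transfer. A minimal sketch of the idiom (the helper names are illustrative):

#import <Foundation/Foundation.h>

// Sketch: hand an ARC-managed object to a void* (retaining it), and later
// take ownership back so ARC releases it when the local goes out of scope.
void* RetainAsVoidStarExample(id observer) {
  return (__bridge_retained void*)observer;  // +1 retain; ARC stops tracking
}

void ReleaseFromVoidStarExample(void* stored) {
  id observer = (__bridge_transfer id)stored;  // ARC takes ownership back
  (void)observer;  // released at end of scope
}
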
646 647
647 bool AudioDeviceIOS::ShutdownPlayOrRecord() { 648 bool AudioDeviceIOS::ShutdownPlayOrRecord() {
648 LOGI() << "ShutdownPlayOrRecord"; 649 LOGI() << "ShutdownPlayOrRecord";
649 if (_audioInterruptionObserver != nullptr) { 650 if (audio_interruption_observer_ != nullptr) {
650 NSNotificationCenter* center = [NSNotificationCenter defaultCenter]; 651 NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
651 // Transfer ownership of observer back to ARC, which will dealloc the 652 // Transfer ownership of observer back to ARC, which will dealloc the
652 // observer once it exits this scope. 653 // observer once it exits this scope.
653 id observer = (__bridge_transfer id)_audioInterruptionObserver; 654 id observer = (__bridge_transfer id)audio_interruption_observer_;
654 [center removeObserver:observer]; 655 [center removeObserver:observer];
655 _audioInterruptionObserver = nullptr; 656 audio_interruption_observer_ = nullptr;
656 } 657 }
657 // Close and delete the voice-processing I/O unit. 658 // Close and delete the voice-processing I/O unit.
658 OSStatus result = -1; 659 OSStatus result = -1;
659 if (nullptr != _vpioUnit) { 660 if (nullptr != vpio_unit_) {
660 result = AudioOutputUnitStop(_vpioUnit); 661 result = AudioOutputUnitStop(vpio_unit_);
661 if (result != noErr) { 662 if (result != noErr) {
662 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result; 663 LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
663 } 664 }
664 result = AudioComponentInstanceDispose(_vpioUnit); 665 result = AudioComponentInstanceDispose(vpio_unit_);
665 if (result != noErr) { 666 if (result != noErr) {
666 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result; 667 LOG_F(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
667 } 668 }
668 _vpioUnit = nullptr; 669 vpio_unit_ = nullptr;
669 } 670 }
670 // All I/O should be stopped or paused prior to deactivating the audio 671 // All I/O should be stopped or paused prior to deactivating the audio
671 // session, hence we deactivate as last action. 672 // session, hence we deactivate as last action.
672 AVAudioSession* session = [AVAudioSession sharedInstance]; 673 AVAudioSession* session = [AVAudioSession sharedInstance];
673 ActivateAudioSession(session, false); 674 ActivateAudioSession(session, false);
674 return true; 675 return true;
675 } 676 }
676 677
677 OSStatus AudioDeviceIOS::RecordedDataIsAvailable( 678 OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
678 void* inRefCon, 679 void* in_ref_con,
679 AudioUnitRenderActionFlags* ioActionFlags, 680 AudioUnitRenderActionFlags* io_action_flags,
680 const AudioTimeStamp* inTimeStamp, 681 const AudioTimeStamp* in_time_stamp,
681 UInt32 inBusNumber, 682 UInt32 in_bus_number,
682 UInt32 inNumberFrames, 683 UInt32 in_number_frames,
683 AudioBufferList* ioData) { 684 AudioBufferList* io_data) {
684 RTC_DCHECK_EQ(1u, inBusNumber); 685 RTC_DCHECK_EQ(1u, in_bus_number);
685 RTC_DCHECK(!ioData); // no buffer should be allocated for input at this stage 686 RTC_DCHECK(!io_data); // no buffer should be allocated for input at this stage
686 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(inRefCon); 688 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(in_ref_con);
687 return audio_device_ios->OnRecordedDataIsAvailable( 689 return audio_device_ios->OnRecordedDataIsAvailable(
688 ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames); 690 io_action_flags, in_time_stamp, in_bus_number, in_number_frames);
689 } 691 }
690 692
691 OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable( 693 OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable(
692 AudioUnitRenderActionFlags* ioActionFlags, 694 AudioUnitRenderActionFlags* io_action_flags,
693 const AudioTimeStamp* inTimeStamp, 695 const AudioTimeStamp* in_time_stamp,
694 UInt32 inBusNumber, 696 UInt32 in_bus_number,
695 UInt32 inNumberFrames) { 697 UInt32 in_number_frames) {
696 RTC_DCHECK_EQ(_recordParameters.frames_per_buffer(), inNumberFrames); 698 RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames);
697 OSStatus result = noErr; 699 OSStatus result = noErr;
698 // Simply return if recording is not enabled. 700 // Simply return if recording is not enabled.
699 if (!rtc::AtomicOps::AcquireLoad(&_recording)) 701 if (!rtc::AtomicOps::AcquireLoad(&recording_))
700 return result; 702 return result;
703 RTC_DCHECK_EQ(record_parameters_.frames_per_buffer(), in_number_frames);
701 // Obtain the recorded audio samples by initiating a rendering cycle. 704 // Obtain the recorded audio samples by initiating a rendering cycle.
702 // Since it happens on the input bus, the |ioData| parameter is a reference 705 // Since it happens on the input bus, the |io_data| parameter is a reference
703 // to the preallocated audio buffer list that the audio unit renders into. 706 // to the preallocated audio buffer list that the audio unit renders into.
704 // TODO(henrika): should error handling be improved? 707 // TODO(henrika): should error handling be improved?
705 AudioBufferList* ioData = &_audioRecordBufferList; 708 AudioBufferList* io_data = &audio_record_buffer_list_;
706 result = AudioUnitRender(_vpioUnit, ioActionFlags, inTimeStamp, inBusNumber, 709 result = AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp,
707 inNumberFrames, ioData); 710 in_bus_number, in_number_frames, io_data);
708 if (result != noErr) { 711 if (result != noErr) {
709 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result; 712 LOG_F(LS_ERROR) << "AudioOutputUnitStart failed: " << result;
710 return result; 713 return result;
711 } 714 }
712 // Get a pointer to the recorded audio and send it to the WebRTC ADB. 715 // Get a pointer to the recorded audio and send it to the WebRTC ADB.
713 // Use the FineAudioBuffer instance to convert between native buffer size 716 // Use the FineAudioBuffer instance to convert between native buffer size
714 // and the 10ms buffer size used by WebRTC. 717 // and the 10ms buffer size used by WebRTC.
715 const UInt32 dataSizeInBytes = ioData->mBuffers[0].mDataByteSize; 718 const UInt32 data_size_in_bytes = io_data->mBuffers[0].mDataByteSize;
716 RTC_CHECK_EQ(dataSizeInBytes / kBytesPerSample, inNumberFrames); 719 RTC_CHECK_EQ(data_size_in_bytes / kBytesPerSample, in_number_frames);
717 SInt8* data = static_cast<SInt8*>(ioData->mBuffers[0].mData); 720 SInt8* data = static_cast<SInt8*>(io_data->mBuffers[0].mData);
718 _fineAudioBuffer->DeliverRecordedData(data, dataSizeInBytes, 721 fine_audio_buffer_->DeliverRecordedData(data, data_size_in_bytes,
719 kFixedPlayoutDelayEstimate, 722 kFixedPlayoutDelayEstimate,
720 kFixedRecordDelayEstimate); 723 kFixedRecordDelayEstimate);
721 return noErr; 724 return noErr;
722 } 725 }
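
On the capture side the input callback receives no samples directly; it must pull them with AudioUnitRender() into a buffer list the application supplies, as the code above does with audio_record_buffer_list_. A condensed sketch of that pull, assuming the unit and buffer list were set up elsewhere (helper name is illustrative):

#import <AudioToolbox/AudioToolbox.h>

// Sketch: pull the frames announced to an input callback into an app-owned
// AudioBufferList by rendering from input bus 1.
OSStatus PullRecordedFramesExample(AudioUnit unit,
                                   AudioUnitRenderActionFlags* flags,
                                   const AudioTimeStamp* time_stamp,
                                   UInt32 frames,
                                   AudioBufferList* buffer_list) {
  const UInt32 input_bus = 1;
  return AudioUnitRender(unit, flags, time_stamp, input_bus, frames,
                         buffer_list);
}
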
723 726
724 OSStatus AudioDeviceIOS::GetPlayoutData( 727 OSStatus AudioDeviceIOS::GetPlayoutData(
725 void* inRefCon, 728 void* in_ref_con,
726 AudioUnitRenderActionFlags* ioActionFlags, 729 AudioUnitRenderActionFlags* io_action_flags,
727 const AudioTimeStamp* inTimeStamp, 730 const AudioTimeStamp* in_time_stamp,
728 UInt32 inBusNumber, 731 UInt32 in_bus_number,
729 UInt32 inNumberFrames, 732 UInt32 in_number_frames,
730 AudioBufferList* ioData) { 733 AudioBufferList* io_data) {
731 RTC_DCHECK_EQ(0u, inBusNumber); 734 RTC_DCHECK_EQ(0u, in_bus_number);
732 RTC_DCHECK(ioData); 735 RTC_DCHECK(io_data);
733 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(inRefCon); 736 AudioDeviceIOS* audio_device_ios = static_cast<AudioDeviceIOS*>(in_ref_con);
734 return audio_device_ios->OnGetPlayoutData(ioActionFlags, inNumberFrames, 737 return audio_device_ios->OnGetPlayoutData(io_action_flags, in_number_frames,
735 ioData); 738 io_data);
736 } 739 }
737 740
738 OSStatus AudioDeviceIOS::OnGetPlayoutData( 741 OSStatus AudioDeviceIOS::OnGetPlayoutData(
739 AudioUnitRenderActionFlags* ioActionFlags, 742 AudioUnitRenderActionFlags* io_action_flags,
740 UInt32 inNumberFrames, 743 UInt32 in_number_frames,
741 AudioBufferList* ioData) { 744 AudioBufferList* io_data) {
742 // Verify 16-bit, noninterleaved mono PCM signal format. 745 // Verify 16-bit, noninterleaved mono PCM signal format.
743 RTC_DCHECK_EQ(1u, ioData->mNumberBuffers); 746 RTC_DCHECK_EQ(1u, io_data->mNumberBuffers);
744 RTC_DCHECK_EQ(1u, ioData->mBuffers[0].mNumberChannels); 747 RTC_DCHECK_EQ(1u, io_data->mBuffers[0].mNumberChannels);
745 // Get pointer to internal audio buffer to which new audio data shall be 748 // Get pointer to internal audio buffer to which new audio data shall be
746 // written. 749 // written.
747 const UInt32 dataSizeInBytes = ioData->mBuffers[0].mDataByteSize; 750 const UInt32 dataSizeInBytes = io_data->mBuffers[0].mDataByteSize;
748 RTC_CHECK_EQ(dataSizeInBytes / kBytesPerSample, inNumberFrames); 751 RTC_CHECK_EQ(dataSizeInBytes / kBytesPerSample, in_number_frames);
749 SInt8* destination = static_cast<SInt8*>(ioData->mBuffers[0].mData); 752 SInt8* destination = static_cast<SInt8*>(io_data->mBuffers[0].mData);
750 // Produce silence and give audio unit a hint about it if playout is not 753 // Produce silence and give audio unit a hint about it if playout is not
751 // activated. 754 // activated.
752 if (!rtc::AtomicOps::AcquireLoad(&_playing)) { 755 if (!rtc::AtomicOps::AcquireLoad(&playing_)) {
753 *ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence; 756 *io_action_flags |= kAudioUnitRenderAction_OutputIsSilence;
754 memset(destination, 0, dataSizeInBytes); 757 memset(destination, 0, dataSizeInBytes);
755 return noErr; 758 return noErr;
756 } 759 }
757 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches 760 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
758 // the native I/O audio unit) to a preallocated intermediate buffer and 761 // the native I/O audio unit) to a preallocated intermediate buffer and
759 // copy the result to the audio buffer in the |ioData| destination. 762 // copy the result to the audio buffer in the |io_data| destination.
760 SInt8* source = _playoutAudioBuffer.get(); 763 SInt8* source = playout_audio_buffer_.get();
761 _fineAudioBuffer->GetPlayoutData(source); 764 fine_audio_buffer_->GetPlayoutData(source);
762 memcpy(destination, source, dataSizeInBytes); 765 memcpy(destination, source, dataSizeInBytes);
763 return noErr; 766 return noErr;
764 } 767 }
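
The playout path shows the render-callback contract: fill io_data with exactly in_number_frames frames, or zero it and set the silence flag. A reduced standalone callback demonstrating only the silence branch (not the FineAudioBuffer path used in the patch):

#import <AudioToolbox/AudioToolbox.h>
#include <cstring>

// Sketch: a render callback that outputs silence and tells the audio unit so.
static OSStatus SilenceRenderCallbackExample(void* in_ref_con,
                                             AudioUnitRenderActionFlags* io_action_flags,
                                             const AudioTimeStamp* in_time_stamp,
                                             UInt32 in_bus_number,
                                             UInt32 in_number_frames,
                                             AudioBufferList* io_data) {
  for (UInt32 i = 0; i < io_data->mNumberBuffers; ++i) {
    std::memset(io_data->mBuffers[i].mData, 0, io_data->mBuffers[i].mDataByteSize);
  }
  *io_action_flags |= kAudioUnitRenderAction_OutputIsSilence;
  return noErr;
}
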
765 768
766 } // namespace webrtc 769 } // namespace webrtc