OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #import <AVFoundation/AVFoundation.h> | 11 #import <AVFoundation/AVFoundation.h> |
12 #import <Foundation/Foundation.h> | 12 #import <Foundation/Foundation.h> |
13 | 13 |
14 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" | 14 #include "webrtc/modules/audio_device/ios/audio_device_ios.h" |
15 | 15 |
16 #include <cmath> | 16 #include <cmath> |
17 | 17 |
| 18 #include "webrtc/base/array_view.h" |
18 #include "webrtc/base/atomicops.h" | 19 #include "webrtc/base/atomicops.h" |
19 #include "webrtc/base/bind.h" | 20 #include "webrtc/base/bind.h" |
20 #include "webrtc/base/checks.h" | 21 #include "webrtc/base/checks.h" |
21 #include "webrtc/base/criticalsection.h" | 22 #include "webrtc/base/criticalsection.h" |
22 #include "webrtc/base/logging.h" | 23 #include "webrtc/base/logging.h" |
23 #include "webrtc/base/thread.h" | 24 #include "webrtc/base/thread.h" |
24 #include "webrtc/base/thread_annotations.h" | 25 #include "webrtc/base/thread_annotations.h" |
25 #include "webrtc/modules/audio_device/fine_audio_buffer.h" | 26 #include "webrtc/modules/audio_device/fine_audio_buffer.h" |
26 #include "webrtc/sdk/objc/Framework/Classes/helpers.h" | 27 #include "webrtc/sdk/objc/Framework/Classes/helpers.h" |
27 | 28 |
(...skipping 34 matching lines...)
62 enum AudioDeviceMessageType : uint32_t { | 63 enum AudioDeviceMessageType : uint32_t { |
63 kMessageTypeInterruptionBegin, | 64 kMessageTypeInterruptionBegin, |
64 kMessageTypeInterruptionEnd, | 65 kMessageTypeInterruptionEnd, |
65 kMessageTypeValidRouteChange, | 66 kMessageTypeValidRouteChange, |
66 kMessageTypeCanPlayOrRecordChange, | 67 kMessageTypeCanPlayOrRecordChange, |
67 }; | 68 }; |
68 | 69 |
69 using ios::CheckAndLogError; | 70 using ios::CheckAndLogError; |
70 | 71 |
71 #if !defined(NDEBUG) | 72 #if !defined(NDEBUG) |
| 73 // Returns true when the code runs on a device simulator. |
| 74 static bool DeviceIsSimulator() { |
| 75 return ios::GetDeviceName() == "x86_64"; |
| 76 } |
| 77 |
72 // Helper method that logs essential device information strings. | 78 // Helper method that logs essential device information strings. |
73 static void LogDeviceInfo() { | 79 static void LogDeviceInfo() { |
74 LOG(LS_INFO) << "LogDeviceInfo"; | 80 LOG(LS_INFO) << "LogDeviceInfo"; |
75 @autoreleasepool { | 81 @autoreleasepool { |
76 LOG(LS_INFO) << " system name: " << ios::GetSystemName(); | 82 LOG(LS_INFO) << " system name: " << ios::GetSystemName(); |
77 LOG(LS_INFO) << " system version 1(2): " << ios::GetSystemVersionAsString(); | 83 LOG(LS_INFO) << " system version 1(2): " << ios::GetSystemVersionAsString(); |
78 LOG(LS_INFO) << " system version 2(2): " << ios::GetSystemVersion(); | 84 LOG(LS_INFO) << " system version 2(2): " << ios::GetSystemVersion(); |
79 LOG(LS_INFO) << " device type: " << ios::GetDeviceType(); | 85 LOG(LS_INFO) << " device type: " << ios::GetDeviceType(); |
80 LOG(LS_INFO) << " device name: " << ios::GetDeviceName(); | 86 LOG(LS_INFO) << " device name: " << ios::GetDeviceName(); |
81 LOG(LS_INFO) << " process name: " << ios::GetProcessName(); | 87 LOG(LS_INFO) << " process name: " << ios::GetProcessName(); |
82 LOG(LS_INFO) << " process ID: " << ios::GetProcessID(); | 88 LOG(LS_INFO) << " process ID: " << ios::GetProcessID(); |
83 LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString(); | 89 LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString(); |
84 LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount(); | 90 LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount(); |
85 #if defined(__IPHONE_9_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) \ | 91 #if defined(__IPHONE_9_0) && defined(__IPHONE_OS_VERSION_MAX_ALLOWED) \ |
86 && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0 | 92 && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0 |
87 LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled(); | 93 LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled(); |
88 #endif | 94 #endif |
| 95 #if TARGET_IPHONE_SIMULATOR |
| 96 LOG(LS_INFO) << " TARGET_IPHONE_SIMULATOR is defined"; |
| 97 #endif |
| 98 LOG(LS_INFO) << " DeviceIsSimulator: " << DeviceIsSimulator(); |
89 } | 99 } |
90 } | 100 } |
91 #endif // !defined(NDEBUG) | 101 #endif // !defined(NDEBUG) |
92 | 102 |
93 AudioDeviceIOS::AudioDeviceIOS() | 103 AudioDeviceIOS::AudioDeviceIOS() |
94 : audio_device_buffer_(nullptr), | 104 : audio_device_buffer_(nullptr), |
95 audio_unit_(nullptr), | 105 audio_unit_(nullptr), |
96 recording_(0), | 106 recording_(0), |
97 playing_(0), | 107 playing_(0), |
98 initialized_(false), | 108 initialized_(false), |
(...skipping 289 matching lines...)
388 } | 398 } |
389 | 399 |
390 // Get a pointer to the recorded audio and send it to the WebRTC ADB. | 400 // Get a pointer to the recorded audio and send it to the WebRTC ADB. |
391 // Use the FineAudioBuffer instance to convert between native buffer size | 401 // Use the FineAudioBuffer instance to convert between native buffer size |
392 // and the 10ms buffer size used by WebRTC. | 402 // and the 10ms buffer size used by WebRTC. |
393 AudioBuffer* audio_buffer = &io_data->mBuffers[0]; | 403 AudioBuffer* audio_buffer = &io_data->mBuffers[0]; |
394 const size_t size_in_bytes = audio_buffer->mDataByteSize; | 404 const size_t size_in_bytes = audio_buffer->mDataByteSize; |
395 RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, | 405 RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, |
396 num_frames); | 406 num_frames); |
397 int8_t* data = static_cast<int8_t*>(audio_buffer->mData); | 407 int8_t* data = static_cast<int8_t*>(audio_buffer->mData); |
398 fine_audio_buffer_->DeliverRecordedData(data, size_in_bytes, | 408 fine_audio_buffer_->DeliverRecordedData(rtc::ArrayView<const int8_t>(data, size_in_bytes), |
399 kFixedPlayoutDelayEstimate, | 409 kFixedPlayoutDelayEstimate, |
400 kFixedRecordDelayEstimate); | 410 kFixedRecordDelayEstimate); |
401 return noErr; | 411 return noErr; |
402 } | 412 } |
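
[Note, not part of the CL] On the NEW side above, the raw (pointer, byte count) pair is wrapped in rtc::ArrayView<const int8_t> before being handed to FineAudioBuffer::DeliverRecordedData, so the size travels with the pointer. A minimal sketch of that pattern, assuming only the (pointer, size) constructor and the data()/size() accessors from webrtc/base/array_view.h; DeliverSamples is a hypothetical stand-in for the real consumer:

#include <cstdint>
#include <cstdio>

#include "webrtc/base/array_view.h"

// Hypothetical consumer: the view carries both pointer and length, so no
// separate size argument is needed.
static void DeliverSamples(rtc::ArrayView<const int8_t> audio) {
  std::printf("received %zu bytes at %p\n", audio.size(),
              static_cast<const void*>(audio.data()));
}

int main() {
  int8_t recorded[960] = {0};  // Stand-in for one native capture buffer.
  DeliverSamples(rtc::ArrayView<const int8_t>(recorded, sizeof(recorded)));
  return 0;
}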
403 | 413 |
404 OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags, | 414 OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags, |
405 const AudioTimeStamp* time_stamp, | 415 const AudioTimeStamp* time_stamp, |
406 UInt32 bus_number, | 416 UInt32 bus_number, |
407 UInt32 num_frames, | 417 UInt32 num_frames, |
408 AudioBufferList* io_data) { | 418 AudioBufferList* io_data) { |
409 // Verify 16-bit, noninterleaved mono PCM signal format. | 419 // Verify 16-bit, noninterleaved mono PCM signal format. |
410 RTC_DCHECK_EQ(1, io_data->mNumberBuffers); | 420 RTC_DCHECK_EQ(1, io_data->mNumberBuffers); |
411 AudioBuffer* audio_buffer = &io_data->mBuffers[0]; | 421 AudioBuffer* audio_buffer = &io_data->mBuffers[0]; |
412 RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels); | 422 RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels); |
413 // Get pointer to internal audio buffer to which new audio data shall be | 423 // Get pointer to internal audio buffer to which new audio data shall be |
414 // written. | 424 // written. |
415 const size_t size_in_bytes = audio_buffer->mDataByteSize; | 425 const size_t size_in_bytes = audio_buffer->mDataByteSize; |
416 RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, | 426 RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, |
417 num_frames); | 427 num_frames); |
418 int8_t* destination = reinterpret_cast<int8_t*>(audio_buffer->mData); | 428 int8_t* destination = reinterpret_cast<int8_t*>(audio_buffer->mData); |
419 // Produce silence and give audio unit a hint about it if playout is not | 429 // Produce silence and give audio unit a hint about it if playout is not |
420 // activated. | 430 // activated. |
421 if (!rtc::AtomicOps::AcquireLoad(&playing_)) { | 431 if (!rtc::AtomicOps::AcquireLoad(&playing_)) { |
422 *flags |= kAudioUnitRenderAction_OutputIsSilence; | 432 *flags |= kAudioUnitRenderAction_OutputIsSilence; |
423 memset(destination, 0, size_in_bytes); | 433 memset(destination, 0, size_in_bytes); |
424 return noErr; | 434 return noErr; |
425 } | 435 } |
426 // Produce silence and log a warning message for the case when Core Audio is | |
427 // asking for an invalid number of audio frames. I don't expect this to happen | |
428 // but it is done as a safety measure to avoid bad audio if such as case would | |
429 // ever be triggered e.g. in combination with BT devices. | |
430 const size_t frames_per_buffer = playout_parameters_.frames_per_buffer(); | |
431 if (num_frames != frames_per_buffer) { | |
432 RTCLogWarning(@"Expected %u frames but got %u", | |
433 static_cast<unsigned int>(frames_per_buffer), | |
434 static_cast<unsigned int>(num_frames)); | |
435 *flags |= kAudioUnitRenderAction_OutputIsSilence; | |
436 memset(destination, 0, size_in_bytes); | |
437 return noErr; | |
438 } | |
439 | 436 |
440 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches | 437 // Read decoded 16-bit PCM samples from WebRTC (using a size that matches |
441 // the native I/O audio unit) to a preallocated intermediate buffer and | 438 // the native I/O audio unit) and copy the result to the audio buffer in the |
442 // copy the result to the audio buffer in the |io_data| destination. | 439 // |io_data| destination. |
443 int8_t* source = playout_audio_buffer_.get(); | 440 fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes)); |
444 fine_audio_buffer_->GetPlayoutData(source); | |
445 memcpy(destination, source, size_in_bytes); | |
446 return noErr; | 441 return noErr; |
447 } | 442 } |
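
[Note, not part of the CL] The NEW side drops the intermediate playout_audio_buffer_ and the frame-count guard: FineAudioBuffer::GetPlayoutData now writes straight into Core Audio's own buffer through an ArrayView sized from mDataByteSize, removing one memcpy per render callback. A rough sketch of that shape, assuming webrtc/base/array_view.h; FillPlayoutData and RenderIntoIOBuffer are hypothetical names used only for illustration:

#include <cstdint>
#include <cstring>

#include "webrtc/base/array_view.h"

// Hypothetical producer that fills the destination view with decoded samples
// (silence here, to keep the sketch self-contained).
static void FillPlayoutData(rtc::ArrayView<int8_t> destination) {
  std::memset(destination.data(), 0, destination.size());
}

// Hypothetical render callback: hand Core Audio's buffer to the producer
// directly instead of filling a scratch buffer and copying it afterwards.
static void RenderIntoIOBuffer(int8_t* io_buffer, size_t size_in_bytes) {
  FillPlayoutData(rtc::ArrayView<int8_t>(io_buffer, size_in_bytes));
}

int main() {
  int8_t io_buffer[960];
  RenderIntoIOBuffer(io_buffer, sizeof(io_buffer));
  return 0;
}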
448 | 443 |
449 void AudioDeviceIOS::OnMessage(rtc::Message *msg) { | 444 void AudioDeviceIOS::OnMessage(rtc::Message *msg) { |
450 switch (msg->message_id) { | 445 switch (msg->message_id) { |
451 case kMessageTypeInterruptionBegin: | 446 case kMessageTypeInterruptionBegin: |
452 HandleInterruptionBegin(); | 447 HandleInterruptionBegin(); |
453 break; | 448 break; |
454 case kMessageTypeInterruptionEnd: | 449 case kMessageTypeInterruptionEnd: |
455 HandleInterruptionEnd(); | 450 HandleInterruptionEnd(); |
(...skipping 169 matching lines...)
625 LOG(LS_INFO) << " bytes per I/O buffer: " | 620 LOG(LS_INFO) << " bytes per I/O buffer: " |
626 << playout_parameters_.GetBytesPerBuffer(); | 621 << playout_parameters_.GetBytesPerBuffer(); |
627 RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), | 622 RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), |
628 record_parameters_.GetBytesPerBuffer()); | 623 record_parameters_.GetBytesPerBuffer()); |
629 | 624 |
630 // Update the ADB parameters since the sample rate might have changed. | 625 // Update the ADB parameters since the sample rate might have changed. |
631 UpdateAudioDeviceBuffer(); | 626 UpdateAudioDeviceBuffer(); |
632 | 627 |
633 // Create a modified audio buffer class which allows us to ask for, | 628 // Create a modified audio buffer class which allows us to ask for, |
634 // or deliver, any number of samples (and not only multiple of 10ms) to match | 629 // or deliver, any number of samples (and not only multiple of 10ms) to match |
635 // the native audio unit buffer size. | 630 // the native audio unit buffer size. Use a reasonable capacity to avoid |
| 631 // reallocations while audio is played to reduce risk of glitches. |
636 RTC_DCHECK(audio_device_buffer_); | 632 RTC_DCHECK(audio_device_buffer_); |
637 const size_t buffer_size_in_bytes = playout_parameters_.GetBytesPerBuffer(); | 633 const size_t capacity_in_bytes = 2 * playout_parameters_.GetBytesPerBuffer(); |
638 fine_audio_buffer_.reset(new FineAudioBuffer( | 634 fine_audio_buffer_.reset(new FineAudioBuffer( |
639 audio_device_buffer_, buffer_size_in_bytes, | 635 audio_device_buffer_, playout_parameters_.sample_rate(), capacity_in_bytes)); |
640 playout_parameters_.sample_rate())); | |
641 playout_audio_buffer_.reset(new SInt8[buffer_size_in_bytes]); | |
642 | 636 |
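
[Note, not part of the CL] In the FineAudioBuffer construction above, the argument order changes to (buffer, sample rate, capacity) and the capacity is doubled to two native buffers so the internal buffer should not have to grow while audio is playing. A back-of-the-envelope check with assumed example numbers (48 kHz mono, 16-bit samples, a 10 ms native I/O buffer), not values taken from this CL:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t sample_rate = 48000;       // Assumed hardware sample rate.
  const double io_duration_sec = 0.01;    // Assumed 10 ms native I/O buffer.
  const size_t bytes_per_sample = 2;      // 16-bit mono PCM.
  const size_t frames_per_buffer =
      static_cast<size_t>(sample_rate * io_duration_sec);            // 480
  const size_t capacity_in_bytes = 2 * frames_per_buffer * bytes_per_sample;
  std::printf("capacity_in_bytes = %zu\n", capacity_in_bytes);       // 1920
  return 0;
}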
643 // Allocate AudioBuffers to be used as storage for the received audio. | 637 // Allocate AudioBuffers to be used as storage for the received audio. |
644 // The AudioBufferList structure works as a placeholder for the | 638 // The AudioBufferList structure works as a placeholder for the |
645 // AudioBuffer structure, which holds a pointer to the actual data buffer | 639 // AudioBuffer structure, which holds a pointer to the actual data buffer |
646 // in |record_audio_buffer_|. Recorded audio will be rendered into this memory | 640 // in |record_audio_buffer_|. Recorded audio will be rendered into this memory |
647 // at each input callback when calling AudioUnitRender(). | 641 // at each input callback when calling AudioUnitRender(). |
648 const int data_byte_size = record_parameters_.GetBytesPerBuffer(); | 642 const int data_byte_size = record_parameters_.GetBytesPerBuffer(); |
649 record_audio_buffer_.reset(new SInt8[data_byte_size]); | 643 record_audio_buffer_.reset(new SInt8[data_byte_size]); |
650 memset(record_audio_buffer_.get(), 0, data_byte_size); | 644 memset(record_audio_buffer_.get(), 0, data_byte_size); |
651 audio_record_buffer_list_.mNumberBuffers = 1; | 645 audio_record_buffer_list_.mNumberBuffers = 1; |
(...skipping 185 matching lines...)
837 | 831 |
838 // All I/O should be stopped or paused prior to deactivating the audio | 832 // All I/O should be stopped or paused prior to deactivating the audio |
839 // session, hence we deactivate as last action. | 833 // session, hence we deactivate as last action. |
840 [session lockForConfiguration]; | 834 [session lockForConfiguration]; |
841 UnconfigureAudioSession(); | 835 UnconfigureAudioSession(); |
842 [session endWebRTCSession:nil]; | 836 [session endWebRTCSession:nil]; |
843 [session unlockForConfiguration]; | 837 [session unlockForConfiguration]; |
844 } | 838 } |
845 | 839 |
846 } // namespace webrtc | 840 } // namespace webrtc |