Index: webrtc/modules/audio_device/ios/audio_device_ios.mm
diff --git a/webrtc/modules/audio_device/ios/audio_device_ios.mm b/webrtc/modules/audio_device/ios/audio_device_ios.mm
index 8cb1cd2b99ec23c6381fc76498dd46e325fd1514..aada174795bcf2b0c39209ac5e0200289761ebcc 100644
--- a/webrtc/modules/audio_device/ios/audio_device_ios.mm
+++ b/webrtc/modules/audio_device/ios/audio_device_ios.mm
@@ -15,6 +15,7 @@
 #include <cmath>
+#include "webrtc/base/array_view.h"
 #include "webrtc/base/atomicops.h"
 #include "webrtc/base/bind.h"
 #include "webrtc/base/checks.h"
@@ -69,6 +70,11 @@ enum AudioDeviceMessageType : uint32_t {
 using ios::CheckAndLogError;
 #if !defined(NDEBUG)
+// Returns true when the code runs in the simulator.
+static bool DeviceIsSimulator() {
+  return ios::GetDeviceName() == "x86_64";
+}
+
 // Helper method that logs essential device information strings.
 static void LogDeviceInfo() {
   LOG(LS_INFO) << "LogDeviceInfo";
@@ -86,6 +92,10 @@ static void LogDeviceInfo() {
     && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_9_0
     LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
 #endif
+#if TARGET_IPHONE_SIMULATOR
+    LOG(LS_INFO) << " TARGET_IPHONE_SIMULATOR is defined";
+#endif
+    LOG(LS_INFO) << " DeviceIsSimulator: " << DeviceIsSimulator();
   }
 }
 #endif  // !defined(NDEBUG)
@@ -395,7 +405,7 @@ OSStatus AudioDeviceIOS::OnDeliverRecordedData(
   RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample,
                num_frames);
   int8_t* data = static_cast<int8_t*>(audio_buffer->mData);
-  fine_audio_buffer_->DeliverRecordedData(data, size_in_bytes,
+  fine_audio_buffer_->DeliverRecordedData(rtc::ArrayView<const int8_t>(data, size_in_bytes),
                                           kFixedPlayoutDelayEstimate,
                                           kFixedRecordDelayEstimate);
   return noErr;
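
Passing rtc::ArrayView<const int8_t> instead of a separate pointer and byte count keeps the buffer and its length together in a single argument. A rough sketch of how such a view is built and consumed, assuming only the (pointer, size) and container constructors declared in webrtc/base/array_view.h; ConsumeRecordedBytes and Caller are made-up names, not FineAudioBuffer methods:

    #include <cstddef>
    #include <vector>

    #include "webrtc/base/array_view.h"

    // Made-up sink standing in for FineAudioBuffer::DeliverRecordedData().
    static size_t ConsumeRecordedBytes(rtc::ArrayView<const int8_t> audio) {
      // The view carries both the pointer and the length.
      const std::vector<int8_t> copy(audio.data(), audio.data() + audio.size());
      return copy.size();  // Same value as audio.size().
    }

    static void Caller(const int8_t* data, size_t size_in_bytes) {
      // Explicit construction from a raw pointer and a byte count...
      ConsumeRecordedBytes(rtc::ArrayView<const int8_t>(data, size_in_bytes));
      // ...or implicit conversion from a container exposing data() and size().
      std::vector<int8_t> buffer(size_in_bytes);
      ConsumeRecordedBytes(buffer);
    }
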
@@ -423,26 +433,11 @@ OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
     memset(destination, 0, size_in_bytes);
     return noErr;
   }
-  // Produce silence and log a warning message for the case when Core Audio is
-  // asking for an invalid number of audio frames. I don't expect this to happen
-  // but it is done as a safety measure to avoid bad audio if such as case would
-  // ever be triggered e.g. in combination with BT devices.
-  const size_t frames_per_buffer = playout_parameters_.frames_per_buffer();
-  if (num_frames != frames_per_buffer) {
-    RTCLogWarning(@"Expected %u frames but got %u",
-                  static_cast<unsigned int>(frames_per_buffer),
-                  static_cast<unsigned int>(num_frames));
-    *flags |= kAudioUnitRenderAction_OutputIsSilence;
-    memset(destination, 0, size_in_bytes);
-    return noErr;
-  }
   // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
-  // the native I/O audio unit) to a preallocated intermediate buffer and
-  // copy the result to the audio buffer in the |io_data| destination.
-  int8_t* source = playout_audio_buffer_.get();
-  fine_audio_buffer_->GetPlayoutData(source);
-  memcpy(destination, source, size_in_bytes);
+  // the native I/O audio unit) and copy the result to the audio buffer in the
+  // |io_data| destination.
+  fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<int8_t>(destination, size_in_bytes));
   return noErr;
 }
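
Dropping the frames_per_buffer check is possible because the updated FineAudioBuffer is expected to serve whatever byte count Core Audio requests, assembling it from fixed 10 ms chunks internally (it "allows us to ask for, or deliver, any number of samples", per the comment in the last hunk). A simplified sketch of that buffering idea; PlayoutFifo and Pull10MsChunk are illustrative names, not the real FineAudioBuffer internals:

    #include <cstring>
    #include <vector>

    #include "webrtc/base/array_view.h"

    // Illustrative-only FIFO showing why a fixed frames-per-buffer check is
    // no longer needed: any requested size is served from 10 ms chunks.
    class PlayoutFifo {
     public:
      void GetPlayoutData(rtc::ArrayView<int8_t> destination) {
        // Top up the FIFO with fixed-size 10 ms chunks until the request fits.
        while (fifo_.size() < destination.size()) {
          const std::vector<int8_t> chunk = Pull10MsChunk();
          fifo_.insert(fifo_.end(), chunk.begin(), chunk.end());
        }
        // Hand out exactly the number of bytes Core Audio asked for.
        std::memcpy(destination.data(), fifo_.data(), destination.size());
        fifo_.erase(fifo_.begin(), fifo_.begin() + destination.size());
      }

     private:
      // Assumed source of decoded 16-bit PCM, e.g. the AudioDeviceBuffer.
      std::vector<int8_t> Pull10MsChunk();
      std::vector<int8_t> fifo_;
    };

The capacity handed to FineAudioBuffer in the next hunk (twice the native buffer size) presumably serves the same goal: keeping such internal storage from reallocating on the audio thread.
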
@@ -632,13 +627,12 @@ void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
   // Create a modified audio buffer class which allows us to ask for,
   // or deliver, any number of samples (and not only multiple of 10ms) to match
-  // the native audio unit buffer size.
+  // the native audio unit buffer size. Use a reasonable capacity to avoid
+  // reallocations while audio is playing, which reduces the risk of glitches.
   RTC_DCHECK(audio_device_buffer_);
-  const size_t buffer_size_in_bytes = playout_parameters_.GetBytesPerBuffer();
+  const size_t capacity_in_bytes = 2 * playout_parameters_.GetBytesPerBuffer();
   fine_audio_buffer_.reset(new FineAudioBuffer(
-      audio_device_buffer_, buffer_size_in_bytes,
-      playout_parameters_.sample_rate()));
-  playout_audio_buffer_.reset(new SInt8[buffer_size_in_bytes]);
+      audio_device_buffer_, playout_parameters_.sample_rate(), capacity_in_bytes));
   // Allocate AudioBuffers to be used as storage for the received audio.
   // The AudioBufferList structure works as a placeholder for the