Index: webrtc/modules/audio_device/fine_audio_buffer.cc
diff --git a/webrtc/modules/audio_device/fine_audio_buffer.cc b/webrtc/modules/audio_device/fine_audio_buffer.cc
index 0829e982038cbf74fec5f760ed48fddbe0e19d77..c783a278f7ae5dce1d2df05ced28d0505acb3fa4 100644
--- a/webrtc/modules/audio_device/fine_audio_buffer.cc
+++ b/webrtc/modules/audio_device/fine_audio_buffer.cc
@@ -28,24 +28,14 @@ FineAudioBuffer::FineAudioBuffer(AudioDeviceBuffer* device_buffer,
       sample_rate_(sample_rate),
       samples_per_10_ms_(static_cast<size_t>(sample_rate_ * 10 / 1000)),
       bytes_per_10_ms_(samples_per_10_ms_ * sizeof(int16_t)),
-      playout_cached_buffer_start_(0),
-      playout_cached_bytes_(0) {
-  playout_cache_buffer_.reset(new int8_t[bytes_per_10_ms_]);
+      buffer_10_ms_(new int8_t[bytes_per_10_ms_]) {
+  LOG(INFO) << "desired_frame_size_bytes:" << desired_frame_size_bytes;
 }
 
 FineAudioBuffer::~FineAudioBuffer() {}
 
-size_t FineAudioBuffer::RequiredPlayoutBufferSizeBytes() {
-  // It is possible that we store the desired frame size - 1 samples. Since new
-  // audio frames are pulled in chunks of 10ms we will need a buffer that can
-  // hold desired_frame_size - 1 + 10ms of data. We omit the - 1.
-  return desired_frame_size_bytes_ + bytes_per_10_ms_;
-}
-
 void FineAudioBuffer::ResetPlayout() {
-  playout_cached_buffer_start_ = 0;
-  playout_cached_bytes_ = 0;
-  memset(playout_cache_buffer_.get(), 0, bytes_per_10_ms_);
+  playout_buffer_.Clear();
 }
 
 void FineAudioBuffer::ResetRecord() {
@@ -53,50 +43,25 @@ void FineAudioBuffer::ResetRecord() {
 }
 
 void FineAudioBuffer::GetPlayoutData(int8_t* buffer) {
-  if (desired_frame_size_bytes_ <= playout_cached_bytes_) {
-    memcpy(buffer, &playout_cache_buffer_.get()[playout_cached_buffer_start_],
-           desired_frame_size_bytes_);
-    playout_cached_buffer_start_ += desired_frame_size_bytes_;
-    playout_cached_bytes_ -= desired_frame_size_bytes_;
-    RTC_CHECK_LT(playout_cached_buffer_start_ + playout_cached_bytes_,
-                 bytes_per_10_ms_);
-    return;
-  }
-  memcpy(buffer, &playout_cache_buffer_.get()[playout_cached_buffer_start_],
-         playout_cached_bytes_);
-  // Push another n*10ms of audio to |buffer|. n > 1 if
-  // |desired_frame_size_bytes_| is greater than 10ms of audio. Note that we
-  // write the audio after the cached bytes copied earlier.
-  int8_t* unwritten_buffer = &buffer[playout_cached_bytes_];
-  int bytes_left =
-      static_cast<int>(desired_frame_size_bytes_ - playout_cached_bytes_);
-  // Ceiling of integer division: 1 + ((x - 1) / y)
-  size_t number_of_requests = 1 + (bytes_left - 1) / (bytes_per_10_ms_);
-  for (size_t i = 0; i < number_of_requests; ++i) {
+  const size_t num_bytes = desired_frame_size_bytes_;
+  // Ask WebRTC for new data in chunks of 10ms until we have enough to
+  // fulfill the request. It is possible that the buffer already contains
+  // enough samples from the last round.
+  while (playout_buffer_.size() < num_bytes) {
+    // Get 10ms decoded audio from WebRTC.
     device_buffer_->RequestPlayoutData(samples_per_10_ms_);
-    int num_out = device_buffer_->GetPlayoutData(unwritten_buffer);
-    if (static_cast<size_t>(num_out) != samples_per_10_ms_) {
-      RTC_CHECK_EQ(num_out, 0);
-      playout_cached_bytes_ = 0;
-      return;
-    }
-    unwritten_buffer += bytes_per_10_ms_;
-    RTC_CHECK_GE(bytes_left, 0);
-    bytes_left -= static_cast<int>(bytes_per_10_ms_);
+    int samples_per_channel =
[kwiberg-webrtc 2017/02/27 09:45:35] const?
[henrika_webrtc 2017/02/27 12:32:27] Done.
+        device_buffer_->GetPlayoutData(buffer_10_ms_.get());
+    RTC_DCHECK_EQ(samples_per_channel, samples_per_10_ms_);
+    // Append PCM audio samples to buffer.
+    playout_buffer_.AppendData(buffer_10_ms_.get(), bytes_per_10_ms_);
[kwiberg-webrtc 2017/02/27 09:45:35] Hmm. The only use of buffer_10_ms_ is as temp storage …
[henrika_webrtc 2017/02/27 12:32:27] Correct. Only used as temp storage. Will try to improve …
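A rough sketch of the direction the thread above points at (illustrative only, not part of this patch set): if playout_buffer_ is an rtc::BufferT<int8_t>, the setter-based AppendData() overload in webrtc/base/buffer.h (together with rtc::ArrayView from webrtc/base/array_view.h) could let AudioDeviceBuffer write straight into the FIFO and remove the buffer_10_ms_ temp storage. The loop body below would replace the one in the patch, assuming that overload is available.

    while (playout_buffer_.size() < num_bytes) {
      device_buffer_->RequestPlayoutData(samples_per_10_ms_);
      playout_buffer_.AppendData(
          bytes_per_10_ms_, [&](rtc::ArrayView<int8_t> chunk) {
            // AudioDeviceBuffer writes 10ms of PCM directly into the newly
            // reserved tail of the FIFO; return the number of valid bytes.
            const size_t samples_per_channel = static_cast<size_t>(
                device_buffer_->GetPlayoutData(chunk.data()));
            RTC_DCHECK_EQ(samples_per_channel, samples_per_10_ms_);
            return samples_per_channel * sizeof(int16_t);
          });
    }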
   }
-  RTC_CHECK_LE(bytes_left, 0);
-  // Put the samples that were written to |buffer| but are not used in the
-  // cache.
-  size_t cache_location = desired_frame_size_bytes_;
-  int8_t* cache_ptr = &buffer[cache_location];
-  playout_cached_bytes_ = number_of_requests * bytes_per_10_ms_ -
-                          (desired_frame_size_bytes_ - playout_cached_bytes_);
-  // If playout_cached_bytes_ is larger than the cache buffer, uninitialized
-  // memory will be read.
-  RTC_CHECK_LE(playout_cached_bytes_, bytes_per_10_ms_);
-  RTC_CHECK_EQ(-bytes_left, playout_cached_bytes_);
-  playout_cached_buffer_start_ = 0;
-  memcpy(playout_cache_buffer_.get(), cache_ptr, playout_cached_bytes_);
+  // Provide the requested number of bytes to the consumer.
+  memcpy(buffer, playout_buffer_.data(), num_bytes);
+  // Move remaining samples to start of buffer to prepare for next round.
+  memmove(playout_buffer_.data(), playout_buffer_.data() + num_bytes,
+          playout_buffer_.size() - num_bytes);
+  playout_buffer_.SetSize(playout_buffer_.size() - num_bytes);
 }
 
 void FineAudioBuffer::DeliverRecordedData(const int8_t* buffer,
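For context on how the new FIFO is meant to be consumed, here is a hypothetical caller sketch (names and frame size are illustrative and not taken from this CL): a platform playout callback whose native buffer size is not a multiple of 10ms pulls exactly one frame per call, and GetPlayoutData() tops the internal buffer up in 10ms chunks, caching any surplus bytes for the next call.

    #include "webrtc/modules/audio_device/fine_audio_buffer.h"

    // Hypothetical native frame size: 192 samples (4ms at 48kHz) per callback,
    // i.e. 384 bytes of mono 16-bit PCM, while WebRTC delivers 480 samples
    // (960 bytes) per 10ms request.
    constexpr size_t kCallbackSamples = 192;
    constexpr size_t kCallbackBytes = kCallbackSamples * sizeof(int16_t);

    // |fine_buffer| is assumed to have been constructed with
    // desired_frame_size_bytes equal to kCallbackBytes.
    void OnPlayoutCallback(webrtc::FineAudioBuffer* fine_buffer,
                           int8_t* destination) {
      // Copies exactly kCallbackBytes bytes; the first call triggers one 10ms
      // request and leaves 960 - 384 = 576 bytes cached for later callbacks.
      fine_buffer->GetPlayoutData(destination);
    }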