| Index: webrtc/modules/audio_device/audio_device_buffer.cc
|
| diff --git a/webrtc/modules/audio_device/audio_device_buffer.cc b/webrtc/modules/audio_device/audio_device_buffer.cc
|
| index ba8b6a5b3e1f773be7acc0a55a98ec7a77b7fe1a..d157c1cb1ae661483675e18411c0bd57d6a6d449 100644
|
| --- a/webrtc/modules/audio_device/audio_device_buffer.cc
|
| +++ b/webrtc/modules/audio_device/audio_device_buffer.cc
|
| @@ -22,9 +22,6 @@
|
|
|
| namespace webrtc {
|
|
|
| -static const int kHighDelayThresholdMs = 300;
|
| -static const int kLogHighDelayIntervalFrames = 500; // 5 seconds.
|
| -
|
| static const char kTimerQueueName[] = "AudioDeviceBufferTimer";
|
|
|
| // Time between two successive calls to LogStats().
|
| @@ -33,30 +30,26 @@ static const size_t kTimerIntervalInMilliseconds =
|
| kTimerIntervalInSeconds * rtc::kNumMillisecsPerSec;
|
|
|
| AudioDeviceBuffer::AudioDeviceBuffer()
|
| - : _ptrCbAudioTransport(nullptr),
|
| + : audio_transport_cb_(nullptr),
|
| task_queue_(kTimerQueueName),
|
| timer_has_started_(false),
|
| - _recSampleRate(0),
|
| - _playSampleRate(0),
|
| - _recChannels(0),
|
| - _playChannels(0),
|
| - _recChannel(AudioDeviceModule::kChannelBoth),
|
| - _recBytesPerSample(0),
|
| - _playBytesPerSample(0),
|
| - _recSamples(0),
|
| - _recSize(0),
|
| - _playSamples(0),
|
| - _playSize(0),
|
| - _recFile(*FileWrapper::Create()),
|
| - _playFile(*FileWrapper::Create()),
|
| - _currentMicLevel(0),
|
| - _newMicLevel(0),
|
| - _typingStatus(false),
|
| - _playDelayMS(0),
|
| - _recDelayMS(0),
|
| - _clockDrift(0),
|
| - // Set to the interval in order to log on the first occurrence.
|
| - high_delay_counter_(kLogHighDelayIntervalFrames),
|
| + rec_sample_rate_(0),
|
| + play_sample_rate_(0),
|
| + rec_channels_(0),
|
| + play_channels_(0),
|
| + rec_channel_(AudioDeviceModule::kChannelBoth),
|
| + rec_bytes_per_sample_(0),
|
| + play_bytes_per_sample_(0),
|
| + rec_samples_per_10ms_(0),
|
| + rec_bytes_per_10ms_(0),
|
| + play_samples_per_10ms_(0),
|
| + play_bytes_per_10ms_(0),
|
| + current_mic_level_(0),
|
| + new_mic_level_(0),
|
| + typing_status_(false),
|
| + play_delay_ms_(0),
|
| + rec_delay_ms_(0),
|
| + clock_drift_(0),
|
| num_stat_reports_(0),
|
| rec_callbacks_(0),
|
| last_rec_callbacks_(0),
|
| @@ -68,8 +61,6 @@ AudioDeviceBuffer::AudioDeviceBuffer()
|
| last_play_samples_(0),
|
| last_log_stat_time_(0) {
|
| LOG(INFO) << "AudioDeviceBuffer::ctor";
|
| - memset(_recBuffer, 0, kMaxBufferSizeBytes);
|
| - memset(_playBuffer, 0, kMaxBufferSizeBytes);
|
| }
|
|
|
| AudioDeviceBuffer::~AudioDeviceBuffer() {
|
| @@ -93,27 +84,19 @@ AudioDeviceBuffer::~AudioDeviceBuffer() {
|
| LOG(INFO) << "average: "
|
| << static_cast<float>(total_diff_time) / num_measurements;
|
| }
|
| -
|
| - _recFile.Flush();
|
| - _recFile.CloseFile();
|
| - delete &_recFile;
|
| -
|
| - _playFile.Flush();
|
| - _playFile.CloseFile();
|
| - delete &_playFile;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::RegisterAudioCallback(
|
| - AudioTransport* audioCallback) {
|
| + AudioTransport* audio_callback) {
|
| LOG(INFO) << __FUNCTION__;
|
| rtc::CritScope lock(&_critSectCb);
|
| - _ptrCbAudioTransport = audioCallback;
|
| + audio_transport_cb_ = audio_callback;
|
| return 0;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::InitPlayout() {
|
| - RTC_DCHECK(thread_checker_.CalledOnValidThread());
|
| LOG(INFO) << __FUNCTION__;
|
| + RTC_DCHECK(thread_checker_.CalledOnValidThread());
|
| last_playout_time_ = rtc::TimeMillis();
|
| if (!timer_has_started_) {
|
| StartTimer();
|
| @@ -123,8 +106,8 @@ int32_t AudioDeviceBuffer::InitPlayout() {
|
| }
|
|
|
| int32_t AudioDeviceBuffer::InitRecording() {
|
| - RTC_DCHECK(thread_checker_.CalledOnValidThread());
|
| LOG(INFO) << __FUNCTION__;
|
| + RTC_DCHECK(thread_checker_.CalledOnValidThread());
|
| if (!timer_has_started_) {
|
| StartTimer();
|
| timer_has_started_ = true;
|
| @@ -135,38 +118,40 @@ int32_t AudioDeviceBuffer::InitRecording() {
|
| int32_t AudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) {
|
| LOG(INFO) << "SetRecordingSampleRate(" << fsHz << ")";
|
| rtc::CritScope lock(&_critSect);
|
| - _recSampleRate = fsHz;
|
| + rec_sample_rate_ = fsHz;
|
| return 0;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) {
|
| LOG(INFO) << "SetPlayoutSampleRate(" << fsHz << ")";
|
| rtc::CritScope lock(&_critSect);
|
| - _playSampleRate = fsHz;
|
| + play_sample_rate_ = fsHz;
|
| return 0;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::RecordingSampleRate() const {
|
| - return _recSampleRate;
|
| + return rec_sample_rate_;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::PlayoutSampleRate() const {
|
| - return _playSampleRate;
|
| + return play_sample_rate_;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::SetRecordingChannels(size_t channels) {
|
| + LOG(INFO) << "SetRecordingChannels(" << channels << ")";
|
| rtc::CritScope lock(&_critSect);
|
| - _recChannels = channels;
|
| - _recBytesPerSample =
|
| + rec_channels_ = channels;
|
| + rec_bytes_per_sample_ =
|
| 2 * channels; // 16 bits per sample in mono, 32 bits in stereo
|
| return 0;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::SetPlayoutChannels(size_t channels) {
|
| + LOG(INFO) << "SetPlayoutChannels(" << channels << ")";
|
| rtc::CritScope lock(&_critSect);
|
| - _playChannels = channels;
|
| + play_channels_ = channels;
|
| // 16 bits per sample in mono, 32 bits in stereo
|
| - _playBytesPerSample = 2 * channels;
|
| + play_bytes_per_sample_ = 2 * channels;
|
| return 0;
|
| }
|
|
|
| @@ -174,135 +159,101 @@ int32_t AudioDeviceBuffer::SetRecordingChannel(
|
| const AudioDeviceModule::ChannelType channel) {
|
| rtc::CritScope lock(&_critSect);
|
|
|
| - if (_recChannels == 1) {
|
| + if (rec_channels_ == 1) {
|
| return -1;
|
| }
|
|
|
| if (channel == AudioDeviceModule::kChannelBoth) {
|
| // two bytes per channel
|
| - _recBytesPerSample = 4;
|
| + rec_bytes_per_sample_ = 4;
|
| } else {
|
| // only utilize one out of two possible channels (left or right)
|
| - _recBytesPerSample = 2;
|
| + rec_bytes_per_sample_ = 2;
|
| }
|
| - _recChannel = channel;
|
| + rec_channel_ = channel;
|
|
|
| return 0;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::RecordingChannel(
|
| AudioDeviceModule::ChannelType& channel) const {
|
| - channel = _recChannel;
|
| + channel = rec_channel_;
|
| return 0;
|
| }
|
|
|
| size_t AudioDeviceBuffer::RecordingChannels() const {
|
| - return _recChannels;
|
| + return rec_channels_;
|
| }
|
|
|
| size_t AudioDeviceBuffer::PlayoutChannels() const {
|
| - return _playChannels;
|
| + return play_channels_;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::SetCurrentMicLevel(uint32_t level) {
|
| - _currentMicLevel = level;
|
| + current_mic_level_ = level;
|
| return 0;
|
| }
|
|
|
| -int32_t AudioDeviceBuffer::SetTypingStatus(bool typingStatus) {
|
| - _typingStatus = typingStatus;
|
| +int32_t AudioDeviceBuffer::SetTypingStatus(bool typing_status) {
|
| + typing_status_ = typing_status;
|
| return 0;
|
| }
|
|
|
| uint32_t AudioDeviceBuffer::NewMicLevel() const {
|
| - return _newMicLevel;
|
| + return new_mic_level_;
|
| }
|
|
|
| -void AudioDeviceBuffer::SetVQEData(int playDelayMs,
|
| - int recDelayMs,
|
| - int clockDrift) {
|
| - if (high_delay_counter_ < kLogHighDelayIntervalFrames) {
|
| - ++high_delay_counter_;
|
| - } else {
|
| - if (playDelayMs + recDelayMs > kHighDelayThresholdMs) {
|
| - high_delay_counter_ = 0;
|
| - LOG(LS_WARNING) << "High audio device delay reported (render="
|
| - << playDelayMs << " ms, capture=" << recDelayMs << " ms)";
|
| - }
|
| - }
|
| -
|
| - _playDelayMS = playDelayMs;
|
| - _recDelayMS = recDelayMs;
|
| - _clockDrift = clockDrift;
|
| +void AudioDeviceBuffer::SetVQEData(int play_delay_ms,
|
| + int rec_delay_ms,
|
| + int clock_drift) {
|
| + play_delay_ms_ = play_delay_ms;
|
| + rec_delay_ms_ = rec_delay_ms;
|
| + clock_drift_ = clock_drift;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::StartInputFileRecording(
|
| const char fileName[kAdmMaxFileNameSize]) {
|
| - rtc::CritScope lock(&_critSect);
|
| -
|
| - _recFile.Flush();
|
| - _recFile.CloseFile();
|
| -
|
| - return _recFile.OpenFile(fileName, false) ? 0 : -1;
|
| + LOG(LS_WARNING) << "Not implemented";
|
| + return 0;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::StopInputFileRecording() {
|
| - rtc::CritScope lock(&_critSect);
|
| -
|
| - _recFile.Flush();
|
| - _recFile.CloseFile();
|
| -
|
| + LOG(LS_WARNING) << "Not implemented";
|
| return 0;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::StartOutputFileRecording(
|
| const char fileName[kAdmMaxFileNameSize]) {
|
| - rtc::CritScope lock(&_critSect);
|
| -
|
| - _playFile.Flush();
|
| - _playFile.CloseFile();
|
| -
|
| - return _playFile.OpenFile(fileName, false) ? 0 : -1;
|
| + LOG(LS_WARNING) << "Not implemented";
|
| + return 0;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::StopOutputFileRecording() {
|
| - rtc::CritScope lock(&_critSect);
|
| -
|
| - _playFile.Flush();
|
| - _playFile.CloseFile();
|
| -
|
| + LOG(LS_WARNING) << "Not implemented";
|
| return 0;
|
| }
|
|
|
| -int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer,
|
| - size_t nSamples) {
|
| - rtc::CritScope lock(&_critSect);
|
| -
|
| - if (_recBytesPerSample == 0) {
|
| - assert(false);
|
| - return -1;
|
| - }
|
| +int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audio_buffer,
|
| + size_t num_samples) {
|
| + AllocateRecordingBufferIfNeeded();
|
| + RTC_CHECK(rec_buffer_);
|
| + // WebRTC can only receive audio in 10ms chunks, hence we fail if the native
|
| + // audio layer tries to deliver something else.
|
| + RTC_CHECK_EQ(num_samples, rec_samples_per_10ms_);
|
|
|
| - _recSamples = nSamples;
|
| - _recSize = _recBytesPerSample * nSamples; // {2,4}*nSamples
|
| - if (_recSize > kMaxBufferSizeBytes) {
|
| - assert(false);
|
| - return -1;
|
| - }
|
| + rtc::CritScope lock(&_critSect);
|
|
|
| - if (_recChannel == AudioDeviceModule::kChannelBoth) {
|
| - // (default) copy the complete input buffer to the local buffer
|
| - memcpy(&_recBuffer[0], audioBuffer, _recSize);
|
| + if (rec_channel_ == AudioDeviceModule::kChannelBoth) {
|
| + // Copy the complete input buffer to the local buffer.
|
| + memcpy(&rec_buffer_[0], audio_buffer, rec_bytes_per_10ms_);
|
| } else {
|
| - int16_t* ptr16In = (int16_t*)audioBuffer;
|
| - int16_t* ptr16Out = (int16_t*)&_recBuffer[0];
|
| -
|
| - if (AudioDeviceModule::kChannelRight == _recChannel) {
|
| + int16_t* ptr16In = (int16_t*)audio_buffer;
|
| + int16_t* ptr16Out = (int16_t*)&rec_buffer_[0];
|
| + if (AudioDeviceModule::kChannelRight == rec_channel_) {
|
| ptr16In++;
|
| }
|
| -
|
| - // exctract left or right channel from input buffer to the local buffer
|
| - for (size_t i = 0; i < _recSamples; i++) {
|
| +    // Extract left or right channel from input buffer to the local buffer.
|
| + for (size_t i = 0; i < rec_samples_per_10ms_; i++) {
|
| *ptr16Out = *ptr16In;
|
| ptr16Out++;
|
| ptr16In++;
|
| @@ -310,52 +261,40 @@ int32_t AudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer,
|
| }
|
| }
|
|
|
| - if (_recFile.is_open()) {
|
| - // write to binary file in mono or stereo (interleaved)
|
| - _recFile.Write(&_recBuffer[0], _recSize);
|
| - }
|
| -
|
| // Update some stats but do it on the task queue to ensure that the members
|
| // are modified and read on the same thread.
|
| task_queue_.PostTask(
|
| - rtc::Bind(&AudioDeviceBuffer::UpdateRecStats, this, nSamples));
|
| -
|
| + rtc::Bind(&AudioDeviceBuffer::UpdateRecStats, this, num_samples));
|
| return 0;
|
| }
|
|
|
| int32_t AudioDeviceBuffer::DeliverRecordedData() {
|
| + RTC_CHECK(rec_buffer_);
|
| + RTC_DCHECK(audio_transport_cb_);
|
| rtc::CritScope lock(&_critSectCb);
|
| - // Ensure that user has initialized all essential members
|
| - if ((_recSampleRate == 0) || (_recSamples == 0) ||
|
| - (_recBytesPerSample == 0) || (_recChannels == 0)) {
|
| - RTC_NOTREACHED();
|
| - return -1;
|
| - }
|
|
|
| - if (!_ptrCbAudioTransport) {
|
| + if (!audio_transport_cb_) {
|
| LOG(LS_WARNING) << "Invalid audio transport";
|
| return 0;
|
| }
|
|
|
| int32_t res(0);
|
| uint32_t newMicLevel(0);
|
| - uint32_t totalDelayMS = _playDelayMS + _recDelayMS;
|
| - res = _ptrCbAudioTransport->RecordedDataIsAvailable(
|
| - &_recBuffer[0], _recSamples, _recBytesPerSample, _recChannels,
|
| - _recSampleRate, totalDelayMS, _clockDrift, _currentMicLevel,
|
| - _typingStatus, newMicLevel);
|
| + uint32_t totalDelayMS = play_delay_ms_ + rec_delay_ms_;
|
| + res = audio_transport_cb_->RecordedDataIsAvailable(
|
| + &rec_buffer_[0], rec_samples_per_10ms_, rec_bytes_per_sample_,
|
| + rec_channels_, rec_sample_rate_, totalDelayMS, clock_drift_,
|
| + current_mic_level_, typing_status_, newMicLevel);
|
| if (res != -1) {
|
| - _newMicLevel = newMicLevel;
|
| + new_mic_level_ = newMicLevel;
|
| + } else {
|
| + LOG(LS_ERROR) << "RecordedDataIsAvailable() failed";
|
| }
|
|
|
| return 0;
|
| }
|
|
|
| -int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
|
| - uint32_t playSampleRate = 0;
|
| - size_t playBytesPerSample = 0;
|
| - size_t playChannels = 0;
|
| -
|
| +int32_t AudioDeviceBuffer::RequestPlayoutData(size_t num_samples) {
|
| // Measure time since last function call and update an array where the
|
| // position/index corresponds to time differences (in milliseconds) between
|
| // two successive playout callbacks, and the stored value is the number of
|
| @@ -367,37 +306,17 @@ int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
|
| last_playout_time_ = now_time;
|
| playout_diff_times_[diff_time]++;
|
|
|
| - // TOOD(henrika): improve bad locking model and make it more clear that only
|
| - // 10ms buffer sizes is supported in WebRTC.
|
| - {
|
| - rtc::CritScope lock(&_critSect);
|
| -
|
| - // Store copies under lock and use copies hereafter to avoid race with
|
| - // setter methods.
|
| - playSampleRate = _playSampleRate;
|
| - playBytesPerSample = _playBytesPerSample;
|
| - playChannels = _playChannels;
|
| -
|
| - // Ensure that user has initialized all essential members
|
| - if ((playBytesPerSample == 0) || (playChannels == 0) ||
|
| - (playSampleRate == 0)) {
|
| - RTC_NOTREACHED();
|
| - return -1;
|
| - }
|
| -
|
| - _playSamples = nSamples;
|
| - _playSize = playBytesPerSample * nSamples; // {2,4}*nSamples
|
| - RTC_CHECK_LE(_playSize, kMaxBufferSizeBytes);
|
| - RTC_CHECK_EQ(nSamples, _playSamples);
|
| - }
|
| -
|
| - size_t nSamplesOut(0);
|
| + AllocatePlayoutBufferIfNeeded();
|
| + RTC_CHECK(play_buffer_);
|
| + // WebRTC can only provide audio in 10ms chunks, hence we fail if the native
|
| + // audio layer asks for something else.
|
| + RTC_CHECK_EQ(num_samples, play_samples_per_10ms_);
|
|
|
| rtc::CritScope lock(&_critSectCb);
|
|
|
| // It is currently supported to start playout without a valid audio
|
| // transport object. Leads to warning and silence.
|
| - if (!_ptrCbAudioTransport) {
|
| + if (!audio_transport_cb_) {
|
| LOG(LS_WARNING) << "Invalid audio transport";
|
| return 0;
|
| }
|
| @@ -405,9 +324,11 @@ int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
|
| uint32_t res(0);
|
| int64_t elapsed_time_ms = -1;
|
| int64_t ntp_time_ms = -1;
|
| - res = _ptrCbAudioTransport->NeedMorePlayData(
|
| - _playSamples, playBytesPerSample, playChannels, playSampleRate,
|
| - &_playBuffer[0], nSamplesOut, &elapsed_time_ms, &ntp_time_ms);
|
| + size_t num_samples_out(0);
|
| + res = audio_transport_cb_->NeedMorePlayData(
|
| + play_samples_per_10ms_, play_bytes_per_sample_, play_channels_,
|
| + play_sample_rate_, &play_buffer_[0], num_samples_out, &elapsed_time_ms,
|
| + &ntp_time_ms);
|
| if (res != 0) {
|
| LOG(LS_ERROR) << "NeedMorePlayData() failed";
|
| }
|
| @@ -415,23 +336,46 @@ int32_t AudioDeviceBuffer::RequestPlayoutData(size_t nSamples) {
|
| // Update some stats but do it on the task queue to ensure that access of
|
| // members is serialized hence avoiding usage of locks.
|
| task_queue_.PostTask(
|
| - rtc::Bind(&AudioDeviceBuffer::UpdatePlayStats, this, nSamplesOut));
|
| -
|
| - return static_cast<int32_t>(nSamplesOut);
|
| + rtc::Bind(&AudioDeviceBuffer::UpdatePlayStats, this, num_samples_out));
|
| + return static_cast<int32_t>(num_samples_out);
|
| }
|
|
|
| -int32_t AudioDeviceBuffer::GetPlayoutData(void* audioBuffer) {
|
| +int32_t AudioDeviceBuffer::GetPlayoutData(void* audio_buffer) {
|
| rtc::CritScope lock(&_critSect);
|
| - RTC_CHECK_LE(_playSize, kMaxBufferSizeBytes);
|
| -
|
| - memcpy(audioBuffer, &_playBuffer[0], _playSize);
|
| + memcpy(audio_buffer, &play_buffer_[0], play_bytes_per_10ms_);
|
| + return static_cast<int32_t>(play_samples_per_10ms_);
|
| +}
|
|
|
| - if (_playFile.is_open()) {
|
| - // write to binary file in mono or stereo (interleaved)
|
| - _playFile.Write(&_playBuffer[0], _playSize);
|
| - }
|
| +void AudioDeviceBuffer::AllocatePlayoutBufferIfNeeded() {
|
| + RTC_CHECK(play_bytes_per_sample_);
|
| + if (play_buffer_)
|
| + return;
|
| + LOG(INFO) << __FUNCTION__;
|
| + rtc::CritScope lock(&_critSect);
|
| + // Derive the required buffer size given sample rate and number of channels.
|
| + play_samples_per_10ms_ = static_cast<size_t>(play_sample_rate_ * 10 / 1000);
|
| + play_bytes_per_10ms_ = play_bytes_per_sample_ * play_samples_per_10ms_;
|
| + LOG(INFO) << "playout samples per 10ms: " << play_samples_per_10ms_;
|
| + LOG(INFO) << "playout bytes per 10ms: " << play_bytes_per_10ms_;
|
| + // Allocate memory for the playout audio buffer. It will always contain audio
|
| + // samples corresponding to 10ms of audio to be played out.
|
| + play_buffer_.reset(new int8_t[play_bytes_per_10ms_]);
|
| +}
|
|
|
| - return static_cast<int32_t>(_playSamples);
|
| +void AudioDeviceBuffer::AllocateRecordingBufferIfNeeded() {
|
| + RTC_CHECK(rec_bytes_per_sample_);
|
| + if (rec_buffer_)
|
| + return;
|
| + LOG(INFO) << __FUNCTION__;
|
| + rtc::CritScope lock(&_critSect);
|
| + // Derive the required buffer size given sample rate and number of channels.
|
| + rec_samples_per_10ms_ = static_cast<size_t>(rec_sample_rate_ * 10 / 1000);
|
| + rec_bytes_per_10ms_ = rec_bytes_per_sample_ * rec_samples_per_10ms_;
|
| + LOG(INFO) << "recorded samples per 10ms: " << rec_samples_per_10ms_;
|
| + LOG(INFO) << "recorded bytes per 10ms: " << rec_bytes_per_10ms_;
|
| + // Allocate memory for the recording audio buffer. It will always contain
|
| + // audio samples corresponding to 10ms of audio.
|
| + rec_buffer_.reset(new int8_t[rec_bytes_per_10ms_]);
|
| }
|
|
|
| void AudioDeviceBuffer::StartTimer() {
|
| @@ -455,7 +399,7 @@ void AudioDeviceBuffer::LogStats() {
|
| uint32_t diff_samples = rec_samples_ - last_rec_samples_;
|
| uint32_t rate = diff_samples / kTimerIntervalInSeconds;
|
| LOG(INFO) << "[REC : " << time_since_last << "msec, "
|
| - << _recSampleRate / 1000
|
| + << rec_sample_rate_ / 1000
|
| << "kHz] callbacks: " << rec_callbacks_ - last_rec_callbacks_
|
| << ", "
|
| << "samples: " << diff_samples << ", "
|
| @@ -464,7 +408,7 @@ void AudioDeviceBuffer::LogStats() {
|
| diff_samples = play_samples_ - last_play_samples_;
|
| rate = diff_samples / kTimerIntervalInSeconds;
|
| LOG(INFO) << "[PLAY: " << time_since_last << "msec, "
|
| - << _playSampleRate / 1000
|
| + << play_sample_rate_ / 1000
|
| << "kHz] callbacks: " << play_callbacks_ - last_play_callbacks_
|
| << ", "
|
| << "samples: " << diff_samples << ", "
|
|
|
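|
| Note (not part of the patch): a minimal standalone sketch of the 10 ms buffer
| sizing performed by the new AllocatePlayoutBufferIfNeeded() and
| AllocateRecordingBufferIfNeeded() helpers. The 48 kHz stereo figures below are
| assumed example values, not taken from the patch.
|
|   #include <cstddef>
|   #include <cstdint>
|   #include <iostream>
|
|   int main() {
|     const uint32_t sample_rate = 48000;            // assumed sample rate
|     const size_t channels = 2;                     // assumed stereo
|     const size_t bytes_per_sample = 2 * channels;  // 16-bit PCM, all channels
|     // Same derivation as in the patch: samples and bytes per 10 ms chunk.
|     const size_t samples_per_10ms =
|         static_cast<size_t>(sample_rate * 10 / 1000);
|     const size_t bytes_per_10ms = bytes_per_sample * samples_per_10ms;
|     std::cout << "samples per 10ms: " << samples_per_10ms << "\n";  // 480
|     std::cout << "bytes per 10ms: " << bytes_per_10ms << "\n";      // 1920
|     return 0;
|   }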