| Index: webrtc/modules/audio_device/android/opensles_player.cc | 
| diff --git a/webrtc/modules/audio_device/android/opensles_player.cc b/webrtc/modules/audio_device/android/opensles_player.cc | 
| index b3ad64e424627f6f1b3580ecc0b5f6e8dbbd9df9..a63b8c1257acec6fc2eda9b66fdf124bd20b71ee 100644 | 
| --- a/webrtc/modules/audio_device/android/opensles_player.cc | 
| +++ b/webrtc/modules/audio_device/android/opensles_player.cc | 
| @@ -44,7 +44,6 @@ OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager) | 
| audio_device_buffer_(nullptr), | 
| initialized_(false), | 
| playing_(false), | 
| - bytes_per_buffer_(0), | 
| buffer_index_(0), | 
| engine_(nullptr), | 
| player_(nullptr), | 
| @@ -94,11 +93,13 @@ int OpenSLESPlayer::InitPlayout() { | 
| RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 
| RTC_DCHECK(!initialized_); | 
| RTC_DCHECK(!playing_); | 
| - ObtainEngineInterface(); | 
| + if (!ObtainEngineInterface()) { | 
| + ALOGE("Failed to obtain SL Engine interface"); | 
| + return -1; | 
| + } | 
| CreateMix(); | 
| initialized_ = true; | 
| buffer_index_ = 0; | 
| - last_play_time_ = rtc::Time(); | 
| return 0; | 
| } | 
| @@ -107,6 +108,9 @@ int OpenSLESPlayer::StartPlayout() { | 
| RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 
| RTC_DCHECK(initialized_); | 
| RTC_DCHECK(!playing_); | 
| + if (fine_audio_buffer_) { | 
| + fine_audio_buffer_->ResetPlayout(); | 
| + } | 
| // The number of lower latency audio players is limited, hence we create the | 
| // audio player in Start() and destroy it in Stop(). | 
| CreateAudioPlayer(); | 
| @@ -114,6 +118,7 @@ int OpenSLESPlayer::StartPlayout() { | 
| // starts when mode is later changed to SL_PLAYSTATE_PLAYING. | 
| // TODO(henrika): we can save some delay by only making one call to | 
| // EnqueuePlayoutData. Most likely not worth the risk of adding a glitch. | 
| + last_play_time_ = rtc::Time(); | 
| for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) { | 
| EnqueuePlayoutData(); | 
| } | 
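
The hunk above primes the queue with kNumOfOpenSLESBuffers buffers before playback is switched on. The remainder of StartPlayout() is outside this hunk; a minimal sketch of how it plausibly completes, assuming the standard SLPlayItf interface (the exact error handling in the real file may differ):

// Sketch (not part of this patch): once the queue has been primed above,
// playback is started by moving the player into SL_PLAYSTATE_PLAYING.
SLresult err = (*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING);
if (SL_RESULT_SUCCESS != err) {
  ALOGE("SetPlayState failed: %d", err);
  return -1;
}
playing_ = true;
return 0;
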
| @@ -187,77 +192,29 @@ void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { | 
| AllocateDataBuffers(); | 
| } | 
| -SLDataFormat_PCM OpenSLESPlayer::CreatePCMConfiguration( | 
| - size_t channels, | 
| - int sample_rate, | 
| - size_t bits_per_sample) { | 
| - ALOGD("CreatePCMConfiguration"); | 
| - RTC_CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16); | 
| - SLDataFormat_PCM format; | 
| - format.formatType = SL_DATAFORMAT_PCM; | 
| - format.numChannels = static_cast<SLuint32>(channels); | 
| - // Note that, the unit of sample rate is actually in milliHertz and not Hertz. | 
| - switch (sample_rate) { | 
| - case 8000: | 
| - format.samplesPerSec = SL_SAMPLINGRATE_8; | 
| - break; | 
| - case 16000: | 
| - format.samplesPerSec = SL_SAMPLINGRATE_16; | 
| - break; | 
| - case 22050: | 
| - format.samplesPerSec = SL_SAMPLINGRATE_22_05; | 
| - break; | 
| - case 32000: | 
| - format.samplesPerSec = SL_SAMPLINGRATE_32; | 
| - break; | 
| - case 44100: | 
| - format.samplesPerSec = SL_SAMPLINGRATE_44_1; | 
| - break; | 
| - case 48000: | 
| - format.samplesPerSec = SL_SAMPLINGRATE_48; | 
| - break; | 
| - default: | 
| - RTC_CHECK(false) << "Unsupported sample rate: " << sample_rate; | 
| - } | 
| - format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16; | 
| - format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16; | 
| - format.endianness = SL_BYTEORDER_LITTLEENDIAN; | 
| - if (format.numChannels == 1) | 
| - format.channelMask = SL_SPEAKER_FRONT_CENTER; | 
| - else if (format.numChannels == 2) | 
| - format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; | 
| - else | 
| - RTC_CHECK(false) << "Unsupported number of channels: " | 
| - << format.numChannels; | 
| - return format; | 
| -} | 
| - | 
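
The removed CreatePCMConfiguration() maps each supported rate onto an SL_SAMPLINGRATE_* constant because SLDataFormat_PCM::samplesPerSec is expressed in milliHertz (SL_SAMPLINGRATE_44_1 is 44100000). For reference, a hypothetical simplification that computes the field directly instead of switching on the rate; CreatePcmFormat is an illustrative name, not what the file uses:

#include <SLES/OpenSLES.h>

// Sketch: samplesPerSec is in milliHertz, so multiplying the rate in Hz by
// 1000 yields the same values as the removed switch statement produced for
// every rate it accepted.
SLDataFormat_PCM CreatePcmFormat(size_t channels, int sample_rate_hz) {
  SLDataFormat_PCM format = {};
  format.formatType = SL_DATAFORMAT_PCM;
  format.numChannels = static_cast<SLuint32>(channels);
  format.samplesPerSec = static_cast<SLuint32>(sample_rate_hz) * 1000;
  format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
  format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
  format.endianness = SL_BYTEORDER_LITTLEENDIAN;
  format.channelMask = (channels == 1)
                           ? SL_SPEAKER_FRONT_CENTER
                           : SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
  return format;
}
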
| void OpenSLESPlayer::AllocateDataBuffers() { | 
| ALOGD("AllocateDataBuffers"); | 
| RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 
| RTC_DCHECK(!simple_buffer_queue_); | 
| RTC_CHECK(audio_device_buffer_); | 
| - // Don't use the lowest possible size as native buffer size. Instead, | 
| - // use 10ms to better match the frame size that WebRTC uses. It will result | 
| - // in a reduced risk for audio glitches and also in a more "clean" sequence | 
| - // of callbacks from the OpenSL ES thread in to WebRTC when asking for audio | 
| - // to render. | 
| - ALOGD("lowest possible buffer size: %" PRIuS, | 
| - audio_parameters_.GetBytesPerBuffer()); | 
| - bytes_per_buffer_ = audio_parameters_.GetBytesPerFrame() * | 
| - audio_parameters_.frames_per_10ms_buffer(); | 
| - RTC_DCHECK_GE(bytes_per_buffer_, audio_parameters_.GetBytesPerBuffer()); | 
| - ALOGD("native buffer size: %" PRIuS, bytes_per_buffer_); | 
| // Create a modified audio buffer class which allows us to ask for any number | 
| // of samples (and not only multiple of 10ms) to match the native OpenSL ES | 
| - // buffer size. | 
| - fine_buffer_.reset(new FineAudioBuffer(audio_device_buffer_, | 
| - bytes_per_buffer_, | 
| - audio_parameters_.sample_rate())); | 
| + // buffer size. The native buffer size corresponds to the | 
| + // PROPERTY_OUTPUT_FRAMES_PER_BUFFER property which is the number of audio | 
| + // frames that the HAL (Hardware Abstraction Layer) buffer can hold. It is | 
| + // recommended to construct audio buffers so that they contain an exact | 
| + // multiple of this number. If so, callbacks will occur at regular intervals, | 
| + // which reduces jitter. | 
| + ALOGD("native buffer size: %" PRIuS, audio_parameters_.GetBytesPerBuffer()); | 
| + ALOGD("native buffer size in ms: %.2f", | 
| + audio_parameters_.GetBufferSizeInMilliseconds()); | 
| + fine_audio_buffer_.reset(new FineAudioBuffer( | 
| + audio_device_buffer_, audio_parameters_.GetBytesPerBuffer(), | 
| + audio_parameters_.sample_rate())); | 
| // Each buffer must be of this size to avoid unnecessary memcpy while caching | 
| // data between successive callbacks. | 
| const size_t required_buffer_size = | 
| - fine_buffer_->RequiredPlayoutBufferSizeBytes(); | 
| + fine_audio_buffer_->RequiredPlayoutBufferSizeBytes(); | 
| ALOGD("required buffer size: %" PRIuS, required_buffer_size); | 
| for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) { | 
| audio_buffers_[i].reset(new SLint8[required_buffer_size]); | 
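
The comment block above explains why the buffers handed to OpenSL ES are sized to the native PROPERTY_OUTPUT_FRAMES_PER_BUFFER value rather than to WebRTC's 10 ms frames, with FineAudioBuffer bridging the two sizes. A stripped-down illustration of that adaptation (a hypothetical TenMsToNativeAdapter, not the real FineAudioBuffer, with a pull callback standing in for the AudioDeviceBuffer plumbing):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <vector>

// Sketch of the adaptation FineAudioBuffer performs: WebRTC always produces
// audio in fixed 10 ms chunks, while OpenSL ES asks for |native_size_bytes|
// per callback. Leftover bytes are cached between callbacks so nothing is
// dropped and the callback sequence stays regular.
class TenMsToNativeAdapter {
 public:
  TenMsToNativeAdapter(size_t native_size_bytes,
                       std::function<std::vector<int8_t>()> pull_10ms_chunk)
      : native_size_bytes_(native_size_bytes),
        pull_10ms_chunk_(std::move(pull_10ms_chunk)) {}

  // Fills |dest| (native_size_bytes_ long) with decoded audio, pulling as
  // many 10 ms chunks as needed and caching any remainder for the next call.
  void GetPlayoutData(int8_t* dest) {
    while (cache_.size() < native_size_bytes_) {
      std::vector<int8_t> chunk = pull_10ms_chunk_();
      cache_.insert(cache_.end(), chunk.begin(), chunk.end());
    }
    std::memcpy(dest, cache_.data(), native_size_bytes_);
    cache_.erase(cache_.begin(), cache_.begin() + native_size_bytes_);
  }

 private:
  const size_t native_size_bytes_;
  const std::function<std::vector<int8_t>()> pull_10ms_chunk_;
  std::vector<int8_t> cache_;  // bytes left over from previous callbacks
};
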
| @@ -267,7 +224,8 @@ void OpenSLESPlayer::AllocateDataBuffers() { | 
| bool OpenSLESPlayer::ObtainEngineInterface() { | 
| ALOGD("ObtainEngineInterface"); | 
| RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 
| - RTC_DCHECK(!engine_); | 
| + if (engine_) | 
| 
 
tommi
2016/09/15 09:34:13
is ObtainEngineInterface always called on the same
 
henrika_webrtc
2016/09/16 13:30:47
Yes Sir, it is...and I do have a thread checker to
 
 | 
| + return true; | 
| // Get access to (or create if not already existing) the global OpenSL Engine | 
| // object. | 
| SLObjectItf engine_object = audio_manager_->GetOpenSLEngine(); | 
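
For context: the engine object returned by GetOpenSLEngine() still has to be resolved into the SLEngineItf stored in engine_, and that part of ObtainEngineInterface() falls outside this hunk. A plausible sketch of the continuation, assuming the standard GetInterface call with SL_IID_ENGINE:

// Sketch (continuation not shown in the hunk): resolve the engine interface
// from the shared engine object owned by the AudioManager.
if (engine_object == nullptr) {
  ALOGE("Failed to access the global OpenSL ES engine");
  return false;
}
SLresult err =
    (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine_);
if (SL_RESULT_SUCCESS != err) {
  ALOGE("GetInterface(SL_IID_ENGINE) failed: %d", err);
  return false;
}
return true;
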
| @@ -395,6 +353,8 @@ void OpenSLESPlayer::DestroyAudioPlayer() { | 
| RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 
| if (!player_object_.Get()) | 
| return; | 
| + (*simple_buffer_queue_) | 
| + ->RegisterCallback(simple_buffer_queue_, nullptr, nullptr); | 
| player_object_.Reset(); | 
| player_ = nullptr; | 
| simple_buffer_queue_ = nullptr; | 
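
The new RegisterCallback(..., nullptr, nullptr) call detaches the buffer-queue callback before the player object is destroyed, so a late callback from the OpenSL ES thread cannot land in a half-torn-down object. The registration it undoes looks roughly like the following, based on the standard SLAndroidSimpleBufferQueueItf API; SimpleBufferQueueCallback is assumed to be the static trampoline registered in CreateAudioPlayer():

// Sketch: how the callback is typically wired up in CreateAudioPlayer(); the
// nullptr registration in DestroyAudioPlayer() reverses exactly this step.
SLresult err = (*simple_buffer_queue_)
                   ->RegisterCallback(simple_buffer_queue_,
                                      SimpleBufferQueueCallback,  // static trampoline
                                      this);                      // user context
if (SL_RESULT_SUCCESS != err) {
  ALOGE("RegisterCallback failed: %d", err);
}
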
| @@ -422,10 +382,10 @@ void OpenSLESPlayer::FillBufferQueue() { | 
| void OpenSLESPlayer::EnqueuePlayoutData() { | 
| // Check delta time between two successive callbacks and provide a warning | 
| // if it becomes very large. | 
| - // TODO(henrika): using 100ms as upper limit but this value is rather random. | 
| + // TODO(henrika): using 150ms as upper limit but this value is rather random. | 
| const uint32_t current_time = rtc::Time(); | 
| const uint32_t diff = current_time - last_play_time_; | 
| - if (diff > 100) { | 
| + if (diff > 150) { | 
| ALOGW("Bad OpenSL ES playout timing, dT=%u [ms]", diff); | 
| } | 
| last_play_time_ = current_time; | 
| @@ -433,11 +393,11 @@ void OpenSLESPlayer::EnqueuePlayoutData() { | 
| // to adjust for differences in buffer size between WebRTC (10ms) and native | 
| // OpenSL ES. | 
| SLint8* audio_ptr = audio_buffers_[buffer_index_].get(); | 
| - fine_buffer_->GetPlayoutData(audio_ptr); | 
| + fine_audio_buffer_->GetPlayoutData(audio_ptr); | 
| // Enqueue the decoded audio buffer for playback. | 
| - SLresult err = | 
| - (*simple_buffer_queue_) | 
| - ->Enqueue(simple_buffer_queue_, audio_ptr, bytes_per_buffer_); | 
| + SLresult err = (*simple_buffer_queue_) | 
| + ->Enqueue(simple_buffer_queue_, audio_ptr, | 
| + audio_parameters_.GetBytesPerBuffer()); | 
| if (SL_RESULT_SUCCESS != err) { | 
| ALOGE("Enqueue failed: %d", err); | 
| } |
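
To make the enqueue path above easier to follow: each buffer that finishes playing triggers the registered buffer-queue callback on the internal OpenSL ES thread, which refills and re-enqueues one of the kNumOfOpenSLESBuffers rotating buffers, advancing buffer_index_ modulo kNumOfOpenSLESBuffers after each enqueue. A condensed sketch of that cycle, assuming a static callback named SimpleBufferQueueCallback (name assumed) that forwards to FillBufferQueue():

// Sketch of the playout cycle (illustration only): OpenSL ES invokes the
// registered callback each time a buffer has been consumed; the callback
// refills the next rotating buffer via EnqueuePlayoutData() above.
// static
void OpenSLESPlayer::SimpleBufferQueueCallback(
    SLAndroidSimpleBufferQueueItf caller, void* context) {
  OpenSLESPlayer* stream = reinterpret_cast<OpenSLESPlayer*>(context);
  stream->FillBufferQueue();  // ends up calling EnqueuePlayoutData()
}
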