OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_device/android/opensles_player.h" | 11 #include "webrtc/modules/audio_device/android/opensles_player.h" |
12 | 12 |
13 #include <android/log.h> | 13 #include <android/log.h> |
14 | 14 |
| 15 #include "webrtc/base/array_view.h" |
15 #include "webrtc/base/arraysize.h" | 16 #include "webrtc/base/arraysize.h" |
16 #include "webrtc/base/checks.h" | 17 #include "webrtc/base/checks.h" |
17 #include "webrtc/base/format_macros.h" | 18 #include "webrtc/base/format_macros.h" |
18 #include "webrtc/base/timeutils.h" | 19 #include "webrtc/base/timeutils.h" |
19 #include "webrtc/modules/audio_device/android/audio_common.h" | 20 #include "webrtc/modules/audio_device/android/audio_common.h" |
20 #include "webrtc/modules/audio_device/android/audio_manager.h" | 21 #include "webrtc/modules/audio_device/android/audio_manager.h" |
21 #include "webrtc/modules/audio_device/fine_audio_buffer.h" | 22 #include "webrtc/modules/audio_device/fine_audio_buffer.h" |
22 | 23 |
23 #define TAG "OpenSLESPlayer" | 24 #define TAG "OpenSLESPlayer" |
24 #define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__) | 25 #define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__) |
(...skipping 177 matching lines...) |
202 // buffer size. The native buffer size corresponds to the | 203 // buffer size. The native buffer size corresponds to the |
203 // PROPERTY_OUTPUT_FRAMES_PER_BUFFER property which is the number of audio | 204 // PROPERTY_OUTPUT_FRAMES_PER_BUFFER property which is the number of audio |
204 // frames that the HAL (Hardware Abstraction Layer) buffer can hold. It is | 205 // frames that the HAL (Hardware Abstraction Layer) buffer can hold. It is |
205 // recommended to construct audio buffers so that they contain an exact | 206 // recommended to construct audio buffers so that they contain an exact |
206 // multiple of this number. If so, callbacks will occur at regular intervals, | 207 // multiple of this number. If so, callbacks will occur at regular intervals, |
207 // which reduces jitter. | 208 // which reduces jitter. |
208 const size_t buffer_size_in_bytes = audio_parameters_.GetBytesPerBuffer(); | 209 const size_t buffer_size_in_bytes = audio_parameters_.GetBytesPerBuffer(); |
209 ALOGD("native buffer size: %" PRIuS, buffer_size_in_bytes); | 210 ALOGD("native buffer size: %" PRIuS, buffer_size_in_bytes); |
210 ALOGD("native buffer size in ms: %.2f", | 211 ALOGD("native buffer size in ms: %.2f", |
211 audio_parameters_.GetBufferSizeInMilliseconds()); | 212 audio_parameters_.GetBufferSizeInMilliseconds()); |
212 fine_audio_buffer_.reset( | 213 fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_, |
213 new FineAudioBuffer(audio_device_buffer_, buffer_size_in_bytes, | 214 audio_parameters_.sample_rate(), |
214 audio_parameters_.sample_rate())); | 215 2 * buffer_size_in_bytes)); |
215 // Allocate memory for audio buffers. | 216 // Allocate memory for audio buffers. |
216 for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) { | 217 for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) { |
217 audio_buffers_[i].reset(new SLint8[buffer_size_in_bytes]); | 218 audio_buffers_[i].reset(new SLint8[buffer_size_in_bytes]); |
218 } | 219 } |
219 } | 220 } |
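Note on this hunk: the FineAudioBuffer constructor call now passes the sample rate followed by a capacity of twice the native buffer size, instead of the native buffer size itself. The class bridges WebRTC's fixed 10 ms blocks and the device's native buffer size. A minimal sketch of that adapter idea, using a hypothetical FrameAdapter class (illustrative only, not WebRTC's actual FineAudioBuffer API):

// Bridges fixed 10 ms source blocks to arbitrary native buffer sizes
// requested by an audio callback. Hypothetical sketch, not WebRTC code.
#include <algorithm>
#include <cstdint>
#include <deque>
#include <functional>
#include <vector>

class FrameAdapter {
 public:
  FrameAdapter(std::function<void(int16_t*, size_t)> get_10ms_block,
               size_t samples_per_10ms)
      : get_10ms_block_(std::move(get_10ms_block)),
        samples_per_10ms_(samples_per_10ms) {}

  // Fills |dest| with |num_samples| samples, pulling as many 10 ms blocks
  // from the source as needed and caching any leftover for the next call.
  void GetPlayoutData(int16_t* dest, size_t num_samples) {
    while (cache_.size() < num_samples) {
      std::vector<int16_t> block(samples_per_10ms_);
      get_10ms_block_(block.data(), block.size());
      cache_.insert(cache_.end(), block.begin(), block.end());
    }
    std::copy(cache_.begin(), cache_.begin() + num_samples, dest);
    cache_.erase(cache_.begin(), cache_.begin() + num_samples);
  }

 private:
  std::function<void(int16_t*, size_t)> get_10ms_block_;
  const size_t samples_per_10ms_;
  std::deque<int16_t> cache_;  // Leftover samples between callbacks.
};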
220 | 221 |
221 bool OpenSLESPlayer::ObtainEngineInterface() { | 222 bool OpenSLESPlayer::ObtainEngineInterface() { |
222 ALOGD("ObtainEngineInterface"); | 223 ALOGD("ObtainEngineInterface"); |
223 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 224 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
224 if (engine_) | 225 if (engine_) |
(...skipping 166 matching lines...) |
391 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 392 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
392 // Avoid acquiring real audio data from WebRTC and fill the buffer with | 393 // Avoid acquiring real audio data from WebRTC and fill the buffer with |
393 // zeros instead. Used to prime the buffer with silence and to avoid asking | 394 // zeros instead. Used to prime the buffer with silence and to avoid asking |
394 // for audio data from two different threads. | 395 // for audio data from two different threads. |
395 memset(audio_ptr, 0, audio_parameters_.GetBytesPerBuffer()); | 396 memset(audio_ptr, 0, audio_parameters_.GetBytesPerBuffer()); |
396 } else { | 397 } else { |
397 RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread()); | 398 RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread()); |
398 // Read audio data from the WebRTC source using the FineAudioBuffer object | 399 // Read audio data from the WebRTC source using the FineAudioBuffer object |
399 // to adjust for differences in buffer size between WebRTC (10ms) and native | 400 // to adjust for differences in buffer size between WebRTC (10ms) and native |
400 // OpenSL ES. | 401 // OpenSL ES. |
401 fine_audio_buffer_->GetPlayoutData(audio_ptr); | 402 fine_audio_buffer_->GetPlayoutData(rtc::ArrayView<SLint8>( |
| 403 audio_ptr, audio_parameters_.GetBytesPerBuffer())); |
402 } | 404 } |
403 // Enqueue the decoded audio buffer for playback. | 405 // Enqueue the decoded audio buffer for playback. |
404 SLresult err = (*simple_buffer_queue_) | 406 SLresult err = (*simple_buffer_queue_) |
405 ->Enqueue(simple_buffer_queue_, audio_ptr, | 407 ->Enqueue(simple_buffer_queue_, audio_ptr, |
406 audio_parameters_.GetBytesPerBuffer()); | 408 audio_parameters_.GetBytesPerBuffer()); |
407 if (SL_RESULT_SUCCESS != err) { | 409 if (SL_RESULT_SUCCESS != err) { |
408 ALOGE("Enqueue failed: %d", err); | 410 ALOGE("Enqueue failed: %d", err); |
409 } | 411 } |
410 buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers; | 412 buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers; |
411 } | 413 } |
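Note on this hunk: GetPlayoutData now receives an rtc::ArrayView<SLint8> built from the raw pointer and the byte count, so the buffer length travels with the pointer. A minimal sketch of that pattern, reusing only the ArrayView(pointer, size) constructor and the data()/size() accessors that the hunk itself relies on; FillWithSilence is a hypothetical helper:

#include <cstring>

#include <SLES/OpenSLES.h>

#include "webrtc/base/array_view.h"

// Hypothetical helper: the sized view makes it hard to write past the end
// of the OpenSL ES buffer handed to the callback.
void FillWithSilence(rtc::ArrayView<SLint8> destination) {
  std::memset(destination.data(), 0, destination.size());
}

// Call-site sketch, mirroring the Enqueue path above:
//   FillWithSilence(rtc::ArrayView<SLint8>(
//       audio_ptr, audio_parameters_.GetBytesPerBuffer()));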
412 | 414 |
413 SLuint32 OpenSLESPlayer::GetPlayState() const { | 415 SLuint32 OpenSLESPlayer::GetPlayState() const { |
414 RTC_DCHECK(player_); | 416 RTC_DCHECK(player_); |
415 SLuint32 state; | 417 SLuint32 state; |
416 SLresult err = (*player_)->GetPlayState(player_, &state); | 418 SLresult err = (*player_)->GetPlayState(player_, &state); |
417 if (SL_RESULT_SUCCESS != err) { | 419 if (SL_RESULT_SUCCESS != err) { |
418 ALOGE("GetPlayState failed: %d", err); | 420 ALOGE("GetPlayState failed: %d", err); |
419 } | 421 } |
420 return state; | 422 return state; |
421 } | 423 } |
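GetPlayState above is the standard SLPlayItf state query. For context, a hedged sketch of how such a query is typically paired with the OpenSL ES SetPlayState call when starting playout; StartIfStopped is a hypothetical free function, not part of OpenSLESPlayer:

#include <SLES/OpenSLES.h>

// Starts playout only if the player is not already playing. Uses the
// standard SLPlayItf methods GetPlayState and SetPlayState.
bool StartIfStopped(SLPlayItf player) {
  SLuint32 state = SL_PLAYSTATE_STOPPED;
  if ((*player)->GetPlayState(player, &state) != SL_RESULT_SUCCESS)
    return false;
  if (state == SL_PLAYSTATE_PLAYING)
    return true;  // Already running.
  return (*player)->SetPlayState(player, SL_PLAYSTATE_PLAYING) ==
         SL_RESULT_SUCCESS;
}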
422 | 424 |
423 } // namespace webrtc | 425 } // namespace webrtc |