| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_device/android/opensles_recorder.h" | 11 #include "webrtc/modules/audio_device/android/opensles_recorder.h" |
| 12 | 12 |
| 13 #include <android/log.h> | 13 #include <android/log.h> |
| 14 | 14 |
| 15 #include "webrtc/base/array_view.h" |
| 15 #include "webrtc/base/arraysize.h" | 16 #include "webrtc/base/arraysize.h" |
| 16 #include "webrtc/base/checks.h" | 17 #include "webrtc/base/checks.h" |
| 17 #include "webrtc/base/format_macros.h" | 18 #include "webrtc/base/format_macros.h" |
| 18 #include "webrtc/base/timeutils.h" | 19 #include "webrtc/base/timeutils.h" |
| 19 #include "webrtc/modules/audio_device/android/audio_common.h" | 20 #include "webrtc/modules/audio_device/android/audio_common.h" |
| 20 #include "webrtc/modules/audio_device/android/audio_manager.h" | 21 #include "webrtc/modules/audio_device/android/audio_manager.h" |
| 21 #include "webrtc/modules/audio_device/fine_audio_buffer.h" | 22 #include "webrtc/modules/audio_device/fine_audio_buffer.h" |
| 22 | 23 |
| 23 #define TAG "OpenSLESRecorder" | 24 #define TAG "OpenSLESRecorder" |
| 24 #define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__) | 25 #define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__) |
| (...skipping 303 matching lines...) | |
| 328 // of samples (and not only multiple of 10ms) to match the native audio unit | 329 // of samples (and not only multiple of 10ms) to match the native audio unit |
| 329 // buffer size. | 330 // buffer size. |
| 330 ALOGD("frames per native buffer: %" PRIuS, | 331 ALOGD("frames per native buffer: %" PRIuS, |
| 331 audio_parameters_.frames_per_buffer()); | 332 audio_parameters_.frames_per_buffer()); |
| 332 ALOGD("frames per 10ms buffer: %" PRIuS, | 333 ALOGD("frames per 10ms buffer: %" PRIuS, |
| 333 audio_parameters_.frames_per_10ms_buffer()); | 334 audio_parameters_.frames_per_10ms_buffer()); |
| 334 ALOGD("bytes per native buffer: %" PRIuS, | 335 ALOGD("bytes per native buffer: %" PRIuS, |
| 335 audio_parameters_.GetBytesPerBuffer()); | 336 audio_parameters_.GetBytesPerBuffer()); |
| 336 ALOGD("native sample rate: %d", audio_parameters_.sample_rate()); | 337 ALOGD("native sample rate: %d", audio_parameters_.sample_rate()); |
| 337 RTC_DCHECK(audio_device_buffer_); | 338 RTC_DCHECK(audio_device_buffer_); |
| 338 fine_audio_buffer_.reset(new FineAudioBuffer( | 339 fine_audio_buffer_.reset( |
| 339 audio_device_buffer_, audio_parameters_.GetBytesPerBuffer(), | 340 new FineAudioBuffer(audio_device_buffer_, audio_parameters_.sample_rate(), |
| 340 audio_parameters_.sample_rate())); | 341 2 * audio_parameters_.GetBytesPerBuffer())); |
| 341 // Allocate queue of audio buffers that stores recorded audio samples. | 342 // Allocate queue of audio buffers that stores recorded audio samples. |
| 342 const int data_size_bytes = audio_parameters_.GetBytesPerBuffer(); | 343 const int data_size_bytes = audio_parameters_.GetBytesPerBuffer(); |
| 343 audio_buffers_.reset(new std::unique_ptr<SLint8[]>[kNumOfOpenSLESBuffers]); | 344 audio_buffers_.reset(new std::unique_ptr<SLint8[]>[kNumOfOpenSLESBuffers]); |
| 344 for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) { | 345 for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) { |
| 345 audio_buffers_[i].reset(new SLint8[data_size_bytes]); | 346 audio_buffers_[i].reset(new SLint8[data_size_bytes]); |
| 346 } | 347 } |
| 347 } | 348 } |
| 348 | 349 |
| 349 void OpenSLESRecorder::ReadBufferQueue() { | 350 void OpenSLESRecorder::ReadBufferQueue() { |
| 350 RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread()); | 351 RTC_DCHECK(thread_checker_opensles_.CalledOnValidThread()); |
| (...skipping 13 matching lines...) | |
| 364 last_rec_time_ = current_time; | 365 last_rec_time_ = current_time; |
| 365 // Send recorded audio data to the WebRTC sink. | 366 // Send recorded audio data to the WebRTC sink. |
| 366 // TODO(henrika): fix delay estimates. It is OK to use fixed values for now | 367 // TODO(henrika): fix delay estimates. It is OK to use fixed values for now |
| 367 // since there is no support to turn off built-in EC in combination with | 368 // since there is no support to turn off built-in EC in combination with |
| 368 // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use | 369 // OpenSL ES anyhow. Hence, as is, the WebRTC based AEC (which would use |
| 369 // these estimates) will never be active. | 370 // these estimates) will never be active. |
| 370 const size_t size_in_bytes = | 371 const size_t size_in_bytes = |
| 371 static_cast<size_t>(audio_parameters_.GetBytesPerBuffer()); | 372 static_cast<size_t>(audio_parameters_.GetBytesPerBuffer()); |
| 372 const int8_t* data = | 373 const int8_t* data = |
| 373 static_cast<const int8_t*>(audio_buffers_[buffer_index_].get()); | 374 static_cast<const int8_t*>(audio_buffers_[buffer_index_].get()); |
| 374 fine_audio_buffer_->DeliverRecordedData(data, size_in_bytes, 25, 25); | 375 fine_audio_buffer_->DeliverRecordedData( |
| 376 rtc::ArrayView<const int8_t>(data, size_in_bytes), 25, 25); |
| 375 // Enqueue the utilized audio buffer and use it for recording again. | 377 // Enqueue the utilized audio buffer and use it for recording again. |
| 376 EnqueueAudioBuffer(); | 378 EnqueueAudioBuffer(); |
| 377 } | 379 } |
| 378 | 380 |
| 379 bool OpenSLESRecorder::EnqueueAudioBuffer() { | 381 bool OpenSLESRecorder::EnqueueAudioBuffer() { |
| 380 SLresult err = | 382 SLresult err = |
| 381 (*simple_buffer_queue_) | 383 (*simple_buffer_queue_) |
| 382 ->Enqueue(simple_buffer_queue_, audio_buffers_[buffer_index_].get(), | 384 ->Enqueue(simple_buffer_queue_, audio_buffers_[buffer_index_].get(), |
| 383 audio_parameters_.GetBytesPerBuffer()); | 385 audio_parameters_.GetBytesPerBuffer()); |
| 384 if (SL_RESULT_SUCCESS != err) { | 386 if (SL_RESULT_SUCCESS != err) { |
| (...skipping 32 matching lines...) | |
| 417 SLAndroidSimpleBufferQueueState state = GetBufferQueueState(); | 419 SLAndroidSimpleBufferQueueState state = GetBufferQueueState(); |
| 418 ALOGD("state.count:%d state.index:%d", state.count, state.index); | 420 ALOGD("state.count:%d state.index:%d", state.count, state.index); |
| 419 } | 421 } |
| 420 | 422 |
| 421 SLuint32 OpenSLESRecorder::GetBufferCount() { | 423 SLuint32 OpenSLESRecorder::GetBufferCount() { |
| 422 SLAndroidSimpleBufferQueueState state = GetBufferQueueState(); | 424 SLAndroidSimpleBufferQueueState state = GetBufferQueueState(); |
| 423 return state.count; | 425 return state.count; |
| 424 } | 426 } |
| 425 | 427 |
| 426 } // namespace webrtc | 428 } // namespace webrtc |
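
For context on the pattern this change adopts: the old call site passed a raw `const int8_t*` plus a separate byte count to `FineAudioBuffer::DeliverRecordedData()`, while the new one wraps both in a single `rtc::ArrayView<const int8_t>` so the length travels with the pointer. The sketch below is a minimal, self-contained illustration of that idea only; `ByteView` and the free `DeliverRecordedData()` function are hypothetical stand-ins, not WebRTC's actual `rtc::ArrayView` or `FineAudioBuffer` API, and the 960-byte buffer is just an example size.

```cpp
// Minimal, self-contained sketch of the "view instead of pointer + size"
// pattern. ByteView and this free DeliverRecordedData() are hypothetical
// stand-ins for rtc::ArrayView<const int8_t> and
// FineAudioBuffer::DeliverRecordedData(); they are not the WebRTC API.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Non-owning (pointer, size) pair that travels as a single argument, so the
// callee cannot be handed a length that disagrees with the buffer.
class ByteView {
 public:
  ByteView(const std::int8_t* data, std::size_t size)
      : data_(data), size_(size) {}
  const std::int8_t* data() const { return data_; }
  std::size_t size() const { return size_; }

 private:
  const std::int8_t* data_;
  std::size_t size_;
};

// Stand-in for the sink call: the size comes from the view itself, mirroring
// the new DeliverRecordedData(rtc::ArrayView<const int8_t>, ...) signature.
void DeliverRecordedData(ByteView audio, int playout_delay_ms,
                         int record_delay_ms) {
  std::printf("delivering %zu bytes (delays %d/%d ms)\n", audio.size(),
              playout_delay_ms, record_delay_ms);
}

int main() {
  // Example size only: 10 ms of 48 kHz mono 16-bit audio is 480 * 2 bytes.
  std::vector<std::int8_t> recorded(960);
  DeliverRecordedData(ByteView(recorded.data(), recorded.size()), 25, 25);
  return 0;
}
```

The `FineAudioBuffer` construction in the earlier hunk follows the same direction: per the diff, the new call passes the sample rate second and a capacity of `2 * audio_parameters_.GetBytesPerBuffer()` third, replacing the old `(buffer, bytes-per-buffer, sample-rate)` argument order.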