Chromium Code Reviews| Index: webrtc/modules/utility/source/audio_frame_operations.cc |
| diff --git a/webrtc/modules/utility/source/audio_frame_operations.cc b/webrtc/modules/utility/source/audio_frame_operations.cc |
| index 102407d0f0f0fd223f6eaf9836dfa2b18278a5f7..552a268d0a72133ae073b5d222fd9e2c74d83c7b 100644 |
| --- a/webrtc/modules/utility/source/audio_frame_operations.cc |
| +++ b/webrtc/modules/utility/source/audio_frame_operations.cc |
| @@ -8,6 +8,8 @@ |
| * be found in the AUTHORS file in the root of the source tree. |
| */ |
| +#include <algorithm> |
| + |
| #include "webrtc/modules/include/module_common_types.h" |
| #include "webrtc/modules/utility/include/audio_frame_operations.h" |
| #include "webrtc/base/checks.h" |
| @@ -21,6 +23,56 @@ const float kMuteFadeInc = 1.0f / kMuteFadeFrames; |
| } // namespace { |
|
aleloi
2016/10/18 11:38:04
All implementations are copied from AudioFrame wit
the sun
2016/10/19 09:23:17
Can we just make it "Add()" or "AddTo()"? Or you c
aleloi
2016/10/20 08:27:06
Done.
|
| +void AudioFrameOperations::AddFrames(const AudioFrame& frame_to_add, |
| + AudioFrame* result_frame) { |
| + // Sanity check |
| + RTC_DCHECK_GT(result_frame->num_channels_, 0u); |
| + RTC_DCHECK_LT(result_frame->num_channels_, 3u); |
| + if ((result_frame->num_channels_ > 2) || (result_frame->num_channels_ < 1)) |
| + return; |
| + if (result_frame->num_channels_ != frame_to_add.num_channels_) |
| + return; |
| + |
| + bool noPrevData = false; |
| + if (result_frame->samples_per_channel_ != frame_to_add.samples_per_channel_) { |
| + if (result_frame->samples_per_channel_ == 0) { |
| + // special case we have no data to start with |
| + result_frame->samples_per_channel_ = frame_to_add.samples_per_channel_; |
| + noPrevData = true; |
| + } else { |
| + return; |
| + } |
| + } |
| + |
| + if ((result_frame->vad_activity_ == AudioFrame::kVadActive) || |
| + frame_to_add.vad_activity_ == result_frame->kVadActive) { |
| + result_frame->vad_activity_ = AudioFrame::kVadActive; |
| + } else if (result_frame->vad_activity_ == AudioFrame::kVadUnknown || |
| + frame_to_add.vad_activity_ == AudioFrame::kVadUnknown) { |
| + result_frame->vad_activity_ = AudioFrame::kVadUnknown; |
| + } |
| + |
| + if (result_frame->speech_type_ != frame_to_add.speech_type_) |
| + result_frame->speech_type_ = AudioFrame::kUndefined; |
| + |
| + if (noPrevData) { |
| + std::copy(frame_to_add.data_, frame_to_add.data_ + |
| + frame_to_add.samples_per_channel_ * |
| + result_frame->num_channels_, |
| + result_frame->data_); |
| + } else { |
| + // IMPROVEMENT this can be done very fast in assembly |
|
the sun
2016/10/19 09:23:17
I think you can remove the comment
aleloi
2016/10/20 08:27:06
Done.
|
| + for (size_t i = 0; |
| + i < result_frame->samples_per_channel_ * result_frame->num_channels_; |
| + i++) { |
| + int32_t wrap_guard = static_cast<int32_t>(result_frame->data_[i]) + |
| + static_cast<int32_t>(frame_to_add.data_[i]); |
| + result_frame->data_[i] = ClampToInt16(wrap_guard); |
| + } |
| + } |
| + return; |
| +} |
| + |
| void AudioFrameOperations::MonoToStereo(const int16_t* src_audio, |
| size_t samples_per_channel, |
| int16_t* dst_audio) { |
| @@ -125,6 +177,36 @@ void AudioFrameOperations::Mute(AudioFrame* frame, bool previous_frame_muted, |
| } |
| } |
// Unconditionally mutes |frame| by delegating to the three-argument Mute()
// with both previous_frame_muted and current_frame_muted set to true (i.e.
// no fade in or out).
void AudioFrameOperations::Mute(AudioFrame* frame) {
  Mute(frame, true, true);
}
| + |
// Restores |frame|'s metadata fields to their default, empty-frame state:
// invalid id and time markers, zero samples/rate/channels, and unknown
// VAD/speech classification. Note: does not touch the sample data itself.
void AudioFrameOperations::Reset(AudioFrame* frame) {
  // -1 marks the id as unset.
  frame->id_ = -1;
  // TODO(wu): Zero is a valid value for |timestamp_|. We should initialize
  // to an invalid value, or add a new member to indicate invalidity.
  frame->timestamp_ = 0;
  frame->elapsed_time_ms_ = -1;
  frame->ntp_time_ms_ = -1;
  frame->samples_per_channel_ = 0;
  frame->sample_rate_hz_ = 0;
  frame->num_channels_ = 0;
  frame->speech_type_ = AudioFrame::kUndefined;
  frame->vad_activity_ = AudioFrame::kVadUnknown;
}
| + |
| +void AudioFrameOperations::ShiftDown(AudioFrame* frame) { |
| + RTC_DCHECK_GT(frame->num_channels_, 0u); |
| + RTC_DCHECK_LT(frame->num_channels_, 3u); |
| + if ((frame->num_channels_ > 2) || (frame->num_channels_ < 1)) |
| + return; |
| + |
| + for (size_t i = 0; i < frame->samples_per_channel_ * frame->num_channels_; |
| + i++) { |
| + frame->data_[i] = static_cast<int16_t>(frame->data_[i] >> 1); |
| + } |
| +} |
| + |
| int AudioFrameOperations::Scale(float left, float right, AudioFrame& frame) { |
| if (frame.num_channels_ != 2) { |
| return -1; |
| @@ -157,4 +239,40 @@ int AudioFrameOperations::ScaleWithSat(float scale, AudioFrame& frame) { |
| return 0; |
| } |
| +void AudioFrameOperations::UpdateFrame(int id, |
| + uint32_t timestamp, |
| + const int16_t* data, |
| + size_t samples_per_channel, |
| + int sample_rate_hz, |
| + AudioFrame::SpeechType speech_type, |
| + AudioFrame::VADActivity vad_activity, |
| + size_t num_channels, |
| + AudioFrame* frame) { |
| + frame->id_ = id; |
| + frame->timestamp_ = timestamp; |
| + frame->samples_per_channel_ = samples_per_channel; |
| + frame->sample_rate_hz_ = sample_rate_hz; |
| + frame->speech_type_ = speech_type; |
| + frame->vad_activity_ = vad_activity; |
| + frame->num_channels_ = num_channels; |
| + |
| + const size_t length = samples_per_channel * num_channels; |
| + RTC_DCHECK_LT(length, AudioFrame::kMaxDataSizeSamples); |
| + if (data != nullptr) { |
| + std::copy(data, data + length, frame->data_); |
| + } else { |
| + std::fill(frame->data_, frame->data_ + length, 0); |
| + } |
| +} |
| + |
// Saturates a 32-bit value to the int16_t range [-0x8000, 0x7FFF].
int16_t ClampToInt16(int32_t input) {
  if (input > 0x7FFF)
    return 0x7FFF;
  if (input < -0x8000)
    return -0x8000;
  return static_cast<int16_t>(input);
}
| + |
| } // namespace webrtc |