Index: webrtc/audio/utility/audio_frame_operations.h |
diff --git a/webrtc/modules/utility/include/audio_frame_operations.h b/webrtc/audio/utility/audio_frame_operations.h |
similarity index 66% |
copy from webrtc/modules/utility/include/audio_frame_operations.h |
copy to webrtc/audio/utility/audio_frame_operations.h |
index e12e3e561be8d439fe5a59709149a875fc2b0509..d16b163e7d1139f10ba00e87e73524648e01bebd 100644 |
--- a/webrtc/modules/utility/include/audio_frame_operations.h |
+++ b/webrtc/audio/utility/audio_frame_operations.h |
@@ -8,8 +8,10 @@ |
* be found in the AUTHORS file in the root of the source tree. |
*/ |
-#ifndef WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_ |
-#define WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_ |
+#ifndef WEBRTC_AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_ |
+#define WEBRTC_AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_ |
+ |
+#include <stddef.h> |
#include "webrtc/typedefs.h" |
@@ -22,11 +24,21 @@ class AudioFrame; |
// than a class. |
class AudioFrameOperations { |
public: |
+ // Add samples in |frame_to_add| with samples in |result_frame| |
+  // putting the results in |result_frame|. The fields |
+ // |vad_activity_| and |speech_type_| of the result frame are |
+ // updated. If |result_frame| is empty (|samples_per_channel_|==0), |
+ // the samples in |frame_to_add| are added to it. The number of |
+ // channels and number of samples per channel must match except when |
+ // |result_frame| is empty. |
+ static void Add(const AudioFrame& frame_to_add, AudioFrame* result_frame); |
+ |
// Upmixes mono |src_audio| to stereo |dst_audio|. This is an out-of-place |
// operation, meaning src_audio and dst_audio must point to different |
// buffers. It is the caller's responsibility to ensure that |dst_audio| is |
// sufficiently large. |
- static void MonoToStereo(const int16_t* src_audio, size_t samples_per_channel, |
+ static void MonoToStereo(const int16_t* src_audio, |
+ size_t samples_per_channel, |
int16_t* dst_audio); |
// |frame.num_channels_| will be updated. This version checks for sufficient |
// buffer size and that |num_channels_| is mono. |
@@ -35,7 +47,8 @@ class AudioFrameOperations { |
// Downmixes stereo |src_audio| to mono |dst_audio|. This is an in-place |
// operation, meaning |src_audio| and |dst_audio| may point to the same |
// buffer. |
- static void StereoToMono(const int16_t* src_audio, size_t samples_per_channel, |
+ static void StereoToMono(const int16_t* src_audio, |
+ size_t samples_per_channel, |
int16_t* dst_audio); |
// |frame.num_channels_| will be updated. This version checks that |
// |num_channels_| is stereo. |
@@ -50,9 +63,16 @@ class AudioFrameOperations { |
// |previous_frame_muted| && !|current_frame_muted| - Fade-in at frame start. |
// !|previous_frame_muted| && |current_frame_muted| - Fade-out at frame end. |
// !|previous_frame_muted| && !|current_frame_muted| - Leave frame untouched. |
- static void Mute(AudioFrame* frame, bool previous_frame_muted, |
+ static void Mute(AudioFrame* frame, |
+ bool previous_frame_muted, |
bool current_frame_muted); |
+ // Zero out contents of frame. |
+ static void Mute(AudioFrame* frame); |
+ |
+ // Halve samples in |frame|. |
+ static void ApplyHalfGain(AudioFrame* frame); |
+ |
static int Scale(float left, float right, AudioFrame& frame); |
static int ScaleWithSat(float scale, AudioFrame& frame); |
@@ -60,4 +80,4 @@ class AudioFrameOperations { |
} // namespace webrtc |
-#endif // #ifndef WEBRTC_MODULES_UTILITY_INCLUDE_AUDIO_FRAME_OPERATIONS_H_ |
+#endif // WEBRTC_AUDIO_UTILITY_AUDIO_FRAME_OPERATIONS_H_ |