| Index: webrtc/common_audio/include/audio_util.h
|
| diff --git a/webrtc/common_audio/include/audio_util.h b/webrtc/common_audio/include/audio_util.h
|
| index 8262649145546180f23b6e82094d324b7f604e7f..fc2c13f5b285c4f7be78663dd254e4ee5b3557bb 100644
|
| --- a/webrtc/common_audio/include/audio_util.h
|
| +++ b/webrtc/common_audio/include/audio_util.h
|
| @@ -12,6 +12,7 @@
|
| #define WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
|
|
|
#include <cassert>
#include <cstring>
#include <limits>

#include "webrtc/base/scoped_ptr.h"
#include "webrtc/typedefs.h"
|
| @@ -97,6 +98,86 @@ void Interleave(const T* const* deinterleaved, int samples_per_channel,
|
| }
|
| }
|
|
|
// Averages each pair of samples from two planar (single-channel) buffers into
// a mono output buffer. The addition is carried out in the Intermediate type
// so that summing two T values cannot overflow before the division.
template <typename T, typename Intermediate>
void DownmixStereoToMono(const T* left,
                         const T* right,
                         int num_frames,
                         T* out) {
  for (int n = 0; n < num_frames; ++n) {
    const Intermediate sum = static_cast<Intermediate>(left[n]) + right[n];
    out[n] = sum / 2;
  }
}
|
| +
|
| +template <typename T, typename Intermediate>
|
| +void DownmixToMono(const T* const* input_channels,
|
| + int num_frames,
|
| + int num_channels,
|
| + T* out) {
|
| + if (num_channels == 2) {
|
| + DownmixStereoToMono<T, Intermediate>(input_channels[0], input_channels[1],
|
| + num_frames, out);
|
| + } else {
|
| + for (int i = 0; i < num_frames; ++i) {
|
| + Intermediate value = input_channels[0][i];
|
| + for (int j = 1; j < num_channels; ++j) {
|
| + value += input_channels[j][i];
|
| + }
|
| + out[i] = value / num_channels;
|
| + }
|
| + }
|
| +}
|
| +
|
// Downmixes an interleaved multichannel signal to a single channel by
// averaging all channels.
//
// |interleaved| holds |num_multichannel_frames| frames of |num_channels|
// samples each. The per-frame average — accumulated in the Intermediate type
// so that summing T samples cannot overflow — is written to |deinterleaved|,
// one sample per frame. Both counts must be positive (asserted below; this
// header must therefore include <cassert> itself rather than rely on a
// transitive include).
template <typename T, typename Intermediate>
void DownmixInterleavedToMonoImpl(const T* interleaved,
                                  int num_multichannel_frames,
                                  int num_channels,
                                  T* deinterleaved) {
  assert(num_channels > 0);
  assert(num_multichannel_frames > 0);

  if (num_channels == 1) {
    // Mono input: a straight copy. memmove keeps the call well-defined even
    // if the source and destination ranges overlap.
    std::memmove(deinterleaved, interleaved,
                 sizeof(T) * num_multichannel_frames);
    return;
  }

  const T* const end = interleaved + num_multichannel_frames * num_channels;

  if (num_channels == 2) {
    // Explicitly unroll for the common stereo case.
    while (interleaved < end) {
      *deinterleaved++ =
          (static_cast<Intermediate>(*interleaved) + *(interleaved + 1)) / 2;
      interleaved += 2;
    }
    return;
  }

  // General case: accumulate one frame at a time, then divide once.
  while (interleaved < end) {
    const T* const frame_end = interleaved + num_channels;

    Intermediate value = *interleaved++;
    while (interleaved < frame_end) {
      value += *interleaved++;
    }

    *deinterleaved++ = value / num_channels;
  }
}
|
| +
|
| +template <typename T>
|
| +void DownmixInterleavedToMono(const T* interleaved,
|
| + int num_multichannel_frames,
|
| + int num_channels,
|
| + T* deinterleaved) {
|
| + DownmixInterleavedToMonoImpl<T, T>(interleaved, num_multichannel_frames,
|
| + num_channels, deinterleaved);
|
| +}
|
| +
|
// Explicit specialization for int16_t, declared here and defined out-of-line
// (in the corresponding .cc file — not visible in this header). Presumably it
// accumulates in a wider intermediate type than int16_t; verify against the
// definition.
template <>
void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved,
                                       int num_multichannel_frames,
                                       int num_channels,
                                       int16_t* deinterleaved);
|
| +
|
| } // namespace webrtc
|
|
|
| #endif // WEBRTC_COMMON_AUDIO_INCLUDE_AUDIO_UTIL_H_
|
|
|