| Index: webrtc/modules/audio_processing/audio_buffer.h
|
| diff --git a/webrtc/modules/audio_processing/audio_buffer.h b/webrtc/modules/audio_processing/audio_buffer.h
|
| index 4291fb3eb99832a416d691cecab9c8b8ff9a041e..0ecda828791defa26f586f40fc8c1cf262af0218 100644
|
| --- a/webrtc/modules/audio_processing/audio_buffer.h
|
| +++ b/webrtc/modules/audio_processing/audio_buffer.h
|
| @@ -30,6 +30,72 @@ enum Band {
|
| kBand16To24kHz = 2
|
| };
|
|
|
// Downmixes one stereo frame at a time: each output sample is the average of
// the corresponding left and right samples. Intermediate must be wide enough
// to hold the sum of two T samples without overflow; dividing by two brings
// the result back into T's range before the narrowing store.
template <typename T, typename Intermediate>
void DownmixStereoToMono(int num_frames,
                         T* out,
                         const T* left,
                         const T* right) {
  const T* const left_end = left + num_frames;
  while (left < left_end) {
    const Intermediate sum = static_cast<Intermediate>(*left++) + *right++;
    *out++ = sum / 2;
  }
}
|
| +
|
// Produces a mono signal by averaging, per frame, the corresponding samples
// of all |num_channels| deinterleaved input channels. Intermediate must be
// wide enough to accumulate |num_channels| samples of type T without
// overflow. The stereo case is unrolled by hand since it is the common one.
template <typename T, typename Intermediate>
void DownmixToMono(int num_frames,
                   T* out,
                   const T* const* input_channels,
                   int num_channels) {
  if (num_channels == 2) {
    // Unrolled stereo path: average each left/right sample pair.
    const T* left = input_channels[0];
    const T* right = input_channels[1];
    for (int frame = 0; frame < num_frames; ++frame) {
      out[frame] = (static_cast<Intermediate>(left[frame]) + right[frame]) / 2;
    }
  } else {
    for (int frame = 0; frame < num_frames; ++frame) {
      Intermediate accumulated = input_channels[0][frame];
      for (int channel = 1; channel < num_channels; ++channel) {
        accumulated += input_channels[channel][frame];
      }
      out[frame] = accumulated / num_channels;
    }
  }
}
|
| +
|
// Downmixes an interleaved multichannel signal to a single channel by
// averaging all channels frame by frame. Intermediate must be wide enough to
// hold the sum of |num_channels| samples of type T without overflow.
// |deinterleaved| must have room for |num_multichannel_frames| samples.
template <typename T, typename Intermediate>
void DownmixInterleavedToMonoImpl(const T* interleaved,
                                  T* deinterleaved,
                                  int num_multichannel_frames,
                                  int num_channels) {
  assert(num_channels > 0);
  assert(num_multichannel_frames > 0);

  if (num_channels == 1) {
    // Already mono: a straight copy suffices. memmove is used because the
    // source and destination buffers are allowed to overlap.
    std::memmove(
        deinterleaved, interleaved,
        num_channels * num_multichannel_frames * sizeof(*deinterleaved));
    return;
  }

  if (num_channels == 2) {
    // Explicitly unrolled for the common stereo case.
    for (int i = 0; i < num_multichannel_frames; ++i) {
      deinterleaved[i] = (static_cast<Intermediate>(interleaved[2 * i]) +
                          interleaved[2 * i + 1]) /
                         2;
    }
    return;
  }

  // General case: sum every channel of the frame, then divide.
  for (int i = 0; i < num_multichannel_frames; ++i) {
    const T* const frame = interleaved + i * num_channels;
    Intermediate sum = frame[0];
    for (int j = 1; j < num_channels; ++j) {
      sum += frame[j];
    }
    deinterleaved[i] = sum / num_channels;
  }
}
|
| +
|
| class AudioBuffer {
|
| public:
|
| // TODO(ajm): Switch to take ChannelLayouts.
|
| @@ -112,12 +178,8 @@ class AudioBuffer {
|
| void InterleaveTo(AudioFrame* frame, bool data_changed) const;
|
|
|
| // Use for float deinterleaved data.
|
| - void CopyFrom(const float* const* data,
|
| - int num_frames,
|
| - AudioProcessing::ChannelLayout layout);
|
| - void CopyTo(int num_frames,
|
| - AudioProcessing::ChannelLayout layout,
|
| - float* const* data);
|
| + void CopyFrom(const float* const* data, const StreamConfig& stream_config);
|
| + void CopyTo(const StreamConfig& stream_config, float* const* data);
|
| void CopyLowPassToReference();
|
|
|
| // Splits the signal into different bands.
|
|
|