Index: webrtc/modules/audio_processing/audio_buffer.cc |
diff --git a/webrtc/modules/audio_processing/audio_buffer.cc b/webrtc/modules/audio_processing/audio_buffer.cc |
index 04dcaea799d60af6bbc48d899e9ded8134d6ce03..17cd31146c3aaae220efb337d601da30add2d6aa 100644 |
--- a/webrtc/modules/audio_processing/audio_buffer.cc |
+++ b/webrtc/modules/audio_processing/audio_buffer.cc |
@@ -23,28 +23,11 @@ const int kSamplesPer16kHzChannel = 160; |
const int kSamplesPer32kHzChannel = 320; |
const int kSamplesPer48kHzChannel = 480; |
-bool HasKeyboardChannel(AudioProcessing::ChannelLayout layout) { |
- switch (layout) { |
- case AudioProcessing::kMono: |
- case AudioProcessing::kStereo: |
- return false; |
- case AudioProcessing::kMonoAndKeyboard: |
- case AudioProcessing::kStereoAndKeyboard: |
- return true; |
- } |
- assert(false); |
- return false; |
-} |
- |
-int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) { |
- switch (layout) { |
- case AudioProcessing::kMono: |
- case AudioProcessing::kStereo: |
- assert(false); |
- return -1; |
- case AudioProcessing::kMonoAndKeyboard: |
+int KeyboardChannelIndex(const StreamConfig& stream_config) { |
aluebs-webrtc
2015/07/14 23:12:42
You could check inside here if stream_config.has_keyboard().
mgraczyk
2015/07/15 01:12:45
Done, although the only callsite is conditional on
|
+ switch (stream_config.num_channels()) { |
+ case 1: |
return 1; |
- case AudioProcessing::kStereoAndKeyboard: |
+ case 2: |
return 2; |
} |
assert(false); |
@@ -52,10 +35,21 @@ int KeyboardChannelIndex(AudioProcessing::ChannelLayout layout) { |
} |
template <typename T> |
-void StereoToMono(const T* left, const T* right, T* out, |
- int num_frames) { |
- for (int i = 0; i < num_frames; ++i) |
- out[i] = (left[i] + right[i]) / 2; |
+void DownmixInterleavedToMono(const T* interleaved, |
aluebs-webrtc
2015/07/14 23:12:42
Is this specialization used anywhere?
mgraczyk
2015/07/15 01:12:46
This isn't the specialization, it's a normal template.
aluebs-webrtc
2015/07/15 18:04:05
I meant, I don't think we have any interleaved float data.
mgraczyk
2015/07/15 20:03:19
That's true. I removed the definition so now the
|
+ T* deinterleaved, |
+ int num_multichannel_frames, |
+ int num_channels) { |
aluebs-webrtc
2015/07/14 23:12:42
Inputs before outputs?
mgraczyk
2015/07/15 01:12:45
Done.
|
+ return DownmixInterleavedToMonoImpl<T, T>( |
aluebs-webrtc
2015/07/14 23:12:42
This return isn't needed, right?
mgraczyk
2015/07/15 01:12:45
Done.
|
+ interleaved, deinterleaved, num_multichannel_frames, num_channels); |
+} |
+ |
+template <> |
aluebs-webrtc
2015/07/14 23:12:42
Why is this template needed? Just curious.
mgraczyk
2015/07/15 01:12:46
I could write a function called "DownmixInterleave
aluebs-webrtc
2015/07/15 18:04:05
Thanks for the explanation! Makes sense :)
|
+void DownmixInterleavedToMono<int16_t>(const int16_t* interleaved, |
+ int16_t* deinterleaved, |
+ int num_multichannel_frames, |
+ int num_channels) { |
+ return DownmixInterleavedToMonoImpl<int16_t, int32_t>( |
aluebs-webrtc
2015/07/14 23:12:42
This return isn't needed, right?
mgraczyk
2015/07/15 01:12:45
Done.
|
+ interleaved, deinterleaved, num_multichannel_frames, num_channels); |
} |
int NumBandsFromSamplesPerChannel(int num_frames) { |
@@ -91,7 +85,7 @@ AudioBuffer::AudioBuffer(int input_num_frames, |
assert(input_num_frames_ > 0); |
assert(proc_num_frames_ > 0); |
assert(output_num_frames_ > 0); |
- assert(num_input_channels_ > 0 && num_input_channels_ <= 2); |
+ assert(num_input_channels_ > 0); |
assert(num_proc_channels_ > 0 && num_proc_channels_ <= num_input_channels_); |
if (input_num_frames_ != proc_num_frames_ || |
@@ -130,29 +124,29 @@ AudioBuffer::AudioBuffer(int input_num_frames, |
AudioBuffer::~AudioBuffer() {} |
void AudioBuffer::CopyFrom(const float* const* data, |
- int num_frames, |
- AudioProcessing::ChannelLayout layout) { |
- assert(num_frames == input_num_frames_); |
- assert(ChannelsFromLayout(layout) == num_input_channels_); |
+ const StreamConfig& stream_config) { |
+ assert(stream_config.samples_per_channel() == input_num_frames_); |
+ assert(stream_config.num_channels() == num_input_channels_); |
InitForNewData(); |
// Initialized lazily because there's a different condition in |
// DeinterleaveFrom. |
- if ((num_input_channels_ == 2 && num_proc_channels_ == 1) && !input_buffer_) { |
+ const bool need_to_downmix = |
+ num_input_channels_ > 1 && num_proc_channels_ == 1; |
+ if (need_to_downmix && !input_buffer_) { |
input_buffer_.reset( |
new IFChannelBuffer(input_num_frames_, num_proc_channels_)); |
} |
- if (HasKeyboardChannel(layout)) { |
- keyboard_data_ = data[KeyboardChannelIndex(layout)]; |
+ if (stream_config.has_keyboard()) { |
+ keyboard_data_ = data[KeyboardChannelIndex(stream_config)]; |
} |
// Downmix. |
const float* const* data_ptr = data; |
- if (num_input_channels_ == 2 && num_proc_channels_ == 1) { |
- StereoToMono(data[0], |
- data[1], |
- input_buffer_->fbuf()->channels()[0], |
- input_num_frames_); |
+ if (need_to_downmix) { |
+ DownmixToMono<float, float>(input_num_frames_, |
+ input_buffer_->fbuf()->channels()[0], data, |
+ num_input_channels_); |
data_ptr = input_buffer_->fbuf_const()->channels(); |
} |
@@ -175,11 +169,10 @@ void AudioBuffer::CopyFrom(const float* const* data, |
} |
} |
-void AudioBuffer::CopyTo(int num_frames, |
- AudioProcessing::ChannelLayout layout, |
+void AudioBuffer::CopyTo(const StreamConfig& stream_config, |
float* const* data) { |
- assert(num_frames == output_num_frames_); |
- assert(ChannelsFromLayout(layout) == num_channels_); |
+ assert(stream_config.samples_per_channel() == output_num_frames_); |
+ assert(stream_config.num_channels() == num_channels_); |
// Convert to the float range. |
float* const* data_ptr = data; |
@@ -339,10 +332,9 @@ const int16_t* AudioBuffer::mixed_low_pass_data() { |
mixed_low_pass_channels_.reset( |
new ChannelBuffer<int16_t>(num_split_frames_, 1)); |
} |
- StereoToMono(split_bands_const(0)[kBand0To8kHz], |
- split_bands_const(1)[kBand0To8kHz], |
- mixed_low_pass_channels_->channels()[0], |
- num_split_frames_); |
+ DownmixStereoToMono<int16_t, int32_t>( |
aluebs-webrtc
2015/07/14 23:12:42
Shouldn't this support multiple channels?
mgraczyk
2015/07/15 01:12:46
Done. Does this work?
aluebs-webrtc
2015/07/15 18:04:05
You don't need to do this manually, audio_buffer/c
mgraczyk
2015/07/15 20:03:19
Nice, Done.
|
+ num_split_frames_, mixed_low_pass_channels_->channels()[0], |
+ split_bands_const(0)[kBand0To8kHz], split_bands_const(1)[kBand0To8kHz]); |
mixed_low_pass_valid_ = true; |
} |
return mixed_low_pass_channels_->channels()[0]; |
@@ -411,11 +403,10 @@ void AudioBuffer::DeinterleaveFrom(AudioFrame* frame) { |
} else { |
deinterleaved = input_buffer_->ibuf()->channels(); |
} |
- if (num_input_channels_ == 2 && num_proc_channels_ == 1) { |
- // Downmix directly; no explicit deinterleaving needed. |
- for (int i = 0; i < input_num_frames_; ++i) { |
- deinterleaved[0][i] = (frame->data_[i * 2] + frame->data_[i * 2 + 1]) / 2; |
- } |
+ if (num_proc_channels_ == 1) { |
mgraczyk
2015/07/15 01:12:46
This function works for any number of input channels.
|
+ // Downmix and deinterleave simultaneously. |
+ DownmixInterleavedToMono(frame->data_, deinterleaved[0], input_num_frames_, |
+ num_input_channels_); |
} else { |
assert(num_proc_channels_ == num_input_channels_); |
Deinterleave(frame->data_, |