Index: webrtc/modules/audio_mixer/frame_combiner.cc
diff --git a/webrtc/modules/audio_mixer/frame_combiner.cc b/webrtc/modules/audio_mixer/frame_combiner.cc
index d08ed0f4739ba77b0fe0f5ab4dc4f8cc12fed397..c248d5e89a5a46168e700e04f10e51fdecdfc0b5 100644
--- a/webrtc/modules/audio_mixer/frame_combiner.cc
+++ b/webrtc/modules/audio_mixer/frame_combiner.cc
@@ -35,10 +35,15 @@ void CombineOneFrame(const AudioFrame* input_frame,
                      AudioFrame* audio_frame_for_mixing) {
   audio_frame_for_mixing->timestamp_ = input_frame->timestamp_;
   audio_frame_for_mixing->elapsed_time_ms_ = input_frame->elapsed_time_ms_;
-  std::copy(input_frame->data_,
-            input_frame->data_ +
-                input_frame->num_channels_ * input_frame->samples_per_channel_,
-            audio_frame_for_mixing->data_);
+  if (!input_frame->muted()) {
+    size_t length =
+        input_frame->num_channels_ * input_frame->samples_per_channel_;
+    const int16_t* input_data = input_frame->data();
+    std::copy(input_data, input_data + length,
+              audio_frame_for_mixing->mutable_data());
+  } else {
+    AudioFrameOperations::Mute(audio_frame_for_mixing);
+  }
 }
 
 std::unique_ptr<AudioProcessing> CreateLimiter() {
@@ -105,7 +110,7 @@ void FrameCombiner::Combine(const std::vector<AudioFrame*>& mix_list,
     std::vector<rtc::ArrayView<const int16_t>> input_frames;
     for (size_t i = 0; i < mix_list.size(); ++i) {
       input_frames.push_back(rtc::ArrayView<const int16_t>(
-          mix_list[i]->data_, samples_per_channel * number_of_channels));
+          mix_list[i]->data(), samples_per_channel * number_of_channels));
     }
     CombineMultipleFrames(input_frames, audio_frame_for_mixing);
   }
@@ -133,6 +138,7 @@ void FrameCombiner::CombineMultipleFrames(
   add_buffer.fill(0);
 
   for (const auto& frame : input_frames) {
+    // TODO(yujo): skip this for muted input frames.
     std::transform(frame.begin(), frame.end(), add_buffer.begin(),
                    add_buffer.begin(), std::plus<int32_t>());
   }
@@ -140,7 +146,7 @@ void FrameCombiner::CombineMultipleFrames(
   if (use_apm_limiter_) {
     // Halve all samples to avoid saturation before limiting.
     std::transform(add_buffer.begin(), add_buffer.begin() + frame_length,
-                   audio_frame_for_mixing->data_, [](int32_t a) {
+                   audio_frame_for_mixing->mutable_data(), [](int32_t a) {
                      return rtc::saturated_cast<int16_t>(a / 2);
                    });
@@ -165,7 +171,7 @@ void FrameCombiner::CombineMultipleFrames(
     AudioFrameOperations::Add(*audio_frame_for_mixing, audio_frame_for_mixing);
   } else {
     std::transform(add_buffer.begin(), add_buffer.begin() + frame_length,
-                   audio_frame_for_mixing->data_,
+                   audio_frame_for_mixing->mutable_data(),
                    [](int32_t a) { return rtc::saturated_cast<int16_t>(a); });
   }
 }
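For reference, a minimal standalone sketch of the accessor pattern this patch moves the mixer to: check muted() before reading samples, read through data(), and write through mutable_data(). FakeAudioFrame and CopyOneFrame below are hypothetical stand-ins (not the real webrtc::AudioFrame or any function in this CL) so the snippet compiles on its own.

// Standalone sketch of the muted-frame access pattern used in the patch above.
// FakeAudioFrame is a simplified stand-in, not the real webrtc::AudioFrame.
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstdint>

namespace {

struct FakeAudioFrame {
  static constexpr size_t kSamples = 480;  // 10 ms of mono audio at 48 kHz.

  bool muted() const { return muted_; }
  const int16_t* data() const { return data_.data(); }
  int16_t* mutable_data() {
    muted_ = false;  // Handing out a writable pointer makes the frame audible.
    return data_.data();
  }
  void Mute() { muted_ = true; }

 private:
  std::array<int16_t, kSamples> data_{};  // Zero-initialized samples.
  bool muted_ = true;                     // Frames start out muted.
};

// Mirrors the shape of the new CombineOneFrame(): copy samples only when the
// input is audible, otherwise just mark the output as muted.
void CopyOneFrame(const FakeAudioFrame& input, FakeAudioFrame* output) {
  if (!input.muted()) {
    const int16_t* input_data = input.data();
    std::copy(input_data, input_data + FakeAudioFrame::kSamples,
              output->mutable_data());
  } else {
    output->Mute();
  }
}

}  // namespace

int main() {
  FakeAudioFrame in;
  FakeAudioFrame out;
  in.mutable_data()[0] = 1000;  // Writing a sample makes |in| audible.
  CopyOneFrame(in, &out);
  return out.muted() ? 1 : 0;  // Expect 0: the copy unmuted |out|.
}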