Index: webrtc/modules/audio_mixer/audio_mixer_impl.cc
diff --git a/webrtc/modules/audio_mixer/audio_mixer_impl.cc b/webrtc/modules/audio_mixer/audio_mixer_impl.cc
index 82932f40955d4e1d96bf1ae4fd24bc863a343763..b5d1782cad4d764f163550a7a3599167a37bada4 100644
--- a/webrtc/modules/audio_mixer/audio_mixer_impl.cc
+++ b/webrtc/modules/audio_mixer/audio_mixer_impl.cc
@@ -13,12 +13,8 @@
 #include <algorithm>
 #include <functional>
 
-#include "webrtc/base/thread_annotations.h"
 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
-#include "webrtc/modules/audio_mixer/audio_mixer_defines.h"
-#include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/utility/include/audio_frame_operations.h"
-#include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/include/trace.h"
 
 namespace webrtc {
@@ -71,7 +67,7 @@ class SourceFrame {
 };
 
 // Remixes a frame between stereo and mono.
-void RemixFrame(AudioFrame* frame, size_t number_of_channels) {
+void RemixFrame(AudioFrame* frame, int number_of_channels) {
   RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
   if (frame->num_channels_ == 1 && number_of_channels == 2) {
     AudioFrameOperations::MonoToStereo(frame);
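For readers skimming the diff: RemixFrame dispatches to AudioFrameOperations to convert a frame in place between channel layouts. A minimal stand-alone sketch of the two conversions (illustrative only, not the AudioFrameOperations implementation; the averaging choice for the stereo-to-mono direction is an assumption):

```cpp
#include <cstdint>
#include <vector>

// Mono -> stereo: duplicate each sample into left and right.
std::vector<int16_t> MonoToStereo(const std::vector<int16_t>& mono) {
  std::vector<int16_t> stereo;
  stereo.reserve(mono.size() * 2);
  for (int16_t s : mono) {
    stereo.push_back(s);  // left
    stereo.push_back(s);  // right
  }
  return stereo;
}

// Stereo -> mono: average each interleaved left/right pair.
std::vector<int16_t> StereoToMono(const std::vector<int16_t>& stereo) {
  std::vector<int16_t> mono(stereo.size() / 2);
  for (size_t i = 0; i < mono.size(); ++i) {
    int32_t sum = int32_t{stereo[2 * i]} + int32_t{stereo[2 * i + 1]};
    mono[i] = static_cast<int16_t>(sum / 2);
  }
  return mono;
}
```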
@@ -95,46 +91,54 @@ void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) {
   }
 }
 
-}  // namespace
-
-MixerAudioSource::MixerAudioSource() : mix_history_(new NewMixHistory()) {}
-
-MixerAudioSource::~MixerAudioSource() {
-  delete mix_history_;
-}
-
-bool MixerAudioSource::IsMixed() const {
-  return mix_history_->IsMixed();
-}
-
-NewMixHistory::NewMixHistory() : is_mixed_(0) {}
-
-NewMixHistory::~NewMixHistory() {}
+// Mixes the AudioFrames stored in |audio_frame_list| into |mixed_audio|.
+int32_t MixFromList(AudioFrame* mixed_audio,
+                    const AudioFrameList& audio_frame_list,
+                    int32_t id,
+                    bool use_limiter) {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
+               "MixFromList(mixed_audio, audio_frame_list)");
+  if (audio_frame_list.empty())
+    return 0;
 
-bool NewMixHistory::IsMixed() const {
-  return is_mixed_;
-}
+  if (audio_frame_list.size() == 1) {
+    mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_;
+    mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_;
+  } else {
+    // TODO(wu): Issue 3390.
+    // Audio frame timestamp is only supported in one channel case.
+    mixed_audio->timestamp_ = 0;
+    mixed_audio->elapsed_time_ms_ = -1;
+  }
 
-bool NewMixHistory::WasMixed() const {
-  // Was mixed is the same as is mixed depending on perspective. This function
-  // is for the perspective of NewAudioConferenceMixerImpl.
-  return IsMixed();
-}
+  for (const auto& frame : audio_frame_list) {
+    RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_);
+    RTC_DCHECK_EQ(
+        frame->samples_per_channel_,
+        static_cast<size_t>((mixed_audio->sample_rate_hz_ *
+                             webrtc::AudioMixerImpl::kFrameDurationInMs) /
+                            1000));
 
-int32_t NewMixHistory::SetIsMixed(const bool mixed) {
-  is_mixed_ = mixed;
+    // Mix |frame| into |mixed_audio|, with saturation protection.
+    // This effect is applied to |frame| itself prior to mixing.
+    if (use_limiter) {
+      // Divide by two to avoid saturation in the mixing.
+      // This is only meaningful if the limiter will be used.
+      *frame >>= 1;
+    }
+    RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_);
+    *mixed_audio += *frame;
+  }
   return 0;
 }
 
-void NewMixHistory::ResetMixedStatus() {
-  is_mixed_ = false;
-}
+}  // namespace
 
 std::unique_ptr<AudioMixer> AudioMixer::Create(int id) {
   AudioMixerImpl* mixer = new AudioMixerImpl(id);
ossu (2016/09/01 15:42:50):
  This is a bit strange. Not sure it's 100% related …

aleloi (2016/09/02 11:52:34):
  Changed in upstream CL.
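To illustrate what the thread above is getting at: the factory allocates with a bare `new` and must remember to `delete` when Init() fails. A minimal sketch of the same create-and-init pattern written without manual cleanup (illustrative only; Widget and CreateWidget are invented names, not WebRTC API):

```cpp
#include <memory>

struct Widget {
  explicit Widget(int id) : id_(id) {}
  bool Init() { return id_ >= 0; }  // stand-in for a fallible Init()
  int id_;
};

std::unique_ptr<Widget> CreateWidget(int id) {
  std::unique_ptr<Widget> w(new Widget(id));
  if (!w->Init())
    return nullptr;  // w's destructor frees the object; no manual delete
  return w;
}
```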
   if (!mixer->Init()) {
     delete mixer;
-    return NULL;
+    return std::unique_ptr<AudioMixer>(nullptr);
   }
   return std::unique_ptr<AudioMixer>(mixer);
 }
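To make the `*frame >>= 1` in the relocated MixFromList concrete: halving each source leaves headroom so that summing two full-scale int16 signals cannot wrap, and the limiter then brings the level back up after mixing. A sketch of the idea on single samples (illustrative only; that AudioFrame's operator+= saturates in the same way is an assumption here):

```cpp
#include <algorithm>
#include <cstdint>

int16_t MixTwoSamples(int16_t a, int16_t b, bool use_limiter) {
  if (use_limiter) {
    a = static_cast<int16_t>(a >> 1);  // pre-attenuate for headroom
    b = static_cast<int16_t>(b >> 1);
  }
  int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b);
  // Clamp to the int16 range, i.e. saturation-protected mixing.
  sum = std::min<int32_t>(32767, std::max<int32_t>(-32768, sum));
  return static_cast<int16_t>(sum);
}
```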
@@ -191,7 +195,7 @@ bool AudioMixerImpl::Init() {
 }
 
 void AudioMixerImpl::Mix(int sample_rate,
-                         size_t number_of_channels,
+                         int number_of_channels,
                          AudioFrame* audio_frame_for_mixing) {
   RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
   RTC_DCHECK_RUN_ON(&thread_checker_);
@@ -215,7 +219,7 @@ void AudioMixerImpl::Mix(int sample_rate,
     CriticalSectionScoped cs(crit_.get());
     mix_list = GetNonAnonymousAudio();
     anonymous_mix_list = GetAnonymousAudio();
-    num_mixed_audio_sources = static_cast<int>(num_mixed_audio_sources_);
+    num_mixed_audio_sources = num_mixed_audio_sources_;
   }
 
   mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(),
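The hunk above shows the locking pattern Mix() uses: take the critical section only long enough to copy the shared source lists, then do the expensive mixing work on the copies. A stand-alone sketch of that pattern (illustrative only; std::mutex stands in for the CriticalSectionScoped wrapper and the names are invented):

```cpp
#include <mutex>
#include <vector>

std::mutex crit;             // plays the role of crit_
std::vector<int> sources;    // plays the role of the source lists

void MixOnce() {
  std::vector<int> snapshot;
  {
    std::lock_guard<std::mutex> lock(crit);
    snapshot = sources;  // copy under the lock, nothing else
  }
  for (int s : snapshot) {
    (void)s;  // expensive per-frame mixing happens lock-free here
  }
}
```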
@@ -254,8 +258,7 @@ void AudioMixerImpl::Mix(int sample_rate,
 
 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   output_frequency_ = frequency;
-  sample_size_ =
-      static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000);
+  sample_size_ = (output_frequency_ * kFrameDurationInMs) / 1000;
   return 0;
 }
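The simplified assignment computes samples per channel per frame. A worked check of the arithmetic (the 10 ms value for kFrameDurationInMs is assumed from the mixer's frame duration):

```cpp
constexpr int kFrameDurationInMs = 10;  // assumed frame duration

constexpr int SampleSize(int frequency_hz) {
  return (frequency_hz * kFrameDurationInMs) / 1000;
}

static_assert(SampleSize(8000) == 80, "8 kHz -> 80 samples per frame");
static_assert(SampleSize(16000) == 160, "16 kHz -> 160 samples per frame");
static_assert(SampleSize(48000) == 480, "48 kHz -> 480 samples per frame");
```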
@@ -295,12 +298,13 @@ int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
       return -1;
     }
 
-    size_t num_mixed_non_anonymous = audio_source_list_.size();
+    int num_mixed_non_anonymous = static_cast<int>(audio_source_list_.size());
ossu (2016/09/01 15:42:50):
  Why not just keep this as size_t? According to the …

aleloi (2016/09/02 11:52:34):
  Thanks! I didn't read it carefully enough. Changing …
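The question above is the usual size_t-versus-int trade-off: container sizes are unsigned, so keeping the count in int forces the static_cast dance seen in this hunk. A sketch of the clamp written entirely in size_t, along the lines the reviewer suggests (illustrative only; the constant's value and the helper name are invented):

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

constexpr size_t kMaximumAmountOfMixedAudioSources = 3;  // value assumed

size_t ClampedMixCount(const std::vector<int>& source_list) {
  // Staying in size_t end to end needs no casts and avoids
  // signed/unsigned comparison warnings.
  return std::min(source_list.size(), kMaximumAmountOfMixedAudioSources);
}
```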
     if (num_mixed_non_anonymous > kMaximumAmountOfMixedAudioSources) {
       num_mixed_non_anonymous = kMaximumAmountOfMixedAudioSources;
     }
     num_mixed_audio_sources_ =
-        num_mixed_non_anonymous + additional_audio_source_list_.size();
+        num_mixed_non_anonymous +
+        static_cast<int>(additional_audio_source_list_.size());
   }
   return 0;
 }
@@ -482,45 +486,6 @@ bool AudioMixerImpl::RemoveAudioSourceFromList(
   }
 }
 
-int32_t AudioMixerImpl::MixFromList(AudioFrame* mixed_audio,
-                                    const AudioFrameList& audio_frame_list,
-                                    int32_t id,
-                                    bool use_limiter) {
-  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
-               "MixFromList(mixed_audio, audio_frame_list)");
-  if (audio_frame_list.empty())
-    return 0;
-
-  if (audio_frame_list.size() == 1) {
-    mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_;
-    mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_;
-  } else {
-    // TODO(wu): Issue 3390.
-    // Audio frame timestamp is only supported in one channel case.
-    mixed_audio->timestamp_ = 0;
-    mixed_audio->elapsed_time_ms_ = -1;
-  }
-
-  for (const auto& frame : audio_frame_list) {
-    RTC_DCHECK_EQ(mixed_audio->sample_rate_hz_, frame->sample_rate_hz_);
-    RTC_DCHECK_EQ(
-        frame->samples_per_channel_,
-        static_cast<size_t>(
-            (mixed_audio->sample_rate_hz_ * kFrameDurationInMs) / 1000));
-
-    // Mix |f.frame| into |mixed_audio|, with saturation protection.
-    // These effect is applied to |f.frame| itself prior to mixing.
-    if (use_limiter) {
-      // Divide by two to avoid saturation in the mixing.
-      // This is only meaningful if the limiter will be used.
-      *frame >>= 1;
-    }
-    RTC_DCHECK_EQ(frame->num_channels_, mixed_audio->num_channels_);
-    *mixed_audio += *frame;
-  }
-  return 0;
-}
-
 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixed_audio) const {
   RTC_DCHECK_RUN_ON(&thread_checker_);
   if (!use_limiter_) {