Index: webrtc/modules/audio_mixer/audio_mixer_impl.cc |
diff --git a/webrtc/modules/audio_mixer/audio_mixer_impl.cc b/webrtc/modules/audio_mixer/audio_mixer_impl.cc |
index 5a8d1eb7645705113dc3155565668b198a6bbeb3..bb58e2e3b36e139bc82722336a0155bf1156c396 100644 |
--- a/webrtc/modules/audio_mixer/audio_mixer_impl.cc |
+++ b/webrtc/modules/audio_mixer/audio_mixer_impl.cc |
@@ -13,6 +13,7 @@ |
#include <algorithm> |
#include <functional> |
+#include "webrtc/base/thread_annotations.h" |
#include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
#include "webrtc/modules/audio_mixer/audio_mixer_defines.h" |
#include "webrtc/modules/audio_processing/include/audio_processing.h" |
@@ -35,6 +36,17 @@ class SourceFrame { |
} |
} |
+ SourceFrame(MixerAudioSource* p, |
+ AudioFrame* a, |
+ bool m, |
+ bool was_mixed_before, |
+ uint32_t energy) |
+ : audio_source_(p), |
+ audio_frame_(a), |
+ muted_(m), |
+ energy_(energy), |
+ was_mixed_before_(was_mixed_before) {} |
+ |
aleloi
2016/08/30 15:13:34
Differs from the above by not calculating energy.
|
// a.shouldMixBefore(b) is used to select mixer participants. |
bool shouldMixBefore(const SourceFrame& other) const { |
if (muted_ != other.muted_) { |
@@ -68,20 +80,19 @@ void RemixFrame(AudioFrame* frame, size_t number_of_channels) { |
} |
} |
-// Mix |frame| into |mixed_frame|, with saturation protection and upmixing. |
-// These effects are applied to |frame| itself prior to mixing. Assumes that |
-// |mixed_frame| always has at least as many channels as |frame|. Supports |
-// stereo at most. |
-// |
-void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { |
- RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); |
- if (use_limiter) { |
- // Divide by two to avoid saturation in the mixing. |
- // This is only meaningful if the limiter will be used. |
- *frame >>= 1; |
+void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) { |
+ for (const auto& source_frame : mixed_sources_and_frames) { |
+ // Ramp in previously unmixed. |
+ if (!source_frame.was_mixed_before_) { |
+ NewMixerRampIn(source_frame.audio_frame_); |
+ } |
+ |
+ const bool is_mixed = source_frame.audio_source_->_mixHistory->IsMixed(); |
+ // Ramp out currently unmixed. |
+ if (source_frame.was_mixed_before_ && !is_mixed) { |
+ NewMixerRampOut(source_frame.audio_frame_); |
+ } |
aleloi
2016/08/30 15:13:34
Ramping in/out should be done both for anonymous and non-anonymous sources.
|
} |
- RTC_DCHECK_EQ(frame->num_channels_, mixed_frame->num_channels_); |
- *mixed_frame += *frame; |
aleloi
2016/08/30 15:13:34
MixFrames is now done in MixFromList.
|
} |
} // namespace |
@@ -147,10 +158,6 @@ bool AudioMixerImpl::Init() { |
if (crit_.get() == NULL) |
return false; |
- cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); |
- if (cb_crit_.get() == NULL) |
- return false; |
- |
Config config; |
config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
limiter_.reset(AudioProcessing::Create(config)); |
@@ -188,70 +195,64 @@ void AudioMixerImpl::Mix(int sample_rate, |
AudioFrame* audio_frame_for_mixing) { |
RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- AudioFrameList mixList; |
- AudioFrameList additionalFramesList; |
- std::map<int, MixerAudioSource*> mixedAudioSourcesMap; |
aleloi
2016/08/30 15:13:34
Isn't used any longer.
|
- { |
- CriticalSectionScoped cs(cb_crit_.get()); |
- Frequency mixing_frequency; |
- |
- switch (sample_rate) { |
- case 8000: |
- mixing_frequency = kNbInHz; |
- break; |
- case 16000: |
- mixing_frequency = kWbInHz; |
- break; |
- case 32000: |
- mixing_frequency = kSwbInHz; |
- break; |
- case 48000: |
- mixing_frequency = kFbInHz; |
- break; |
- default: |
- RTC_NOTREACHED(); |
- return; |
- } |
aleloi
2016/08/30 15:13:33
The frequency is no longer guarded by a lock. This
|
- if (OutputFrequency() != mixing_frequency) { |
- SetOutputFrequency(mixing_frequency); |
- } |
+ Frequency mixing_frequency; |
+ |
+ switch (sample_rate) { |
+ case 8000: |
+ mixing_frequency = kNbInHz; |
+ break; |
+ case 16000: |
+ mixing_frequency = kWbInHz; |
+ break; |
+ case 32000: |
+ mixing_frequency = kSwbInHz; |
+ break; |
+ case 48000: |
+ mixing_frequency = kFbInHz; |
+ break; |
+ default: |
+ WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
+ "Invalid frequency: %d", sample_rate); |
+ RTC_NOTREACHED(); |
+ return; |
+ } |
- mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources); |
- GetAdditionalAudio(&additionalFramesList); |
+ if (OutputFrequency() != mixing_frequency) { |
+ SetOutputFrequency(mixing_frequency); |
} |
- for (FrameAndMuteInfo& frame_and_mute : mixList) { |
- RemixFrame(frame_and_mute.frame, number_of_channels); |
+ AudioFrameList mix_list; |
+ AudioFrameList anonymous_mix_list; |
+ { |
+ CriticalSectionScoped cs(crit_.get()); |
+ mix_list = GetNonAnonymousAudio(); |
+ anonymous_mix_list = GetAnonymousAudio(); |
} |
aleloi
2016/08/30 15:13:34
This is the only section that accesses the participant lists.
|
- for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) { |
- RemixFrame(frame_and_mute.frame, number_of_channels); |
+ |
+ mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(), |
+ anonymous_mix_list.end()); |
+ |
+ for (const auto& frame : mix_list) { |
+ RemixFrame(frame, number_of_channels); |
} |
audio_frame_for_mixing->UpdateFrame( |
- -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech, |
+ -1, time_stamp_, NULL, 0, OutputFrequency(), AudioFrame::kNormalSpeech, |
AudioFrame::kVadPassive, number_of_channels); |
- |
time_stamp_ += static_cast<uint32_t>(sample_size_); |
- |
use_limiter_ = num_mixed_audio_sources_ > 1; |
- // We only use the limiter if it supports the output sample rate and |
aleloi
2016/08/30 15:13:33
It should support all sample rates now.
|
- // we're actually mixing multiple streams. |
- MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); |
- |
- { |
- CriticalSectionScoped cs(crit_.get()); |
- MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); |
+ // We only use the limiter if we're actually mixing multiple streams. |
+ MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_); |
- if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
- // Nothing was mixed, set the audio samples to silence. |
- audio_frame_for_mixing->samples_per_channel_ = sample_size_; |
- audio_frame_for_mixing->Mute(); |
- } else { |
- // Only call the limiter if we have something to mix. |
- LimitMixedAudio(audio_frame_for_mixing); |
- } |
aleloi
2016/08/30 15:13:34
There is no access to members that can be modified
|
+ if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
+ // Nothing was mixed, set the audio samples to silence. |
+ audio_frame_for_mixing->samples_per_channel_ = sample_size_; |
+ audio_frame_for_mixing->Mute(); |
+ } else { |
+ // Only call the limiter if we have something to mix. |
+ LimitMixedAudio(audio_frame_for_mixing); |
} |
// Pass the final result to the level indicator. |
@@ -261,8 +262,7 @@ void AudioMixerImpl::Mix(int sample_rate, |
} |
int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { |
- CriticalSectionScoped cs(crit_.get()); |
- |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
output_frequency_ = frequency; |
sample_size_ = |
static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000); |
@@ -271,7 +271,7 @@ int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { |
} |
AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { |
- CriticalSectionScoped cs(crit_.get()); |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
return output_frequency_; |
} |
@@ -282,9 +282,8 @@ int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, |
// audio source is in the _audioSourceList if it is being mixed. |
SetAnonymousMixabilityStatus(audio_source, false); |
} |
- size_t numMixedAudioSources; |
{ |
- CriticalSectionScoped cs(cb_crit_.get()); |
+ CriticalSectionScoped cs(crit_.get()); |
const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_); |
// API must be called with a new state. |
if (!(mixable ^ isMixed)) { |
@@ -309,27 +308,22 @@ int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, |
if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) { |
numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources; |
} |
- numMixedAudioSources = |
+ num_mixed_audio_sources_ = |
numMixedNonAnonymous + additional_audio_source_list_.size(); |
} |
- // A MixerAudioSource was added or removed. Make sure the scratch |
- // buffer is updated if necessary. |
- // Note: The scratch buffer may only be updated in Process(). |
aleloi
2016/08/30 15:13:34
Scratch buffer and Process() comment
seems stale
|
- CriticalSectionScoped cs(crit_.get()); |
- num_mixed_audio_sources_ = numMixedAudioSources; |
aleloi
2016/08/30 15:13:34
AFAIK, no need to guard |num_sources| with other locks.
|
return 0; |
} |
bool AudioMixerImpl::MixabilityStatus( |
const MixerAudioSource& audio_source) const { |
- CriticalSectionScoped cs(cb_crit_.get()); |
+ CriticalSectionScoped cs(crit_.get()); |
return IsAudioSourceInList(audio_source, audio_source_list_); |
} |
int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( |
MixerAudioSource* audio_source, |
bool anonymous) { |
- CriticalSectionScoped cs(cb_crit_.get()); |
+ CriticalSectionScoped cs(crit_.get()); |
if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) { |
if (anonymous) { |
return 0; |
@@ -363,18 +357,23 @@ int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( |
bool AudioMixerImpl::AnonymousMixabilityStatus( |
const MixerAudioSource& audio_source) const { |
- CriticalSectionScoped cs(cb_crit_.get()); |
+ CriticalSectionScoped cs(crit_.get()); |
return IsAudioSourceInList(audio_source, additional_audio_source_list_); |
} |
-AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { |
+AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const |
+ EXCLUSIVE_LOCKS_REQUIRED(crit_) { |
+ WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
+ "GetNonAnonymousAudio()"); |
+ |
AudioFrameList result; |
std::vector<SourceFrame> audioSourceMixingDataList; |
+ std::vector<SourceFrame> ramp_list; |
aleloi
2016/08/30 15:13:34
See comment about 2:nd SourceFrame constructor.
|
// Get audio source audio and put it in the struct vector. |
for (MixerAudioSource* audio_source : audio_source_list_) { |
auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( |
- id_, static_cast<int>(output_frequency_)); |
+ id_, static_cast<int>(OutputFrequency())); |
auto audio_frame_info = audio_frame_with_info.audio_frame_info; |
AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
@@ -394,7 +393,9 @@ AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { |
std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(), |
std::mem_fn(&SourceFrame::shouldMixBefore)); |
- // Go through list in order and put things in mixList. |
+ int maxAudioFrameCounter = kMaximumAmountOfMixedAudioSources; |
+ |
+ // Go through list in order and put unmuted frames in result list. |
for (SourceFrame& p : audioSourceMixingDataList) { |
// Filter muted. |
if (p.muted_) { |
@@ -406,56 +407,51 @@ AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { |
bool is_mixed = false; |
if (maxAudioFrameCounter > 0) { |
--maxAudioFrameCounter; |
- if (!p.was_mixed_before_) { |
- NewMixerRampIn(p.audio_frame_); |
- } |
- result.emplace_back(p.audio_frame_, false); |
+ result.push_back(p.audio_frame_); |
+ ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, |
+ p.was_mixed_before_, -1); |
is_mixed = true; |
} |
- |
- // Ramp out unmuted. |
- if (p.was_mixed_before_ && !is_mixed) { |
- NewMixerRampOut(p.audio_frame_); |
- result.emplace_back(p.audio_frame_, false); |
- } |
- |
p.audio_source_->_mixHistory->SetIsMixed(is_mixed); |
} |
+ Ramp(ramp_list); |
return result; |
} |
-void AudioMixerImpl::GetAdditionalAudio( |
- AudioFrameList* additionalFramesList) const { |
+AudioFrameList AudioMixerImpl::GetAnonymousAudio() const |
+ EXCLUSIVE_LOCKS_REQUIRED(crit_) { |
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
- "GetAdditionalAudio(additionalFramesList)"); |
+ "GetAnonymousAudio()"); |
// The GetAudioFrameWithMuted() callback may result in the audio source being |
// removed from additionalAudioFramesList_. If that happens it will |
// invalidate any iterators. Create a copy of the audio sources list such |
// that the list of participants can be traversed safely. |
+ std::vector<SourceFrame> ramp_list; |
MixerAudioSourceList additionalAudioSourceList; |
+ AudioFrameList result; |
additionalAudioSourceList.insert(additionalAudioSourceList.begin(), |
additional_audio_source_list_.begin(), |
additional_audio_source_list_.end()); |
- for (MixerAudioSourceList::const_iterator audio_source = |
- additionalAudioSourceList.begin(); |
- audio_source != additionalAudioSourceList.end(); ++audio_source) { |
- auto audio_frame_with_info = |
- (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_); |
- auto ret = audio_frame_with_info.audio_frame_info; |
+ for (const auto& audio_source : additionalAudioSourceList) { |
+ const auto audio_frame_with_info = |
+ audio_source->GetAudioFrameWithMuted(id_, OutputFrequency()); |
+ const auto ret = audio_frame_with_info.audio_frame_info; |
AudioFrame* audio_frame = audio_frame_with_info.audio_frame; |
if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_, |
"failed to GetAudioFrameWithMuted() from audio_source"); |
continue; |
} |
- if (audio_frame->samples_per_channel_ == 0) { |
- // Empty frame. Don't use it. |
- continue; |
aleloi
2016/08/30 15:13:34
Replaced this with a RTC_DCHECK_EQ in
MixFromList.
|
+ if (ret != MixerAudioSource::AudioFrameInfo::kMuted) { |
+ result.push_back(audio_frame); |
+ ramp_list.emplace_back(audio_source, audio_frame, false, |
+ audio_source->_mixHistory->IsMixed(), 0); |
+ audio_source->_mixHistory->SetIsMixed(true); |
} |
- additionalFramesList->push_back(FrameAndMuteInfo( |
- audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); |
} |
+ Ramp(ramp_list); |
+ return result; |
} |
bool AudioMixerImpl::IsAudioSourceInList( |
@@ -504,12 +500,9 @@ int32_t AudioMixerImpl::MixFromList(AudioFrame* mixedAudio, |
if (audioFrameList.empty()) |
return 0; |
- uint32_t position = 0; |
- |
aleloi
2016/08/30 15:13:34
Was unused.
|
if (audioFrameList.size() == 1) { |
- mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; |
- mixedAudio->elapsed_time_ms_ = |
- audioFrameList.front().frame->elapsed_time_ms_; |
+ mixedAudio->timestamp_ = audioFrameList.front()->timestamp_; |
+ mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_; |
} else { |
// TODO(wu): Issue 3390. |
// Audio frame timestamp is only supported in one channel case. |
@@ -517,37 +510,27 @@ int32_t AudioMixerImpl::MixFromList(AudioFrame* mixedAudio, |
mixedAudio->elapsed_time_ms_ = -1; |
} |
- for (AudioFrameList::const_iterator iter = audioFrameList.begin(); |
- iter != audioFrameList.end(); ++iter) { |
- if (!iter->muted) { |
- MixFrames(mixedAudio, iter->frame, use_limiter); |
+ for (const auto& frame : audioFrameList) { |
+ RTC_DCHECK_EQ(mixedAudio->sample_rate_hz_, frame->sample_rate_hz_); |
+ RTC_DCHECK_EQ( |
+ frame->samples_per_channel_, |
+ static_cast<size_t>((mixedAudio->sample_rate_hz_ * kFrameDurationInMs) / |
+ 1000)); |
+ |
+ // Mix |frame| into |mixedAudio|, with saturation protection. |
+ // This effect is applied to |frame| itself prior to mixing. |
+ if (use_limiter) { |
+ // Divide by two to avoid saturation in the mixing. |
+ // This is only meaningful if the limiter will be used. |
+ *frame >>= 1; |
} |
- |
- position++; |
+ RTC_DCHECK_EQ(frame->num_channels_, mixedAudio->num_channels_); |
+ *mixedAudio += *frame; |
} |
return 0; |
} |
-// TODO(andrew): consolidate this function with MixFromList. |
aleloi
2016/08/30 15:13:34
Done :)
|
-int32_t AudioMixerImpl::MixAnonomouslyFromList( |
- AudioFrame* mixedAudio, |
- const AudioFrameList& audioFrameList) const { |
- WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
- "MixAnonomouslyFromList(mixedAudio, audioFrameList)"); |
- |
- if (audioFrameList.empty()) |
- return 0; |
- |
- for (AudioFrameList::const_iterator iter = audioFrameList.begin(); |
- iter != audioFrameList.end(); ++iter) { |
- if (!iter->muted) { |
- MixFrames(mixedAudio, iter->frame, use_limiter_); |
- } |
- } |
- return 0; |
-} |
- |
bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const { |
if (!use_limiter_) { |
return true; |