Chromium Code Reviews

Unified Diff: webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc

Issue 2132563002: Rewrote UpdateToMix in the audio mixer. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@remove_memory_pool
Patch Set: Created 4 years, 5 months ago
Index: webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
diff --git a/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
index 19991bcb2a70ed937c253127e9c0e6fc5bde58fb..a3814873d5200cc61506c8c455d873e80abde5b2 100644
--- a/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
+++ b/webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc
@@ -22,12 +22,43 @@
namespace webrtc {
namespace {
-struct ParticipantFrameStruct {
- ParticipantFrameStruct(MixerAudioSource* p, AudioFrame* a, bool m)
- : participant(p), audioFrame(a), muted(m) {}
- MixerAudioSource* participant;
- AudioFrame* audioFrame;
- bool muted;
+class ParticipantFrameStruct {
ossu 2016/07/08 11:02:14 I don't think we suffix structs with Struct. Proba
aleloi 2016/07/08 13:45:23 Done.
+ public:
+ ParticipantFrameStruct(MixerAudioSource* p,
+ AudioFrame* a,
+ bool m,
+ bool was_mixed_before)
+ : participant_(p),
+ audio_frame_(a),
+ muted_(m),
+ was_mixed_before_(was_mixed_before) {
+ if (!muted_) {
+ energy_ = CalculateEnergy(*a);
+ }
+ }
+
+ // a.shouldMixAfter(b) is used to select mixer participants.
+ bool shouldMixAfter(const ParticipantFrameStruct& other) const {
+ if (muted_ != other.muted_) {
+ return muted_ && !other.muted_;
ossu 2016/07/08 11:02:14 wh... what? Returning muted should be enough here.
aleloi 2016/07/08 13:45:23 Done. You are right, it's shorter.
+ }
+
+ auto our_activity = audio_frame_->vad_activity_,
+ other_activity = other.audio_frame_->vad_activity_;
ossu 2016/07/08 11:02:14 I'm not sure if this is in the style guide, but I'
aleloi 2016/07/08 13:45:24 Done.
+ if (our_activity != other_activity) {
+ return (our_activity == AudioFrame::kVadPassive ||
+ our_activity == AudioFrame::kVadUnknown) &&
+ other_activity == AudioFrame::kVadActive;
+ }
+
+ return energy_ < other.energy_;
+ }
+
+ MixerAudioSource* participant_;
ossu 2016/07/08 11:02:13 participant or source? Pick one! :)
+ AudioFrame* audio_frame_;
+ bool muted_;
+ uint32_t energy_;
+ bool was_mixed_before_;
};
typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList;
@@ -183,7 +214,6 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
lowFreq = 32000;
}
if (lowFreq <= 0) {
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
ossu 2016/07/08 11:02:14 Why is this no longer useful?
return;
} else {
switch (lowFreq) {
@@ -210,16 +240,13 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
default:
RTC_DCHECK(false);
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
return;
}
}
- UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
- &remainingParticipantsAllowedToMix);
-
+ mixList = UpdateToMix(remainingParticipantsAllowedToMix);
ossu 2016/07/08 11:02:14 You could just use kMaximumAmountOfMixedParticipan
aleloi 2016/07/08 13:45:24 Done.
+ remainingParticipantsAllowedToMix -= mixList.size();
ossu 2016/07/08 11:02:14 Who cares? It's never used again, is it?
aleloi 2016/07/08 13:45:24 Removed. Thanks!
GetAdditionalAudio(&additionalFramesList);
- UpdateMixedStatus(mixedParticipantsMap);
}
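Following the two review replies above, the counter plumbing presumably disappears in the next patch set. A minimal sketch of what the call site might then look like, assuming kMaximumAmountOfMixedParticipants (the constant used in the DCHECKs elsewhere in this file) is passed straight through and the leftover counter is dropped:

    // Hypothetical call site after both comments are addressed: the compile-time
    // cap is handed directly to UpdateToMix() and no remaining count is kept.
    mixList = UpdateToMix(kMaximumAmountOfMixedParticipants);
    GetAdditionalAudio(&additionalFramesList);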
// TODO(henrike): it might be better to decide the number of channels
@@ -258,10 +285,6 @@ void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
}
}
- ClearAudioFrameList(&mixList);
ossu 2016/07/08 11:02:14 So I guess this used to be necessary to hand the f
- ClearAudioFrameList(&rampOutList);
- ClearAudioFrameList(&additionalFramesList);
- RTC_DCHECK(thread_checker_.CalledOnValidThread());
return;
}
@@ -426,173 +449,65 @@ int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
return highestFreq;
}
-void NewAudioConferenceMixerImpl::UpdateToMix(
- AudioFrameList* mixList,
- AudioFrameList* rampOutList,
- std::map<int, MixerAudioSource*>* mixParticipantList,
- size_t* maxAudioFrameCounter) const {
- WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
- "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
- *maxAudioFrameCounter);
- const size_t mixListStartSize = mixList->size();
- AudioFrameList activeList;
- // Struct needed by the passive lists to keep track of which AudioFrame
- // belongs to which MixerAudioSource.
- ParticipantFrameStructList passiveWasNotMixedList;
- ParticipantFrameStructList passiveWasMixedList;
- for (MixerAudioSourceList::const_iterator participant =
- _participantList.begin();
- participant != _participantList.end(); ++participant) {
- // Stop keeping track of passive participants if there are already
- // enough participants available (they wont be mixed anyway).
- bool mustAddToPassiveList =
- (*maxAudioFrameCounter >
- (activeList.size() + passiveWasMixedList.size() +
- passiveWasNotMixedList.size()));
-
- bool wasMixed = false;
- wasMixed = (*participant)->_mixHistory->WasMixed();
+AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix(
+ size_t maxAudioFrameCounter) const {
+ AudioFrameList result;
+ std::vector<ParticipantFrameStruct> participantMixingDataList;
- auto audio_frame_with_info =
- (*participant)->GetAudioFrameWithMuted(_id, _outputFrequency);
+ // Get participant audio and put it in the struct vector.
+ for (MixerAudioSource* participant : _participantList) {
+ auto audio_frame_with_info = participant->GetAudioFrameWithMuted(
ossu 2016/07/08 11:02:14 Hmm... reading this, I'm thinking AudioFrameWithIn
aleloi 2016/07/08 13:45:24 I agree. Changed.
+ _id, static_cast<int>(_outputFrequency));
- auto ret = audio_frame_with_info.audio_frame_info;
- AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
- if (ret == MixerAudioSource::AudioFrameInfo::kError) {
- continue;
- }
- const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
- if (_participantList.size() != 1) {
- // TODO(wu): Issue 3390, add support for multiple participants case.
- audio_frame->ntp_time_ms_ = -1;
- }
+ auto audio_frame_info = audio_frame_with_info.audio_frame_info;
+ AudioFrame* participant_audio_frame = audio_frame_with_info.audio_frame;
- // TODO(henrike): this assert triggers in some test cases where SRTP is
- // used which prevents NetEQ from making a VAD. Temporarily disable this
- // assert until the problem is fixed on a higher level.
- // RTC_DCHECK(audio_frame->vad_activity_ != AudioFrame::kVadUnknown);
- if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) {
+ if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
- "invalid VAD state from participant");
+ "failed to GetAudioFrameWithMuted() from participant");
+ continue;
}
+ participantMixingDataList.emplace_back(
+ participant, participant_audio_frame,
+ audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
+ participant->_mixHistory->WasMixed());
+ }
- if (audio_frame->vad_activity_ == AudioFrame::kVadActive) {
- if (!wasMixed && !muted) {
- RampIn(*audio_frame);
- }
+ // Sort frames by sorting function.
+ std::sort(
ossu 2016/07/08 11:02:13 Suggestion: rework shouldMixAfter into shouldMixBe
aleloi 2016/07/08 13:45:24 Done. Btw, I got the impression from reading the s
ossu 2016/07/08 13:58:34 That makes perfect sense. Reading up on that, I mi
+ participantMixingDataList.begin(), participantMixingDataList.end(),
+ [](const ParticipantFrameStruct& a, const ParticipantFrameStruct& b) {
+ return b.shouldMixAfter(a);
+ });
+
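The exchange above suggests inverting the predicate so it can be handed to std::sort directly; std::sort also requires the comparator to define a strict weak ordering, which is presumably what the follow-up about the standard refers to. A minimal sketch of a reworked shouldMixBefore, keeping the current member names, folding in the earlier "returning muted should be enough" simplification, and assuming energy_ is given a defined value (e.g. 0) for muted frames so the ordering stays strict-weak:

    // Hypothetical comparator: true when *this* should be mixed before |other|.
    // Unmuted beats muted, active speech beats passive/unknown VAD, and higher
    // energy wins ties.
    bool shouldMixBefore(const ParticipantFrameStruct& other) const {
      if (muted_ != other.muted_) {
        return other.muted_;
      }
      const bool we_are_active =
          audio_frame_->vad_activity_ == AudioFrame::kVadActive;
      const bool other_is_active =
          other.audio_frame_->vad_activity_ == AudioFrame::kVadActive;
      if (we_are_active != other_is_active) {
        return we_are_active;
      }
      return energy_ > other.energy_;
    }

    // The sort then reads in the natural direction:
    std::sort(participantMixingDataList.begin(), participantMixingDataList.end(),
              [](const ParticipantFrameStruct& a, const ParticipantFrameStruct& b) {
                return a.shouldMixBefore(b);
              });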
+ // Go through list in order and put things in mixList.
+ for (ParticipantFrameStruct& p : participantMixingDataList) {
+ // Filter muted.
+ if (p.muted_) {
+ p.participant_->_mixHistory->SetIsMixed(false);
+ continue;
+ }
- if (activeList.size() >= *maxAudioFrameCounter) {
- // There are already more active participants than should be
- // mixed. Only keep the ones with the highest energy.
- AudioFrameList::iterator replaceItem;
- uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame);
-
- bool found_replace_item = false;
- for (AudioFrameList::iterator iter = activeList.begin();
- iter != activeList.end(); ++iter) {
- const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame);
- if (energy < lowestEnergy) {
- replaceItem = iter;
- lowestEnergy = energy;
- found_replace_item = true;
- }
- }
- if (found_replace_item) {
- RTC_DCHECK(!muted); // Cannot replace with a muted frame.
- FrameAndMuteInfo replaceFrame = *replaceItem;
-
- bool replaceWasMixed = false;
- std::map<int, MixerAudioSource*>::const_iterator it =
- mixParticipantList->find(replaceFrame.frame->id_);
-
- // When a frame is pushed to |activeList| it is also pushed
- // to mixParticipantList with the frame's id. This means
- // that the Find call above should never fail.
- RTC_DCHECK(it != mixParticipantList->end());
- replaceWasMixed = it->second->_mixHistory->WasMixed();
-
- mixParticipantList->erase(replaceFrame.frame->id_);
- activeList.erase(replaceItem);
-
- activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
- (*mixParticipantList)[audio_frame->id_] = *participant;
- RTC_DCHECK(mixParticipantList->size() <=
- kMaximumAmountOfMixedParticipants);
-
- if (replaceWasMixed) {
- if (!replaceFrame.muted) {
- RampOut(*replaceFrame.frame);
- }
- rampOutList->push_back(replaceFrame);
- RTC_DCHECK(rampOutList->size() <=
- kMaximumAmountOfMixedParticipants);
- }
- } else {
- if (wasMixed) {
- if (!muted) {
- RampOut(*audio_frame);
- }
- rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted));
- assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
- }
- }
- } else {
- activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
- (*mixParticipantList)[audio_frame->id_] = *participant;
- RTC_DCHECK(mixParticipantList->size() <=
- kMaximumAmountOfMixedParticipants);
- }
- } else {
- if (wasMixed) {
- ParticipantFrameStruct* part_struct =
- new ParticipantFrameStruct(*participant, audio_frame, muted);
- passiveWasMixedList.push_back(part_struct);
- } else if (mustAddToPassiveList) {
- if (!muted) {
- RampIn(*audio_frame);
- }
- ParticipantFrameStruct* part_struct =
- new ParticipantFrameStruct(*participant, audio_frame, muted);
- passiveWasNotMixedList.push_back(part_struct);
+ // Add frame to result vectorfor mixing.
ossu 2016/07/08 11:02:14 vectorfor -> vector for
aleloi 2016/07/08 13:45:23 Done.
+ bool is_mixed = false;
+ if (maxAudioFrameCounter > 0) {
+ --maxAudioFrameCounter;
+ if (!p.was_mixed_before_) {
+ RampIn(*p.audio_frame_);
}
+ result.emplace_back(p.audio_frame_, false);
+ is_mixed = true;
}
- }
- RTC_DCHECK(activeList.size() <= *maxAudioFrameCounter);
- // At this point it is known which participants should be mixed. Transfer
- // this information to this functions output parameters.
- for (AudioFrameList::const_iterator iter = activeList.begin();
- iter != activeList.end(); ++iter) {
- mixList->push_back(*iter);
- }
- activeList.clear();
- // Always mix a constant number of AudioFrames. If there aren't enough
- // active participants mix passive ones. Starting with those that was mixed
- // last iteration.
- for (ParticipantFrameStructList::const_iterator iter =
- passiveWasMixedList.begin();
- iter != passiveWasMixedList.end(); ++iter) {
- if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
- mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
- (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
- RTC_DCHECK(mixParticipantList->size() <=
- kMaximumAmountOfMixedParticipants);
- }
- delete *iter;
- }
- // And finally the ones that have not been mixed for a while.
- for (ParticipantFrameStructList::const_iterator iter =
- passiveWasNotMixedList.begin();
- iter != passiveWasNotMixedList.end(); ++iter) {
- if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
- mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
- (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
- RTC_DCHECK(mixParticipantList->size() <=
- kMaximumAmountOfMixedParticipants);
+
+ // Ramp out unmuted.
+ if (p.was_mixed_before_ && !is_mixed) {
+ RampOut(*p.audio_frame_);
+ result.emplace_back(p.audio_frame_, false);
}
- delete *iter;
+
+ p.participant_->_mixHistory->SetIsMixed(is_mixed);
}
- RTC_DCHECK(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
- *maxAudioFrameCounter += mixListStartSize - mixList->size();
+ return result;
}
void NewAudioConferenceMixerImpl::GetAdditionalAudio(
ossu 2016/07/08 11:02:14 Not necessary for this CL, but this should be chan
aleloi 2016/07/08 13:45:23 Acknowledged.
@@ -629,37 +544,6 @@ void NewAudioConferenceMixerImpl::GetAdditionalAudio(
}
}
-void NewAudioConferenceMixerImpl::UpdateMixedStatus(
- const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const {
- WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
- "UpdateMixedStatus(mixedParticipantsMap)");
- RTC_DCHECK(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
-
- // Loop through all participants. If they are in the mix map they
- // were mixed.
- for (MixerAudioSourceList::const_iterator participant =
- _participantList.begin();
- participant != _participantList.end(); ++participant) {
- bool isMixed = false;
- for (std::map<int, MixerAudioSource*>::const_iterator it =
- mixedParticipantsMap.begin();
- it != mixedParticipantsMap.end(); ++it) {
- if (it->second == *participant) {
- isMixed = true;
- break;
- }
- }
- (*participant)->_mixHistory->SetIsMixed(isMixed);
- }
-}
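The bookkeeping done by the removed pass above now happens inline in the new UpdateToMix: each source's mix history is updated the moment its fate is decided, so no separate scan of all participants against a mixed-participants map is needed. A condensed sketch of that pattern (ramping omitted, all names as in the diff above):

    // Per-source bookkeeping in the new UpdateToMix(), replacing the removed
    // UpdateMixedStatus(): unmuted sources within the cap are marked as mixed,
    // muted sources and sources over the cap are not.
    bool is_mixed = false;
    if (!p.muted_ && maxAudioFrameCounter > 0) {
      --maxAudioFrameCounter;
      is_mixed = true;
    }
    p.participant_->_mixHistory->SetIsMixed(is_mixed);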
-
-void NewAudioConferenceMixerImpl::ClearAudioFrameList(
- AudioFrameList* audioFrameList) const {
- WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
- "ClearAudioFrameList(audioFrameList)");
- audioFrameList->clear();
-}
-
bool NewAudioConferenceMixerImpl::IsParticipantInList(
const MixerAudioSource& participant,
const MixerAudioSourceList& participantList) const {
