| Index: webrtc/modules/audio_mixer/audio_mixer_impl.cc
|
| diff --git a/webrtc/modules/audio_mixer/audio_mixer_impl.cc b/webrtc/modules/audio_mixer/audio_mixer_impl.cc
|
| index 44e04fa32d4aa6de25d5297b0cdc74154f1368ca..43265a1341e2e7e709511927d2b84c90c6a43256 100644
|
| --- a/webrtc/modules/audio_mixer/audio_mixer_impl.cc
|
| +++ b/webrtc/modules/audio_mixer/audio_mixer_impl.cc
|
| @@ -14,6 +14,7 @@
|
| #include <functional>
|
| #include <utility>
|
|
|
| +#include "webrtc/base/logging.h"
|
| #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
|
| #include "webrtc/modules/utility/include/audio_frame_operations.h"
|
| #include "webrtc/system_wrappers/include/trace.h"
|
| @@ -23,29 +24,26 @@ namespace {
|
|
|
| class SourceFrame {
|
| public:
|
| - SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before)
|
| - : audio_source_(p),
|
| - audio_frame_(a),
|
| - muted_(m),
|
| - was_mixed_before_(was_mixed_before) {
|
| + SourceFrame(AudioSourceWithMixStatus* audio_source,
|
| + AudioFrame* audio_frame,
|
| + bool muted)
|
| + : audio_source_(audio_source), audio_frame_(audio_frame), muted_(muted) {
|
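| + // Energy is computed only for unmuted frames; ShouldMixBefore() uses it
|
| + // to rank sources.
|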
| if (!muted_) {
|
| - energy_ = NewMixerCalculateEnergy(*a);
|
| + energy_ = AudioMixerCalculateEnergy(*audio_frame);
|
| }
|
| }
|
|
|
| - SourceFrame(MixerAudioSource* p,
|
| - AudioFrame* a,
|
| - bool m,
|
| - bool was_mixed_before,
|
| + SourceFrame(AudioSourceWithMixStatus* audio_source,
|
| + AudioFrame* audio_frame,
|
| + bool muted,
|
| uint32_t energy)
|
| - : audio_source_(p),
|
| - audio_frame_(a),
|
| - muted_(m),
|
| - energy_(energy),
|
| - was_mixed_before_(was_mixed_before) {}
|
| -
|
| - // a.shouldMixBefore(b) is used to select mixer participants.
|
| - bool shouldMixBefore(const SourceFrame& other) const {
|
| + : audio_source_(audio_source),
|
| + audio_frame_(audio_frame),
|
| + muted_(muted),
|
| + energy_(energy) {}
|
| +
|
| + // a.ShouldMixBefore(b) is used to select mixer sources.
|
| + bool ShouldMixBefore(const SourceFrame& other) const {
|
| if (muted_ != other.muted_) {
|
| return other.muted_;
|
| }
|
| @@ -60,11 +58,10 @@ class SourceFrame {
|
| return energy_ > other.energy_;
|
| }
|
|
|
| - MixerAudioSource* audio_source_;
|
| - AudioFrame* audio_frame_;
|
| - bool muted_;
|
| - uint32_t energy_;
|
| - bool was_mixed_before_;
|
| + AudioSourceWithMixStatus* audio_source_ = nullptr;
|
| + AudioFrame* audio_frame_ = nullptr;
|
| + bool muted_ = true;
|
| + uint32_t energy_ = 0;
|
| };
|
|
|
| // Remixes a frame between stereo and mono.
|
| @@ -80,13 +77,13 @@ void RemixFrame(AudioFrame* frame, size_t number_of_channels) {
|
| void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) {
|
| for (const auto& source_frame : mixed_sources_and_frames) {
|
| // Ramp in previously unmixed.
|
| - if (!source_frame.was_mixed_before_) {
|
| + if (!source_frame.audio_source_->WasMixed()) {
|
| NewMixerRampIn(source_frame.audio_frame_);
|
| }
|
|
|
| const bool is_mixed = source_frame.audio_source_->IsMixed();
|
| // Ramp out currently unmixed.
|
| - if (source_frame.was_mixed_before_ && !is_mixed) {
|
| + if (source_frame.audio_source_->WasMixed() && !is_mixed) {
|
| NewMixerRampOut(source_frame.audio_frame_);
|
| }
|
| }
|
| @@ -133,6 +130,24 @@ int32_t MixFromList(AudioFrame* mixed_audio,
|
| return 0;
|
| }
|
|
|
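| +// Returns an iterator to the AudioSourceWithMixStatus wrapper that holds
|
| +// |audio_source|, or end() if the source is not in |audio_source_list|.
|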
| +MixerAudioSourceList::const_iterator FindSourceInList(
|
| + MixerAudioSource const* audio_source,
|
| + MixerAudioSourceList const* audio_source_list) {
|
| + return std::find_if(audio_source_list->begin(), audio_source_list->end(),
|
| + [audio_source](const AudioSourceWithMixStatus& p) {
|
| + return p.audio_source() == audio_source;
|
| + });
|
| +}
|
| +
|
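| +// Non-const overload of FindSourceInList() above.
|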
| +MixerAudioSourceList::iterator FindSourceInList(
|
| + MixerAudioSource const* audio_source,
|
| + MixerAudioSourceList* audio_source_list) {
|
| + return std::find_if(audio_source_list->begin(), audio_source_list->end(),
|
| + [audio_source](const AudioSourceWithMixStatus& p) {
|
| + return p.audio_source() == audio_source;
|
| + });
|
| +}
|
| +
|
| } // namespace
|
|
|
| std::unique_ptr<AudioMixer> AudioMixer::Create(int id) {
|
| @@ -153,7 +168,7 @@ AudioMixerImpl::AudioMixerImpl(int id, std::unique_ptr<AudioProcessing> limiter)
|
|
|
| AudioMixerImpl::~AudioMixerImpl() {}
|
|
|
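| +// Returning the concrete AudioMixerImpl rather than the AudioMixer
|
| +// interface lets tests reach the test-only accessor added in this change.
|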
| -std::unique_ptr<AudioMixer> AudioMixerImpl::Create(int id) {
|
| +std::unique_ptr<AudioMixerImpl> AudioMixerImpl::Create(int id) {
|
| Config config;
|
| config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
|
| std::unique_ptr<AudioProcessing> limiter(AudioProcessing::Create(config));
|
| @@ -179,7 +194,7 @@ std::unique_ptr<AudioMixer> AudioMixerImpl::Create(int id) {
|
| if (limiter->gain_control()->Enable(true) != limiter->kNoError)
|
| return nullptr;
|
|
|
| - return std::unique_ptr<AudioMixer>(
|
| + return std::unique_ptr<AudioMixerImpl>(
|
| new AudioMixerImpl(id, std::move(limiter)));
|
| }
|
|
|
| @@ -266,8 +281,8 @@ int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
|
| }
|
| {
|
| rtc::CritScope lock(&crit_);
|
| - const bool is_mixed =
|
| - IsAudioSourceInList(*audio_source, audio_source_list_);
|
| + const bool is_mixed = FindSourceInList(audio_source, &audio_source_list_) !=
|
| + audio_source_list_.end();
|
| // API must be called with a new state.
|
| if (!(mixable ^ is_mixed)) {
|
| WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
|
| @@ -300,14 +315,16 @@ int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
|
| bool AudioMixerImpl::MixabilityStatus(
|
| const MixerAudioSource& audio_source) const {
|
| rtc::CritScope lock(&crit_);
|
| - return IsAudioSourceInList(audio_source, audio_source_list_);
|
| + return FindSourceInList(&audio_source, &audio_source_list_) !=
|
| + audio_source_list_.end();
|
| }
|
|
|
| int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(
|
| MixerAudioSource* audio_source,
|
| bool anonymous) {
|
| rtc::CritScope lock(&crit_);
|
| - if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) {
|
| + if (FindSourceInList(audio_source, &additional_audio_source_list_) !=
|
| + additional_audio_source_list_.end()) {
|
| if (anonymous) {
|
| return 0;
|
| }
|
| @@ -341,10 +358,11 @@ int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(
|
| bool AudioMixerImpl::AnonymousMixabilityStatus(
|
| const MixerAudioSource& audio_source) const {
|
| rtc::CritScope lock(&crit_);
|
| - return IsAudioSourceInList(audio_source, additional_audio_source_list_);
|
| + return FindSourceInList(&audio_source, &additional_audio_source_list_) !=
|
| + additional_audio_source_list_.end();
|
| }
|
|
|
| -AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const {
|
| +AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() {
|
| RTC_DCHECK_RUN_ON(&thread_checker_);
|
| WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
|
| "GetNonAnonymousAudio()");
|
| @@ -353,33 +371,33 @@ AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const {
|
| std::vector<SourceFrame> ramp_list;
|
|
|
| // Get audio source audio and put it in the struct vector.
|
| - for (auto* const audio_source : audio_source_list_) {
|
| - auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
|
| - id_, static_cast<int>(OutputFrequency()));
|
| + for (auto& source_and_status : audio_source_list_) {
|
| + auto audio_frame_with_info =
|
| + source_and_status.audio_source()->GetAudioFrameWithMuted(
|
| + id_, static_cast<int>(OutputFrequency()));
|
|
|
| const auto audio_frame_info = audio_frame_with_info.audio_frame_info;
|
| AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
|
|
|
| if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
|
| WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
|
| - "failed to GetAudioFrameWithMuted() from participant");
|
| + "failed to GetAudioFrameWithMuted() from source");
|
| continue;
|
| }
|
| audio_source_mixing_data_list.emplace_back(
|
| - audio_source, audio_source_audio_frame,
|
| - audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
|
| - audio_source->WasMixed());
|
| + &source_and_status, audio_source_audio_frame,
|
| + audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted);
|
| }
|
|
|
| // Sort frames by sorting function.
|
| std::sort(audio_source_mixing_data_list.begin(),
|
| audio_source_mixing_data_list.end(),
|
| - std::mem_fn(&SourceFrame::shouldMixBefore));
|
| + std::mem_fn(&SourceFrame::ShouldMixBefore));
|
|
|
| int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources;
|
|
|
| // Go through list in order and put unmuted frames in result list.
|
| - for (const SourceFrame& p : audio_source_mixing_data_list) {
|
| + for (const auto& p : audio_source_mixing_data_list) {
|
| // Filter muted.
|
| if (p.muted_) {
|
| p.audio_source_->SetIsMixed(false);
|
| @@ -391,8 +409,7 @@ AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const {
|
| if (max_audio_frame_counter > 0) {
|
| --max_audio_frame_counter;
|
| result.push_back(p.audio_frame_);
|
| - ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false,
|
| - p.was_mixed_before_, -1);
|
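| + // Ramp() never reads the energy member, so -1 is only a placeholder.
|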
| + ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, -1);
|
| is_mixed = true;
|
| }
|
| p.audio_source_->SetIsMixed(is_mixed);
|
| @@ -401,24 +418,16 @@ AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const {
|
| return result;
|
| }
|
|
|
| -AudioFrameList AudioMixerImpl::GetAnonymousAudio() const {
|
| +AudioFrameList AudioMixerImpl::GetAnonymousAudio() {
|
| RTC_DCHECK_RUN_ON(&thread_checker_);
|
| WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
|
| "GetAnonymousAudio()");
|
| - // The GetAudioFrameWithMuted() callback may result in the audio source being
|
| - // removed from additionalAudioFramesList_. If that happens it will
|
| - // invalidate any iterators. Create a copy of the audio sources list such
|
| - // that the list of participants can be traversed safely.
|
| std::vector<SourceFrame> ramp_list;
|
| - MixerAudioSourceList additional_audio_sources_list;
|
| AudioFrameList result;
|
| - additional_audio_sources_list.insert(additional_audio_sources_list.begin(),
|
| - additional_audio_source_list_.begin(),
|
| - additional_audio_source_list_.end());
|
| -
|
| - for (const auto& audio_source : additional_audio_sources_list) {
|
| + for (auto& source_and_status : additional_audio_source_list_) {
|
| const auto audio_frame_with_info =
|
| - audio_source->GetAudioFrameWithMuted(id_, OutputFrequency());
|
| + source_and_status.audio_source()->GetAudioFrameWithMuted(
|
| + id_, OutputFrequency());
|
| const auto ret = audio_frame_with_info.audio_frame_info;
|
| AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
|
| if (ret == MixerAudioSource::AudioFrameInfo::kError) {
|
| @@ -428,32 +437,20 @@ AudioFrameList AudioMixerImpl::GetAnonymousAudio() const {
|
| }
|
| if (ret != MixerAudioSource::AudioFrameInfo::kMuted) {
|
| result.push_back(audio_frame);
|
| - ramp_list.emplace_back(audio_source, audio_frame, false,
|
| - audio_source->IsMixed(), 0);
|
| - audio_source->SetIsMixed(true);
|
| + ramp_list.emplace_back(&source_and_status, audio_frame, false, 0);
|
| + source_and_status.SetIsMixed(true);
|
| }
|
| }
|
| Ramp(ramp_list);
|
| return result;
|
| }
|
|
|
| -bool AudioMixerImpl::IsAudioSourceInList(
|
| - const MixerAudioSource& audio_source,
|
| - const MixerAudioSourceList& audio_source_list) const {
|
| - WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
|
| - "IsAudioSourceInList(audio_source,audio_source_list)");
|
| - return std::find(audio_source_list.begin(), audio_source_list.end(),
|
| - &audio_source) != audio_source_list.end();
|
| -}
|
| -
|
| bool AudioMixerImpl::AddAudioSourceToList(
|
| MixerAudioSource* audio_source,
|
| MixerAudioSourceList* audio_source_list) const {
|
| WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
|
| "AddAudioSourceToList(audio_source, audio_source_list)");
|
| - audio_source_list->push_back(audio_source);
|
| - // Make sure that the mixed status is correct for new MixerAudioSource.
|
| - audio_source->ResetMixedStatus();
|
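| + // The emplaced AudioSourceWithMixStatus wrapper carries the mix status,
|
| + // so the old ResetMixedStatus() call is no longer needed.
|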
| + audio_source_list->emplace_back(audio_source);
|
| return true;
|
| }
|
|
|
| @@ -462,12 +459,9 @@ bool AudioMixerImpl::RemoveAudioSourceFromList(
|
| MixerAudioSourceList* audio_source_list) const {
|
| WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
|
| "RemoveAudioSourceFromList(audio_source, audio_source_list)");
|
| - const auto iter = std::find(audio_source_list->begin(),
|
| - audio_source_list->end(), audio_source);
|
| + const auto iter = FindSourceInList(audio_source, audio_source_list);
|
| if (iter != audio_source_list->end()) {
|
| audio_source_list->erase(iter);
|
| - // AudioSource is no longer mixed, reset to default.
|
| - audio_source->ResetMixedStatus();
|
| return true;
|
| } else {
|
| return false;
|
| @@ -519,4 +513,25 @@ int AudioMixerImpl::GetOutputAudioLevelFullRange() {
|
| "GetAudioOutputLevelFullRange() => level=%d", level);
|
| return level;
|
| }
|
| +
|
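| +// Test-only accessor: reports whether |audio_source| was mixed during the
|
| +// most recent mixing round, searching both the regular and anonymous lists.
|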
| +bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest(
|
| + MixerAudioSource* audio_source) {
|
| + RTC_DCHECK_RUN_ON(&thread_checker_);
|
| + rtc::CritScope lock(&crit_);
|
| +
|
| + const auto non_anonymous_iter =
|
| + FindSourceInList(audio_source, &audio_source_list_);
|
| + if (non_anonymous_iter != audio_source_list_.end()) {
|
| + return non_anonymous_iter->IsMixed();
|
| + }
|
| +
|
| + const auto anonymous_iter =
|
| + FindSourceInList(audio_source, &additional_audio_source_list_);
|
| + if (anonymous_iter != additional_audio_source_list_.end()) {
|
| + return anonymous_iter->IsMixed();
|
| + }
|
| +
|
| + LOG(LS_ERROR) << "Audio source unknown";
|
| + return false;
|
| +}
|
| } // namespace webrtc
|
|
|
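| A minimal usage sketch of the new test-only accessor (FakeAudioSource is a
|
| hypothetical MixerAudioSource implementation and EXPECT_FALSE comes from
|
| gtest; neither is part of this change):
|
|   std::unique_ptr<AudioMixerImpl> mixer = AudioMixerImpl::Create(/*id=*/1);
|
|   FakeAudioSource source;
|
|   mixer->SetMixabilityStatus(&source, true);
|
|   // Nothing has been mixed yet, so the source should report "not mixed".
|
|   EXPECT_FALSE(mixer->GetAudioSourceMixabilityStatusForTest(&source));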