Chromium Code Reviews

Unified Diff: webrtc/modules/audio_mixer/audio_mixer_impl.cc

Issue 2396803004: Moved MixerAudioSource and removed audio_mixer_defines.h. (Closed)
Patch Set: Created 4 years, 2 months ago
Index: webrtc/modules/audio_mixer/audio_mixer_impl.cc
diff --git a/webrtc/modules/audio_mixer/audio_mixer_impl.cc b/webrtc/modules/audio_mixer/audio_mixer_impl.cc
index 43265a1341e2e7e709511927d2b84c90c6a43256..f8f7586f55d26ea2ecbf66d291162408e7954031 100644
--- a/webrtc/modules/audio_mixer/audio_mixer_impl.cc
+++ b/webrtc/modules/audio_mixer/audio_mixer_impl.cc
@@ -24,68 +24,60 @@ namespace {
class SourceFrame {
public:
- SourceFrame(AudioSourceWithMixStatus* audio_source,
+ SourceFrame(AudioMixerImpl::SourceStatus* source_status,
AudioFrame* audio_frame,
bool muted)
- : audio_source_(audio_source), audio_frame_(audio_frame), muted_(muted) {
+ : source_status_(source_status),
+ audio_frame_(audio_frame),
+ muted_(muted) {
+ RTC_DCHECK(source_status);
+ RTC_DCHECK(audio_frame);
if (!muted_) {
energy_ = AudioMixerCalculateEnergy(*audio_frame);
}
}
- SourceFrame(AudioSourceWithMixStatus* audio_source,
+ SourceFrame(AudioMixerImpl::SourceStatus* source_status,
AudioFrame* audio_frame,
bool muted,
uint32_t energy)
- : audio_source_(audio_source),
+ : source_status_(source_status),
audio_frame_(audio_frame),
muted_(muted),
- energy_(energy) {}
-
- // a.ShouldMixBefore(b) is used to select mixer sources.
- bool ShouldMixBefore(const SourceFrame& other) const {
- if (muted_ != other.muted_) {
- return other.muted_;
- }
-
- const auto our_activity = audio_frame_->vad_activity_;
- const auto other_activity = other.audio_frame_->vad_activity_;
-
- if (our_activity != other_activity) {
- return our_activity == AudioFrame::kVadActive;
- }
-
- return energy_ > other.energy_;
+ energy_(energy) {
+ RTC_DCHECK(source_status);
+ RTC_DCHECK(audio_frame);
aleloi 2016/10/07 12:59:21 Added checks.
}
- AudioSourceWithMixStatus* audio_source_ = nullptr;
+ AudioMixerImpl::SourceStatus* source_status_ = nullptr;
AudioFrame* audio_frame_ = nullptr;
bool muted_ = true;
uint32_t energy_ = 0;
};
-// Remixes a frame between stereo and mono.
-void RemixFrame(AudioFrame* frame, size_t number_of_channels) {
- RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
- if (frame->num_channels_ == 1 && number_of_channels == 2) {
- AudioFrameOperations::MonoToStereo(frame);
- } else if (frame->num_channels_ == 2 && number_of_channels == 1) {
- AudioFrameOperations::StereoToMono(frame);
aleloi 2016/10/07 12:59:21 Moved to audio_frame_operations
+// ShouldMixBefore(a, b) is used to select mixer sources.
+bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) {
+ if (a.muted_ != b.muted_) {
+ return b.muted_;
}
+
+ const auto a_activity = a.audio_frame_->vad_activity_;
+ const auto b_activity = b.audio_frame_->vad_activity_;
+
+ if (a_activity != b_activity) {
+ return a_activity == AudioFrame::kVadActive;
+ }
+
+ return a.energy_ > b.energy_;
}
aleloi 2016/10/07 12:59:21 Moved ShouldMixBefore outside.
-void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) {
+void RampAndUpdateGain(
+ const std::vector<SourceFrame>& mixed_sources_and_frames) {
for (const auto& source_frame : mixed_sources_and_frames) {
- // Ramp in previously unmixed.
- if (!source_frame.audio_source_->WasMixed()) {
- NewMixerRampIn(source_frame.audio_frame_);
aleloi 2016/10/07 12:59:21 Replaced with single 'Ramp' in audio_frame_operations.
- }
-
- const bool is_mixed = source_frame.audio_source_->IsMixed();
- // Ramp out currently unmixed.
- if (source_frame.audio_source_->WasMixed() && !is_mixed) {
- NewMixerRampOut(source_frame.audio_frame_);
- }
+ float target_gain_ = source_frame.source_status_->is_mixed_ ? 1 : 0;
+ Ramp(source_frame.audio_frame_, source_frame.source_status_->gain_,
+ target_gain_);
+ source_frame.source_status_->gain_ = target_gain_;
}
}
@@ -130,21 +122,21 @@ int32_t MixFromList(AudioFrame* mixed_audio,
return 0;
}
-MixerAudioSourceList::const_iterator FindSourceInList(
- MixerAudioSource const* audio_source,
- MixerAudioSourceList const* audio_source_list) {
+AudioMixerImpl::MixerAudioSourceList::const_iterator FindSourceInList(
+ AudioMixerImpl::Source const* audio_source,
+ AudioMixerImpl::MixerAudioSourceList const* audio_source_list) {
return std::find_if(audio_source_list->begin(), audio_source_list->end(),
- [audio_source](const AudioSourceWithMixStatus& p) {
- return p.audio_source() == audio_source;
+ [audio_source](const AudioMixerImpl::SourceStatus& p) {
+ return p.audio_source_ == audio_source;
});
}
-MixerAudioSourceList::iterator FindSourceInList(
- MixerAudioSource const* audio_source,
- MixerAudioSourceList* audio_source_list) {
+AudioMixerImpl::MixerAudioSourceList::iterator FindSourceInList(
+ AudioMixerImpl::Source const* audio_source,
+ AudioMixerImpl::MixerAudioSourceList* audio_source_list) {
return std::find_if(audio_source_list->begin(), audio_source_list->end(),
- [audio_source](const AudioSourceWithMixStatus& p) {
- return p.audio_source() == audio_source;
+ [audio_source](const AudioMixerImpl::SourceStatus& p) {
+ return p.audio_source_ == audio_source;
});
}
@@ -272,7 +264,7 @@ AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const {
return output_frequency_;
}
-int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
+int32_t AudioMixerImpl::SetMixabilityStatus(Source* audio_source,
bool mixable) {
if (!mixable) {
// Anonymous audio sources are in a separate list. Make sure that the
@@ -312,16 +304,14 @@ int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
return 0;
}
-bool AudioMixerImpl::MixabilityStatus(
- const MixerAudioSource& audio_source) const {
+bool AudioMixerImpl::MixabilityStatus(const Source& audio_source) const {
rtc::CritScope lock(&crit_);
return FindSourceInList(&audio_source, &audio_source_list_) !=
audio_source_list_.end();
}
-int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(
- MixerAudioSource* audio_source,
- bool anonymous) {
+int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(Source* audio_source,
+ bool anonymous) {
rtc::CritScope lock(&crit_);
if (FindSourceInList(audio_source, &additional_audio_source_list_) !=
additional_audio_source_list_.end()) {
@@ -356,7 +346,7 @@ int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(
}
bool AudioMixerImpl::AnonymousMixabilityStatus(
- const MixerAudioSource& audio_source) const {
+ const Source& audio_source) const {
rtc::CritScope lock(&crit_);
return FindSourceInList(&audio_source, &additional_audio_source_list_) !=
additional_audio_source_list_.end();
@@ -373,26 +363,25 @@ AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() {
// Get audio source audio and put it in the struct vector.
for (auto& source_and_status : audio_source_list_) {
auto audio_frame_with_info =
- source_and_status.audio_source()->GetAudioFrameWithMuted(
+ source_and_status.audio_source_->GetAudioFrameWithMuted(
id_, static_cast<int>(OutputFrequency()));
const auto audio_frame_info = audio_frame_with_info.audio_frame_info;
AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
- if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
+ if (audio_frame_info == Source::AudioFrameInfo::kError) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
"failed to GetAudioFrameWithMuted() from source");
continue;
}
audio_source_mixing_data_list.emplace_back(
&source_and_status, audio_source_audio_frame,
- audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted);
+ audio_frame_info == Source::AudioFrameInfo::kMuted);
}
// Sort frames by sorting function.
std::sort(audio_source_mixing_data_list.begin(),
- audio_source_mixing_data_list.end(),
- std::mem_fn(&SourceFrame::ShouldMixBefore));
+ audio_source_mixing_data_list.end(), ShouldMixBefore);
int max_audio_frame_counter = kMaximumAmountOfMixedAudioSources;
@@ -400,7 +389,7 @@ AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() {
for (const auto& p : audio_source_mixing_data_list) {
// Filter muted.
if (p.muted_) {
- p.audio_source_->SetIsMixed(false);
+ p.source_status_->is_mixed_ = false;
continue;
}
@@ -409,12 +398,12 @@ AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() {
if (max_audio_frame_counter > 0) {
--max_audio_frame_counter;
result.push_back(p.audio_frame_);
- ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false, -1);
+ ramp_list.emplace_back(p.source_status_, p.audio_frame_, false, -1);
is_mixed = true;
}
- p.audio_source_->SetIsMixed(is_mixed);
+ p.source_status_->is_mixed_ = is_mixed;
}
- Ramp(ramp_list);
+ RampAndUpdateGain(ramp_list);
return result;
}
@@ -426,36 +415,36 @@ AudioFrameList AudioMixerImpl::GetAnonymousAudio() {
AudioFrameList result;
for (auto& source_and_status : additional_audio_source_list_) {
const auto audio_frame_with_info =
- source_and_status.audio_source()->GetAudioFrameWithMuted(
+ source_and_status.audio_source_->GetAudioFrameWithMuted(
id_, OutputFrequency());
const auto ret = audio_frame_with_info.audio_frame_info;
AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
- if (ret == MixerAudioSource::AudioFrameInfo::kError) {
+ if (ret == Source::AudioFrameInfo::kError) {
WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
"failed to GetAudioFrameWithMuted() from audio_source");
continue;
}
- if (ret != MixerAudioSource::AudioFrameInfo::kMuted) {
+ if (ret != Source::AudioFrameInfo::kMuted) {
result.push_back(audio_frame);
ramp_list.emplace_back(&source_and_status, audio_frame, false, 0);
- source_and_status.SetIsMixed(true);
+ source_and_status.is_mixed_ = true;
}
}
- Ramp(ramp_list);
+ RampAndUpdateGain(ramp_list);
return result;
}
bool AudioMixerImpl::AddAudioSourceToList(
- MixerAudioSource* audio_source,
+ Source* audio_source,
MixerAudioSourceList* audio_source_list) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
"AddAudioSourceToList(audio_source, audio_source_list)");
- audio_source_list->emplace_back(audio_source);
+ audio_source_list->emplace_back(audio_source, false, 0);
return true;
}
bool AudioMixerImpl::RemoveAudioSourceFromList(
- MixerAudioSource* audio_source,
+ Source* audio_source,
MixerAudioSourceList* audio_source_list) const {
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
"RemoveAudioSourceFromList(audio_source, audio_source_list)");
@@ -515,20 +504,20 @@ int AudioMixerImpl::GetOutputAudioLevelFullRange() {
}
bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest(
- MixerAudioSource* audio_source) {
+ AudioMixerImpl::Source* audio_source) {
RTC_DCHECK_RUN_ON(&thread_checker_);
rtc::CritScope lock(&crit_);
const auto non_anonymous_iter =
FindSourceInList(audio_source, &audio_source_list_);
if (non_anonymous_iter != audio_source_list_.end()) {
- return non_anonymous_iter->IsMixed();
+ return non_anonymous_iter->is_mixed_;
}
const auto anonymous_iter =
FindSourceInList(audio_source, &additional_audio_source_list_);
if (anonymous_iter != audio_source_list_.end()) {
- return anonymous_iter->IsMixed();
+ return anonymous_iter->is_mixed_;
}
LOG(LS_ERROR) << "Audio source unknown";

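For reference, the sorting criterion that this patch turns into the free function ShouldMixBefore(a, b) can be read as a three-step ordering: unmuted frames before muted ones, VAD-active frames before inactive ones, and higher-energy frames first. The sketch below is an illustrative, self-contained stand-in (simplified Frame struct and labels are hypothetical, not the WebRTC types); it only demonstrates the ordering that std::sort applies in GetNonAnonymousAudio() before the top kMaximumAmountOfMixedAudioSources frames are picked.

// Simplified sketch of the ShouldMixBefore() ordering; types are illustrative.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Frame {
  bool muted;
  bool vad_active;
  uint32_t energy;
  const char* label;
};

bool ShouldMixBefore(const Frame& a, const Frame& b) {
  if (a.muted != b.muted) return b.muted;                 // Unmuted sorts first.
  if (a.vad_active != b.vad_active) return a.vad_active;  // Active voice sorts first.
  return a.energy > b.energy;                             // Then louder frames first.
}

int main() {
  std::vector<Frame> frames = {
      {false, false, 900, "music"},
      {true, false, 0, "muted"},
      {false, true, 300, "speech"},
  };
  std::sort(frames.begin(), frames.end(), ShouldMixBefore);
  // Expected order: speech, music, muted.
  for (const auto& f : frames) std::cout << f.label << "\n";
}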
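The other behavioral piece is the gain bookkeeping in RampAndUpdateGain(): each source status now remembers the gain applied to its previous frame, the target is 1 when the source is mixed this round and 0 when it is not, and the frame is faded from the old gain to the new one before the stored gain is updated. The following sketch is a simplified assumption of that flow (the types and the linear fade are illustrative, not the real AudioFrameOperations::Ramp()), showing how a newly mixed source fades in from silence.

// Simplified sketch of per-source gain ramping; types and fade are illustrative.
#include <cstddef>
#include <iostream>
#include <vector>

struct SourceStatus {
  bool is_mixed = false;
  float gain = 0.0f;  // Gain that was applied to the previous frame.
};

void Ramp(std::vector<float>* samples, float start_gain, float target_gain) {
  const size_t n = samples->size();
  for (size_t i = 0; i < n; ++i) {
    const float t = static_cast<float>(i + 1) / static_cast<float>(n);
    (*samples)[i] *= start_gain + t * (target_gain - start_gain);
  }
}

void RampAndUpdateGain(SourceStatus* status, std::vector<float>* frame) {
  const float target_gain = status->is_mixed ? 1.0f : 0.0f;
  Ramp(frame, status->gain, target_gain);
  status->gain = target_gain;  // Next frame starts from this gain.
}

int main() {
  SourceStatus status;     // gain == 0: the source was not mixed last round.
  status.is_mixed = true;  // Selected for mixing this round.
  std::vector<float> frame(4, 1.0f);
  RampAndUpdateGain(&status, &frame);  // Fades in: 0.25 0.5 0.75 1.
  for (float s : frame) std::cout << s << " ";
  std::cout << "\n";
}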