Index: webrtc/modules/audio_mixer/audio_mixer_impl.cc |
diff --git a/webrtc/modules/audio_mixer/audio_mixer_impl.cc b/webrtc/modules/audio_mixer/audio_mixer_impl.cc |
index 5a8d1eb7645705113dc3155565668b198a6bbeb3..275e16ff20cc8c3ba8ec9bc19488e6b8900db4d2 100644 |
--- a/webrtc/modules/audio_mixer/audio_mixer_impl.cc |
+++ b/webrtc/modules/audio_mixer/audio_mixer_impl.cc |
@@ -13,6 +13,7 @@ |
#include <algorithm> |
#include <functional> |
+#include "webrtc/base/thread_annotations.h" |
#include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
#include "webrtc/modules/audio_mixer/audio_mixer_defines.h" |
#include "webrtc/modules/audio_processing/include/audio_processing.h" |
@@ -147,10 +148,6 @@ bool AudioMixerImpl::Init() { |
if (crit_.get() == NULL) |
return false; |
- cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection()); |
- if (cb_crit_.get() == NULL) |
- return false; |
- |
Config config; |
config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); |
limiter_.reset(AudioProcessing::Create(config)); |
@@ -188,35 +185,37 @@ void AudioMixerImpl::Mix(int sample_rate, |
AudioFrame* audio_frame_for_mixing) { |
RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
- AudioFrameList mixList; |
- AudioFrameList additionalFramesList; |
std::map<int, MixerAudioSource*> mixedAudioSourcesMap; |
- { |
- CriticalSectionScoped cs(cb_crit_.get()); |
- Frequency mixing_frequency; |
- |
- switch (sample_rate) { |
- case 8000: |
- mixing_frequency = kNbInHz; |
- break; |
- case 16000: |
- mixing_frequency = kWbInHz; |
- break; |
- case 32000: |
- mixing_frequency = kSwbInHz; |
- break; |
- case 48000: |
- mixing_frequency = kFbInHz; |
- break; |
- default: |
- RTC_NOTREACHED(); |
- return; |
- } |
+ Frequency mixing_frequency; |
+ |
+ switch (sample_rate) { |
ivoc
2016/08/31 09:22:45
It's not really related to the threading/locking issue.
aleloi
2016/08/31 11:03:10
This is indeed simpler! Thanks!
|
+ case 8000: |
+ mixing_frequency = kNbInHz; |
+ break; |
+ case 16000: |
+ mixing_frequency = kWbInHz; |
+ break; |
+ case 32000: |
+ mixing_frequency = kSwbInHz; |
+ break; |
+ case 48000: |
+ mixing_frequency = kFbInHz; |
+ break; |
+ default: |
+ WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
+ "Invalid frequency: %d", sample_rate); |
+ RTC_NOTREACHED(); |
+ return; |
+ } |
- if (OutputFrequency() != mixing_frequency) { |
- SetOutputFrequency(mixing_frequency); |
- } |
+ if (OutputFrequency() != mixing_frequency) { |
+ SetOutputFrequency(mixing_frequency); |
+ } |
+ AudioFrameList mixList; |
+ AudioFrameList additionalFramesList; |
+ { |
+ CriticalSectionScoped cs(crit_.get()); |
mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources); |
GetAdditionalAudio(&additionalFramesList); |
} |
@@ -240,18 +239,13 @@ void AudioMixerImpl::Mix(int sample_rate, |
// we're actually mixing multiple streams. |
MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_); |
- { |
- CriticalSectionScoped cs(crit_.get()); |
- MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); |
kwiberg-webrtc
2016/08/31 09:10:36
Where did this line go?
aleloi
2016/08/31 11:03:10
It should still be here, but be removed in the next CL.
|
- |
- if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
- // Nothing was mixed, set the audio samples to silence. |
- audio_frame_for_mixing->samples_per_channel_ = sample_size_; |
- audio_frame_for_mixing->Mute(); |
- } else { |
- // Only call the limiter if we have something to mix. |
- LimitMixedAudio(audio_frame_for_mixing); |
- } |
+ if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
+ // Nothing was mixed, set the audio samples to silence. |
+ audio_frame_for_mixing->samples_per_channel_ = sample_size_; |
+ audio_frame_for_mixing->Mute(); |
+ } else { |
+ // Only call the limiter if we have something to mix. |
+ LimitMixedAudio(audio_frame_for_mixing); |
} |
// Pass the final result to the level indicator. |
@@ -261,8 +255,7 @@ void AudioMixerImpl::Mix(int sample_rate, |
} |
int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { |
- CriticalSectionScoped cs(crit_.get()); |
- |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
output_frequency_ = frequency; |
sample_size_ = |
static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000); |
@@ -271,7 +264,7 @@ int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) { |
} |
AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const { |
- CriticalSectionScoped cs(crit_.get()); |
+ RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
return output_frequency_; |
} |
@@ -282,9 +275,8 @@ int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, |
// audio source is in the _audioSourceList if it is being mixed. |
SetAnonymousMixabilityStatus(audio_source, false); |
} |
- size_t numMixedAudioSources; |
{ |
- CriticalSectionScoped cs(cb_crit_.get()); |
+ CriticalSectionScoped cs(crit_.get()); |
const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_); |
// API must be called with a new state. |
if (!(mixable ^ isMixed)) { |
@@ -309,27 +301,22 @@ int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source, |
if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) { |
numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources; |
} |
- numMixedAudioSources = |
+ num_mixed_audio_sources_ = |
numMixedNonAnonymous + additional_audio_source_list_.size(); |
} |
- // A MixerAudioSource was added or removed. Make sure the scratch |
- // buffer is updated if necessary. |
- // Note: The scratch buffer may only be updated in Process(). |
- CriticalSectionScoped cs(crit_.get()); |
- num_mixed_audio_sources_ = numMixedAudioSources; |
return 0; |
} |
bool AudioMixerImpl::MixabilityStatus( |
const MixerAudioSource& audio_source) const { |
- CriticalSectionScoped cs(cb_crit_.get()); |
+ CriticalSectionScoped cs(crit_.get()); |
return IsAudioSourceInList(audio_source, audio_source_list_); |
} |
int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( |
MixerAudioSource* audio_source, |
bool anonymous) { |
- CriticalSectionScoped cs(cb_crit_.get()); |
+ CriticalSectionScoped cs(crit_.get()); |
if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) { |
if (anonymous) { |
return 0; |
@@ -363,11 +350,12 @@ int32_t AudioMixerImpl::SetAnonymousMixabilityStatus( |
bool AudioMixerImpl::AnonymousMixabilityStatus( |
const MixerAudioSource& audio_source) const { |
- CriticalSectionScoped cs(cb_crit_.get()); |
+ CriticalSectionScoped cs(crit_.get()); |
return IsAudioSourceInList(audio_source, additional_audio_source_list_); |
} |
-AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { |
+AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const |
+ EXCLUSIVE_LOCKS_REQUIRED(crit_) { |
kwiberg-webrtc
2016/08/31 09:10:36
IIRC we always put these annotations in the .h file.
aleloi
2016/08/31 11:03:10
This makes sense! Thanks! I've moved the annotations to the .h file.
|
AudioFrameList result; |
std::vector<SourceFrame> audioSourceMixingDataList; |
@@ -424,8 +412,8 @@ AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const { |
return result; |
} |
-void AudioMixerImpl::GetAdditionalAudio( |
- AudioFrameList* additionalFramesList) const { |
+void AudioMixerImpl::GetAdditionalAudio(AudioFrameList* additionalFramesList) |
+ const EXCLUSIVE_LOCKS_REQUIRED(crit_) { |
kwiberg-webrtc
2016/08/31 09:10:36
Move to .h file.
aleloi
2016/08/31 11:03:10
Done.
|
WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_, |
"GetAdditionalAudio(additionalFramesList)"); |
// The GetAudioFrameWithMuted() callback may result in the audio source being |