Chromium Code Reviews| Index: webrtc/modules/audio_mixer/audio_mixer_impl.cc |
| diff --git a/webrtc/modules/audio_mixer/audio_mixer_impl.cc b/webrtc/modules/audio_mixer/audio_mixer_impl.cc |
| index 57fae8cc0d34e28d57fd71859d6340bf3140a486..7dff6bfa409f07b08ab85277217f9a286de05e60 100644 |
| --- a/webrtc/modules/audio_mixer/audio_mixer_impl.cc |
| +++ b/webrtc/modules/audio_mixer/audio_mixer_impl.cc |
| @@ -22,30 +22,32 @@ namespace webrtc { |
| namespace { |
| struct SourceFrame { |
| - SourceFrame(AudioMixerImpl::SourceStatus* source_status, |
| + SourceFrame(AudioMixerImpl::SourceStatusWithFrame* source_status_with_frame, |
| AudioFrame* audio_frame, |
| bool muted) |
| - : source_status(source_status), audio_frame(audio_frame), muted(muted) { |
| - RTC_DCHECK(source_status); |
| + : source_status_with_frame(source_status_with_frame), |
| + audio_frame(audio_frame), |
| + muted(muted) { |
| + RTC_DCHECK(source_status_with_frame); |
| RTC_DCHECK(audio_frame); |
| if (!muted) { |
| energy = AudioMixerCalculateEnergy(*audio_frame); |
| } |
| } |
| - SourceFrame(AudioMixerImpl::SourceStatus* source_status, |
| + SourceFrame(AudioMixerImpl::SourceStatusWithFrame* source_status_with_frame, |
| AudioFrame* audio_frame, |
| bool muted, |
| uint32_t energy) |
| - : source_status(source_status), |
| + : source_status_with_frame(source_status_with_frame), |
| audio_frame(audio_frame), |
| muted(muted), |
| energy(energy) { |
| - RTC_DCHECK(source_status); |
| + RTC_DCHECK(source_status_with_frame); |
| RTC_DCHECK(audio_frame); |
| } |
| - AudioMixerImpl::SourceStatus* source_status = nullptr; |
| + AudioMixerImpl::SourceStatusWithFrame* source_status_with_frame = nullptr; |
| AudioFrame* audio_frame = nullptr; |
| bool muted = true; |
| uint32_t energy = 0; |
| @@ -70,10 +72,11 @@ bool ShouldMixBefore(const SourceFrame& a, const SourceFrame& b) { |
| void RampAndUpdateGain( |
| const std::vector<SourceFrame>& mixed_sources_and_frames) { |
| for (const auto& source_frame : mixed_sources_and_frames) { |
| - float target_gain = source_frame.source_status->is_mixed ? 1.0f : 0.0f; |
| - Ramp(source_frame.source_status->gain, target_gain, |
| + float target_gain = |
| + source_frame.source_status_with_frame->is_mixed ? 1.0f : 0.0f; |
| + Ramp(source_frame.source_status_with_frame->gain, target_gain, |
| source_frame.audio_frame); |
| - source_frame.source_status->gain = target_gain; |
| + source_frame.source_status_with_frame->gain = target_gain; |
| } |
| } |
| @@ -116,23 +119,27 @@ int32_t MixFromList(AudioFrame* mixed_audio, |
| return 0; |
| } |
| -AudioMixerImpl::SourceStatusList::const_iterator FindSourceInList( |
| +AudioMixerImpl::SourceStatusWithFrameList::const_iterator FindSourceInList( |
| AudioMixerImpl::Source const* audio_source, |
| - AudioMixerImpl::SourceStatusList const* audio_source_list) { |
| - return std::find_if(audio_source_list->begin(), audio_source_list->end(), |
| - [audio_source](const AudioMixerImpl::SourceStatus& p) { |
| - return p.audio_source == audio_source; |
| - }); |
| + AudioMixerImpl::SourceStatusWithFrameList const* audio_source_list) { |
| + return std::find_if( |
| + audio_source_list->begin(), audio_source_list->end(), |
| + [audio_source]( |
| + const std::unique_ptr<AudioMixerImpl::SourceStatusWithFrame>& p) { |
| + return p->audio_source == audio_source; |
| + }); |
| } |
| // TODO(aleloi): remove non-const version when WEBRTC only supports modern STL. |
| -AudioMixerImpl::SourceStatusList::iterator FindSourceInList( |
| +AudioMixerImpl::SourceStatusWithFrameList::iterator FindSourceInList( |
| AudioMixerImpl::Source const* audio_source, |
| - AudioMixerImpl::SourceStatusList* audio_source_list) { |
| - return std::find_if(audio_source_list->begin(), audio_source_list->end(), |
| - [audio_source](const AudioMixerImpl::SourceStatus& p) { |
| - return p.audio_source == audio_source; |
| - }); |
| + AudioMixerImpl::SourceStatusWithFrameList* audio_source_list) { |
| + return std::find_if( |
| + audio_source_list->begin(), audio_source_list->end(), |
| + [audio_source]( |
| + const std::unique_ptr<AudioMixerImpl::SourceStatusWithFrame>& p) { |
| + return p->audio_source == audio_source; |
| + }); |
| } |
| } // namespace |
| @@ -245,7 +252,8 @@ bool AudioMixerImpl::AddSource(Source* audio_source) { |
| RTC_DCHECK(FindSourceInList(audio_source, &audio_source_list_) == |
| audio_source_list_.end()) |
| << "Source already added to mixer"; |
| - audio_source_list_.emplace_back(audio_source, false, 0); |
| + audio_source_list_.emplace_back( |
| + new SourceStatusWithFrame(audio_source, false, 0)); |
| return true; |
| } |
| @@ -264,21 +272,24 @@ AudioFrameList AudioMixerImpl::GetAudioFromSources() { |
| std::vector<SourceFrame> audio_source_mixing_data_list; |
| std::vector<SourceFrame> ramp_list; |
| - // Get audio source audio and put it in the struct vector. |
| + // Get audio from the audio sources and put it in the SourceFrame vector. |
|
aleloi
2016/10/14 15:34:44
This comment doesn't have anything to do with the
kwiberg-webrtc
2016/10/14 19:59:32
Acknowledged.
|
| for (auto& source_and_status : audio_source_list_) { |
| - auto audio_frame_with_info = |
| - source_and_status.audio_source->GetAudioFrameWithInfo( |
| - static_cast<int>(OutputFrequency())); |
| - |
| - const auto audio_frame_info = audio_frame_with_info.audio_frame_info; |
| - AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
| + AudioFrame* const audio_source_audio_frame = |
| + &source_and_status->audio_frame; |
| + const auto audio_frame_info = |
| + source_and_status->audio_source->GetAudioFrameWithInfo( |
| + OutputFrequency(), audio_source_audio_frame); |
| if (audio_frame_info == Source::AudioFrameInfo::kError) { |
| LOG_F(LS_WARNING) << "failed to GetAudioFrameWithInfo() from source"; |
| continue; |
| } |
| audio_source_mixing_data_list.emplace_back( |
| - &source_and_status, audio_source_audio_frame, |
| + // Using get() is OK here, since audio_source_mixing_data_list is |
| + // local and no SourceStatusWithFrame pointers leak outside of |
| + // this method (they are passed to RampAndUpdateGain, but are not |
| + // leaked from there). |
| + source_and_status.get(), audio_source_audio_frame, |
| audio_frame_info == Source::AudioFrameInfo::kMuted); |
| } |
| @@ -292,7 +303,7 @@ AudioFrameList AudioMixerImpl::GetAudioFromSources() { |
| for (const auto& p : audio_source_mixing_data_list) { |
| // Filter muted. |
| if (p.muted) { |
| - p.source_status->is_mixed = false; |
| + p.source_status_with_frame->is_mixed = false; |
| continue; |
| } |
| @@ -301,10 +312,11 @@ AudioFrameList AudioMixerImpl::GetAudioFromSources() { |
| if (max_audio_frame_counter > 0) { |
| --max_audio_frame_counter; |
| result.push_back(p.audio_frame); |
| - ramp_list.emplace_back(p.source_status, p.audio_frame, false, -1); |
| + ramp_list.emplace_back(p.source_status_with_frame, p.audio_frame, false, |
| + -1); |
| is_mixed = true; |
| } |
| - p.source_status->is_mixed = is_mixed; |
| + p.source_status_with_frame->is_mixed = is_mixed; |
| } |
| RampAndUpdateGain(ramp_list); |
| return result; |
| @@ -348,7 +360,7 @@ bool AudioMixerImpl::GetAudioSourceMixabilityStatusForTest( |
| const auto non_anonymous_iter = |
| FindSourceInList(audio_source, &audio_source_list_); |
| if (non_anonymous_iter != audio_source_list_.end()) { |
| - return non_anonymous_iter->is_mixed; |
| + return (*non_anonymous_iter)->is_mixed; |
| } |
| LOG(LS_ERROR) << "Audio source unknown"; |