Chromium Code Reviews

Unified Diff: webrtc/modules/audio_mixer/audio_mixer_impl.cc

Issue 2286343002: Less lock acquisitions for AudioMixer. (Closed)
Patch Set: A huge pile of messy changes (left because of the comments) Created 4 years, 3 months ago
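
The theme of the change is cutting down lock acquisitions in AudioMixerImpl: the separate cb_crit_ and crit_ critical sections are merged into a single crit_, and state that is only touched on the mixing thread (output frequency, sample size) is guarded by the existing rtc::ThreadChecker instead of a lock. A minimal sketch of that pattern follows; the class and member names are hypothetical and only illustrate the idea, assuming rtc::ThreadChecker and CriticalSectionWrapper as already used in this file.

// Hypothetical illustration of "one lock plus a thread checker"; these
// names do not exist in the CL.
#include <memory>

#include "webrtc/base/checks.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"

namespace webrtc {

class SingleLockMixerSketch {
 public:
  SingleLockMixerSketch()
      : crit_(CriticalSectionWrapper::CreateCriticalSection()) {
    // The mixing thread is not known yet; it is bound on the first Mix() call.
    thread_checker_.DetachFromThread();
  }

  void Mix() {
    // Mixing-thread-only state needs no lock, only a thread check.
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    ++frames_mixed_;

    // Shared lists still need the (single) critical section.
    CriticalSectionScoped cs(crit_.get());
    // ... read the source lists here ...
  }

  void AddSource() {
    // May be called from other threads; only the lock is taken.
    CriticalSectionScoped cs(crit_.get());
    // ... mutate the source lists here ...
  }

 private:
  rtc::ThreadChecker thread_checker_;
  std::unique_ptr<CriticalSectionWrapper> crit_;
  int frames_mixed_ = 0;  // Only touched on the mixing thread.
};

}  // namespace webrtc
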
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h"

 #include <algorithm>
 #include <functional>

+#include "webrtc/base/thread_annotations.h"
 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h"
 #include "webrtc/modules/audio_mixer/audio_mixer_defines.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/utility/include/audio_frame_operations.h"
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/include/trace.h"

 namespace webrtc {
 namespace {

 class SourceFrame {
  public:
   SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before)
       : audio_source_(p),
         audio_frame_(a),
         muted_(m),
         was_mixed_before_(was_mixed_before) {
     if (!muted_) {
       energy_ = NewMixerCalculateEnergy(*a);
     }
   }

+  SourceFrame(MixerAudioSource* p,
+              AudioFrame* a,
+              bool m,
+              bool was_mixed_before,
+              uint32_t energy)
+      : audio_source_(p),
+        audio_frame_(a),
+        muted_(m),
+        energy_(energy),
+        was_mixed_before_(was_mixed_before) {}
+

aleloi 2016/08/30 15:13:34 Differs from the above by not calculating energy.

   // a.shouldMixBefore(b) is used to select mixer participants.
   bool shouldMixBefore(const SourceFrame& other) const {
     if (muted_ != other.muted_) {
       return other.muted_;
     }

     auto our_activity = audio_frame_->vad_activity_;
     auto other_activity = other.audio_frame_->vad_activity_;

     if (our_activity != other_activity) {
(...skipping 13 matching lines...)
 // Remixes a frame between stereo and mono.
 void RemixFrame(AudioFrame* frame, size_t number_of_channels) {
   RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
   if (frame->num_channels_ == 1 && number_of_channels == 2) {
     AudioFrameOperations::MonoToStereo(frame);
   } else if (frame->num_channels_ == 2 && number_of_channels == 1) {
     AudioFrameOperations::StereoToMono(frame);
   }
 }

-// Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
-// These effects are applied to |frame| itself prior to mixing. Assumes that
-// |mixed_frame| always has at least as many channels as |frame|. Supports
-// stereo at most.
-//
-void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
-  RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_);
-  if (use_limiter) {
-    // Divide by two to avoid saturation in the mixing.
-    // This is only meaningful if the limiter will be used.
-    *frame >>= 1;
-  }
-  RTC_DCHECK_EQ(frame->num_channels_, mixed_frame->num_channels_);
-  *mixed_frame += *frame;
-}

aleloi 2016/08/30 15:13:34 MixFrames is now done in MixFromList.

+void Ramp(const std::vector<SourceFrame>& mixed_sources_and_frames) {
+  for (const auto& source_frame : mixed_sources_and_frames) {
+    // Ramp in previously unmixed.
+    if (!source_frame.was_mixed_before_) {
+      NewMixerRampIn(source_frame.audio_frame_);
+    }
+
+    const bool is_mixed = source_frame.audio_source_->_mixHistory->IsMixed();
+    // Ramp out currently unmixed.
+    if (source_frame.was_mixed_before_ && !is_mixed) {
+      NewMixerRampOut(source_frame.audio_frame_);
+    }
+  }
+}

aleloi 2016/08/30 15:13:34 Ramping in/out should be done both for anonymous a

 }  // namespace

 MixerAudioSource::MixerAudioSource() : _mixHistory(new NewMixHistory()) {}

 MixerAudioSource::~MixerAudioSource() {
   delete _mixHistory;
 }

(...skipping 45 matching lines...)
   thread_checker_.DetachFromThread();
 }

 AudioMixerImpl::~AudioMixerImpl() {}

 bool AudioMixerImpl::Init() {
   crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
   if (crit_.get() == NULL)
     return false;

-  cb_crit_.reset(CriticalSectionWrapper::CreateCriticalSection());
-  if (cb_crit_.get() == NULL)
-    return false;
-
   Config config;
   config.Set<ExperimentalAgc>(new ExperimentalAgc(false));
   limiter_.reset(AudioProcessing::Create(config));
   if (!limiter_.get())
     return false;

   if (SetOutputFrequency(kDefaultFrequency) == -1)
     return false;

   if (limiter_->gain_control()->set_mode(GainControl::kFixedDigital) !=
(...skipping 17 matching lines...)
     return false;

   return true;
 }

 void AudioMixerImpl::Mix(int sample_rate,
                          size_t number_of_channels,
                          AudioFrame* audio_frame_for_mixing) {
   RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2);
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  AudioFrameList mixList;
-  AudioFrameList additionalFramesList;
-  std::map<int, MixerAudioSource*> mixedAudioSourcesMap;

aleloi 2016/08/30 15:13:34 Isn't used any longer.

-  {
-    CriticalSectionScoped cs(cb_crit_.get());
-    Frequency mixing_frequency;
-
-    switch (sample_rate) {
-      case 8000:
-        mixing_frequency = kNbInHz;
-        break;
-      case 16000:
-        mixing_frequency = kWbInHz;
-        break;
-      case 32000:
-        mixing_frequency = kSwbInHz;
-        break;
-      case 48000:
-        mixing_frequency = kFbInHz;
-        break;
-      default:
-        RTC_NOTREACHED();
-        return;
-    }

aleloi 2016/08/30 15:13:33 The frequency is no longer guarded by a lock. This

-
-    if (OutputFrequency() != mixing_frequency) {
-      SetOutputFrequency(mixing_frequency);
-    }
-
-    mixList = UpdateToMix(kMaximumAmountOfMixedAudioSources);
-    GetAdditionalAudio(&additionalFramesList);
-  }
-
-  for (FrameAndMuteInfo& frame_and_mute : mixList) {
-    RemixFrame(frame_and_mute.frame, number_of_channels);
-  }
-  for (FrameAndMuteInfo& frame_and_mute : additionalFramesList) {
-    RemixFrame(frame_and_mute.frame, number_of_channels);
+
+  Frequency mixing_frequency;
+
+  switch (sample_rate) {
+    case 8000:
+      mixing_frequency = kNbInHz;
+      break;
+    case 16000:
+      mixing_frequency = kWbInHz;
+      break;
+    case 32000:
+      mixing_frequency = kSwbInHz;
+      break;
+    case 48000:
+      mixing_frequency = kFbInHz;
+      break;
+    default:
+      WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
+                   "Invalid frequency: %d", sample_rate);
+      RTC_NOTREACHED();
+      return;
+  }
+
+  if (OutputFrequency() != mixing_frequency) {
+    SetOutputFrequency(mixing_frequency);
+  }
+
+  AudioFrameList mix_list;
+  AudioFrameList anonymous_mix_list;
+  {
+    CriticalSectionScoped cs(crit_.get());
+    mix_list = GetNonAnonymousAudio();
+    anonymous_mix_list = GetAnonymousAudio();
+  }

aleloi 2016/08/30 15:13:34 This is the only section that accesses the partici

+
+  mix_list.insert(mix_list.begin(), anonymous_mix_list.begin(),
+                  anonymous_mix_list.end());
+
+  for (const auto& frame : mix_list) {
+    RemixFrame(frame, number_of_channels);
   }

   audio_frame_for_mixing->UpdateFrame(
-      -1, time_stamp_, NULL, 0, output_frequency_, AudioFrame::kNormalSpeech,
+      -1, time_stamp_, NULL, 0, OutputFrequency(), AudioFrame::kNormalSpeech,
       AudioFrame::kVadPassive, number_of_channels);
-
   time_stamp_ += static_cast<uint32_t>(sample_size_);
-
   use_limiter_ = num_mixed_audio_sources_ > 1;

-  // We only use the limiter if it supports the output sample rate and
-  // we're actually mixing multiple streams.
-  MixFromList(audio_frame_for_mixing, mixList, id_, use_limiter_);

aleloi 2016/08/30 15:13:33 It should support all sample rates now.

+  // We only use the limiter if we're actually mixing multiple streams.
+  MixFromList(audio_frame_for_mixing, mix_list, id_, use_limiter_);

-  {
-    CriticalSectionScoped cs(crit_.get());
-    MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);
-
-    if (audio_frame_for_mixing->samples_per_channel_ == 0) {
-      // Nothing was mixed, set the audio samples to silence.
-      audio_frame_for_mixing->samples_per_channel_ = sample_size_;
-      audio_frame_for_mixing->Mute();
-    } else {
-      // Only call the limiter if we have something to mix.
-      LimitMixedAudio(audio_frame_for_mixing);
-    }

aleloi 2016/08/30 15:13:34 There is no access to members that can be modified

+  if (audio_frame_for_mixing->samples_per_channel_ == 0) {
+    // Nothing was mixed, set the audio samples to silence.
+    audio_frame_for_mixing->samples_per_channel_ = sample_size_;
+    audio_frame_for_mixing->Mute();
+  } else {
+    // Only call the limiter if we have something to mix.
+    LimitMixedAudio(audio_frame_for_mixing);
   }

   // Pass the final result to the level indicator.
   audio_level_.ComputeLevel(*audio_frame_for_mixing);

   return;
 }

 int32_t AudioMixerImpl::SetOutputFrequency(const Frequency& frequency) {
-  CriticalSectionScoped cs(crit_.get());
-
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   output_frequency_ = frequency;
   sample_size_ =
       static_cast<size_t>((output_frequency_ * kFrameDurationInMs) / 1000);

   return 0;
 }

 AudioMixer::Frequency AudioMixerImpl::OutputFrequency() const {
-  CriticalSectionScoped cs(crit_.get());
+  RTC_DCHECK(thread_checker_.CalledOnValidThread());
   return output_frequency_;
 }

 int32_t AudioMixerImpl::SetMixabilityStatus(MixerAudioSource* audio_source,
                                             bool mixable) {
   if (!mixable) {
     // Anonymous audio sources are in a separate list. Make sure that the
     // audio source is in the _audioSourceList if it is being mixed.
     SetAnonymousMixabilityStatus(audio_source, false);
   }
-  size_t numMixedAudioSources;
   {
-    CriticalSectionScoped cs(cb_crit_.get());
+    CriticalSectionScoped cs(crit_.get());
     const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_);
     // API must be called with a new state.
     if (!(mixable ^ isMixed)) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                    "Mixable is aready %s", isMixed ? "ON" : "off");
       return -1;
     }
     bool success = false;
     if (mixable) {
       success = AddAudioSourceToList(audio_source, &audio_source_list_);
     } else {
       success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
     }
     if (!success) {
       WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                    "failed to %s audio_source", mixable ? "add" : "remove");
       RTC_NOTREACHED();
       return -1;
     }

     size_t numMixedNonAnonymous = audio_source_list_.size();
     if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) {
       numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources;
     }
-    numMixedAudioSources =
+    num_mixed_audio_sources_ =
         numMixedNonAnonymous + additional_audio_source_list_.size();
   }
-  // A MixerAudioSource was added or removed. Make sure the scratch
-  // buffer is updated if necessary.
-  // Note: The scratch buffer may only be updated in Process().

aleloi 2016/08/30 15:13:34 Scratch buffer and Process() comment seems stale

-  CriticalSectionScoped cs(crit_.get());
-  num_mixed_audio_sources_ = numMixedAudioSources;

aleloi 2016/08/30 15:13:34 AFAIK, no need to guard |num_sources| with other l

   return 0;
 }

 bool AudioMixerImpl::MixabilityStatus(
     const MixerAudioSource& audio_source) const {
-  CriticalSectionScoped cs(cb_crit_.get());
+  CriticalSectionScoped cs(crit_.get());
   return IsAudioSourceInList(audio_source, audio_source_list_);
 }

 int32_t AudioMixerImpl::SetAnonymousMixabilityStatus(
     MixerAudioSource* audio_source,
     bool anonymous) {
-  CriticalSectionScoped cs(cb_crit_.get());
+  CriticalSectionScoped cs(crit_.get());
   if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) {
     if (anonymous) {
       return 0;
     }
     if (!RemoveAudioSourceFromList(audio_source,
                                    &additional_audio_source_list_)) {
       WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_,
                    "unable to remove audio_source from anonymous list");
       RTC_NOTREACHED();
       return -1;
(...skipping 13 matching lines...)
     // already registered.
     return -1;
   }
   return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
              ? 0
              : -1;
 }

 bool AudioMixerImpl::AnonymousMixabilityStatus(
     const MixerAudioSource& audio_source) const {
-  CriticalSectionScoped cs(cb_crit_.get());
+  CriticalSectionScoped cs(crit_.get());
   return IsAudioSourceInList(audio_source, additional_audio_source_list_);
 }

-AudioFrameList AudioMixerImpl::UpdateToMix(size_t maxAudioFrameCounter) const {
+AudioFrameList AudioMixerImpl::GetNonAnonymousAudio() const
+    EXCLUSIVE_LOCKS_REQUIRED(crit_) {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
+               "GetNonAnonymousAudio()");
+
   AudioFrameList result;
   std::vector<SourceFrame> audioSourceMixingDataList;
+  std::vector<SourceFrame> ramp_list;

aleloi 2016/08/30 15:13:34 See comment about 2:nd SourceFrame constructor.

   // Get audio source audio and put it in the struct vector.
   for (MixerAudioSource* audio_source : audio_source_list_) {
     auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
-        id_, static_cast<int>(output_frequency_));
+        id_, static_cast<int>(OutputFrequency()));

     auto audio_frame_info = audio_frame_with_info.audio_frame_info;
     AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;

     if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                    "failed to GetAudioFrameWithMuted() from participant");
       continue;
     }
     audioSourceMixingDataList.emplace_back(
         audio_source, audio_source_audio_frame,
         audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
         audio_source->_mixHistory->WasMixed());
   }

   // Sort frames by sorting function.
   std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(),
             std::mem_fn(&SourceFrame::shouldMixBefore));

-  // Go through list in order and put things in mixList.
+  int maxAudioFrameCounter = kMaximumAmountOfMixedAudioSources;
+
+  // Go through list in order and put unmuted frames in result list.
   for (SourceFrame& p : audioSourceMixingDataList) {
     // Filter muted.
     if (p.muted_) {
       p.audio_source_->_mixHistory->SetIsMixed(false);
       continue;
     }

     // Add frame to result vector for mixing.
     bool is_mixed = false;
     if (maxAudioFrameCounter > 0) {
       --maxAudioFrameCounter;
-      if (!p.was_mixed_before_) {
-        NewMixerRampIn(p.audio_frame_);
-      }
-      result.emplace_back(p.audio_frame_, false);
+      result.push_back(p.audio_frame_);
+      ramp_list.emplace_back(p.audio_source_, p.audio_frame_, false,
+                             p.was_mixed_before_, -1);
       is_mixed = true;
     }
-
-    // Ramp out unmuted.
-    if (p.was_mixed_before_ && !is_mixed) {
-      NewMixerRampOut(p.audio_frame_);
-      result.emplace_back(p.audio_frame_, false);
-    }
-
     p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
   }
+  Ramp(ramp_list);
   return result;
 }

-void AudioMixerImpl::GetAdditionalAudio(
-    AudioFrameList* additionalFramesList) const {
+AudioFrameList AudioMixerImpl::GetAnonymousAudio() const
+    EXCLUSIVE_LOCKS_REQUIRED(crit_) {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
-               "GetAdditionalAudio(additionalFramesList)");
+               "GetAnonymousAudio()");
   // The GetAudioFrameWithMuted() callback may result in the audio source being
   // removed from additionalAudioFramesList_. If that happens it will
   // invalidate any iterators. Create a copy of the audio sources list such
   // that the list of participants can be traversed safely.
+  std::vector<SourceFrame> ramp_list;
   MixerAudioSourceList additionalAudioSourceList;
+  AudioFrameList result;
   additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
                                    additional_audio_source_list_.begin(),
                                    additional_audio_source_list_.end());

-  for (MixerAudioSourceList::const_iterator audio_source =
-           additionalAudioSourceList.begin();
-       audio_source != additionalAudioSourceList.end(); ++audio_source) {
-    auto audio_frame_with_info =
-        (*audio_source)->GetAudioFrameWithMuted(id_, output_frequency_);
-    auto ret = audio_frame_with_info.audio_frame_info;
+  for (const auto& audio_source : additionalAudioSourceList) {
+    const auto audio_frame_with_info =
+        audio_source->GetAudioFrameWithMuted(id_, OutputFrequency());
+    const auto ret = audio_frame_with_info.audio_frame_info;
     AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
     if (ret == MixerAudioSource::AudioFrameInfo::kError) {
       WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, id_,
                    "failed to GetAudioFrameWithMuted() from audio_source");
       continue;
     }
-    if (audio_frame->samples_per_channel_ == 0) {
-      // Empty frame. Don't use it.
-      continue;
-    }

aleloi 2016/08/30 15:13:34 Replaced this with a RTC_DCHECK_EQ in MixFromList.

-    additionalFramesList->push_back(FrameAndMuteInfo(
-        audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
+    if (ret != MixerAudioSource::AudioFrameInfo::kMuted) {
+      result.push_back(audio_frame);
+      ramp_list.emplace_back(audio_source, audio_frame, false,
+                             audio_source->_mixHistory->IsMixed(), 0);
+      audio_source->_mixHistory->SetIsMixed(true);
+    }
   }
+  Ramp(ramp_list);
+  return result;
 }

 bool AudioMixerImpl::IsAudioSourceInList(
     const MixerAudioSource& audio_source,
     const MixerAudioSourceList& audioSourceList) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
                "IsAudioSourceInList(audio_source,audioSourceList)");
   return std::find(audioSourceList.begin(), audioSourceList.end(),
                    &audio_source) != audioSourceList.end();
 }
(...skipping 28 matching lines...)

 int32_t AudioMixerImpl::MixFromList(AudioFrame* mixedAudio,
                                     const AudioFrameList& audioFrameList,
                                     int32_t id,
                                     bool use_limiter) {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
                "MixFromList(mixedAudio, audioFrameList)");
   if (audioFrameList.empty())
     return 0;

-  uint32_t position = 0;
-

aleloi 2016/08/30 15:13:34 Was unused.

   if (audioFrameList.size() == 1) {
-    mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_;
-    mixedAudio->elapsed_time_ms_ =
-        audioFrameList.front().frame->elapsed_time_ms_;
+    mixedAudio->timestamp_ = audioFrameList.front()->timestamp_;
+    mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_;
   } else {
     // TODO(wu): Issue 3390.
     // Audio frame timestamp is only supported in one channel case.
     mixedAudio->timestamp_ = 0;
     mixedAudio->elapsed_time_ms_ = -1;
   }

-  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
-       iter != audioFrameList.end(); ++iter) {
-    if (!iter->muted) {
-      MixFrames(mixedAudio, iter->frame, use_limiter);
+  for (const auto& frame : audioFrameList) {
+    RTC_DCHECK_EQ(mixedAudio->sample_rate_hz_, frame->sample_rate_hz_);
+    RTC_DCHECK_EQ(
+        frame->samples_per_channel_,
+        static_cast<size_t>((mixedAudio->sample_rate_hz_ * kFrameDurationInMs) /
+                            1000));
+
+    // Mix |f.frame| into |mixedAudio|, with saturation protection.
+    // These effect is applied to |f.frame| itself prior to mixing.
+    if (use_limiter) {
+      // Divide by two to avoid saturation in the mixing.
+      // This is only meaningful if the limiter will be used.
+      *frame >>= 1;
     }
-
-    position++;
+    RTC_DCHECK_EQ(frame->num_channels_, mixedAudio->num_channels_);
+    *mixedAudio += *frame;
   }

   return 0;
 }

-// TODO(andrew): consolidate this function with MixFromList.

aleloi 2016/08/30 15:13:34 Done :)

-int32_t AudioMixerImpl::MixAnonomouslyFromList(
-    AudioFrame* mixedAudio,
-    const AudioFrameList& audioFrameList) const {
-  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id_,
-               "MixAnonomouslyFromList(mixedAudio, audioFrameList)");
-
-  if (audioFrameList.empty())
-    return 0;
-
-  for (AudioFrameList::const_iterator iter = audioFrameList.begin();
-       iter != audioFrameList.end(); ++iter) {
-    if (!iter->muted) {
-      MixFrames(mixedAudio, iter->frame, use_limiter_);
-    }
-  }
-  return 0;
-}
-
 bool AudioMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
   if (!use_limiter_) {
     return true;
   }

   // Smoothly limit the mixed frame.
   const int error = limiter_->ProcessStream(mixedAudio);

   // And now we can safely restore the level. This procedure results in
   // some loss of resolution, deemed acceptable.
(...skipping 23 matching lines...)
   return level;
 }

 int AudioMixerImpl::GetOutputAudioLevelFullRange() {
   const int level = audio_level_.LevelFullRange();
   WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_,
                "GetAudioOutputLevelFullRange() => level=%d", level);
   return level;
 }
 }  // namespace webrtc
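
The new EXCLUSIVE_LOCKS_REQUIRED annotations on GetNonAnonymousAudio() and GetAnonymousAudio() document that callers must already hold crit_, as Mix() does above. A hedged, self-contained sketch of the same calling pattern (hypothetical names; assuming the macros from webrtc/base/thread_annotations.h, which wrap Clang's thread-safety attributes and are only enforced where that analysis is enabled):

// Hypothetical illustration only; these names do not appear in the CL.
#include <memory>
#include <vector>

#include "webrtc/base/thread_annotations.h"
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"

namespace webrtc {

class AnnotatedMixerSketch {
 public:
  AnnotatedMixerSketch()
      : crit_(CriticalSectionWrapper::CreateCriticalSection()) {}

  // Public entry point: takes the lock once, then calls the annotated
  // helpers while holding it, mirroring Mix() -> Get*Audio() in this CL.
  std::vector<int> Collect() {
    CriticalSectionScoped cs(crit_.get());
    std::vector<int> result = GetNamed();
    const std::vector<int> anonymous = GetAnonymous();
    result.insert(result.end(), anonymous.begin(), anonymous.end());
    return result;
  }

 private:
  // Callers must already hold |crit_|; the annotation states this in the
  // declaration instead of re-acquiring the lock inside the helper.
  std::vector<int> GetNamed() EXCLUSIVE_LOCKS_REQUIRED(crit_) {
    return named_;
  }
  std::vector<int> GetAnonymous() EXCLUSIVE_LOCKS_REQUIRED(crit_) {
    return anonymous_;
  }

  std::unique_ptr<CriticalSectionWrapper> crit_;
  std::vector<int> named_;      // Guarded by |crit_|.
  std::vector<int> anonymous_;  // Guarded by |crit_|.
};

}  // namespace webrtc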