Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc

Issue 2111293003: Removed callback between old AudioConferenceMixer and OutputMixer. The audio frame with mixed audio… (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@new_mixer_format
Patch Set: Renamed variables, removed DCHECK(false), changed back copyright years. Created 4 years, 5 months ago
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 14
15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h " 15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h "
16 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" 16 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
17 #include "webrtc/modules/audio_processing/include/audio_processing.h" 17 #include "webrtc/modules/audio_processing/include/audio_processing.h"
18 #include "webrtc/modules/utility/include/audio_frame_operations.h" 18 #include "webrtc/modules/utility/include/audio_frame_operations.h"
19 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" 19 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
20 #include "webrtc/system_wrappers/include/trace.h" 20 #include "webrtc/system_wrappers/include/trace.h"
21 21
22 namespace webrtc { 22 namespace webrtc {
23 namespace { 23 namespace {
24 24
25 struct ParticipantFrameStruct { 25 struct AudioSourceWithFrame {
26 ParticipantFrameStruct(MixerAudioSource* p, AudioFrame* a, bool m) 26 AudioSourceWithFrame(MixerAudioSource* p, AudioFrame* a, bool m)
27 : participant(p), audioFrame(a), muted(m) {} 27 : audio_source(p), audio_frame(a), muted(m) {}
28 MixerAudioSource* participant; 28 MixerAudioSource* audio_source;
29 AudioFrame* audioFrame; 29 AudioFrame* audio_frame;
30 bool muted; 30 bool muted;
31 }; 31 };
32 32
33 typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList; 33 typedef std::list<AudioSourceWithFrame*> AudioSourceWithFrameList;
34 34
35 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. 35 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
36 // These effects are applied to |frame| itself prior to mixing. Assumes that 36 // These effects are applied to |frame| itself prior to mixing. Assumes that
37 // |mixed_frame| always has at least as many channels as |frame|. Supports 37 // |mixed_frame| always has at least as many channels as |frame|. Supports
38 // stereo at most. 38 // stereo at most.
39 // 39 //
40 // TODO(andrew): consider not modifying |frame| here. 40 // TODO(andrew): consider not modifying |frame| here.
41 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { 41 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
42 assert(mixed_frame->num_channels_ >= frame->num_channels_); 42 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_);
43 if (use_limiter) { 43 if (use_limiter) {
44 // Divide by two to avoid saturation in the mixing. 44 // Divide by two to avoid saturation in the mixing.
45 // This is only meaningful if the limiter will be used. 45 // This is only meaningful if the limiter will be used.
46 *frame >>= 1; 46 *frame >>= 1;
47 } 47 }
48 if (mixed_frame->num_channels_ > frame->num_channels_) { 48 if (mixed_frame->num_channels_ > frame->num_channels_) {
49 // We only support mono-to-stereo. 49 // We only support mono-to-stereo.
50 assert(mixed_frame->num_channels_ == 2 && frame->num_channels_ == 1); 50 RTC_DCHECK_EQ(mixed_frame->num_channels_, static_cast<size_t>(2));
51 RTC_DCHECK_EQ(frame->num_channels_, static_cast<size_t>(1));
51 AudioFrameOperations::MonoToStereo(frame); 52 AudioFrameOperations::MonoToStereo(frame);
52 } 53 }
53 54
54 *mixed_frame += *frame; 55 *mixed_frame += *frame;
55 } 56 }
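
The halving above is the saturation protection: two near-full-scale 16-bit samples sum to a value outside the int16_t range, so each input gives up one bit of headroom before the addition and the limiter later restores the level. A minimal standalone sketch of the arithmetic (plain int16_t values, not the actual AudioFrame operators):

    #include <cstdint>
    #include <iostream>

    int main() {
      int16_t a = 30000, b = 25000;        // Two near-full-scale samples.
      int32_t naive = int32_t{a} + b;      // 55000: does not fit in int16_t.
      int16_t with_headroom =
          static_cast<int16_t>((a >> 1) + (b >> 1));  // 27500: fits.
      std::cout << naive << " vs " << with_headroom << "\n";
      return 0;
    }
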
56 57
57 // Return the max number of channels from a |list| composed of AudioFrames. 58 // Return the max number of channels from a |list| composed of AudioFrames.
58 size_t MaxNumChannels(const AudioFrameList* list) { 59 size_t MaxNumChannels(const AudioFrameList* list) {
59 size_t max_num_channels = 1; 60 size_t max_num_channels = 1;
60 for (AudioFrameList::const_iterator iter = list->begin(); iter != list->end(); 61 for (AudioFrameList::const_iterator iter = list->begin(); iter != list->end();
(...skipping 43 matching lines...)
104 if (!mixer->Init()) { 105 if (!mixer->Init()) {
105 delete mixer; 106 delete mixer;
106 return NULL; 107 return NULL;
107 } 108 }
108 return mixer; 109 return mixer;
109 } 110 }
110 111
111 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id) 112 NewAudioConferenceMixerImpl::NewAudioConferenceMixerImpl(int id)
112 : _id(id), 113 : _id(id),
113 _minimumMixingFreq(kLowestPossible), 114 _minimumMixingFreq(kLowestPossible),
114 _mixReceiver(NULL),
115 _outputFrequency(kDefaultFrequency), 115 _outputFrequency(kDefaultFrequency),
116 _sampleSize(0), 116 _sampleSize(0),
117 _audioFramePool(NULL), 117 _audioFramePool(NULL),
118 _participantList(), 118 audio_source_list_(),
119 _additionalParticipantList(), 119 additional_audio_source_list_(),
120 _numMixedParticipants(0), 120 num_mixed_audio_sources_(0),
121 use_limiter_(true), 121 use_limiter_(true),
122 _timeStamp(0), 122 _timeStamp(0),
123 _timeScheduler(kProcessPeriodicityInMs), 123 _timeScheduler(kProcessPeriodicityInMs),
124 _processCalls(0) {} 124 _processCalls(0) {}
125 125
126 bool NewAudioConferenceMixerImpl::Init() { 126 bool NewAudioConferenceMixerImpl::Init() {
127 _crit.reset(CriticalSectionWrapper::CreateCriticalSection()); 127 _crit.reset(CriticalSectionWrapper::CreateCriticalSection());
128 if (_crit.get() == NULL) 128 if (_crit.get() == NULL)
129 return false; 129 return false;
130 130
(...skipping 33 matching lines...)
164 return false; 164 return false;
165 165
166 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) 166 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError)
167 return false; 167 return false;
168 168
169 return true; 169 return true;
170 } 170 }
171 171
172 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() { 172 NewAudioConferenceMixerImpl::~NewAudioConferenceMixerImpl() {
173 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool); 173 MemoryPool<AudioFrame>::DeleteMemoryPool(_audioFramePool);
174 assert(_audioFramePool == NULL); 174 RTC_DCHECK_EQ(_audioFramePool, static_cast<MemoryPool<AudioFrame>*>(nullptr));
175 } 175 }
176 176
177 // Process should be called every kProcessPeriodicityInMs ms 177 // Process should be called every kProcessPeriodicityInMs ms
178 int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() { 178 int64_t NewAudioConferenceMixerImpl::TimeUntilNextProcess() {
179 int64_t timeUntilNextProcess = 0; 179 int64_t timeUntilNextProcess = 0;
180 CriticalSectionScoped cs(_crit.get()); 180 CriticalSectionScoped cs(_crit.get());
181 if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) { 181 if (_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) {
182 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 182 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
183 "failed in TimeToNextUpdate() call"); 183 "failed in TimeToNextUpdate() call");
184 // Sanity check 184 // Sanity check
185 assert(false); 185 RTC_NOTREACHED();
186 return -1; 186 return -1;
187 } 187 }
188 return timeUntilNextProcess; 188 return timeUntilNextProcess;
189 } 189 }
190 190
191 void NewAudioConferenceMixerImpl::Process() { 191 void NewAudioConferenceMixerImpl::Process() {
192 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; 192 // TODO(aleloi): Remove this method.
193 RTC_NOTREACHED();
194 }
195
196 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
197 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources;
193 { 198 {
194 CriticalSectionScoped cs(_crit.get()); 199 CriticalSectionScoped cs(_crit.get());
195 assert(_processCalls == 0); 200 RTC_DCHECK_EQ(_processCalls, 0);
196 _processCalls++; 201 _processCalls++;
197 202
198 // Let the scheduler know that we are running one iteration. 203 // Let the scheduler know that we are running one iteration.
199 _timeScheduler.UpdateScheduler(); 204 _timeScheduler.UpdateScheduler();
200 } 205 }
201 206
202 AudioFrameList mixList; 207 AudioFrameList mixList;
203 AudioFrameList rampOutList; 208 AudioFrameList rampOutList;
204 AudioFrameList additionalFramesList; 209 AudioFrameList additionalFramesList;
205 std::map<int, MixerAudioSource*> mixedParticipantsMap; 210 std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
206 { 211 {
207 CriticalSectionScoped cs(_cbCrit.get()); 212 CriticalSectionScoped cs(_cbCrit.get());
208 213
209 int32_t lowFreq = GetLowestMixingFrequency(); 214 int32_t lowFreq = GetLowestMixingFrequency();
210 // SILK can run in 12 kHz and 24 kHz. These frequencies are not 215 // SILK can run in 12 kHz and 24 kHz. These frequencies are not
211 // supported so use the closest higher frequency to not lose any 216 // supported so use the closest higher frequency to not lose any
212 // information. 217 // information.
213 // TODO(henrike): this is probably more appropriate to do in 218 // TODO(aleloi): this is probably more appropriate to do in
214 // GetLowestMixingFrequency(). 219 // GetLowestMixingFrequency().
215 if (lowFreq == 12000) { 220 if (lowFreq == 12000) {
216 lowFreq = 16000; 221 lowFreq = 16000;
217 } else if (lowFreq == 24000) { 222 } else if (lowFreq == 24000) {
218 lowFreq = 32000; 223 lowFreq = 32000;
219 } 224 }
220 if (lowFreq <= 0) { 225 if (lowFreq <= 0) {
221 CriticalSectionScoped cs(_crit.get()); 226 CriticalSectionScoped cs(_crit.get());
222 _processCalls--; 227 _processCalls--;
223 return; 228 return;
(...skipping 13 matching lines...)
237 if (OutputFrequency() != kSwbInHz) { 242 if (OutputFrequency() != kSwbInHz) {
238 SetOutputFrequency(kSwbInHz); 243 SetOutputFrequency(kSwbInHz);
239 } 244 }
240 break; 245 break;
241 case 48000: 246 case 48000:
242 if (OutputFrequency() != kFbInHz) { 247 if (OutputFrequency() != kFbInHz) {
243 SetOutputFrequency(kFbInHz); 248 SetOutputFrequency(kFbInHz);
244 } 249 }
245 break; 250 break;
246 default: 251 default:
247 assert(false); 252 RTC_NOTREACHED();
248 253
249 CriticalSectionScoped cs(_crit.get()); 254 CriticalSectionScoped cs(_crit.get());
250 _processCalls--; 255 _processCalls--;
251 return; 256 return;
252 } 257 }
253 } 258 }
254 259
255 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, 260 UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap,
256 &remainingParticipantsAllowedToMix); 261 &remainingAudioSourcesAllowedToMix);
257 262
258 GetAdditionalAudio(&additionalFramesList); 263 GetAdditionalAudio(&additionalFramesList);
259 UpdateMixedStatus(mixedParticipantsMap); 264 UpdateMixedStatus(mixedAudioSourcesMap);
260 } 265 }
261 266
262 // Get an AudioFrame for mixing from the memory pool. 267 // TODO(aleloi): it might be better to decide the number of channels
263 AudioFrame* mixedAudio = NULL; 268 // with an API instead of dynamically.
264 if (_audioFramePool->PopMemory(mixedAudio) == -1) { 269
265 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, 270 // Find the max channels over all mixing lists.
266 "failed PopMemory() call"); 271 const size_t num_mixed_channels = std::max(
267 assert(false); 272 MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList),
268 return; 273 MaxNumChannels(&rampOutList)));
269 } 274
275 audio_frame_for_mixing->UpdateFrame(
276 -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech,
277 AudioFrame::kVadPassive, num_mixed_channels);
278
279 _timeStamp += static_cast<uint32_t>(_sampleSize);
280
281 use_limiter_ = num_mixed_audio_sources_ > 1 &&
282 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;
283
284 // We only use the limiter if it supports the output sample rate and
285 // we're actually mixing multiple streams.
286 MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_);
270 287
271 { 288 {
272 CriticalSectionScoped cs(_crit.get()); 289 CriticalSectionScoped cs(_crit.get());
290 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);
291 MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList);
273 292
274 // TODO(henrike): it might be better to decide the number of channels 293 if (audio_frame_for_mixing->samples_per_channel_ == 0) {
275 // with an API instead of dynamically.
276
277 // Find the max channels over all mixing lists.
278 const size_t num_mixed_channels =
279 std::max(MaxNumChannels(&mixList),
280 std::max(MaxNumChannels(&additionalFramesList),
281 MaxNumChannels(&rampOutList)));
282
283 mixedAudio->UpdateFrame(-1, _timeStamp, NULL, 0, _outputFrequency,
284 AudioFrame::kNormalSpeech, AudioFrame::kVadPassive,
285 num_mixed_channels);
286
287 _timeStamp += static_cast<uint32_t>(_sampleSize);
288
289 // We only use the limiter if it supports the output sample rate and
290 // we're actually mixing multiple streams.
291 use_limiter_ = _numMixedParticipants > 1 &&
292 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;
293
294 MixFromList(mixedAudio, mixList);
295 MixAnonomouslyFromList(mixedAudio, additionalFramesList);
296 MixAnonomouslyFromList(mixedAudio, rampOutList);
297
298 if (mixedAudio->samples_per_channel_ == 0) {
299 // Nothing was mixed, set the audio samples to silence. 294 // Nothing was mixed, set the audio samples to silence.
300 mixedAudio->samples_per_channel_ = _sampleSize; 295 audio_frame_for_mixing->samples_per_channel_ = _sampleSize;
301 mixedAudio->Mute(); 296 audio_frame_for_mixing->Mute();
302 } else { 297 } else {
303 // Only call the limiter if we have something to mix. 298 // Only call the limiter if we have something to mix.
304 LimitMixedAudio(mixedAudio); 299 LimitMixedAudio(audio_frame_for_mixing);
305 } 300 }
306 } 301 }
307 302
308 {
309 CriticalSectionScoped cs(_cbCrit.get());
310 if (_mixReceiver != NULL) {
311 const AudioFrame** dummy = NULL;
312 _mixReceiver->NewMixedAudio(_id, *mixedAudio, dummy, 0);
313 }
314 }
315
316 // Reclaim all outstanding memory.
317 _audioFramePool->PushMemory(mixedAudio);
318 ClearAudioFrameList(&mixList); 303 ClearAudioFrameList(&mixList);
319 ClearAudioFrameList(&rampOutList); 304 ClearAudioFrameList(&rampOutList);
320 ClearAudioFrameList(&additionalFramesList); 305 ClearAudioFrameList(&additionalFramesList);
321 { 306 {
322 CriticalSectionScoped cs(_crit.get()); 307 CriticalSectionScoped cs(_crit.get());
323 _processCalls--; 308 _processCalls--;
324 } 309 }
325 return; 310 return;
326 } 311 }
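
This Mix() entry point is what replaces the removed callback path: instead of the mixer pushing mixed audio to a registered OldAudioMixerOutputReceiver, the owner now pulls it. A hypothetical caller sketch (the surrounding loop and the PlayOut() sink are assumptions for illustration, not part of this CL):

    // Pull-style usage: the caller owns the destination frame.
    AudioFrame mixed_frame;
    while (running) {
      mixer->Mix(&mixed_frame);  // Mixed audio is written into mixed_frame.
      PlayOut(mixed_frame);      // Hypothetical sink for the mixed audio.
    }
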
327 312
328 int32_t NewAudioConferenceMixerImpl::RegisterMixedStreamCallback(
329 OldAudioMixerOutputReceiver* mixReceiver) {
330 CriticalSectionScoped cs(_cbCrit.get());
331 if (_mixReceiver != NULL) {
332 return -1;
333 }
334 _mixReceiver = mixReceiver;
335 return 0;
336 }
337
338 int32_t NewAudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
339 CriticalSectionScoped cs(_cbCrit.get());
340 if (_mixReceiver == NULL) {
341 return -1;
342 }
343 _mixReceiver = NULL;
344 return 0;
345 }
346
347 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( 313 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency(
348 const Frequency& frequency) { 314 const Frequency& frequency) {
349 CriticalSectionScoped cs(_crit.get()); 315 CriticalSectionScoped cs(_crit.get());
350 316
351 _outputFrequency = frequency; 317 _outputFrequency = frequency;
352 _sampleSize = 318 _sampleSize =
353 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); 319 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000);
354 320
355 return 0; 321 return 0;
356 } 322 }
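
For a concrete feel of the _sampleSize formula: with the usual 10 ms process period, a 48 kHz output works out to 480 samples per channel per call. A small sketch of the same computation (kProcessPeriodicityInMs == 10 is an assumption matching WebRTC's standard frame period):

    // Samples per channel produced per process period.
    constexpr int kProcessPeriodicityInMs = 10;

    constexpr size_t SamplesPerPeriod(int frequency_hz) {
      return static_cast<size_t>((frequency_hz * kProcessPeriodicityInMs) / 1000);
    }

    static_assert(SamplesPerPeriod(48000) == 480, "48 kHz -> 480 samples");
    static_assert(SamplesPerPeriod(16000) == 160, "16 kHz -> 160 samples");
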
357 323
358 NewAudioConferenceMixer::Frequency 324 NewAudioConferenceMixer::Frequency
359 NewAudioConferenceMixerImpl::OutputFrequency() const { 325 NewAudioConferenceMixerImpl::OutputFrequency() const {
360 CriticalSectionScoped cs(_crit.get()); 326 CriticalSectionScoped cs(_crit.get());
361 return _outputFrequency; 327 return _outputFrequency;
362 } 328 }
363 329
364 int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus( 330 int32_t NewAudioConferenceMixerImpl::SetMixabilityStatus(
365 MixerAudioSource* participant, 331 MixerAudioSource* audio_source,
366 bool mixable) { 332 bool mixable) {
367 if (!mixable) { 333 if (!mixable) {
368 // Anonymous participants are in a separate list. Make sure that the 334 // Anonymous audio sources are in a separate list. Make sure that the
369 // participant is in the _participantList if it is being mixed. 335 // audio source is in the audio_source_list_ if it is being mixed.
370 SetAnonymousMixabilityStatus(participant, false); 336 SetAnonymousMixabilityStatus(audio_source, false);
371 } 337 }
372 size_t numMixedParticipants; 338 size_t numMixedAudioSources;
373 { 339 {
374 CriticalSectionScoped cs(_cbCrit.get()); 340 CriticalSectionScoped cs(_cbCrit.get());
375 const bool isMixed = IsParticipantInList(*participant, _participantList); 341 const bool isMixed = IsAudioSourceInList(*audio_source, audio_source_list_);
376 // API must be called with a new state. 342 // API must be called with a new state.
377 if (!(mixable ^ isMixed)) { 343 if (!(mixable ^ isMixed)) {
378 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 344 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
379 "Mixable is aready %s", isMixed ? "ON" : "off"); 345 "Mixable is aready %s", isMixed ? "ON" : "off");
380 return -1; 346 return -1;
381 } 347 }
382 bool success = false; 348 bool success = false;
383 if (mixable) { 349 if (mixable) {
384 success = AddParticipantToList(participant, &_participantList); 350 success = AddAudioSourceToList(audio_source, &audio_source_list_);
385 } else { 351 } else {
386 success = RemoveParticipantFromList(participant, &_participantList); 352 success = RemoveAudioSourceFromList(audio_source, &audio_source_list_);
387 } 353 }
388 if (!success) { 354 if (!success) {
389 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 355 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
390 "failed to %s participant", mixable ? "add" : "remove"); 356 "failed to %s audio_source", mixable ? "add" : "remove");
391 assert(false); 357 RTC_NOTREACHED();
392 return -1; 358 return -1;
393 } 359 }
394 360
395 size_t numMixedNonAnonymous = _participantList.size(); 361 size_t numMixedNonAnonymous = audio_source_list_.size();
396 if (numMixedNonAnonymous > kMaximumAmountOfMixedParticipants) { 362 if (numMixedNonAnonymous > kMaximumAmountOfMixedAudioSources) {
397 numMixedNonAnonymous = kMaximumAmountOfMixedParticipants; 363 numMixedNonAnonymous = kMaximumAmountOfMixedAudioSources;
398 } 364 }
399 numMixedParticipants = 365 numMixedAudioSources =
400 numMixedNonAnonymous + _additionalParticipantList.size(); 366 numMixedNonAnonymous + additional_audio_source_list_.size();
401 } 367 }
402 // A MixerAudioSource was added or removed. Make sure the scratch 368 // A MixerAudioSource was added or removed. Make sure the scratch
403 // buffer is updated if necessary. 369 // buffer is updated if necessary.
404 // Note: The scratch buffer may only be updated in Process(). 370 // Note: The scratch buffer may only be updated in Process().
405 CriticalSectionScoped cs(_crit.get()); 371 CriticalSectionScoped cs(_crit.get());
406 _numMixedParticipants = numMixedParticipants; 372 num_mixed_audio_sources_ = numMixedAudioSources;
407 return 0; 373 return 0;
408 } 374 }
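
The !(mixable ^ isMixed) guard above is easy to misread: XOR of the two bools is true exactly when the requested state differs from the current one, so the negation catches no-op calls. Spelled out as a standalone illustration (not mixer code):

    bool mixable = true;   // Requested state.
    bool isMixed = true;   // Current state.
    if (!(mixable ^ isMixed)) {
      // Equivalent to mixable == isMixed: the caller asked for the state
      // the audio source is already in, so the API returns -1.
    }
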
409 375
410 bool NewAudioConferenceMixerImpl::MixabilityStatus( 376 bool NewAudioConferenceMixerImpl::MixabilityStatus(
411 const MixerAudioSource& participant) const { 377 const MixerAudioSource& audio_source) const {
412 CriticalSectionScoped cs(_cbCrit.get()); 378 CriticalSectionScoped cs(_cbCrit.get());
413 return IsParticipantInList(participant, _participantList); 379 return IsAudioSourceInList(audio_source, audio_source_list_);
414 } 380 }
415 381
416 int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus( 382 int32_t NewAudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
417 MixerAudioSource* participant, 383 MixerAudioSource* audio_source,
418 bool anonymous) { 384 bool anonymous) {
419 CriticalSectionScoped cs(_cbCrit.get()); 385 CriticalSectionScoped cs(_cbCrit.get());
420 if (IsParticipantInList(*participant, _additionalParticipantList)) { 386 if (IsAudioSourceInList(*audio_source, additional_audio_source_list_)) {
421 if (anonymous) { 387 if (anonymous) {
422 return 0; 388 return 0;
423 } 389 }
424 if (!RemoveParticipantFromList(participant, &_additionalParticipantList)) { 390 if (!RemoveAudioSourceFromList(audio_source,
391 &additional_audio_source_list_)) {
425 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 392 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
426 "unable to remove participant from anonymous list"); 393 "unable to remove audio_source from anonymous list");
427 assert(false); 394 RTC_NOTREACHED();
428 return -1; 395 return -1;
429 } 396 }
430 return AddParticipantToList(participant, &_participantList) ? 0 : -1; 397 return AddAudioSourceToList(audio_source, &audio_source_list_) ? 0 : -1;
431 } 398 }
432 if (!anonymous) { 399 if (!anonymous) {
433 return 0; 400 return 0;
434 } 401 }
435 const bool mixable = 402 const bool mixable =
436 RemoveParticipantFromList(participant, &_participantList); 403 RemoveAudioSourceFromList(audio_source, &audio_source_list_);
437 if (!mixable) { 404 if (!mixable) {
438 WEBRTC_TRACE( 405 WEBRTC_TRACE(
439 kTraceWarning, kTraceAudioMixerServer, _id, 406 kTraceWarning, kTraceAudioMixerServer, _id,
440 "participant must be registered before turning it into anonymous"); 407 "audio_source must be registered before turning it into anonymous");
441 // Setting anonymous status is only possible if MixerAudioSource is 408 // Setting anonymous status is only possible if MixerAudioSource is
442 // already registered. 409 // already registered.
443 return -1; 410 return -1;
444 } 411 }
445 return AddParticipantToList(participant, &_additionalParticipantList) ? 0 412 return AddAudioSourceToList(audio_source, &additional_audio_source_list_)
446 : -1; 413 ? 0
414 : -1;
447 } 415 }
448 416
449 bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus( 417 bool NewAudioConferenceMixerImpl::AnonymousMixabilityStatus(
450 const MixerAudioSource& participant) const { 418 const MixerAudioSource& audio_source) const {
451 CriticalSectionScoped cs(_cbCrit.get()); 419 CriticalSectionScoped cs(_cbCrit.get());
452 return IsParticipantInList(participant, _additionalParticipantList); 420 return IsAudioSourceInList(audio_source, additional_audio_source_list_);
453 } 421 }
454 422
455 int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) { 423 int32_t NewAudioConferenceMixerImpl::SetMinimumMixingFrequency(Frequency freq) {
456 // Make sure that only allowed sampling frequencies are used. Use closest 424 // Make sure that only allowed sampling frequencies are used. Use closest
457 // higher sampling frequency to avoid losing information. 425 // higher sampling frequency to avoid losing information.
458 if (static_cast<int>(freq) == 12000) { 426 if (static_cast<int>(freq) == 12000) {
459 freq = kWbInHz; 427 freq = kWbInHz;
460 } else if (static_cast<int>(freq) == 24000) { 428 } else if (static_cast<int>(freq) == 24000) {
461 freq = kSwbInHz; 429 freq = kSwbInHz;
462 } 430 }
463 431
464 if ((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) || 432 if ((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) ||
465 (freq == kLowestPossible)) { 433 (freq == kLowestPossible)) {
466 _minimumMixingFreq = freq; 434 _minimumMixingFreq = freq;
467 return 0; 435 return 0;
468 } else { 436 } else {
469 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 437 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
470 "SetMinimumMixingFrequency incorrect frequency: %i", freq); 438 "SetMinimumMixingFrequency incorrect frequency: %i", freq);
471 assert(false); 439 RTC_NOTREACHED();
472 return -1; 440 return -1;
473 } 441 }
474 } 442 }
475 443
476 // Check all AudioFrames that are to be mixed. The highest sampling frequency 444 // Check all AudioFrames that are to be mixed. The highest sampling frequency
477 // found is the lowest that can be used without losing information. 445 // found is the lowest that can be used without losing information.
478 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequency() const { 446 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequency() const {
479 const int participantListFrequency = 447 const int audioSourceListFrequency =
480 GetLowestMixingFrequencyFromList(_participantList); 448 GetLowestMixingFrequencyFromList(audio_source_list_);
481 const int anonymousListFrequency = 449 const int anonymousListFrequency =
482 GetLowestMixingFrequencyFromList(_additionalParticipantList); 450 GetLowestMixingFrequencyFromList(additional_audio_source_list_);
483 const int highestFreq = (participantListFrequency > anonymousListFrequency) 451 const int highestFreq = (audioSourceListFrequency > anonymousListFrequency)
484 ? participantListFrequency 452 ? audioSourceListFrequency
485 : anonymousListFrequency; 453 : anonymousListFrequency;
486 // Check if the user specified a lowest mixing frequency. 454 // Check if the user specified a lowest mixing frequency.
487 if (_minimumMixingFreq != kLowestPossible) { 455 if (_minimumMixingFreq != kLowestPossible) {
488 if (_minimumMixingFreq > highestFreq) { 456 if (_minimumMixingFreq > highestFreq) {
489 return _minimumMixingFreq; 457 return _minimumMixingFreq;
490 } 458 }
491 } 459 }
492 return highestFreq; 460 return highestFreq;
493 } 461 }
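
Taken together with the SILK remapping done in Mix(), the policy is: run the mixer at the highest rate any source needs, rounding the unsupported 12 kHz and 24 kHz rates up so no information is lost. A hypothetical standalone version combining both steps:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    int32_t LowestSafeMixingFrequency(const std::vector<int32_t>& needed_hz) {
      int32_t highest = 8000;
      for (int32_t f : needed_hz) {
        if (f == 12000) f = 16000;  // SILK rates are unsupported; round up.
        if (f == 24000) f = 32000;
        highest = std::max(highest, f);
      }
      return highest;  // e.g. {16000, 24000} -> 32000.
    }
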
494 462
495 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList( 463 int32_t NewAudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
496 const MixerAudioSourceList& mixList) const { 464 const MixerAudioSourceList& mixList) const {
497 int32_t highestFreq = 8000; 465 int32_t highestFreq = 8000;
498 for (MixerAudioSourceList::const_iterator iter = mixList.begin(); 466 for (MixerAudioSourceList::const_iterator iter = mixList.begin();
499 iter != mixList.end(); ++iter) { 467 iter != mixList.end(); ++iter) {
500 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); 468 const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
501 if (neededFrequency > highestFreq) { 469 if (neededFrequency > highestFreq) {
502 highestFreq = neededFrequency; 470 highestFreq = neededFrequency;
503 } 471 }
504 } 472 }
505 return highestFreq; 473 return highestFreq;
506 } 474 }
507 475
508 void NewAudioConferenceMixerImpl::UpdateToMix( 476 void NewAudioConferenceMixerImpl::UpdateToMix(
509 AudioFrameList* mixList, 477 AudioFrameList* mixList,
510 AudioFrameList* rampOutList, 478 AudioFrameList* rampOutList,
511 std::map<int, MixerAudioSource*>* mixParticipantList, 479 std::map<int, MixerAudioSource*>* mixAudioSourceList,
512 size_t* maxAudioFrameCounter) const { 480 size_t* maxAudioFrameCounter) const {
513 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 481 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
514 "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)", 482 "UpdateToMix(mixList,rampOutList,mixAudioSourceList,%d)",
515 *maxAudioFrameCounter); 483 *maxAudioFrameCounter);
516 const size_t mixListStartSize = mixList->size(); 484 const size_t mixListStartSize = mixList->size();
517 AudioFrameList activeList; 485 AudioFrameList activeList;
518 // Struct needed by the passive lists to keep track of which AudioFrame 486 // Struct needed by the passive lists to keep track of which AudioFrame
519 // belongs to which MixerAudioSource. 487 // belongs to which MixerAudioSource.
520 ParticipantFrameStructList passiveWasNotMixedList; 488 AudioSourceWithFrameList passiveWasNotMixedList;
521 ParticipantFrameStructList passiveWasMixedList; 489 AudioSourceWithFrameList passiveWasMixedList;
522 for (MixerAudioSourceList::const_iterator participant = 490 for (MixerAudioSourceList::const_iterator audio_source =
523 _participantList.begin(); 491 audio_source_list_.begin();
524 participant != _participantList.end(); ++participant) { 492 audio_source != audio_source_list_.end(); ++audio_source) {
525 // Stop keeping track of passive participants if there are already 493 // Stop keeping track of passive audio sources if there are already
526 // enough participants available (they won't be mixed anyway). 494 // enough audio sources available (they won't be mixed anyway).
527 bool mustAddToPassiveList = 495 bool mustAddToPassiveList =
528 (*maxAudioFrameCounter > 496 (*maxAudioFrameCounter >
529 (activeList.size() + passiveWasMixedList.size() + 497 (activeList.size() + passiveWasMixedList.size() +
530 passiveWasNotMixedList.size())); 498 passiveWasNotMixedList.size()));
531 499
532 bool wasMixed = false; 500 bool wasMixed = false;
533 wasMixed = (*participant)->_mixHistory->WasMixed(); 501 wasMixed = (*audio_source)->_mixHistory->WasMixed();
534 AudioFrame* audioFrame = NULL; 502 AudioFrame* audioFrame = NULL;
535 if (_audioFramePool->PopMemory(audioFrame) == -1) { 503 if (_audioFramePool->PopMemory(audioFrame) == -1) {
536 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, 504 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
537 "failed PopMemory() call"); 505 "failed PopMemory() call");
538 assert(false); 506 RTC_NOTREACHED();
539 return; 507 return;
540 } 508 }
541 audioFrame->sample_rate_hz_ = _outputFrequency; 509 audioFrame->sample_rate_hz_ = _outputFrequency;
542 510
543 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); 511 auto ret = (*audio_source)->GetAudioFrameWithMuted(_id, audioFrame);
544 if (ret == MixerAudioSource::AudioFrameInfo::kError) { 512 if (ret == MixerAudioSource::AudioFrameInfo::kError) {
545 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 513 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
546 "failed to GetAudioFrameWithMuted() from participant"); 514 "failed to GetAudioFrameWithMuted() from audio source");
547 _audioFramePool->PushMemory(audioFrame); 515 _audioFramePool->PushMemory(audioFrame);
548 continue; 516 continue;
549 } 517 }
550 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); 518 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
551 if (_participantList.size() != 1) { 519 if (audio_source_list_.size() != 1) {
552 // TODO(wu): Issue 3390, add support for multiple participants case. 520 // TODO(wu): Issue 3390, add support for multiple audio sources case.
553 audioFrame->ntp_time_ms_ = -1; 521 audioFrame->ntp_time_ms_ = -1;
554 } 522 }
555 523
556 // TODO(henrike): this assert triggers in some test cases where SRTP is 524 // TODO(aleloi): this assert triggers in some test cases where SRTP is
557 // used which prevents NetEQ from making a VAD. Temporarily disable this 525 // used which prevents NetEQ from making a VAD. Temporarily disable this
558 // assert until the problem is fixed on a higher level. 526 // assert until the problem is fixed on a higher level.
559 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown); 527 // RTC_DCHECK_NE(audioFrame->vad_activity_, AudioFrame::kVadUnknown);
560 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) { 528 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
561 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 529 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
562 "invalid VAD state from participant"); 530 "invalid VAD state from audio source");
563 } 531 }
564 532
565 if (audioFrame->vad_activity_ == AudioFrame::kVadActive) { 533 if (audioFrame->vad_activity_ == AudioFrame::kVadActive) {
566 if (!wasMixed && !muted) { 534 if (!wasMixed && !muted) {
567 RampIn(*audioFrame); 535 RampIn(*audioFrame);
568 } 536 }
569 537
570 if (activeList.size() >= *maxAudioFrameCounter) { 538 if (activeList.size() >= *maxAudioFrameCounter) {
571 // There are already more active participants than should be 539 // There are already more active audio sources than should be
572 // mixed. Only keep the ones with the highest energy. 540 // mixed. Only keep the ones with the highest energy.
573 AudioFrameList::iterator replaceItem; 541 AudioFrameList::iterator replaceItem;
574 uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame); 542 uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audioFrame);
575 543
576 bool found_replace_item = false; 544 bool found_replace_item = false;
577 for (AudioFrameList::iterator iter = activeList.begin(); 545 for (AudioFrameList::iterator iter = activeList.begin();
578 iter != activeList.end(); ++iter) { 546 iter != activeList.end(); ++iter) {
579 const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame); 547 const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame);
580 if (energy < lowestEnergy) { 548 if (energy < lowestEnergy) {
581 replaceItem = iter; 549 replaceItem = iter;
582 lowestEnergy = energy; 550 lowestEnergy = energy;
583 found_replace_item = true; 551 found_replace_item = true;
584 } 552 }
585 } 553 }
586 if (found_replace_item) { 554 if (found_replace_item) {
587 RTC_DCHECK(!muted); // Cannot replace with a muted frame. 555 RTC_DCHECK(!muted); // Cannot replace with a muted frame.
588 FrameAndMuteInfo replaceFrame = *replaceItem; 556 FrameAndMuteInfo replaceFrame = *replaceItem;
589 557
590 bool replaceWasMixed = false; 558 bool replaceWasMixed = false;
591 std::map<int, MixerAudioSource*>::const_iterator it = 559 std::map<int, MixerAudioSource*>::const_iterator it =
592 mixParticipantList->find(replaceFrame.frame->id_); 560 mixAudioSourceList->find(replaceFrame.frame->id_);
593 561
594 // When a frame is pushed to |activeList| it is also pushed 562 // When a frame is pushed to |activeList| it is also pushed
595 // to mixParticipantList with the frame's id. This means 563 // to mixAudioSourceList with the frame's id. This means
596 // that the Find call above should never fail. 564 // that the Find call above should never fail.
597 assert(it != mixParticipantList->end()); 565 RTC_DCHECK(it != mixAudioSourceList->end());
598 replaceWasMixed = it->second->_mixHistory->WasMixed(); 566 replaceWasMixed = it->second->_mixHistory->WasMixed();
599 567
600 mixParticipantList->erase(replaceFrame.frame->id_); 568 mixAudioSourceList->erase(replaceFrame.frame->id_);
601 activeList.erase(replaceItem); 569 activeList.erase(replaceItem);
602 570
603 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); 571 activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
604 (*mixParticipantList)[audioFrame->id_] = *participant; 572 (*mixAudioSourceList)[audioFrame->id_] = *audio_source;
605 assert(mixParticipantList->size() <= 573 RTC_DCHECK_LE(mixAudioSourceList->size(),
606 kMaximumAmountOfMixedParticipants); 574 static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
607 575
608 if (replaceWasMixed) { 576 if (replaceWasMixed) {
609 if (!replaceFrame.muted) { 577 if (!replaceFrame.muted) {
610 RampOut(*replaceFrame.frame); 578 RampOut(*replaceFrame.frame);
611 } 579 }
612 rampOutList->push_back(replaceFrame); 580 rampOutList->push_back(replaceFrame);
613 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); 581 RTC_DCHECK_LE(
582 rampOutList->size(),
583 static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
614 } else { 584 } else {
615 _audioFramePool->PushMemory(replaceFrame.frame); 585 _audioFramePool->PushMemory(replaceFrame.frame);
616 } 586 }
617 } else { 587 } else {
618 if (wasMixed) { 588 if (wasMixed) {
619 if (!muted) { 589 if (!muted) {
620 RampOut(*audioFrame); 590 RampOut(*audioFrame);
621 } 591 }
622 rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted)); 592 rampOutList->push_back(FrameAndMuteInfo(audioFrame, muted));
623 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants); 593 RTC_DCHECK_LE(
594 rampOutList->size(),
595 static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
624 } else { 596 } else {
625 _audioFramePool->PushMemory(audioFrame); 597 _audioFramePool->PushMemory(audioFrame);
626 } 598 }
627 } 599 }
628 } else { 600 } else {
629 activeList.push_front(FrameAndMuteInfo(audioFrame, muted)); 601 activeList.push_front(FrameAndMuteInfo(audioFrame, muted));
630 (*mixParticipantList)[audioFrame->id_] = *participant; 602 (*mixAudioSourceList)[audioFrame->id_] = *audio_source;
631 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); 603 RTC_DCHECK_LE(mixAudioSourceList->size(),
604 static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
632 } 605 }
633 } else { 606 } else {
634 if (wasMixed) { 607 if (wasMixed) {
635 ParticipantFrameStruct* part_struct = 608 AudioSourceWithFrame* part_struct =
636 new ParticipantFrameStruct(*participant, audioFrame, muted); 609 new AudioSourceWithFrame(*audio_source, audioFrame, muted);
637 passiveWasMixedList.push_back(part_struct); 610 passiveWasMixedList.push_back(part_struct);
638 } else if (mustAddToPassiveList) { 611 } else if (mustAddToPassiveList) {
639 if (!muted) { 612 if (!muted) {
640 RampIn(*audioFrame); 613 RampIn(*audioFrame);
641 } 614 }
642 ParticipantFrameStruct* part_struct = 615 AudioSourceWithFrame* part_struct =
643 new ParticipantFrameStruct(*participant, audioFrame, muted); 616 new AudioSourceWithFrame(*audio_source, audioFrame, muted);
644 passiveWasNotMixedList.push_back(part_struct); 617 passiveWasNotMixedList.push_back(part_struct);
645 } else { 618 } else {
646 _audioFramePool->PushMemory(audioFrame); 619 _audioFramePool->PushMemory(audioFrame);
647 } 620 }
648 } 621 }
649 } 622 }
650 assert(activeList.size() <= *maxAudioFrameCounter); 623 RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter);
651 // At this point it is known which participants should be mixed. Transfer 624 // At this point it is known which audio sources should be mixed. Transfer
652 // this information to this function's output parameters. 625 // this information to this function's output parameters.
653 for (AudioFrameList::const_iterator iter = activeList.begin(); 626 for (AudioFrameList::const_iterator iter = activeList.begin();
654 iter != activeList.end(); ++iter) { 627 iter != activeList.end(); ++iter) {
655 mixList->push_back(*iter); 628 mixList->push_back(*iter);
656 } 629 }
657 activeList.clear(); 630 activeList.clear();
658 // Always mix a constant number of AudioFrames. If there aren't enough 631 // Always mix a constant number of AudioFrames. If there aren't enough
659 // active participants mix passive ones. Starting with those that were mixed 632 // active audio sources mix passive ones. Starting with those that were mixed
660 // last iteration. 633 // last iteration.
661 for (ParticipantFrameStructList::const_iterator iter = 634 for (AudioSourceWithFrameList::const_iterator iter =
662 passiveWasMixedList.begin(); 635 passiveWasMixedList.begin();
663 iter != passiveWasMixedList.end(); ++iter) { 636 iter != passiveWasMixedList.end(); ++iter) {
664 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { 637 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
665 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); 638 mixList->push_back(
666 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; 639 FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
667 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); 640 (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
641 RTC_DCHECK_LE(mixAudioSourceList->size(),
642 static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
668 } else { 643 } else {
669 _audioFramePool->PushMemory((*iter)->audioFrame); 644 _audioFramePool->PushMemory((*iter)->audio_frame);
670 } 645 }
671 delete *iter; 646 delete *iter;
672 } 647 }
673 // And finally the ones that have not been mixed for a while. 648 // And finally the ones that have not been mixed for a while.
674 for (ParticipantFrameStructList::const_iterator iter = 649 for (AudioSourceWithFrameList::const_iterator iter =
675 passiveWasNotMixedList.begin(); 650 passiveWasNotMixedList.begin();
676 iter != passiveWasNotMixedList.end(); ++iter) { 651 iter != passiveWasNotMixedList.end(); ++iter) {
677 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { 652 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
678 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted)); 653 mixList->push_back(
679 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant; 654 FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
680 assert(mixParticipantList->size() <= kMaximumAmountOfMixedParticipants); 655 (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
656 RTC_DCHECK_LE(mixAudioSourceList->size(),
657 static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
681 } else { 658 } else {
682 _audioFramePool->PushMemory((*iter)->audioFrame); 659 _audioFramePool->PushMemory((*iter)->audio_frame);
683 } 660 }
684 delete *iter; 661 delete *iter;
685 } 662 }
686 assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size()); 663 RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size());
687 *maxAudioFrameCounter += mixListStartSize - mixList->size(); 664 *maxAudioFrameCounter += mixListStartSize - mixList->size();
688 } 665 }
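
The selection logic above, stripped to its core, is a keep-the-loudest policy: at most maxAudioFrameCounter frames are mixed, and a quieter active frame is evicted when a louder one arrives. A hypothetical standalone sketch (muting, ramping, the passive lists, and the _audioFramePool recycling are all omitted):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Candidate {
      int id;
      uint32_t energy;  // As computed by CalculateEnergy() in the real code.
    };

    // Keep the max_mixed loudest candidates; the real code additionally
    // ramps evicted frames out instead of dropping them abruptly.
    void KeepLoudest(std::vector<Candidate>* candidates, size_t max_mixed) {
      std::sort(candidates->begin(), candidates->end(),
                [](const Candidate& a, const Candidate& b) {
                  return a.energy > b.energy;
                });
      if (candidates->size() > max_mixed)
        candidates->resize(max_mixed);
    }
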
689 666
690 void NewAudioConferenceMixerImpl::GetAdditionalAudio( 667 void NewAudioConferenceMixerImpl::GetAdditionalAudio(
691 AudioFrameList* additionalFramesList) const { 668 AudioFrameList* additionalFramesList) const {
692 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 669 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
693 "GetAdditionalAudio(additionalFramesList)"); 670 "GetAdditionalAudio(additionalFramesList)");
694 // The GetAudioFrameWithMuted() callback may result in the participant being 671 // The GetAudioFrameWithMuted() callback may result in the audio source being
695 // removed from additionalParticipantList_. If that happens it will 672 // removed from additional_audio_source_list_. If that happens it will
696 // invalidate any iterators. Create a copy of the participants list such 673 // invalidate any iterators. Create a copy of the audio sources list such
697 // that the list of participants can be traversed safely. 674 // that the list of audio sources can be traversed safely.
698 MixerAudioSourceList additionalParticipantList; 675 MixerAudioSourceList additionalAudioSourceList;
699 additionalParticipantList.insert(additionalParticipantList.begin(), 676 additionalAudioSourceList.insert(additionalAudioSourceList.begin(),
700 _additionalParticipantList.begin(), 677 additional_audio_source_list_.begin(),
701 _additionalParticipantList.end()); 678 additional_audio_source_list_.end());
702 679
703 for (MixerAudioSourceList::const_iterator participant = 680 for (MixerAudioSourceList::const_iterator audio_source =
704 additionalParticipantList.begin(); 681 additionalAudioSourceList.begin();
705 participant != additionalParticipantList.end(); ++participant) { 682 audio_source != additionalAudioSourceList.end(); ++audio_source) {
706 AudioFrame* audioFrame = NULL; 683 AudioFrame* audioFrame = NULL;
707 if (_audioFramePool->PopMemory(audioFrame) == -1) { 684 if (_audioFramePool->PopMemory(audioFrame) == -1) {
708 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, 685 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
709 "failed PopMemory() call"); 686 "failed PopMemory() call");
710 assert(false); 687 RTC_NOTREACHED();
711 return; 688 return;
712 } 689 }
713 audioFrame->sample_rate_hz_ = _outputFrequency; 690 audioFrame->sample_rate_hz_ = _outputFrequency;
714 auto ret = (*participant)->GetAudioFrameWithMuted(_id, audioFrame); 691 auto ret = (*audio_source)->GetAudioFrameWithMuted(_id, audioFrame);
715 if (ret == MixerAudioSource::AudioFrameInfo::kError) { 692 if (ret == MixerAudioSource::AudioFrameInfo::kError) {
716 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 693 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
717 "failed to GetAudioFrameWithMuted() from participant"); 694 "failed to GetAudioFrameWithMuted() from audio_source");
718 _audioFramePool->PushMemory(audioFrame); 695 _audioFramePool->PushMemory(audioFrame);
719 continue; 696 continue;
720 } 697 }
721 if (audioFrame->samples_per_channel_ == 0) { 698 if (audioFrame->samples_per_channel_ == 0) {
722 // Empty frame. Don't use it. 699 // Empty frame. Don't use it.
723 _audioFramePool->PushMemory(audioFrame); 700 _audioFramePool->PushMemory(audioFrame);
724 continue; 701 continue;
725 } 702 }
726 additionalFramesList->push_back(FrameAndMuteInfo( 703 additionalFramesList->push_back(FrameAndMuteInfo(
727 audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); 704 audioFrame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
728 } 705 }
729 } 706 }
730 707
731 void NewAudioConferenceMixerImpl::UpdateMixedStatus( 708 void NewAudioConferenceMixerImpl::UpdateMixedStatus(
732 const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const { 709 const std::map<int, MixerAudioSource*>& mixedAudioSourcesMap) const {
733 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 710 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
734 "UpdateMixedStatus(mixedParticipantsMap)"); 711 "UpdateMixedStatus(mixedAudioSourcesMap)");
735 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); 712 RTC_DCHECK_LE(mixedAudioSourcesMap.size(),
713 static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
736 714
737 // Loop through all participants. If they are in the mix map they 715 // Loop through all audio sources. If they are in the mix map they
738 // were mixed. 716 // were mixed.
739 for (MixerAudioSourceList::const_iterator participant = 717 for (MixerAudioSourceList::const_iterator audio_source =
740 _participantList.begin(); 718 audio_source_list_.begin();
741 participant != _participantList.end(); ++participant) { 719 audio_source != audio_source_list_.end(); ++audio_source) {
742 bool isMixed = false; 720 bool isMixed = false;
743 for (std::map<int, MixerAudioSource*>::const_iterator it = 721 for (std::map<int, MixerAudioSource*>::const_iterator it =
744 mixedParticipantsMap.begin(); 722 mixedAudioSourcesMap.begin();
745 it != mixedParticipantsMap.end(); ++it) { 723 it != mixedAudioSourcesMap.end(); ++it) {
746 if (it->second == *participant) { 724 if (it->second == *audio_source) {
747 isMixed = true; 725 isMixed = true;
748 break; 726 break;
749 } 727 }
750 } 728 }
751 (*participant)->_mixHistory->SetIsMixed(isMixed); 729 (*audio_source)->_mixHistory->SetIsMixed(isMixed);
752 } 730 }
753 } 731 }
754 732
755 void NewAudioConferenceMixerImpl::ClearAudioFrameList( 733 void NewAudioConferenceMixerImpl::ClearAudioFrameList(
756 AudioFrameList* audioFrameList) const { 734 AudioFrameList* audioFrameList) const {
757 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 735 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
758 "ClearAudioFrameList(audioFrameList)"); 736 "ClearAudioFrameList(audioFrameList)");
759 for (AudioFrameList::iterator iter = audioFrameList->begin(); 737 for (AudioFrameList::iterator iter = audioFrameList->begin();
760 iter != audioFrameList->end(); ++iter) { 738 iter != audioFrameList->end(); ++iter) {
761 _audioFramePool->PushMemory(iter->frame); 739 _audioFramePool->PushMemory(iter->frame);
762 } 740 }
763 audioFrameList->clear(); 741 audioFrameList->clear();
764 } 742 }
765 743
766 bool NewAudioConferenceMixerImpl::IsParticipantInList( 744 bool NewAudioConferenceMixerImpl::IsAudioSourceInList(
767 const MixerAudioSource& participant, 745 const MixerAudioSource& audio_source,
768 const MixerAudioSourceList& participantList) const { 746 const MixerAudioSourceList& audioSourceList) const {
769 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 747 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
770 "IsParticipantInList(participant,participantList)"); 748 "IsAudioSourceInList(audio_source,audioSourceList)");
771 for (MixerAudioSourceList::const_iterator iter = participantList.begin(); 749 for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin();
772 iter != participantList.end(); ++iter) { 750 iter != audioSourceList.end(); ++iter) {
773 if (&participant == *iter) { 751 if (&audio_source == *iter) {
774 return true; 752 return true;
775 } 753 }
776 } 754 }
777 return false; 755 return false;
778 } 756 }
779 757
780 bool NewAudioConferenceMixerImpl::AddParticipantToList( 758 bool NewAudioConferenceMixerImpl::AddAudioSourceToList(
781 MixerAudioSource* participant, 759 MixerAudioSource* audio_source,
782 MixerAudioSourceList* participantList) const { 760 MixerAudioSourceList* audioSourceList) const {
783 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 761 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
784 "AddParticipantToList(participant, participantList)"); 762 "AddAudioSourceToList(audio_source, audioSourceList)");
785 participantList->push_back(participant); 763 audioSourceList->push_back(audio_source);
786 // Make sure that the mixed status is correct for new MixerAudioSource. 764 // Make sure that the mixed status is correct for new MixerAudioSource.
787 participant->_mixHistory->ResetMixedStatus(); 765 audio_source->_mixHistory->ResetMixedStatus();
788 return true; 766 return true;
789 } 767 }
790 768
791 bool NewAudioConferenceMixerImpl::RemoveParticipantFromList( 769 bool NewAudioConferenceMixerImpl::RemoveAudioSourceFromList(
792 MixerAudioSource* participant, 770 MixerAudioSource* audio_source,
793 MixerAudioSourceList* participantList) const { 771 MixerAudioSourceList* audioSourceList) const {
794 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 772 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
795 "RemoveParticipantFromList(participant, participantList)"); 773 "RemoveAudioSourceFromList(audio_source, audioSourceList)");
796 for (MixerAudioSourceList::iterator iter = participantList->begin(); 774 for (MixerAudioSourceList::iterator iter = audioSourceList->begin();
797 iter != participantList->end(); ++iter) { 775 iter != audioSourceList->end(); ++iter) {
798 if (*iter == participant) { 776 if (*iter == audio_source) {
799 participantList->erase(iter); 777 audioSourceList->erase(iter);
800 // Participant is no longer mixed, reset to default. 778 // AudioSource is no longer mixed, reset to default.
801 participant->_mixHistory->ResetMixedStatus(); 779 audio_source->_mixHistory->ResetMixedStatus();
802 return true; 780 return true;
803 } 781 }
804 } 782 }
805 return false; 783 return false;
806 } 784 }
807 785
808 int32_t NewAudioConferenceMixerImpl::MixFromList( 786 int32_t NewAudioConferenceMixerImpl::MixFromList(
809 AudioFrame* mixedAudio, 787 AudioFrame* mixedAudio,
810 const AudioFrameList& audioFrameList) const { 788 const AudioFrameList& audioFrameList,
811 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 789 int32_t id,
790 bool use_limiter) {
791 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id,
812 "MixFromList(mixedAudio, audioFrameList)"); 792 "MixFromList(mixedAudio, audioFrameList)");
813 if (audioFrameList.empty()) 793 if (audioFrameList.empty())
814 return 0; 794 return 0;
815 795
816 uint32_t position = 0; 796 uint32_t position = 0;
817 797
818 if (_numMixedParticipants == 1) { 798 if (audioFrameList.size() == 1) {
819 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_; 799 mixedAudio->timestamp_ = audioFrameList.front().frame->timestamp_;
820 mixedAudio->elapsed_time_ms_ = 800 mixedAudio->elapsed_time_ms_ =
821 audioFrameList.front().frame->elapsed_time_ms_; 801 audioFrameList.front().frame->elapsed_time_ms_;
822 } else { 802 } else {
823 // TODO(wu): Issue 3390. 803 // TODO(wu): Issue 3390.
824 // Audio frame timestamp is only supported in one channel case. 804 // Audio frame timestamp is only supported in one channel case.
825 mixedAudio->timestamp_ = 0; 805 mixedAudio->timestamp_ = 0;
826 mixedAudio->elapsed_time_ms_ = -1; 806 mixedAudio->elapsed_time_ms_ = -1;
827 } 807 }
828 808
829 for (AudioFrameList::const_iterator iter = audioFrameList.begin(); 809 for (AudioFrameList::const_iterator iter = audioFrameList.begin();
830 iter != audioFrameList.end(); ++iter) { 810 iter != audioFrameList.end(); ++iter) {
831 if (position >= kMaximumAmountOfMixedParticipants) { 811 if (position >= kMaximumAmountOfMixedAudioSources) {
832 WEBRTC_TRACE( 812 WEBRTC_TRACE(
833 kTraceMemory, kTraceAudioMixerServer, _id, 813 kTraceMemory, kTraceAudioMixerServer, id,
834 "Trying to mix more than max amount of mixed participants:%d!", 814 "Trying to mix more than max amount of mixed audio sources:%d!",
835 kMaximumAmountOfMixedParticipants); 815 kMaximumAmountOfMixedAudioSources);
836 // Assert and avoid crash 816 // Assert and avoid crash
837 assert(false); 817 RTC_NOTREACHED();
838 position = 0; 818 position = 0;
839 } 819 }
840 if (!iter->muted) { 820 if (!iter->muted) {
841 MixFrames(mixedAudio, iter->frame, use_limiter_); 821 MixFrames(mixedAudio, iter->frame, use_limiter);
842 } 822 }
843 823
844 position++; 824 position++;
845 } 825 }
846 826
847 return 0; 827 return 0;
848 } 828 }
849 829
850 // TODO(andrew): consolidate this function with MixFromList. 830 // TODO(andrew): consolidate this function with MixFromList.
851 int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList( 831 int32_t NewAudioConferenceMixerImpl::MixAnonomouslyFromList(
(...skipping 21 matching lines...)
873 } 853 }
874 854
875 // Smoothly limit the mixed frame. 855 // Smoothly limit the mixed frame.
876 const int error = _limiter->ProcessStream(mixedAudio); 856 const int error = _limiter->ProcessStream(mixedAudio);
877 857
878 // And now we can safely restore the level. This procedure results in 858 // And now we can safely restore the level. This procedure results in
879 // some loss of resolution, deemed acceptable. 859 // some loss of resolution, deemed acceptable.
880 // 860 //
881 // It's possible to apply the gain in the AGC (with a target level of 0 dBFS 861 // It's possible to apply the gain in the AGC (with a target level of 0 dBFS
882 // and compression gain of 6 dB). However, in the transition frame when this 862 // and compression gain of 6 dB). However, in the transition frame when this
883 // is enabled (moving from one to two participants) it has the potential to 863 // is enabled (moving from one to two audio sources) it has the potential to
884 // create discontinuities in the mixed frame. 864 // create discontinuities in the mixed frame.
885 // 865 //
886 // Instead we double the frame (with addition since left-shifting a 866 // Instead we double the frame (with addition since left-shifting a
887 // negative value is undefined). 867 // negative value is undefined).
888 *mixedAudio += *mixedAudio; 868 *mixedAudio += *mixedAudio;
889 869
890 if (error != _limiter->kNoError) { 870 if (error != _limiter->kNoError) {
891 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 871 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
892 "Error from AudioProcessing: %d", error); 872 "Error from AudioProcessing: %d", error);
893 assert(false); 873 RTC_NOTREACHED();
894 return false; 874 return false;
895 } 875 }
896 return true; 876 return true;
897 } 877 }
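
The += doubling above closes the headroom loop opened in MixFrames(): inputs were halved before summing, the limiter smooths the sum, and the result is doubled back to nominal level. It is written as x + x rather than x << 1 because left-shifting a negative signed value is undefined behavior in C++. In miniature (assuming the limiter output keeps the 6 dB of headroom the earlier halving created, so the doubling cannot overflow):

    // UB-free doubling of a limited sample back to nominal level.
    int16_t RestoreLevel(int16_t limited) {
      return static_cast<int16_t>(limited + limited);  // Same as 2 * limited.
    }
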
898 } // namespace webrtc 878 } // namespace webrtc
