Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(254)

Side by Side Diff: webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc

Issue 2132563002: Rewrote UpdateToMix in the audio mixer. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@remove_memory_pool
Patch Set: Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2016 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 14
15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h " 15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h "
16 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" 16 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
17 #include "webrtc/modules/audio_processing/include/audio_processing.h" 17 #include "webrtc/modules/audio_processing/include/audio_processing.h"
18 #include "webrtc/modules/utility/include/audio_frame_operations.h" 18 #include "webrtc/modules/utility/include/audio_frame_operations.h"
19 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" 19 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
20 #include "webrtc/system_wrappers/include/trace.h" 20 #include "webrtc/system_wrappers/include/trace.h"
21 21
22 namespace webrtc { 22 namespace webrtc {
23 namespace { 23 namespace {
24 24
25 struct ParticipantFrameStruct { 25 class ParticipantFrameStruct {
ossu 2016/07/08 11:02:14 I don't think we suffix structs with Struct. Proba
aleloi 2016/07/08 13:45:23 Done.
26 ParticipantFrameStruct(MixerAudioSource* p, AudioFrame* a, bool m) 26 public:
27 : participant(p), audioFrame(a), muted(m) {} 27 ParticipantFrameStruct(MixerAudioSource* p,
28 MixerAudioSource* participant; 28 AudioFrame* a,
29 AudioFrame* audioFrame; 29 bool m,
30 bool muted; 30 bool was_mixed_before)
31 : participant_(p),
32 audio_frame_(a),
33 muted_(m),
34 was_mixed_before_(was_mixed_before) {
35 if (!muted_) {
36 energy_ = CalculateEnergy(*a);
37 }
38 }
39
40 // a.shouldMixAfter(b) is used to select mixer participants.
41 bool shouldMixAfter(const ParticipantFrameStruct& other) const {
42 if (muted_ != other.muted_) {
43 return muted_ && !other.muted_;
ossu 2016/07/08 11:02:14 wh... what? Returning muted should be enough here.
aleloi 2016/07/08 13:45:23 Done. You are right, it's shorter.
44 }
45
46 auto our_activity = audio_frame_->vad_activity_,
47 other_activity = other.audio_frame_->vad_activity_;
ossu 2016/07/08 11:02:14 I'm not sure if this is in the style guide, but I'
aleloi 2016/07/08 13:45:24 Done.
48 if (our_activity != other_activity) {
49 return (our_activity == AudioFrame::kVadPassive ||
50 our_activity == AudioFrame::kVadUnknown) &&
51 other_activity == AudioFrame::kVadActive;
52 }
53
54 return energy_ < other.energy_;
55 }
56
57 MixerAudioSource* participant_;
ossu 2016/07/08 11:02:13 participant or source? Pick one! :)
58 AudioFrame* audio_frame_;
59 bool muted_;
60 uint32_t energy_;
61 bool was_mixed_before_;
31 }; 62 };
32 63
33 typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList; 64 typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList;
34 65
35 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. 66 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
36 // These effects are applied to |frame| itself prior to mixing. Assumes that 67 // These effects are applied to |frame| itself prior to mixing. Assumes that
37 // |mixed_frame| always has at least as many channels as |frame|. Supports 68 // |mixed_frame| always has at least as many channels as |frame|. Supports
38 // stereo at most. 69 // stereo at most.
39 // 70 //
40 // TODO(andrew): consider not modifying |frame| here. 71 // TODO(andrew): consider not modifying |frame| here.
(...skipping 113 matching lines...) Expand 10 before | Expand all | Expand 10 after
154 if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError) 185 if (_limiter->gain_control()->enable_limiter(true) != _limiter->kNoError)
155 return false; 186 return false;
156 187
157 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) 188 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError)
158 return false; 189 return false;
159 190
160 return true; 191 return true;
161 } 192 }
162 193
163 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { 194 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
164 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants; 195 size_t remainingParticipantsAllowedToMix = kMaximumAmountOfMixedParticipants;
ossu 2016/07/08 11:02:13 Just in general, if we're now using the term Mixer
aleloi 2016/07/08 13:45:23 I'll change upstream.
165 RTC_DCHECK(thread_checker_.CalledOnValidThread()); 196 RTC_DCHECK(thread_checker_.CalledOnValidThread());
166 197
167 AudioFrameList mixList; 198 AudioFrameList mixList;
ossu 2016/07/08 11:02:14 Move down to where it's actually initialized.
aleloi 2016/07/08 13:45:24 It's assigned to while we're holding the lock. And
ossu 2016/07/08 13:58:34 Oh, right! Silly me!
168 AudioFrameList rampOutList; 199 AudioFrameList rampOutList;
ossu 2016/07/08 11:02:14 Not used?
aleloi 2016/07/08 13:45:24 Yes! Thanks for noticing!
169 AudioFrameList additionalFramesList; 200 AudioFrameList additionalFramesList;
ossu 2016/07/08 11:02:14 Probably move this one down as well.
aleloi 2016/07/08 13:45:24 Changes are also guarded by lock.
170 std::map<int, MixerAudioSource*> mixedParticipantsMap; 201 std::map<int, MixerAudioSource*> mixedParticipantsMap;
171 { 202 {
172 CriticalSectionScoped cs(_cbCrit.get()); 203 CriticalSectionScoped cs(_cbCrit.get());
173 204
174 int32_t lowFreq = GetLowestMixingFrequency(); 205 int32_t lowFreq = GetLowestMixingFrequency();
175 // SILK can run in 12 kHz and 24 kHz. These frequencies are not 206 // SILK can run in 12 kHz and 24 kHz. These frequencies are not
176 // supported so use the closest higher frequency to not lose any 207 // supported so use the closest higher frequency to not lose any
177 // information. 208 // information.
178 // TODO(henrike): this is probably more appropriate to do in 209 // TODO(henrike): this is probably more appropriate to do in
179 // GetLowestMixingFrequency(). 210 // GetLowestMixingFrequency().
180 if (lowFreq == 12000) { 211 if (lowFreq == 12000) {
181 lowFreq = 16000; 212 lowFreq = 16000;
182 } else if (lowFreq == 24000) { 213 } else if (lowFreq == 24000) {
183 lowFreq = 32000; 214 lowFreq = 32000;
184 } 215 }
185 if (lowFreq <= 0) { 216 if (lowFreq <= 0) {
186 RTC_DCHECK(thread_checker_.CalledOnValidThread());
ossu 2016/07/08 11:02:14 Why is this no longer useful?
187 return; 217 return;
188 } else { 218 } else {
189 switch (lowFreq) { 219 switch (lowFreq) {
190 case 8000: 220 case 8000:
191 if (OutputFrequency() != kNbInHz) { 221 if (OutputFrequency() != kNbInHz) {
192 SetOutputFrequency(kNbInHz); 222 SetOutputFrequency(kNbInHz);
193 } 223 }
194 break; 224 break;
195 case 16000: 225 case 16000:
196 if (OutputFrequency() != kWbInHz) { 226 if (OutputFrequency() != kWbInHz) {
197 SetOutputFrequency(kWbInHz); 227 SetOutputFrequency(kWbInHz);
198 } 228 }
199 break; 229 break;
200 case 32000: 230 case 32000:
201 if (OutputFrequency() != kSwbInHz) { 231 if (OutputFrequency() != kSwbInHz) {
202 SetOutputFrequency(kSwbInHz); 232 SetOutputFrequency(kSwbInHz);
203 } 233 }
204 break; 234 break;
205 case 48000: 235 case 48000:
206 if (OutputFrequency() != kFbInHz) { 236 if (OutputFrequency() != kFbInHz) {
207 SetOutputFrequency(kFbInHz); 237 SetOutputFrequency(kFbInHz);
208 } 238 }
209 break; 239 break;
210 default: 240 default:
211 RTC_DCHECK(false); 241 RTC_DCHECK(false);
212 242
213 RTC_DCHECK(thread_checker_.CalledOnValidThread());
214 return; 243 return;
215 } 244 }
216 } 245 }
217 246
218 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, 247 mixList = UpdateToMix(remainingParticipantsAllowedToMix);
ossu 2016/07/08 11:02:14 You could just use kMaximumAmountOfMixedParticipan
aleloi 2016/07/08 13:45:24 Done.
219 &remainingParticipantsAllowedToMix); 248 remainingParticipantsAllowedToMix -= mixList.size();
ossu 2016/07/08 11:02:14 Who cares? It's never used again, is it?
aleloi 2016/07/08 13:45:24 Removed. Thanks!
220
221 GetAdditionalAudio(&additionalFramesList); 249 GetAdditionalAudio(&additionalFramesList);
222 UpdateMixedStatus(mixedParticipantsMap);
223 } 250 }
224 251
225 // TODO(henrike): it might be better to decide the number of channels 252 // TODO(henrike): it might be better to decide the number of channels
226 // with an API instead of dynamically. 253 // with an API instead of dynamically.
227 254
228 // Find the max channels over all mixing lists. 255 // Find the max channels over all mixing lists.
229 const size_t num_mixed_channels = std::max( 256 const size_t num_mixed_channels = std::max(
230 MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList), 257 MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList),
231 MaxNumChannels(&rampOutList))); 258 MaxNumChannels(&rampOutList)));
232 259
(...skipping 18 matching lines...) Expand all
251 if (audio_frame_for_mixing->samples_per_channel_ == 0) { 278 if (audio_frame_for_mixing->samples_per_channel_ == 0) {
252 // Nothing was mixed, set the audio samples to silence. 279 // Nothing was mixed, set the audio samples to silence.
253 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; 280 audio_frame_for_mixing->samples_per_channel_ = _sampleSize;
254 audio_frame_for_mixing->Mute(); 281 audio_frame_for_mixing->Mute();
255 } else { 282 } else {
256 // Only call the limiter if we have something to mix. 283 // Only call the limiter if we have something to mix.
257 LimitMixedAudio(audio_frame_for_mixing); 284 LimitMixedAudio(audio_frame_for_mixing);
258 } 285 }
259 } 286 }
260 287
261 ClearAudioFrameList(&mixList);
ossu 2016/07/08 11:02:14 So I guess this used to be necessary to hand the f
262 ClearAudioFrameList(&rampOutList);
263 ClearAudioFrameList(&additionalFramesList);
264 RTC_DCHECK(thread_checker_.CalledOnValidThread());
265 return; 288 return;
266 } 289 }
267 290
268 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( 291 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency(
269 const Frequency& frequency) { 292 const Frequency& frequency) {
270 CriticalSectionScoped cs(_crit.get()); 293 CriticalSectionScoped cs(_crit.get());
271 294
272 _outputFrequency = frequency; 295 _outputFrequency = frequency;
273 _sampleSize = 296 _sampleSize =
274 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); 297 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000);
(...skipping 144 matching lines...) Expand 10 before | Expand all | Expand 10 after
419 for (MixerAudioSourceList::const_iterator iter = mixList.begin(); 442 for (MixerAudioSourceList::const_iterator iter = mixList.begin();
420 iter != mixList.end(); ++iter) { 443 iter != mixList.end(); ++iter) {
421 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); 444 const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
422 if (neededFrequency > highestFreq) { 445 if (neededFrequency > highestFreq) {
423 highestFreq = neededFrequency; 446 highestFreq = neededFrequency;
424 } 447 }
425 } 448 }
426 return highestFreq; 449 return highestFreq;
427 } 450 }
428 451
429 void NewAudioConferenceMixerImpl::UpdateToMix( 452 AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix(
430 AudioFrameList* mixList, 453 size_t maxAudioFrameCounter) const {
431 AudioFrameList* rampOutList, 454 AudioFrameList result;
432 std::map<int, MixerAudioSource*>* mixParticipantList, 455 std::vector<ParticipantFrameStruct> participantMixingDataList;
433 size_t* maxAudioFrameCounter) const {
434 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
435 "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
436 *maxAudioFrameCounter);
437 const size_t mixListStartSize = mixList->size();
438 AudioFrameList activeList;
439 // Struct needed by the passive lists to keep track of which AudioFrame
440 // belongs to which MixerAudioSource.
441 ParticipantFrameStructList passiveWasNotMixedList;
442 ParticipantFrameStructList passiveWasMixedList;
443 for (MixerAudioSourceList::const_iterator participant =
444 _participantList.begin();
445 participant != _participantList.end(); ++participant) {
446 // Stop keeping track of passive participants if there are already
 447 // enough participants available (they won't be mixed anyway).
448 bool mustAddToPassiveList =
449 (*maxAudioFrameCounter >
450 (activeList.size() + passiveWasMixedList.size() +
451 passiveWasNotMixedList.size()));
452 456
453 bool wasMixed = false; 457 // Get participant audio and put it in the struct vector.
454 wasMixed = (*participant)->_mixHistory->WasMixed(); 458 for (MixerAudioSource* participant : _participantList) {
459 auto audio_frame_with_info = participant->GetAudioFrameWithMuted(
ossu 2016/07/08 11:02:14 Hmm... reading this, I'm thinking AudioFrameWithIn
aleloi 2016/07/08 13:45:24 I agree. Changed.
460 _id, static_cast<int>(_outputFrequency));
455 461
456 auto audio_frame_with_info = 462 auto audio_frame_info = audio_frame_with_info.audio_frame_info;
457 (*participant)->GetAudioFrameWithMuted(_id, _outputFrequency); 463 AudioFrame* participant_audio_frame = audio_frame_with_info.audio_frame;
458 464
459 auto ret = audio_frame_with_info.audio_frame_info; 465 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
460 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; 466 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
461 if (ret == MixerAudioSource::AudioFrameInfo::kError) { 467 "failed to GetAudioFrameWithMuted() from participant");
462 continue; 468 continue;
463 } 469 }
464 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); 470 participantMixingDataList.emplace_back(
465 if (_participantList.size() != 1) { 471 participant, participant_audio_frame,
466 // TODO(wu): Issue 3390, add support for multiple participants case. 472 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
467 audio_frame->ntp_time_ms_ = -1; 473 participant->_mixHistory->WasMixed());
474 }
475
476 // Sort frames by sorting function.
477 std::sort(
ossu 2016/07/08 11:02:13 Suggestion: rework shouldMixAfter into shouldMixBe
aleloi 2016/07/08 13:45:24 Done. Btw, I got the impression from reading the s
ossu 2016/07/08 13:58:34 That makes perfect sense. Reading up on that, I mi
478 participantMixingDataList.begin(), participantMixingDataList.end(),
479 [](const ParticipantFrameStruct& a, const ParticipantFrameStruct& b) {
480 return b.shouldMixAfter(a);
481 });
482
483 // Go through list in order and put things in mixList.
484 for (ParticipantFrameStruct& p : participantMixingDataList) {
485 // Filter muted.
486 if (p.muted_) {
487 p.participant_->_mixHistory->SetIsMixed(false);
488 continue;
468 } 489 }
469 490
 470 // TODO(henrike): this assert triggers in some test cases where SRTP is 491 // Add frame to result vector for mixing.
ossu 2016/07/08 11:02:14 vectorfor -> vector for
aleloi 2016/07/08 13:45:23 Done.
471 // used which prevents NetEQ from making a VAD. Temporarily disable this 492 bool is_mixed = false;
472 // assert until the problem is fixed on a higher level. 493 if (maxAudioFrameCounter > 0) {
473 // RTC_DCHECK(audio_frame->vad_activity_ != AudioFrame::kVadUnknown); 494 --maxAudioFrameCounter;
474 if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) { 495 if (!p.was_mixed_before_) {
475 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 496 RampIn(*p.audio_frame_);
476 "invalid VAD state from participant"); 497 }
498 result.emplace_back(p.audio_frame_, false);
499 is_mixed = true;
477 } 500 }
478 501
479 if (audio_frame->vad_activity_ == AudioFrame::kVadActive) { 502 // Ramp out unmuted.
480 if (!wasMixed && !muted) { 503 if (p.was_mixed_before_ && !is_mixed) {
481 RampIn(*audio_frame); 504 RampOut(*p.audio_frame_);
482 } 505 result.emplace_back(p.audio_frame_, false);
506 }
483 507
484 if (activeList.size() >= *maxAudioFrameCounter) { 508 p.participant_->_mixHistory->SetIsMixed(is_mixed);
485 // There are already more active participants than should be
486 // mixed. Only keep the ones with the highest energy.
487 AudioFrameList::iterator replaceItem;
488 uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame);
489
490 bool found_replace_item = false;
491 for (AudioFrameList::iterator iter = activeList.begin();
492 iter != activeList.end(); ++iter) {
493 const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame);
494 if (energy < lowestEnergy) {
495 replaceItem = iter;
496 lowestEnergy = energy;
497 found_replace_item = true;
498 }
499 }
500 if (found_replace_item) {
501 RTC_DCHECK(!muted); // Cannot replace with a muted frame.
502 FrameAndMuteInfo replaceFrame = *replaceItem;
503
504 bool replaceWasMixed = false;
505 std::map<int, MixerAudioSource*>::const_iterator it =
506 mixParticipantList->find(replaceFrame.frame->id_);
507
508 // When a frame is pushed to |activeList| it is also pushed
509 // to mixParticipantList with the frame's id. This means
510 // that the Find call above should never fail.
511 RTC_DCHECK(it != mixParticipantList->end());
512 replaceWasMixed = it->second->_mixHistory->WasMixed();
513
514 mixParticipantList->erase(replaceFrame.frame->id_);
515 activeList.erase(replaceItem);
516
517 activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
518 (*mixParticipantList)[audio_frame->id_] = *participant;
519 RTC_DCHECK(mixParticipantList->size() <=
520 kMaximumAmountOfMixedParticipants);
521
522 if (replaceWasMixed) {
523 if (!replaceFrame.muted) {
524 RampOut(*replaceFrame.frame);
525 }
526 rampOutList->push_back(replaceFrame);
527 RTC_DCHECK(rampOutList->size() <=
528 kMaximumAmountOfMixedParticipants);
529 }
530 } else {
531 if (wasMixed) {
532 if (!muted) {
533 RampOut(*audio_frame);
534 }
535 rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted));
536 assert(rampOutList->size() <= kMaximumAmountOfMixedParticipants);
537 }
538 }
539 } else {
540 activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
541 (*mixParticipantList)[audio_frame->id_] = *participant;
542 RTC_DCHECK(mixParticipantList->size() <=
543 kMaximumAmountOfMixedParticipants);
544 }
545 } else {
546 if (wasMixed) {
547 ParticipantFrameStruct* part_struct =
548 new ParticipantFrameStruct(*participant, audio_frame, muted);
549 passiveWasMixedList.push_back(part_struct);
550 } else if (mustAddToPassiveList) {
551 if (!muted) {
552 RampIn(*audio_frame);
553 }
554 ParticipantFrameStruct* part_struct =
555 new ParticipantFrameStruct(*participant, audio_frame, muted);
556 passiveWasNotMixedList.push_back(part_struct);
557 }
558 }
559 } 509 }
560 RTC_DCHECK(activeList.size() <= *maxAudioFrameCounter); 510 return result;
561 // At this point it is known which participants should be mixed. Transfer
562 // this information to this functions output parameters.
563 for (AudioFrameList::const_iterator iter = activeList.begin();
564 iter != activeList.end(); ++iter) {
565 mixList->push_back(*iter);
566 }
567 activeList.clear();
568 // Always mix a constant number of AudioFrames. If there aren't enough
 569 // active participants, mix passive ones. Starting with those that were mixed
570 // last iteration.
571 for (ParticipantFrameStructList::const_iterator iter =
572 passiveWasMixedList.begin();
573 iter != passiveWasMixedList.end(); ++iter) {
574 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
575 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
576 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
577 RTC_DCHECK(mixParticipantList->size() <=
578 kMaximumAmountOfMixedParticipants);
579 }
580 delete *iter;
581 }
582 // And finally the ones that have not been mixed for a while.
583 for (ParticipantFrameStructList::const_iterator iter =
584 passiveWasNotMixedList.begin();
585 iter != passiveWasNotMixedList.end(); ++iter) {
586 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
587 mixList->push_back(FrameAndMuteInfo((*iter)->audioFrame, (*iter)->muted));
588 (*mixParticipantList)[(*iter)->audioFrame->id_] = (*iter)->participant;
589 RTC_DCHECK(mixParticipantList->size() <=
590 kMaximumAmountOfMixedParticipants);
591 }
592 delete *iter;
593 }
594 RTC_DCHECK(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
595 *maxAudioFrameCounter += mixListStartSize - mixList->size();
596 } 511 }
597 512
598 void NewAudioConferenceMixerImpl::GetAdditionalAudio( 513 void NewAudioConferenceMixerImpl::GetAdditionalAudio(
ossu 2016/07/08 11:02:14 Not necessary for this CL, but this should be chan
aleloi 2016/07/08 13:45:23 Acknowledged.
599 AudioFrameList* additionalFramesList) const { 514 AudioFrameList* additionalFramesList) const {
600 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 515 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
601 "GetAdditionalAudio(additionalFramesList)"); 516 "GetAdditionalAudio(additionalFramesList)");
602 // The GetAudioFrameWithMuted() callback may result in the participant being 517 // The GetAudioFrameWithMuted() callback may result in the participant being
603 // removed from additionalParticipantList_. If that happens it will 518 // removed from additionalParticipantList_. If that happens it will
604 // invalidate any iterators. Create a copy of the participants list such 519 // invalidate any iterators. Create a copy of the participants list such
605 // that the list of participants can be traversed safely. 520 // that the list of participants can be traversed safely.
606 MixerAudioSourceList additionalParticipantList; 521 MixerAudioSourceList additionalParticipantList;
607 additionalParticipantList.insert(additionalParticipantList.begin(), 522 additionalParticipantList.insert(additionalParticipantList.begin(),
608 _additionalParticipantList.begin(), 523 _additionalParticipantList.begin(),
(...skipping 13 matching lines...) Expand all
622 } 537 }
623 if (audio_frame->samples_per_channel_ == 0) { 538 if (audio_frame->samples_per_channel_ == 0) {
624 // Empty frame. Don't use it. 539 // Empty frame. Don't use it.
625 continue; 540 continue;
626 } 541 }
627 additionalFramesList->push_back(FrameAndMuteInfo( 542 additionalFramesList->push_back(FrameAndMuteInfo(
628 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); 543 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
629 } 544 }
630 } 545 }
631 546
632 void NewAudioConferenceMixerImpl::UpdateMixedStatus(
633 const std::map<int, MixerAudioSource*>& mixedParticipantsMap) const {
634 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
635 "UpdateMixedStatus(mixedParticipantsMap)");
636 RTC_DCHECK(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
637
638 // Loop through all participants. If they are in the mix map they
639 // were mixed.
640 for (MixerAudioSourceList::const_iterator participant =
641 _participantList.begin();
642 participant != _participantList.end(); ++participant) {
643 bool isMixed = false;
644 for (std::map<int, MixerAudioSource*>::const_iterator it =
645 mixedParticipantsMap.begin();
646 it != mixedParticipantsMap.end(); ++it) {
647 if (it->second == *participant) {
648 isMixed = true;
649 break;
650 }
651 }
652 (*participant)->_mixHistory->SetIsMixed(isMixed);
653 }
654 }
655
656 void NewAudioConferenceMixerImpl::ClearAudioFrameList(
657 AudioFrameList* audioFrameList) const {
658 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
659 "ClearAudioFrameList(audioFrameList)");
660 audioFrameList->clear();
661 }
662
663 bool NewAudioConferenceMixerImpl::IsParticipantInList( 547 bool NewAudioConferenceMixerImpl::IsParticipantInList(
664 const MixerAudioSource& participant, 548 const MixerAudioSource& participant,
665 const MixerAudioSourceList& participantList) const { 549 const MixerAudioSourceList& participantList) const {
666 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 550 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
667 "IsParticipantInList(participant,participantList)"); 551 "IsParticipantInList(participant,participantList)");
668 for (MixerAudioSourceList::const_iterator iter = participantList.begin(); 552 for (MixerAudioSourceList::const_iterator iter = participantList.begin();
669 iter != participantList.end(); ++iter) { 553 iter != participantList.end(); ++iter) {
670 if (&participant == *iter) { 554 if (&participant == *iter) {
671 return true; 555 return true;
672 } 556 }
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after
788 672
789 if (error != _limiter->kNoError) { 673 if (error != _limiter->kNoError) {
790 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 674 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
791 "Error from AudioProcessing: %d", error); 675 "Error from AudioProcessing: %d", error);
792 RTC_DCHECK(false); 676 RTC_DCHECK(false);
793 return false; 677 return false;
794 } 678 }
795 return true; 679 return true;
796 } 680 }
797 } // namespace webrtc 681 } // namespace webrtc
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698