Chromium Code Reviews

Diff: webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.cc

Issue 2195633002: Revert of "Rewrote UpdateToMix in the audio mixer." (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@remove_memory_pool
Patch Set: Created 4 years, 4 months ago
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"

 #include <algorithm>
-#include <functional>

 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h"
 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
 #include "webrtc/modules/utility/include/audio_frame_operations.h"
 #include "webrtc/system_wrappers/include/critical_section_wrapper.h"
 #include "webrtc/system_wrappers/include/trace.h"

 namespace webrtc {
 namespace {

-class SourceFrame {
- public:
-  SourceFrame(MixerAudioSource* p,
-              AudioFrame* a,
-              bool m,
-              bool was_mixed_before)
-      : audio_source_(p),
-        audio_frame_(a),
-        muted_(m),
-        was_mixed_before_(was_mixed_before) {
-    if (!muted_) {
-      energy_ = CalculateEnergy(*a);
-    }
-  }
-
-  // a.shouldMixBefore(b) is used to select mixer participants.
-  bool shouldMixBefore(const SourceFrame& other) const {
-    if (muted_ != other.muted_) {
-      return other.muted_;
-    }
-
-    auto our_activity = audio_frame_->vad_activity_;
-    auto other_activity = other.audio_frame_->vad_activity_;
-
-    if (our_activity != other_activity) {
-      return our_activity == AudioFrame::kVadActive;
-    }
-
-    return energy_ > other.energy_;
-  }
-
-  MixerAudioSource* audio_source_;
-  AudioFrame* audio_frame_;
-  bool muted_;
-  uint32_t energy_;
-  bool was_mixed_before_;
-};
+struct AudioSourceWithFrame {
+  AudioSourceWithFrame(MixerAudioSource* p, AudioFrame* a, bool m)
+      : audio_source(p), audio_frame(a), muted(m) {}
+  MixerAudioSource* audio_source;
+  AudioFrame* audio_frame;
+  bool muted;
+};
+
+typedef std::list<AudioSourceWithFrame*> AudioSourceWithFrameList;

 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing.
 // These effects are applied to |frame| itself prior to mixing. Assumes that
 // |mixed_frame| always has at least as many channels as |frame|. Supports
 // stereo at most.
 //
 // TODO(andrew): consider not modifying |frame| here.
 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) {
   RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_);
   if (use_limiter) {
(...skipping 116 matching lines...)
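Note on the chunk above: the revert deletes the SourceFrame helper, whose shouldMixBefore() comparator let a single std::sort rank mixing candidates (unmuted before muted, VAD-active before passive, then louder first), and restores the older AudioSourceWithFrame bookkeeping struct. A minimal sketch of the comparator idea, with illustrative stand-in types rather than the real mixer classes:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for SourceFrame; not the real mixer type.
    struct Candidate {
      bool muted;
      bool vad_active;
      uint32_t energy;  // Only meaningful when not muted.
    };

    // Same ordering as the deleted SourceFrame::shouldMixBefore():
    // unmuted first, then VAD-active, then by decreasing energy.
    bool ShouldMixBefore(const Candidate& a, const Candidate& b) {
      if (a.muted != b.muted)
        return b.muted;       // The unmuted one wins.
      if (a.vad_active != b.vad_active)
        return a.vad_active;  // Active speech outranks passive audio.
      return a.energy > b.energy;
    }

    // After sorting, the first kMax entries are the ones to mix.
    void RankCandidates(std::vector<Candidate>* candidates) {
      std::sort(candidates->begin(), candidates->end(), ShouldMixBefore);
    }

std::sort requires a strict weak ordering, which this comparator satisfies; note that the deleted code computes energy_ only for unmuted frames and filters muted frames out before mixing.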
   if (_limiter->gain_control()->Enable(true) != _limiter->kNoError)
     return false;

   return true;
 }

 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) {
   size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources;
   RTC_DCHECK(thread_checker_.CalledOnValidThread());
   AudioFrameList mixList;
+  AudioFrameList rampOutList;
   AudioFrameList additionalFramesList;
   std::map<int, MixerAudioSource*> mixedAudioSourcesMap;
   {
     CriticalSectionScoped cs(_cbCrit.get());

     int32_t lowFreq = GetLowestMixingFrequency();
     // SILK can run in 12 kHz and 24 kHz. These frequencies are not
     // supported so use the closest higher frequency to not lose any
     // information.
     // TODO(aleloi): this is probably more appropriate to do in
(...skipping 26 matching lines...)
         if (OutputFrequency() != kFbInHz) {
           SetOutputFrequency(kFbInHz);
         }
         break;
       default:
         RTC_NOTREACHED();
         return;
     }
   }

-    mixList = UpdateToMix(remainingAudioSourcesAllowedToMix);
-    remainingAudioSourcesAllowedToMix -= mixList.size();
+    UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap,
+                &remainingAudioSourcesAllowedToMix);
+
     GetAdditionalAudio(&additionalFramesList);
+    UpdateMixedStatus(mixedAudioSourcesMap);
   }

   // TODO(aleloi): it might be better to decide the number of channels
   // with an API instead of dynamically.

   // Find the max channels over all mixing lists.
-  const size_t num_mixed_channels =
-      std::max(MaxNumChannels(&mixList), MaxNumChannels(&additionalFramesList));
+  const size_t num_mixed_channels = std::max(
+      MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList),
+                                         MaxNumChannels(&rampOutList)));

   audio_frame_for_mixing->UpdateFrame(
       -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech,
       AudioFrame::kVadPassive, num_mixed_channels);

   _timeStamp += static_cast<uint32_t>(_sampleSize);

   use_limiter_ = num_mixed_audio_sources_ > 1 &&
                  _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz;

   // We only use the limiter if it supports the output sample rate and
   // we're actually mixing multiple streams.
   MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_);

   {
     CriticalSectionScoped cs(_crit.get());
     MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList);
+    MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList);

     if (audio_frame_for_mixing->samples_per_channel_ == 0) {
       // Nothing was mixed, set the audio samples to silence.
       audio_frame_for_mixing->samples_per_channel_ = _sampleSize;
       audio_frame_for_mixing->Mute();
     } else {
       // Only call the limiter if we have something to mix.
       LimitMixedAudio(audio_frame_for_mixing);
     }
   }
+
+  ClearAudioFrameList(&mixList);
+  ClearAudioFrameList(&rampOutList);
+  ClearAudioFrameList(&additionalFramesList);
   return;
 }

 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency(
     const Frequency& frequency) {
   CriticalSectionScoped cs(_crit.get());

   _outputFrequency = frequency;
   _sampleSize =
       static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000);
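The Mix() chunk above is the interface change being reverted: the rewrite had UpdateToMix() return the mix list by value, with the caller shrinking the remaining-frames budget, while the restored version fills output parameters, maintains the budget internally, and needs the extra rampOutList plus the ClearAudioFrameList() calls. A hedged sketch of the two calling conventions, using simplified stand-in types (FrameList here is illustrative, not the real AudioFrameList):

    #include <cstddef>
    #include <vector>

    using FrameList = std::vector<int>;  // Stand-in for AudioFrameList.

    // Convention of the reverted rewrite: return by value, caller-side
    // bookkeeping.
    FrameList UpdateToMixByValue(size_t max_frames) {
      return FrameList(max_frames > 0 ? 1 : 0);  // Pretend one source qualified.
    }

    // Convention restored by this revert: output parameters, callee-side
    // bookkeeping.
    void UpdateToMixByOutParam(FrameList* mix_list, size_t* max_frames) {
      if (*max_frames > 0) {
        mix_list->push_back(0);
        --*max_frames;
      }
    }

    int main() {
      size_t remaining = 3;
      FrameList mixed = UpdateToMixByValue(remaining);
      remaining -= mixed.size();  // Caller updates the budget itself.

      size_t remaining_out = 3;
      FrameList mixed_out;
      UpdateToMixByOutParam(&mixed_out, &remaining_out);
      // |remaining_out| was already decremented inside the callee.
      return 0;
    }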
(...skipping 146 matching lines...)
   for (MixerAudioSourceList::const_iterator iter = mixList.begin();
        iter != mixList.end(); ++iter) {
     const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
     if (neededFrequency > highestFreq) {
       highestFreq = neededFrequency;
     }
   }
   return highestFreq;
 }

-AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix(
-    size_t maxAudioFrameCounter) const {
-  AudioFrameList result;
-  std::vector<SourceFrame> audioSourceMixingDataList;
-
-  // Get audio source audio and put it in the struct vector.
-  for (MixerAudioSource* audio_source : audio_source_list_) {
-    auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted(
-        _id, static_cast<int>(_outputFrequency));
-
-    auto audio_frame_info = audio_frame_with_info.audio_frame_info;
-    AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame;
-
-    if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) {
-      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
-                   "failed to GetAudioFrameWithMuted() from participant");
-      continue;
-    }
-    audioSourceMixingDataList.emplace_back(
-        audio_source, audio_source_audio_frame,
-        audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted,
-        audio_source->_mixHistory->WasMixed());
-  }
-
-  // Sort frames by sorting function.
-  std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(),
-            std::mem_fn(&SourceFrame::shouldMixBefore));
-
-  // Go through list in order and put things in mixList.
-  for (SourceFrame& p : audioSourceMixingDataList) {
-    // Filter muted.
-    if (p.muted_) {
-      p.audio_source_->_mixHistory->SetIsMixed(false);
-      continue;
-    }
-
-    // Add frame to result vector for mixing.
-    bool is_mixed = false;
-    if (maxAudioFrameCounter > 0) {
-      --maxAudioFrameCounter;
-      if (!p.was_mixed_before_) {
-        RampIn(*p.audio_frame_);
-      }
-      result.emplace_back(p.audio_frame_, false);
-      is_mixed = true;
-    }
-
-    // Ramp out unmuted.
-    if (p.was_mixed_before_ && !is_mixed) {
-      RampOut(*p.audio_frame_);
-      result.emplace_back(p.audio_frame_, false);
-    }
-
-    p.audio_source_->_mixHistory->SetIsMixed(is_mixed);
-  }
-  return result;
-}
+void NewAudioConferenceMixerImpl::UpdateToMix(
+    AudioFrameList* mixList,
+    AudioFrameList* rampOutList,
+    std::map<int, MixerAudioSource*>* mixAudioSourceList,
+    size_t* maxAudioFrameCounter) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "UpdateToMix(mixList,rampOutList,mixAudioSourceList,%d)",
+               *maxAudioFrameCounter);
+  const size_t mixListStartSize = mixList->size();
+  AudioFrameList activeList;
+  // Struct needed by the passive lists to keep track of which AudioFrame
+  // belongs to which MixerAudioSource.
+  AudioSourceWithFrameList passiveWasNotMixedList;
+  AudioSourceWithFrameList passiveWasMixedList;
+  for (MixerAudioSourceList::const_iterator audio_source =
+           audio_source_list_.begin();
+       audio_source != audio_source_list_.end(); ++audio_source) {
+    // Stop keeping track of passive audioSources if there are already
+    // enough audio sources available (they wont be mixed anyway).
+    bool mustAddToPassiveList =
+        (*maxAudioFrameCounter >
+         (activeList.size() + passiveWasMixedList.size() +
+          passiveWasNotMixedList.size()));
+
+    bool wasMixed = false;
+    wasMixed = (*audio_source)->_mixHistory->WasMixed();
+
+    auto audio_frame_with_info =
+        (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency);
+    auto ret = audio_frame_with_info.audio_frame_info;
+    AudioFrame* audio_frame = audio_frame_with_info.audio_frame;
+    if (ret == MixerAudioSource::AudioFrameInfo::kError) {
+      continue;
+    }
+    const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted);
+    if (audio_source_list_.size() != 1) {
+      // TODO(wu): Issue 3390, add support for multiple audio sources case.
+      audio_frame->ntp_time_ms_ = -1;
+    }
+
+    // TODO(aleloi): this assert triggers in some test cases where SRTP is
+    // used which prevents NetEQ from making a VAD. Temporarily disable this
+    // assert until the problem is fixed on a higher level.
+    // RTC_DCHECK_NE(audio_frame->vad_activity_, AudioFrame::kVadUnknown);
+    if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) {
+      WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
+                   "invalid VAD state from audio source");
+    }
+
+    if (audio_frame->vad_activity_ == AudioFrame::kVadActive) {
+      if (!wasMixed && !muted) {
+        RampIn(*audio_frame);
+      }
+
+      if (activeList.size() >= *maxAudioFrameCounter) {
+        // There are already more active audio sources than should be
+        // mixed. Only keep the ones with the highest energy.
+        AudioFrameList::iterator replaceItem;
+        uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame);
+
+        bool found_replace_item = false;
+        for (AudioFrameList::iterator iter = activeList.begin();
+             iter != activeList.end(); ++iter) {
+          const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame);
+          if (energy < lowestEnergy) {
+            replaceItem = iter;
+            lowestEnergy = energy;
+            found_replace_item = true;
+          }
+        }
+        if (found_replace_item) {
+          RTC_DCHECK(!muted);  // Cannot replace with a muted frame.
+          FrameAndMuteInfo replaceFrame = *replaceItem;
+
+          bool replaceWasMixed = false;
+          std::map<int, MixerAudioSource*>::const_iterator it =
+              mixAudioSourceList->find(replaceFrame.frame->id_);
+
+          // When a frame is pushed to |activeList| it is also pushed
+          // to mixAudioSourceList with the frame's id. This means
+          // that the Find call above should never fail.
+          RTC_DCHECK(it != mixAudioSourceList->end());
+          replaceWasMixed = it->second->_mixHistory->WasMixed();
+
+          mixAudioSourceList->erase(replaceFrame.frame->id_);
+          activeList.erase(replaceItem);
+
+          activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
+          (*mixAudioSourceList)[audio_frame->id_] = *audio_source;
+          RTC_DCHECK_LE(mixAudioSourceList->size(),
+                        static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
+
+          if (replaceWasMixed) {
+            if (!replaceFrame.muted) {
+              RampOut(*replaceFrame.frame);
+            }
+            rampOutList->push_back(replaceFrame);
+            RTC_DCHECK_LE(
+                rampOutList->size(),
+                static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
+          }
+        } else {
+          if (wasMixed) {
+            if (!muted) {
+              RampOut(*audio_frame);
+            }
+            rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted));
+            RTC_DCHECK_LE(
+                rampOutList->size(),
+                static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
+          }
+        }
+      } else {
+        activeList.push_front(FrameAndMuteInfo(audio_frame, muted));
+        (*mixAudioSourceList)[audio_frame->id_] = *audio_source;
+        RTC_DCHECK_LE(mixAudioSourceList->size(),
+                      static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
+      }
+    } else {
+      if (wasMixed) {
+        AudioSourceWithFrame* part_struct =
+            new AudioSourceWithFrame(*audio_source, audio_frame, muted);
+        passiveWasMixedList.push_back(part_struct);
+      } else if (mustAddToPassiveList) {
+        if (!muted) {
+          RampIn(*audio_frame);
+        }
+        AudioSourceWithFrame* part_struct =
+            new AudioSourceWithFrame(*audio_source, audio_frame, muted);
+        passiveWasNotMixedList.push_back(part_struct);
+      }
+    }
+  }
+  RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter);
+  // At this point it is known which audio sources should be mixed. Transfer
+  // this information to this functions output parameters.
+  for (AudioFrameList::const_iterator iter = activeList.begin();
+       iter != activeList.end(); ++iter) {
+    mixList->push_back(*iter);
+  }
+  activeList.clear();
+  // Always mix a constant number of AudioFrames. If there aren't enough
+  // active audio sources mix passive ones. Starting with those that was mixed
+  // last iteration.
+  for (AudioSourceWithFrameList::const_iterator iter =
+           passiveWasMixedList.begin();
+       iter != passiveWasMixedList.end(); ++iter) {
+    if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
+      mixList->push_back(
+          FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
+      (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
+      RTC_DCHECK_LE(mixAudioSourceList->size(),
+                    static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
+    }
+    delete *iter;
+  }
+  // And finally the ones that have not been mixed for a while.
+  for (AudioSourceWithFrameList::const_iterator iter =
+           passiveWasNotMixedList.begin();
+       iter != passiveWasNotMixedList.end(); ++iter) {
+    if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
+      mixList->push_back(
+          FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted));
+      (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source;
+      RTC_DCHECK_LE(mixAudioSourceList->size(),
+                    static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
+    }
+    delete *iter;
+  }
+  RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size());
+  *maxAudioFrameCounter += mixListStartSize - mixList->size();
+}
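The restored UpdateToMix() above caps the active list at *maxAudioFrameCounter by scanning for the lowest-energy entry and swapping it out when a louder frame arrives. A self-contained sketch of that replace-the-quietest step (stand-in types; the real code also tracks mute state, mix history, and ramps the evicted frame out):

    #include <algorithm>
    #include <cstdint>
    #include <list>

    struct Frame {
      uint32_t energy;  // Stand-in for CalculateEnergy(*frame).
    };

    // If |incoming| is louder than the quietest frame in a full |active|
    // list, replace that frame. Returns true if a replacement happened.
    bool ReplaceQuietest(std::list<Frame>* active, const Frame& incoming) {
      auto quietest = std::min_element(
          active->begin(), active->end(),
          [](const Frame& a, const Frame& b) { return a.energy < b.energy; });
      if (quietest == active->end() || quietest->energy >= incoming.energy)
        return false;  // Nothing in the list is quieter than |incoming|.
      active->erase(quietest);
      active->push_front(incoming);
      return true;
    }

In the real code the evicted frame is also pushed to rampOutList (with RampOut() applied if it was mixed last round), so a replaced participant fades out instead of cutting off.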

 void NewAudioConferenceMixerImpl::GetAdditionalAudio(
     AudioFrameList* additionalFramesList) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
                "GetAdditionalAudio(additionalFramesList)");
   // The GetAudioFrameWithMuted() callback may result in the audio source being
   // removed from additionalAudioFramesList_. If that happens it will
   // invalidate any iterators. Create a copy of the audio sources list such
   // that the list of participants can be traversed safely.
(...skipping 16 matching lines...)
     }
     if (audio_frame->samples_per_channel_ == 0) {
       // Empty frame. Don't use it.
       continue;
     }
     additionalFramesList->push_back(FrameAndMuteInfo(
         audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted));
   }
 }

+void NewAudioConferenceMixerImpl::UpdateMixedStatus(
+    const std::map<int, MixerAudioSource*>& mixedAudioSourcesMap) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "UpdateMixedStatus(mixedAudioSourcesMap)");
+  RTC_DCHECK_LE(mixedAudioSourcesMap.size(),
+                static_cast<size_t>(kMaximumAmountOfMixedAudioSources));
+
+  // Loop through all audio_sources. If they are in the mix map they
+  // were mixed.
+  for (MixerAudioSourceList::const_iterator audio_source =
+           audio_source_list_.begin();
+       audio_source != audio_source_list_.end(); ++audio_source) {
+    bool isMixed = false;
+    for (std::map<int, MixerAudioSource*>::const_iterator it =
+             mixedAudioSourcesMap.begin();
+         it != mixedAudioSourcesMap.end(); ++it) {
+      if (it->second == *audio_source) {
+        isMixed = true;
+        break;
+      }
+    }
+    (*audio_source)->_mixHistory->SetIsMixed(isMixed);
+  }
+}
+
+void NewAudioConferenceMixerImpl::ClearAudioFrameList(
+    AudioFrameList* audioFrameList) const {
+  WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
+               "ClearAudioFrameList(audioFrameList)");
+  audioFrameList->clear();
+}
+
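UpdateMixedStatus(), restored above, recomputes each source's mixed flag by scanning the map's values for every source, which is O(sources × mixed entries); the map is keyed by frame id, so a value scan is the only option without a second index. A sketch of that bookkeeping with illustrative stand-in types:

    #include <map>
    #include <vector>

    struct Source {
      bool is_mixed = false;  // Stand-in for _mixHistory->SetIsMixed().
    };

    // Mark exactly those sources that appear as values in |mixed_by_frame_id|.
    void UpdateMixedStatus(const std::map<int, Source*>& mixed_by_frame_id,
                           std::vector<Source*>* all_sources) {
      for (Source* source : *all_sources) {
        bool is_mixed = false;
        for (const auto& entry : mixed_by_frame_id) {
          if (entry.second == source) {  // Value scan, as in the original.
            is_mixed = true;
            break;
          }
        }
        source->is_mixed = is_mixed;
      }
    }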
 bool NewAudioConferenceMixerImpl::IsAudioSourceInList(
     const MixerAudioSource& audio_source,
     const MixerAudioSourceList& audioSourceList) const {
   WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
                "IsAudioSourceInList(audio_source,audioSourceList)");
   for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin();
        iter != audioSourceList.end(); ++iter) {
     if (&audio_source == *iter) {
       return true;
     }
(...skipping 115 matching lines...)

   if (error != _limiter->kNoError) {
     WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                  "Error from AudioProcessing: %d", error);
     RTC_NOTREACHED();
     return false;
   }
   return true;
 }
 }  // namespace webrtc
