OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <functional> | |
14 | 15 |
15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h" | 16 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h" |
16 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" | 17 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" |
17 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 18 #include "webrtc/modules/audio_processing/include/audio_processing.h" |
18 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
19 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" |
20 #include "webrtc/system_wrappers/include/trace.h" | 21 #include "webrtc/system_wrappers/include/trace.h" |
21 | 22 |
22 namespace webrtc { | 23 namespace webrtc { |
23 namespace { | 24 namespace { |
24 | 25 |
25 struct AudioSourceWithFrame { | 26 |
26 AudioSourceWithFrame(MixerAudioSource* p, AudioFrame* a, bool m) | 27 class AudioSourceAndFrame { |
ossu 2016/07/08 13:58:34: Maybe just SourceFrame is enough? You don't need t…
aleloi 2016/07/28 09:24:58: Done.
| |
27 : audio_source(p), audio_frame(a), muted(m) {} | 28 public: |
28 MixerAudioSource* audio_source; | 29 AudioSourceAndFrame(MixerAudioSource* p, |
29 AudioFrame* audio_frame; | 30 AudioFrame* a, |
30 bool muted; | 31 bool m, |
32 bool was_mixed_before) | |
33 : audio_source_(p), | |
34 audio_frame_(a), | |
35 muted_(m), | |
36 was_mixed_before_(was_mixed_before) { | |
37 if (!muted_) { | |
38 energy_ = CalculateEnergy(*a); | |
39 } | |
40 } | |
41 | |
42 // a.shouldMixBefore(b) is used to select mixer participants. | |
43 bool shouldMixBefore(const AudioSourceAndFrame& other) const { | |
44 if (muted_ != other.muted_) { | |
45 return other.muted_; | |
46 } | |
47 | |
48 auto our_activity = audio_frame_->vad_activity_; | |
49 auto other_activity = other.audio_frame_->vad_activity_; | |
50 | |
51 if (our_activity != other_activity) { | |
52 return (other_activity == AudioFrame::kVadPassive || | |
53 other_activity == AudioFrame::kVadUnknown) && | |
54 our_activity == AudioFrame::kVadActive; | |
ivoc 2016/07/27 15:14:04: Because we already know that other_activity is dif…
aleloi 2016/07/28 09:24:58: Well spotted! Done.
| |
55 } | |
56 | |
57 return energy_ > other.energy_; | |
58 } | |
59 | |
60 MixerAudioSource* audio_source_; | |
61 AudioFrame* audio_frame_; | |
62 bool muted_; | |
63 uint32_t energy_ = 0;  // Stays 0 when muted; otherwise set in the ctor. | |
64 bool was_mixed_before_; | |
31 }; | 65 }; |
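
The comparator above encodes a three-level priority: unmuted frames sort before muted ones, VAD-active before passive/unknown (since that branch only runs when the two activities differ, it reduces to `our_activity == AudioFrame::kVadActive`, the simplification ivoc's comment above refers to), and ties break on higher energy. A minimal standalone sketch of the same ordering, with a simplified stand-in struct rather than the real AudioSourceAndFrame:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Simplified stand-in for AudioSourceAndFrame (illustrative only).
struct Candidate {
  bool muted;
  bool vad_active;  // Collapses kVadPassive/kVadUnknown into false.
  uint32_t energy;
  bool was_mixed_before;

  bool ShouldMixBefore(const Candidate& other) const {
    if (muted != other.muted) return other.muted;           // Unmuted first.
    if (vad_active != other.vad_active) return vad_active;  // Active first.
    return energy > other.energy;                           // Loudest first.
  }
};

int main() {
  std::vector<Candidate> candidates = {
      {false, true, 10, false},   // Unmuted, active, quiet: mixes second.
      {true, false, 999, false},  // Muted: always sorts last.
      {false, true, 50, true}};   // Unmuted, active, loud: mixes first.
  // Same pattern as UpdateToMix(): sort, then take the first N for mixing.
  std::sort(candidates.begin(), candidates.end(),
            [](const Candidate& a, const Candidate& b) {
              return a.ShouldMixBefore(b);
            });
  return 0;
}
```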
32 | 66 |
33 typedef std::list<AudioSourceWithFrame*> AudioSourceWithFrameList; | 67 typedef std::list<AudioSourceAndFrame*> AudioSourceWithFrameList; |
ivoc 2016/07/27 15:14:04: Doesn't it make more sense to use a vector here?
aleloi 2016/07/28 09:24:58: It wasn't used. I removed it altogether.
| |
34 | 68 |
35 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. | 69 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. |
36 // These effects are applied to |frame| itself prior to mixing. Assumes that | 70 // These effects are applied to |frame| itself prior to mixing. Assumes that |
37 // |mixed_frame| always has at least as many channels as |frame|. Supports | 71 // |mixed_frame| always has at least as many channels as |frame|. Supports |
38 // stereo at most. | 72 // stereo at most. |
39 // | 73 // |
40 // TODO(andrew): consider not modifying |frame| here. | 74 // TODO(andrew): consider not modifying |frame| here. |
41 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 75 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { |
42 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); | 76 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); |
43 if (use_limiter) { | 77 if (use_limiter) { |
(...skipping 116 matching lines...)
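
Saturation protection matters in MixFrames() because summing two 16-bit signals can overflow; the mixer clamps rather than wraps. A sketch of a saturating sample-wise add (illustrative only; the real logic lives in the skipped body above and, when use_limiter is set, defers gain control to the AudioProcessing limiter):

```cpp
#include <cstddef>
#include <cstdint>
#include <limits>

// Mix |src| into |dst| sample by sample, clamping to the int16_t range.
void MixSaturated(int16_t* dst, const int16_t* src, size_t samples) {
  for (size_t i = 0; i < samples; ++i) {
    int32_t sum = static_cast<int32_t>(dst[i]) + src[i];
    if (sum > std::numeric_limits<int16_t>::max()) {
      sum = std::numeric_limits<int16_t>::max();
    } else if (sum < std::numeric_limits<int16_t>::min()) {
      sum = std::numeric_limits<int16_t>::min();
    }
    dst[i] = static_cast<int16_t>(sum);
  }
}
```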
160 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) | 194 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) |
161 return false; | 195 return false; |
162 | 196 |
163 return true; | 197 return true; |
164 } | 198 } |
165 | 199 |
166 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | 200 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { |
167 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; | 201 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; |
168 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 202 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
169 AudioFrameList mixList; | 203 AudioFrameList mixList; |
170 AudioFrameList rampOutList; | |
171 AudioFrameList additionalFramesList; | 204 AudioFrameList additionalFramesList; |
172 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; | 205 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; |
173 { | 206 { |
174 CriticalSectionScoped cs(_cbCrit.get()); | 207 CriticalSectionScoped cs(_cbCrit.get()); |
175 | 208 |
176 int32_t lowFreq = GetLowestMixingFrequency(); | 209 int32_t lowFreq = GetLowestMixingFrequency(); |
177 // SILK can run in 12 kHz and 24 kHz. These frequencies are not | 210 // SILK can run in 12 kHz and 24 kHz. These frequencies are not |
178 // supported so use the closest higher frequency to not lose any | 211 // supported so use the closest higher frequency to not lose any |
179 // information. | 212 // information. |
180 // TODO(aleloi): this is probably more appropriate to do in | 213 // TODO(aleloi): this is probably more appropriate to do in |
(...skipping 26 matching lines...)
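
The rounding described in the SILK comment above is "next supported rate at or above the input", so no audio bandwidth is discarded. A sketch of that mapping, under the assumption that the supported mixing rates are 8, 16, 32 and 48 kHz (only kFbInHz = 48000 is visible in this hunk; the rest of the switch is in the skipped lines):

```cpp
// Round |rate_hz| up to the nearest assumed-supported mixing rate.
int RoundUpToSupportedRate(int rate_hz) {
  for (int supported : {8000, 16000, 32000, 48000}) {
    if (rate_hz <= supported) return supported;
  }
  return 48000;  // Clamp anything higher to fullband.
}
// E.g. SILK's 12000 -> 16000 and 24000 -> 32000, as the comment describes.
```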
207 if (OutputFrequency() != kFbInHz) { | 240 if (OutputFrequency() != kFbInHz) { |
208 SetOutputFrequency(kFbInHz); | 241 SetOutputFrequency(kFbInHz); |
209 } | 242 } |
210 break; | 243 break; |
211 default: | 244 default: |
212 RTC_NOTREACHED(); | 245 RTC_NOTREACHED(); |
213 return; | 246 return; |
214 } | 247 } |
215 } | 248 } |
216 | 249 |
217 UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap, | 250 mixList = UpdateToMix(remainingAudioSourcesAllowedToMix); |
218 &remainingAudioSourcesAllowedToMix); | 251 remainingAudioSourcesAllowedToMix -= mixList.size(); |
219 | |
220 GetAdditionalAudio(&additionalFramesList); | 252 GetAdditionalAudio(&additionalFramesList); |
221 UpdateMixedStatus(mixedAudioSourcesMap); | |
222 } | 253 } |
223 | 254 |
224 // TODO(aleloi): it might be better to decide the number of channels | 255 // TODO(aleloi): it might be better to decide the number of channels |
225 // with an API instead of dynamically. | 256 // with an API instead of dynamically. |
226 | 257 |
227 // Find the max channels over all mixing lists. | 258 // Find the max channels over all mixing lists. |
228 const size_t num_mixed_channels = std::max( | 259 const size_t num_mixed_channels = |
229 MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList), | 260 std::max(MaxNumChannels(&mixList), MaxNumChannels(&additionalFramesList)); |
230 MaxNumChannels(&rampOutList))); | |
231 | 261 |
232 audio_frame_for_mixing->UpdateFrame( | 262 audio_frame_for_mixing->UpdateFrame( |
233 -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech, | 263 -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech, |
234 AudioFrame::kVadPassive, num_mixed_channels); | 264 AudioFrame::kVadPassive, num_mixed_channels); |
235 | 265 |
236 _timeStamp += static_cast<uint32_t>(_sampleSize); | 266 _timeStamp += static_cast<uint32_t>(_sampleSize); |
237 | 267 |
238 use_limiter_ = num_mixed_audio_sources_ > 1 && | 268 use_limiter_ = num_mixed_audio_sources_ > 1 && |
239 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; | 269 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; |
240 | 270 |
241 // We only use the limiter if it supports the output sample rate and | 271 // We only use the limiter if it supports the output sample rate and |
242 // we're actually mixing multiple streams. | 272 // we're actually mixing multiple streams. |
243 MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_); | 273 MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_); |
244 | 274 |
245 { | 275 { |
246 CriticalSectionScoped cs(_crit.get()); | 276 CriticalSectionScoped cs(_crit.get()); |
247 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); | 277 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); |
248 MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList); | |
249 | 278 |
250 if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 279 if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
251 // Nothing was mixed, set the audio samples to silence. | 280 // Nothing was mixed, set the audio samples to silence. |
252 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; | 281 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; |
253 audio_frame_for_mixing->Mute(); | 282 audio_frame_for_mixing->Mute(); |
254 } else { | 283 } else { |
255 // Only call the limiter if we have something to mix. | 284 // Only call the limiter if we have something to mix. |
256 LimitMixedAudio(audio_frame_for_mixing); | 285 LimitMixedAudio(audio_frame_for_mixing); |
257 } | 286 } |
258 } | 287 } |
259 | |
260 ClearAudioFrameList(&mixList); | |
261 ClearAudioFrameList(&rampOutList); | |
262 ClearAudioFrameList(&additionalFramesList); | |
263 return; | 288 return; |
264 } | 289 } |
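
The tail of Mix() makes the empty case explicit: if no source contributed samples, the output frame gets its nominal duration back and is muted to silence, and the limiter only ever runs on real audio. A condensed restatement of that branch (a sketch with simplified names; the limiter call is abstracted behind a std::function, and the AudioFrame header path is an assumption):

```cpp
#include <cstddef>
#include <functional>

#include "webrtc/modules/include/module_common_types.h"  // webrtc::AudioFrame.

// Sketch of Mix()'s finalization step.
void FinalizeMixedFrame(webrtc::AudioFrame* frame,
                        size_t sample_size,
                        const std::function<bool(webrtc::AudioFrame*)>& limit) {
  if (frame->samples_per_channel_ == 0) {
    frame->samples_per_channel_ = sample_size;  // Restore the tick duration.
    frame->Mute();                              // Nothing was mixed: silence.
  } else {
    limit(frame);  // Only run the limiter when there is audio to limit.
  }
}
```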
265 | 290 |
266 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 291 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( |
267 const Frequency& frequency) { | 292 const Frequency& frequency) { |
268 CriticalSectionScoped cs(_crit.get()); | 293 CriticalSectionScoped cs(_crit.get()); |
269 | 294 |
270 _outputFrequency = frequency; | 295 _outputFrequency = frequency; |
271 _sampleSize = | 296 _sampleSize = |
272 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); | 297 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); |
(...skipping 146 matching lines...)
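
In SetOutputFrequency() above, _sampleSize is samples per channel per mixer tick. With kProcessPeriodicityInMs presumably 10 (the standard WebRTC frame period; the constant's definition is outside the lines shown), a 48 kHz output gives _sampleSize = 48000 * 10 / 1000 = 480.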
419 for (MixerAudioSourceList::const_iterator iter = mixList.begin(); | 444 for (MixerAudioSourceList::const_iterator iter = mixList.begin(); |
420 iter != mixList.end(); ++iter) { | 445 iter != mixList.end(); ++iter) { |
421 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); | 446 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); |
422 if (neededFrequency > highestFreq) { | 447 if (neededFrequency > highestFreq) { |
423 highestFreq = neededFrequency; | 448 highestFreq = neededFrequency; |
424 } | 449 } |
425 } | 450 } |
426 return highestFreq; | 451 return highestFreq; |
427 } | 452 } |
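
The loop just above is a plain max-reduction over NeededFrequency(). For comparison, an <algorithm> equivalent as it would read inside the same member function (a sketch, not a proposed change; std::max_element would call NeededFrequency() twice per comparison, so the hand-rolled loop is arguably the better fit):

```cpp
#include <algorithm>

// Sketch: the same reduction via std::max_element, using the surrounding
// function's |mixList|, |highestFreq| and |_id|.
const auto loudest = std::max_element(
    mixList.begin(), mixList.end(),
    [this](MixerAudioSource* a, MixerAudioSource* b) {
      return a->NeededFrequency(_id) < b->NeededFrequency(_id);
    });
if (loudest != mixList.end()) {
  highestFreq = std::max(highestFreq, (*loudest)->NeededFrequency(_id));
}
```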
428 | 453 |
429 void NewAudioConferenceMixerImpl::UpdateToMix( | |
430 AudioFrameList* mixList, | |
431 AudioFrameList* rampOutList, | |
432 std::map<int, MixerAudioSource*>* mixAudioSourceList, | |
433 size_t* maxAudioFrameCounter) const { | |
434 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | |
435 "UpdateToMix(mixList,rampOutList,mixAudioSourceList,%d)", | |
436 *maxAudioFrameCounter); | |
437 const size_t mixListStartSize = mixList->size(); | |
438 AudioFrameList activeList; | |
439 // Struct needed by the passive lists to keep track of which AudioFrame | |
440 // belongs to which MixerAudioSource. | |
441 AudioSourceWithFrameList passiveWasNotMixedList; | |
442 AudioSourceWithFrameList passiveWasMixedList; | |
443 for (MixerAudioSourceList::const_iterator audio_source = | |
444 audio_source_list_.begin(); | |
445 audio_source != audio_source_list_.end(); ++audio_source) { | |
446 // Stop keeping track of passive audioSources if there are already | |
447 // enough audio sources available (they wont be mixed anyway). | |
448 bool mustAddToPassiveList = | |
449 (*maxAudioFrameCounter > | |
450 (activeList.size() + passiveWasMixedList.size() + | |
451 passiveWasNotMixedList.size())); | |
452 | 454 |
453 bool wasMixed = false; | 455 AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix( |
454 wasMixed = (*audio_source)->_mixHistory->WasMixed(); | 456 size_t maxAudioFrameCounter) const { |
457 AudioFrameList result; | |
458 std::vector<AudioSourceAndFrame> audioSourceMixingDataList; | |
455 | 459 |
456 auto audio_frame_with_info = | 460 // Get audio source audio and put it in the struct vector. |
457 (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency); | 461 for (MixerAudioSource* audio_source : audio_source_list_) { |
458 auto ret = audio_frame_with_info.audio_frame_info; | 462 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( |
459 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 463 _id, static_cast<int>(_outputFrequency)); |
460 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 464 |
465 auto audio_frame_info = audio_frame_with_info.audio_frame_info; | |
466 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | |
467 | |
468 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { | |
469 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | |
470 "failed to GetAudioFrameWithMuted() from participant"); | |
461 continue; | 471 continue; |
462 } | 472 } |
463 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); | 473 audioSourceMixingDataList.emplace_back( |
464 if (audio_source_list_.size() != 1) { | 474 audio_source, |
465 // TODO(wu): Issue 3390, add support for multiple audio sources case. | 475 audio_source_audio_frame, |
466 audio_frame->ntp_time_ms_ = -1; | 476 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, |
477 audio_source->_mixHistory->WasMixed()); | |
478 } | |
479 | |
480 // Sort frames by sorting function. | |
481 std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(), | |
482 std::mem_fn(&AudioSourceAndFrame::shouldMixBefore)); | |
483 | |
484 // Go through list in order and put things in mixList. | |
485 for (AudioSourceAndFrame& p : audioSourceMixingDataList) { | |
486 // Filter muted. | |
487 if (p.muted_) { | |
488 p.audio_source_->_mixHistory->SetIsMixed(false); | |
489 continue; | |
467 } | 490 } |
468 | 491 |
469 // TODO(aleloi): this assert triggers in some test cases where SRTP is | 492 // Add frame to result vector for mixing. |
470 // used which prevents NetEQ from making a VAD. Temporarily disable this | 493 bool is_mixed = false; |
471 // assert until the problem is fixed on a higher level. | 494 if (maxAudioFrameCounter > 0) { |
472 // RTC_DCHECK_NE(audio_frame->vad_activity_, AudioFrame::kVadUnknown); | 495 --maxAudioFrameCounter; |
473 if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) { | 496 if (!p.was_mixed_before_) { |
474 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 497 RampIn(*p.audio_frame_); |
475 "invalid VAD state from audio source"); | 498 } |
499 result.emplace_back(p.audio_frame_, false); | |
500 is_mixed = true; | |
476 } | 501 } |
477 | 502 |
478 if (audio_frame->vad_activity_ == AudioFrame::kVadActive) { | 503 // Ramp out unmuted. |
479 if (!wasMixed && !muted) { | 504 if (p.was_mixed_before_ && !is_mixed) { |
480 RampIn(*audio_frame); | 505 RampOut(*p.audio_frame_); |
481 } | 506 result.emplace_back(p.audio_frame_, false); |
507 } | |
482 | 508 |
483 if (activeList.size() >= *maxAudioFrameCounter) { | 509 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); |
484 // There are already more active audio sources than should be | |
485 // mixed. Only keep the ones with the highest energy. | |
486 AudioFrameList::iterator replaceItem; | |
487 uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame); | |
488 | |
489 bool found_replace_item = false; | |
490 for (AudioFrameList::iterator iter = activeList.begin(); | |
491 iter != activeList.end(); ++iter) { | |
492 const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame); | |
493 if (energy < lowestEnergy) { | |
494 replaceItem = iter; | |
495 lowestEnergy = energy; | |
496 found_replace_item = true; | |
497 } | |
498 } | |
499 if (found_replace_item) { | |
500 RTC_DCHECK(!muted); // Cannot replace with a muted frame. | |
501 FrameAndMuteInfo replaceFrame = *replaceItem; | |
502 | |
503 bool replaceWasMixed = false; | |
504 std::map<int, MixerAudioSource*>::const_iterator it = | |
505 mixAudioSourceList->find(replaceFrame.frame->id_); | |
506 | |
507 // When a frame is pushed to |activeList| it is also pushed | |
508 // to mixAudioSourceList with the frame's id. This means | |
509 // that the Find call above should never fail. | |
510 RTC_DCHECK(it != mixAudioSourceList->end()); | |
511 replaceWasMixed = it->second->_mixHistory->WasMixed(); | |
512 | |
513 mixAudioSourceList->erase(replaceFrame.frame->id_); | |
514 activeList.erase(replaceItem); | |
515 | |
516 activeList.push_front(FrameAndMuteInfo(audio_frame, muted)); | |
517 (*mixAudioSourceList)[audio_frame->id_] = *audio_source; | |
518 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
519 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
520 | |
521 if (replaceWasMixed) { | |
522 if (!replaceFrame.muted) { | |
523 RampOut(*replaceFrame.frame); | |
524 } | |
525 rampOutList->push_back(replaceFrame); | |
526 RTC_DCHECK_LE( | |
527 rampOutList->size(), | |
528 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
529 } | |
530 } else { | |
531 if (wasMixed) { | |
532 if (!muted) { | |
533 RampOut(*audio_frame); | |
534 } | |
535 rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted)); | |
536 RTC_DCHECK_LE( | |
537 rampOutList->size(), | |
538 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
539 } | |
540 } | |
541 } else { | |
542 activeList.push_front(FrameAndMuteInfo(audio_frame, muted)); | |
543 (*mixAudioSourceList)[audio_frame->id_] = *audio_source; | |
544 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
545 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
546 } | |
547 } else { | |
548 if (wasMixed) { | |
549 AudioSourceWithFrame* part_struct = | |
550 new AudioSourceWithFrame(*audio_source, audio_frame, muted); | |
551 passiveWasMixedList.push_back(part_struct); | |
552 } else if (mustAddToPassiveList) { | |
553 if (!muted) { | |
554 RampIn(*audio_frame); | |
555 } | |
556 AudioSourceWithFrame* part_struct = | |
557 new AudioSourceWithFrame(*audio_source, audio_frame, muted); | |
558 passiveWasNotMixedList.push_back(part_struct); | |
559 } | |
560 } | |
561 } | 510 } |
562 RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter); | 511 return result; |
563 // At this point it is known which audio sources should be mixed. Transfer | |
564 // this information to this functions output parameters. | |
565 for (AudioFrameList::const_iterator iter = activeList.begin(); | |
566 iter != activeList.end(); ++iter) { | |
567 mixList->push_back(*iter); | |
568 } | |
569 activeList.clear(); | |
570 // Always mix a constant number of AudioFrames. If there aren't enough | |
571 // active audio sources mix passive ones. Starting with those that was mixed | |
572 // last iteration. | |
573 for (AudioSourceWithFrameList::const_iterator iter = | |
574 passiveWasMixedList.begin(); | |
575 iter != passiveWasMixedList.end(); ++iter) { | |
576 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | |
577 mixList->push_back( | |
578 FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted)); | |
579 (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source; | |
580 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
581 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
582 } | |
583 delete *iter; | |
584 } | |
585 // And finally the ones that have not been mixed for a while. | |
586 for (AudioSourceWithFrameList::const_iterator iter = | |
587 passiveWasNotMixedList.begin(); | |
588 iter != passiveWasNotMixedList.end(); ++iter) { | |
589 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | |
590 mixList->push_back( | |
591 FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted)); | |
592 (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source; | |
593 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
594 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
595 } | |
596 delete *iter; | |
597 } | |
598 RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size()); | |
599 *maxAudioFrameCounter += mixListStartSize - mixList->size(); | |
600 } | 512 } |
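
The rewritten UpdateToMix() replaces the old replace-lowest-energy bookkeeping with a single sort-then-scan: gather every source's frame, order by shouldMixBefore, mix the first maxAudioFrameCounter unmuted frames (ramping in newcomers), and give sources that were mixed last round but lost their slot one final ramped-out frame. A condensed sketch of that selection core, reusing the Candidate struct from the sketch near the top of the file:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Returns pointers to the candidates contributing to this round's mix.
std::vector<Candidate*> SelectForMix(std::vector<Candidate>* candidates,
                                     size_t max_mixed) {
  std::sort(candidates->begin(), candidates->end(),
            [](const Candidate& a, const Candidate& b) {
              return a.ShouldMixBefore(b);
            });
  std::vector<Candidate*> selected;
  for (Candidate& c : *candidates) {
    if (c.muted) {
      continue;  // Muted: marked not-mixed in the real code, never ramped.
    }
    if (selected.size() < max_mixed) {
      selected.push_back(&c);  // Newcomers get RampIn() in the real code.
    } else if (c.was_mixed_before) {
      selected.push_back(&c);  // Slot lost: RampOut(), then one last
    }                          // fading contribution to the mix.
  }
  return selected;
}
```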
601 | 513 |
602 void NewAudioConferenceMixerImpl::GetAdditionalAudio( | 514 void NewAudioConferenceMixerImpl::GetAdditionalAudio( |
603 AudioFrameList* additionalFramesList) const { | 515 AudioFrameList* additionalFramesList) const { |
604 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 516 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
605 "GetAdditionalAudio(additionalFramesList)"); | 517 "GetAdditionalAudio(additionalFramesList)"); |
606 // The GetAudioFrameWithMuted() callback may result in the audio source being | 518 // The GetAudioFrameWithMuted() callback may result in the audio source being |
607 // removed from additionalAudioFramesList_. If that happens it will | 519 // removed from additionalAudioFramesList_. If that happens it will |
608 // invalidate any iterators. Create a copy of the audio sources list such | 520 // invalidate any iterators. Create a copy of the audio sources list such |
609 // that the list of participants can be traversed safely. | 521 // that the list of participants can be traversed safely. |
610 MixerAudioSourceList additionalAudioSourceList; | 522 MixerAudioSourceList additionalAudioSourceList; |
611 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), | 523 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), |
612 additional_audio_source_list_.begin(), | 524 additional_audio_source_list_.begin(), |
613 additional_audio_source_list_.end()); | 525 additional_audio_source_list_.end()); |
614 | 526 |
615 for (MixerAudioSourceList::const_iterator audio_source = | 527 for (MixerAudioSourceList::const_iterator audio_source = |
616 additionalAudioSourceList.begin(); | 528 additionalAudioSourceList.begin(); |
617 audio_source != additionalAudioSourceList.end(); ++audio_source) { | 529 audio_source != additionalAudioSourceList.end(); ++audio_source) { |
618 auto audio_frame_with_info = | 530 auto audio_frame_with_info = |
619 (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency); | 531 (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency); |
620 auto ret = audio_frame_with_info.audio_frame_info; | 532 auto ret = audio_frame_with_info.audio_frame_info; |
621 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 533 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; |
534 | |
622 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 535 if (ret == MixerAudioSource::AudioFrameInfo::kError) { |
623 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 536 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
624 "failed to GetAudioFrameWithMuted() from audio_source"); | 537 "failed to GetAudioFrameWithMuted() from audio_source"); |
625 continue; | 538 continue; |
626 } | 539 } |
627 if (audio_frame->samples_per_channel_ == 0) { | 540 if (audio_frame->samples_per_channel_ == 0) { |
628 // Empty frame. Don't use it. | 541 // Empty frame. Don't use it. |
629 continue; | 542 continue; |
630 } | 543 } |
631 additionalFramesList->push_back(FrameAndMuteInfo( | 544 additionalFramesList->push_back(FrameAndMuteInfo( |
632 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | 545 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); |
633 } | 546 } |
634 } | 547 } |
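
The defensive copy above guards against a classic pitfall: a callback invoked during iteration may remove elements from the very list being traversed, invalidating live iterators. A generic illustration of the copy-then-iterate pattern (not mixer-specific):

```cpp
#include <functional>
#include <list>

// Iterate over a snapshot so |callback| may erase from |items| freely.
void ForEachSafely(std::list<int>* items,
                   const std::function<void(int)>& callback) {
  const std::list<int> snapshot(items->begin(), items->end());
  for (int item : snapshot) {
    callback(item);  // Mutating |items| cannot invalidate |snapshot|.
  }
}
```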
635 | 548 |
636 void NewAudioConferenceMixerImpl::UpdateMixedStatus( | |
637 const std::map<int, MixerAudioSource*>& mixedAudioSourcesMap) const { | |
638 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | |
639 "UpdateMixedStatus(mixedAudioSourcesMap)"); | |
640 RTC_DCHECK_LE(mixedAudioSourcesMap.size(), | |
641 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
642 | |
643 // Loop through all audio_sources. If they are in the mix map they | |
644 // were mixed. | |
645 for (MixerAudioSourceList::const_iterator audio_source = | |
646 audio_source_list_.begin(); | |
647 audio_source != audio_source_list_.end(); ++audio_source) { | |
648 bool isMixed = false; | |
649 for (std::map<int, MixerAudioSource*>::const_iterator it = | |
650 mixedAudioSourcesMap.begin(); | |
651 it != mixedAudioSourcesMap.end(); ++it) { | |
652 if (it->second == *audio_source) { | |
653 isMixed = true; | |
654 break; | |
655 } | |
656 } | |
657 (*audio_source)->_mixHistory->SetIsMixed(isMixed); | |
658 } | |
659 } | |
660 | |
661 void NewAudioConferenceMixerImpl::ClearAudioFrameList( | |
662 AudioFrameList* audioFrameList) const { | |
663 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | |
664 "ClearAudioFrameList(audioFrameList)"); | |
665 audioFrameList->clear(); | |
666 } | |
667 | |
668 bool NewAudioConferenceMixerImpl::IsAudioSourceInList( | 549 bool NewAudioConferenceMixerImpl::IsAudioSourceInList( |
669 const MixerAudioSource& audio_source, | 550 const MixerAudioSource& audio_source, |
670 const MixerAudioSourceList& audioSourceList) const { | 551 const MixerAudioSourceList& audioSourceList) const { |
671 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 552 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
672 "IsAudioSourceInList(audio_source,audioSourceList)"); | 553 "IsAudioSourceInList(audio_source,audioSourceList)"); |
673 for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin(); | 554 for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin(); |
674 iter != audioSourceList.end(); ++iter) { | 555 iter != audioSourceList.end(); ++iter) { |
675 if (&audio_source == *iter) { | 556 if (&audio_source == *iter) { |
676 return true; | 557 return true; |
677 } | 558 } |
(...skipping 115 matching lines...)
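
IsAudioSourceInList() above is a linear pointer search; the same check can be written with std::find (a sketch of an equivalent, assuming MixerAudioSourceList holds MixerAudioSource* as the loop implies):

```cpp
#include <algorithm>

// Sketch: equivalent membership test via <algorithm>.
bool Contains(const MixerAudioSourceList& list,
              const MixerAudioSource& audio_source) {
  return std::find(list.begin(), list.end(), &audio_source) != list.end();
}
```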
793 | 674 |
794 if (error != _limiter->kNoError) { | 675 if (error != _limiter->kNoError) { |
795 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 676 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
796 "Error from AudioProcessing: %d", error); | 677 "Error from AudioProcessing: %d", error); |
797 RTC_NOTREACHED(); | 678 RTC_NOTREACHED(); |
798 return false; | 679 return false; |
799 } | 680 } |
800 return true; | 681 return true; |
801 } | 682 } |
802 } // namespace webrtc | 683 } // namespace webrtc |