| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" |
| 12 | 12 |
| 13 #include <algorithm> | 13 #include <algorithm> |
| 14 #include <functional> |
| 14 | 15 |
| 15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h
" | 16 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h
" |
| 16 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" | 17 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" |
| 17 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 18 #include "webrtc/modules/audio_processing/include/audio_processing.h" |
| 18 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
| 19 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" |
| 20 #include "webrtc/system_wrappers/include/trace.h" | 21 #include "webrtc/system_wrappers/include/trace.h" |
| 21 | 22 |
| 22 namespace webrtc { | 23 namespace webrtc { |
| 23 namespace { | 24 namespace { |
| 24 | 25 |
| 25 struct AudioSourceWithFrame { | 26 class SourceFrame { |
| 26 AudioSourceWithFrame(MixerAudioSource* p, AudioFrame* a, bool m) | 27 public: |
| 27 : audio_source(p), audio_frame(a), muted(m) {} | 28 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) |
| 28 MixerAudioSource* audio_source; | 29 : audio_source_(p), |
| 29 AudioFrame* audio_frame; | 30 audio_frame_(a), |
| 30 bool muted; | 31 muted_(m), |
| 32 was_mixed_before_(was_mixed_before) { |
| 33 if (!muted_) { |
| 34 energy_ = CalculateEnergy(*a); |
| 35 } |
| 36 } |
| 37 |
| 38 // a.shouldMixBefore(b) returns true when |a| should be mixed before |b|; used to rank mixer participants. |
| 39 bool shouldMixBefore(const SourceFrame& other) const { |
| 40 if (muted_ != other.muted_) { |
| 41 return other.muted_; |
| 42 } |
| 43 |
| 44 auto our_activity = audio_frame_->vad_activity_; |
| 45 auto other_activity = other.audio_frame_->vad_activity_; |
| 46 |
| 47 if (our_activity != other_activity) { |
| 48 return our_activity == AudioFrame::kVadActive; |
| 49 } |
| 50 |
| 51 return energy_ > other.energy_; |
| 52 } |
| 53 |
| 54 MixerAudioSource* audio_source_; |
| 55 AudioFrame* audio_frame_; |
| 56 bool muted_; |
| 57 uint32_t energy_ = 0; // Stays 0 for muted frames; the ctor skips CalculateEnergy() for them. |
| 58 bool was_mixed_before_; |
| 31 }; | 59 }; |
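The comparator above encodes a three-tier mixing priority: unmuted frames outrank muted ones, VAD-active frames outrank passive ones, and higher energy breaks the remaining ties. A minimal standalone sketch of the same ordering, usable with std::sort exactly as this CL does further down (SimpleFrame and its fields are hypothetical stand-ins for SourceFrame, not code from this CL):

    #include <algorithm>
    #include <cstdint>
    #include <functional>
    #include <vector>

    // Hypothetical stand-in mirroring SourceFrame's three-tier ordering.
    struct SimpleFrame {
      bool muted;
      bool vad_active;
      uint32_t energy;

      bool ShouldMixBefore(const SimpleFrame& other) const {
        if (muted != other.muted)
          return other.muted;          // An unmuted frame outranks a muted one.
        if (vad_active != other.vad_active)
          return vad_active;           // Active speech outranks passive audio.
        return energy > other.energy;  // Otherwise the louder frame wins.
      }
    };

    int main() {
      std::vector<SimpleFrame> frames = {{true, false, 0},
                                         {false, true, 10},
                                         {false, true, 50},
                                         {false, false, 90}};
      // Same pattern the CL uses below: a member-function pointer as the
      // strict-weak-ordering predicate for std::sort.
      std::sort(frames.begin(), frames.end(),
                std::mem_fn(&SimpleFrame::ShouldMixBefore));
      // Resulting order: {active, 50}, {active, 10}, {passive, 90}, {muted}.
      return 0;
    }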
| 32 | 60 |
| 33 typedef std::list<AudioSourceWithFrame*> AudioSourceWithFrameList; | |
| 34 | |
| 35 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. | 61 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. |
| 36 // These effects are applied to |frame| itself prior to mixing. Assumes that | 62 // These effects are applied to |frame| itself prior to mixing. Assumes that |
| 37 // |mixed_frame| always has at least as many channels as |frame|. Supports | 63 // |mixed_frame| always has at least as many channels as |frame|. Supports |
| 38 // stereo at most. | 64 // stereo at most. |
| 39 // | 65 // |
| 40 // TODO(andrew): consider not modifying |frame| here. | 66 // TODO(andrew): consider not modifying |frame| here. |
| 41 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 67 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { |
| 42 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); | 68 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); |
| 43 if (use_limiter) { | 69 if (use_limiter) { |
| 44 // Divide by two to avoid saturation in the mixing. | 70 // Divide by two to avoid saturation in the mixing. |
| (...skipping 115 matching lines...) |
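The remainder of MixFrames() is collapsed in this hunk. The visible idea is saturation protection: when the limiter is in use, each contributing frame is halved before the add so the int16 sum cannot wrap, and the shared AudioProcessing limiter later restores the level. A rough sketch of that halve-then-add step, assuming mono int16 buffers (the function name and signature are illustrative; the real non-limiter path uses a saturating add instead of a plain one):

    #include <cstddef>
    #include <cstdint>

    // Illustrative only: the real MixFrames() also upmixes mono to stereo
    // and leaves gain restoration to the shared AudioProcessing limiter.
    void AddWithSaturationProtection(int16_t* mixed, const int16_t* frame,
                                     size_t samples, bool use_limiter) {
      for (size_t i = 0; i < samples; ++i) {
        // Halve the incoming frame so adding two streams stays within int16.
        const int16_t sample = use_limiter ? frame[i] / 2 : frame[i];
        mixed[i] += sample;
      }
    }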
| 160 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) | 186 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) |
| 161 return false; | 187 return false; |
| 162 | 188 |
| 163 return true; | 189 return true; |
| 164 } | 190 } |
| 165 | 191 |
| 166 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | 192 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { |
| 167 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; | 193 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; |
| 168 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 194 RTC_DCHECK(thread_checker_.CalledOnValidThread()); |
| 169 AudioFrameList mixList; | 195 AudioFrameList mixList; |
| 170 AudioFrameList rampOutList; | |
| 171 AudioFrameList additionalFramesList; | 196 AudioFrameList additionalFramesList; |
| 172 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; | 197 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; |
| 173 { | 198 { |
| 174 CriticalSectionScoped cs(_cbCrit.get()); | 199 CriticalSectionScoped cs(_cbCrit.get()); |
| 175 | 200 |
| 176 int32_t lowFreq = GetLowestMixingFrequency(); | 201 int32_t lowFreq = GetLowestMixingFrequency(); |
| 177 // SILK can run at 12 kHz and 24 kHz. These frequencies are not | 202 // SILK can run at 12 kHz and 24 kHz. These frequencies are not |
| 178 // supported here, so round up to the closest higher supported | 203 // supported here, so round up to the closest higher supported |
| 179 // frequency to avoid losing information. | 204 // frequency to avoid losing information. |
| 180 // TODO(aleloi): this is probably more appropriate to do in | 205 // TODO(aleloi): this is probably more appropriate to do in |
| (...skipping 26 matching lines...) |
| 207 if (OutputFrequency() != kFbInHz) { | 232 if (OutputFrequency() != kFbInHz) { |
| 208 SetOutputFrequency(kFbInHz); | 233 SetOutputFrequency(kFbInHz); |
| 209 } | 234 } |
| 210 break; | 235 break; |
| 211 default: | 236 default: |
| 212 RTC_NOTREACHED(); | 237 RTC_NOTREACHED(); |
| 213 return; | 238 return; |
| 214 } | 239 } |
| 215 } | 240 } |
| 216 | 241 |
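The partly collapsed switch above rounds the lowest required mixing frequency up to the nearest supported native rate, per the SILK comment (12 kHz becomes wideband, 24 kHz becomes super-wideband). A compact sketch of that rounding policy (the helper name and the rate table are assumptions, not code from this CL):

    // Round a codec rate up to the nearest rate the mixer can output, so no
    // information is lost (e.g. SILK's 12000 -> 16000 and 24000 -> 32000).
    int ClosestSupportedRateHz(int rate_hz) {
      static const int kSupportedRatesHz[] = {8000, 16000, 32000, 48000};
      for (int supported : kSupportedRatesHz) {
        if (rate_hz <= supported)
          return supported;
      }
      return 48000;  // Anything higher is clamped to fullband.
    }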
| 217 UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap, | 242 mixList = UpdateToMix(remainingAudioSourcesAllowedToMix); |
| 218 &remainingAudioSourcesAllowedToMix); | 243 remainingAudioSourcesAllowedToMix -= mixList.size(); |
| 219 | |
| 220 GetAdditionalAudio(&additionalFramesList); | 244 GetAdditionalAudio(&additionalFramesList); |
| 221 UpdateMixedStatus(mixedAudioSourcesMap); | |
| 222 } | 245 } |
| 223 | 246 |
| 224 // TODO(aleloi): it might be better to decide the number of channels | 247 // TODO(aleloi): it might be better to decide the number of channels |
| 225 // with an API instead of dynamically. | 248 // with an API instead of dynamically. |
| 226 | 249 |
| 227 // Find the max channels over all mixing lists. | 250 // Find the max channels over all mixing lists. |
| 228 const size_t num_mixed_channels = std::max( | 251 const size_t num_mixed_channels = |
| 229 MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList), | 252 std::max(MaxNumChannels(&mixList), MaxNumChannels(&additionalFramesList)); |
| 230 MaxNumChannels(&rampOutList))); | |
| 231 | 253 |
| 232 audio_frame_for_mixing->UpdateFrame( | 254 audio_frame_for_mixing->UpdateFrame( |
| 233 -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech, | 255 -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech, |
| 234 AudioFrame::kVadPassive, num_mixed_channels); | 256 AudioFrame::kVadPassive, num_mixed_channels); |
| 235 | 257 |
| 236 _timeStamp += static_cast<uint32_t>(_sampleSize); | 258 _timeStamp += static_cast<uint32_t>(_sampleSize); |
| 237 | 259 |
| 238 use_limiter_ = num_mixed_audio_sources_ > 1 && | 260 use_limiter_ = num_mixed_audio_sources_ > 1 && |
| 239 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; | 261 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; |
| 240 | 262 |
| 241 // We only use the limiter if it supports the output sample rate and | 263 // We only use the limiter if it supports the output sample rate and |
| 242 // we're actually mixing multiple streams. | 264 // we're actually mixing multiple streams. |
| 243 MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_); | 265 MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_); |
| 244 | 266 |
| 245 { | 267 { |
| 246 CriticalSectionScoped cs(_crit.get()); | 268 CriticalSectionScoped cs(_crit.get()); |
| 247 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); | 269 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); |
| 248 MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList); | |
| 249 | 270 |
| 250 if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 271 if (audio_frame_for_mixing->samples_per_channel_ == 0) { |
| 251 // Nothing was mixed, set the audio samples to silence. | 272 // Nothing was mixed, set the audio samples to silence. |
| 252 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; | 273 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; |
| 253 audio_frame_for_mixing->Mute(); | 274 audio_frame_for_mixing->Mute(); |
| 254 } else { | 275 } else { |
| 255 // Only call the limiter if we have something to mix. | 276 // Only call the limiter if we have something to mix. |
| 256 LimitMixedAudio(audio_frame_for_mixing); | 277 LimitMixedAudio(audio_frame_for_mixing); |
| 257 } | 278 } |
| 258 } | 279 } |
| 259 | |
| 260 ClearAudioFrameList(&mixList); | |
| 261 ClearAudioFrameList(&rampOutList); | |
| 262 ClearAudioFrameList(&additionalFramesList); | |
| 263 return; | 280 return; |
| 264 } | 281 } |
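After this change, Mix() runs in four stages: choose frames with UpdateToMix(), fetch the anonymous audio, mix the winners with optional limiting, and mute the output when nothing contributed. A hypothetical caller, driving the mixer once per process period on its single valid thread (this wrapper is invented for illustration and is not part of the CL):

    #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h"

    // Hypothetical driver: Mix() DCHECKs that it is always called on the same
    // thread, so this would run on the mixer's one processing thread.
    void MixOnce(webrtc::NewAudioConferenceMixerImpl* mixer) {
      webrtc::AudioFrame mixed;
      mixer->Mix(&mixed);
      // |mixed| now holds either the limited mix or _sampleSize muted samples
      // per channel; hand it to the playout path from here.
    }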
| 265 | 282 |
| 266 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 283 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( |
| 267 const Frequency& frequency) { | 284 const Frequency& frequency) { |
| 268 CriticalSectionScoped cs(_crit.get()); | 285 CriticalSectionScoped cs(_crit.get()); |
| 269 | 286 |
| 270 _outputFrequency = frequency; | 287 _outputFrequency = frequency; |
| 271 _sampleSize = | 288 _sampleSize = |
| 272 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); | 289 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); |
| (...skipping 146 matching lines...) |
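The _sampleSize arithmetic above is samples per process period: at the 10 ms periodicity, 48 kHz yields 480 samples per channel and 16 kHz yields 160. A compile-time check of the formula (the helper function is hypothetical; the constant name mirrors the one used in the code):

    #include <cstddef>

    constexpr int kProcessPeriodicityInMs = 10;  // Same constant as the mixer.

    constexpr size_t SamplesPerPeriod(int frequency_hz) {
      return static_cast<size_t>((frequency_hz * kProcessPeriodicityInMs) / 1000);
    }

    static_assert(SamplesPerPeriod(48000) == 480, "fullband, 10 ms");
    static_assert(SamplesPerPeriod(16000) == 160, "wideband, 10 ms");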
| 419 for (MixerAudioSourceList::const_iterator iter = mixList.begin(); | 436 for (MixerAudioSourceList::const_iterator iter = mixList.begin(); |
| 420 iter != mixList.end(); ++iter) { | 437 iter != mixList.end(); ++iter) { |
| 421 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); | 438 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); |
| 422 if (neededFrequency > highestFreq) { | 439 if (neededFrequency > highestFreq) { |
| 423 highestFreq = neededFrequency; | 440 highestFreq = neededFrequency; |
| 424 } | 441 } |
| 425 } | 442 } |
| 426 return highestFreq; | 443 return highestFreq; |
| 427 } | 444 } |
| 428 | 445 |
| 429 void NewAudioConferenceMixerImpl::UpdateToMix( | 446 AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix( |
| 430 AudioFrameList* mixList, | 447 size_t maxAudioFrameCounter) const { |
| 431 AudioFrameList* rampOutList, | 448 AudioFrameList result; |
| 432 std::map<int, MixerAudioSource*>* mixAudioSourceList, | 449 std::vector<SourceFrame> audioSourceMixingDataList; |
| 433 size_t* maxAudioFrameCounter) const { | |
| 434 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | |
| 435 "UpdateToMix(mixList,rampOutList,mixAudioSourceList,%d)", | |
| 436 *maxAudioFrameCounter); | |
| 437 const size_t mixListStartSize = mixList->size(); | |
| 438 AudioFrameList activeList; | |
| 439 // Struct needed by the passive lists to keep track of which AudioFrame | |
| 440 // belongs to which MixerAudioSource. | |
| 441 AudioSourceWithFrameList passiveWasNotMixedList; | |
| 442 AudioSourceWithFrameList passiveWasMixedList; | |
| 443 for (MixerAudioSourceList::const_iterator audio_source = | |
| 444 audio_source_list_.begin(); | |
| 445 audio_source != audio_source_list_.end(); ++audio_source) { | |
| 446 // Stop keeping track of passive audioSources if there are already | |
| 447 // enough audio sources available (they wont be mixed anyway). | |
| 448 bool mustAddToPassiveList = | |
| 449 (*maxAudioFrameCounter > | |
| 450 (activeList.size() + passiveWasMixedList.size() + | |
| 451 passiveWasNotMixedList.size())); | |
| 452 | 450 |
| 453 bool wasMixed = false; | 451 // Get audio source audio and put it in the struct vector. |
| 454 wasMixed = (*audio_source)->_mixHistory->WasMixed(); | 452 for (MixerAudioSource* audio_source : audio_source_list_) { |
| 453 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( |
| 454 _id, static_cast<int>(_outputFrequency)); |
| 455 | 455 |
| 456 auto audio_frame_with_info = | 456 auto audio_frame_info = audio_frame_with_info.audio_frame_info; |
| 457 (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency); | 457 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; |
| 458 auto ret = audio_frame_with_info.audio_frame_info; | 458 |
| 459 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 459 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { |
| 460 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 460 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, |
| 461 "failed to GetAudioFrameWithMuted() from participant"); |
| 461 continue; | 462 continue; |
| 462 } | 463 } |
| 463 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); | 464 audioSourceMixingDataList.emplace_back( |
| 464 if (audio_source_list_.size() != 1) { | 465 audio_source, audio_source_audio_frame, |
| 465 // TODO(wu): Issue 3390, add support for multiple audio sources case. | 466 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, |
| 466 audio_frame->ntp_time_ms_ = -1; | 467 audio_source->_mixHistory->WasMixed()); |
| 468 } |
| 469 |
| 470 // Sort frames by mixing priority (see SourceFrame::shouldMixBefore). |
| 471 std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(), |
| 472 std::mem_fn(&SourceFrame::shouldMixBefore)); |
| 473 |
| 474 // Walk the sorted list and collect the frames to mix into |result|. |
| 475 for (SourceFrame& p : audioSourceMixingDataList) { |
| 476 // Skip muted frames; they are never mixed. |
| 477 if (p.muted_) { |
| 478 p.audio_source_->_mixHistory->SetIsMixed(false); |
| 479 continue; |
| 467 } | 480 } |
| 468 | 481 |
| 469 // TODO(aleloi): this assert triggers in some test cases where SRTP is | 482 // Add frame to result vector for mixing. |
| 470 // used which prevents NetEQ from making a VAD. Temporarily disable this | 483 bool is_mixed = false; |
| 471 // assert until the problem is fixed on a higher level. | 484 if (maxAudioFrameCounter > 0) { |
| 472 // RTC_DCHECK_NE(audio_frame->vad_activity_, AudioFrame::kVadUnknown); | 485 --maxAudioFrameCounter; |
| 473 if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) { | 486 if (!p.was_mixed_before_) { |
| 474 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 487 RampIn(*p.audio_frame_); |
| 475 "invalid VAD state from audio source"); | 488 } |
| 489 result.emplace_back(p.audio_frame_, false); |
| 490 is_mixed = true; |
| 476 } | 491 } |
| 477 | 492 |
| 478 if (audio_frame->vad_activity_ == AudioFrame::kVadActive) { | 493 // Ramp out frames that were mixed last round but lost their slot. |
| 479 if (!wasMixed && !muted) { | 494 if (p.was_mixed_before_ && !is_mixed) { |
| 480 RampIn(*audio_frame); | 495 RampOut(*p.audio_frame_); |
| 481 } | 496 result.emplace_back(p.audio_frame_, false); |
| 497 } |
| 482 | 498 |
| 483 if (activeList.size() >= *maxAudioFrameCounter) { | 499 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); |
| 484 // There are already more active audio sources than should be | |
| 485 // mixed. Only keep the ones with the highest energy. | |
| 486 AudioFrameList::iterator replaceItem; | |
| 487 uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame); | |
| 488 | |
| 489 bool found_replace_item = false; | |
| 490 for (AudioFrameList::iterator iter = activeList.begin(); | |
| 491 iter != activeList.end(); ++iter) { | |
| 492 const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame); | |
| 493 if (energy < lowestEnergy) { | |
| 494 replaceItem = iter; | |
| 495 lowestEnergy = energy; | |
| 496 found_replace_item = true; | |
| 497 } | |
| 498 } | |
| 499 if (found_replace_item) { | |
| 500 RTC_DCHECK(!muted); // Cannot replace with a muted frame. | |
| 501 FrameAndMuteInfo replaceFrame = *replaceItem; | |
| 502 | |
| 503 bool replaceWasMixed = false; | |
| 504 std::map<int, MixerAudioSource*>::const_iterator it = | |
| 505 mixAudioSourceList->find(replaceFrame.frame->id_); | |
| 506 | |
| 507 // When a frame is pushed to |activeList| it is also pushed | |
| 508 // to mixAudioSourceList with the frame's id. This means | |
| 509 // that the Find call above should never fail. | |
| 510 RTC_DCHECK(it != mixAudioSourceList->end()); | |
| 511 replaceWasMixed = it->second->_mixHistory->WasMixed(); | |
| 512 | |
| 513 mixAudioSourceList->erase(replaceFrame.frame->id_); | |
| 514 activeList.erase(replaceItem); | |
| 515 | |
| 516 activeList.push_front(FrameAndMuteInfo(audio_frame, muted)); | |
| 517 (*mixAudioSourceList)[audio_frame->id_] = *audio_source; | |
| 518 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
| 519 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 520 | |
| 521 if (replaceWasMixed) { | |
| 522 if (!replaceFrame.muted) { | |
| 523 RampOut(*replaceFrame.frame); | |
| 524 } | |
| 525 rampOutList->push_back(replaceFrame); | |
| 526 RTC_DCHECK_LE( | |
| 527 rampOutList->size(), | |
| 528 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 529 } | |
| 530 } else { | |
| 531 if (wasMixed) { | |
| 532 if (!muted) { | |
| 533 RampOut(*audio_frame); | |
| 534 } | |
| 535 rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted)); | |
| 536 RTC_DCHECK_LE( | |
| 537 rampOutList->size(), | |
| 538 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 539 } | |
| 540 } | |
| 541 } else { | |
| 542 activeList.push_front(FrameAndMuteInfo(audio_frame, muted)); | |
| 543 (*mixAudioSourceList)[audio_frame->id_] = *audio_source; | |
| 544 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
| 545 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 546 } | |
| 547 } else { | |
| 548 if (wasMixed) { | |
| 549 AudioSourceWithFrame* part_struct = | |
| 550 new AudioSourceWithFrame(*audio_source, audio_frame, muted); | |
| 551 passiveWasMixedList.push_back(part_struct); | |
| 552 } else if (mustAddToPassiveList) { | |
| 553 if (!muted) { | |
| 554 RampIn(*audio_frame); | |
| 555 } | |
| 556 AudioSourceWithFrame* part_struct = | |
| 557 new AudioSourceWithFrame(*audio_source, audio_frame, muted); | |
| 558 passiveWasNotMixedList.push_back(part_struct); | |
| 559 } | |
| 560 } | |
| 561 } | 500 } |
| 562 RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter); | 501 return result; |
| 563 // At this point it is known which audio sources should be mixed. Transfer | |
| 564 // this information to this functions output parameters. | |
| 565 for (AudioFrameList::const_iterator iter = activeList.begin(); | |
| 566 iter != activeList.end(); ++iter) { | |
| 567 mixList->push_back(*iter); | |
| 568 } | |
| 569 activeList.clear(); | |
| 570 // Always mix a constant number of AudioFrames. If there aren't enough | |
| 571 // active audio sources mix passive ones. Starting with those that was mixed | |
| 572 // last iteration. | |
| 573 for (AudioSourceWithFrameList::const_iterator iter = | |
| 574 passiveWasMixedList.begin(); | |
| 575 iter != passiveWasMixedList.end(); ++iter) { | |
| 576 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | |
| 577 mixList->push_back( | |
| 578 FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted)); | |
| 579 (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source; | |
| 580 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
| 581 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 582 } | |
| 583 delete *iter; | |
| 584 } | |
| 585 // And finally the ones that have not been mixed for a while. | |
| 586 for (AudioSourceWithFrameList::const_iterator iter = | |
| 587 passiveWasNotMixedList.begin(); | |
| 588 iter != passiveWasNotMixedList.end(); ++iter) { | |
| 589 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | |
| 590 mixList->push_back( | |
| 591 FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted)); | |
| 592 (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source; | |
| 593 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
| 594 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 595 } | |
| 596 delete *iter; | |
| 597 } | |
| 598 RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size()); | |
| 599 *maxAudioFrameCounter += mixListStartSize - mixList->size(); | |
| 600 } | 502 } |
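The rewritten UpdateToMix() replaces the old multi-list bookkeeping (active, passive-was-mixed, passive-was-not-mixed, and ramp-out lists plus an id map) with a single sort-and-take-N pass. The essential control flow, stripped of WebRTC types, might look as follows (all names are illustrative; RampIn/RampOut are only indicated in comments):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <functional>
    #include <vector>

    // Illustrative stand-in for SourceFrame with the same ordering rules.
    struct Frame {
      bool muted;
      bool vad_active;
      uint32_t energy;
      bool was_mixed_before;
      bool is_mixed;  // Output of the selection pass below.

      bool ShouldMixBefore(const Frame& other) const {
        if (muted != other.muted) return other.muted;
        if (vad_active != other.vad_active) return vad_active;
        return energy > other.energy;
      }
    };

    // Sketch of the new control flow: sort candidates by priority, admit up
    // to |max_frames| unmuted frames, and keep frames that just lost their
    // slot in the output one more time so they can be ramped out.
    std::vector<Frame*> SelectFramesToMix(std::vector<Frame>* candidates,
                                          size_t max_frames) {
      std::sort(candidates->begin(), candidates->end(),
                std::mem_fn(&Frame::ShouldMixBefore));
      std::vector<Frame*> result;
      for (Frame& f : *candidates) {
        if (f.muted) {
          f.is_mixed = false;  // Muted frames never reach the mix.
          continue;
        }
        bool mixed_now = false;
        if (max_frames > 0) {
          --max_frames;
          // The real code calls RampIn(f) here when !f.was_mixed_before.
          result.push_back(&f);
          mixed_now = true;
        } else if (f.was_mixed_before) {
          // The real code calls RampOut(f): one fade-out frame on the way out.
          result.push_back(&f);
        }
        f.is_mixed = mixed_now;
      }
      return result;
    }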
| 601 | 503 |
| 602 void NewAudioConferenceMixerImpl::GetAdditionalAudio( | 504 void NewAudioConferenceMixerImpl::GetAdditionalAudio( |
| 603 AudioFrameList* additionalFramesList) const { | 505 AudioFrameList* additionalFramesList) const { |
| 604 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 506 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| 605 "GetAdditionalAudio(additionalFramesList)"); | 507 "GetAdditionalAudio(additionalFramesList)"); |
| 606 // The GetAudioFrameWithMuted() callback may result in the audio source being | 508 // The GetAudioFrameWithMuted() callback may result in the audio source being |
| 607 // removed from additionalAudioFramesList_. If that happens it will | 509 // removed from additionalAudioFramesList_. If that happens it will |
| 608 // invalidate any iterators. Create a copy of the audio sources list such | 510 // invalidate any iterators. Create a copy of the audio sources list such |
| 609 // that the list of audio sources can be traversed safely. | 511 // that the list of audio sources can be traversed safely. |
| (...skipping 16 matching lines...) Expand all Loading... |
| 626 } | 528 } |
| 627 if (audio_frame->samples_per_channel_ == 0) { | 529 if (audio_frame->samples_per_channel_ == 0) { |
| 628 // Empty frame. Don't use it. | 530 // Empty frame. Don't use it. |
| 629 continue; | 531 continue; |
| 630 } | 532 } |
| 631 additionalFramesList->push_back(FrameAndMuteInfo( | 533 additionalFramesList->push_back(FrameAndMuteInfo( |
| 632 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | 534 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); |
| 633 } | 535 } |
| 634 } | 536 } |
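The copy-the-list comment in GetAdditionalAudio() describes a general defensive pattern: when a callback fired during traversal may erase elements from the container being traversed, iterate over a snapshot instead. A generic illustration (the Source type and its Poll() method are hypothetical):

    #include <list>

    struct Source {
      // In the real mixer the GetAudioFrameWithMuted() callback may remove
      // the source from the list; model that side effect here.
      void Poll(std::list<Source*>* sources) { sources->remove(this); }
    };

    void PollAll(std::list<Source*>* sources) {
      // Iterate over a snapshot: traversing |*sources| directly would use
      // the iterator invalidated by the erase inside Poll().
      const std::list<Source*> snapshot(*sources);
      for (Source* s : snapshot) {
        s->Poll(sources);
      }
    }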
| 635 | 537 |
| 636 void NewAudioConferenceMixerImpl::UpdateMixedStatus( | |
| 637 const std::map<int, MixerAudioSource*>& mixedAudioSourcesMap) const { | |
| 638 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | |
| 639 "UpdateMixedStatus(mixedAudioSourcesMap)"); | |
| 640 RTC_DCHECK_LE(mixedAudioSourcesMap.size(), | |
| 641 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 642 | |
| 643 // Loop through all audio_sources. If they are in the mix map they | |
| 644 // were mixed. | |
| 645 for (MixerAudioSourceList::const_iterator audio_source = | |
| 646 audio_source_list_.begin(); | |
| 647 audio_source != audio_source_list_.end(); ++audio_source) { | |
| 648 bool isMixed = false; | |
| 649 for (std::map<int, MixerAudioSource*>::const_iterator it = | |
| 650 mixedAudioSourcesMap.begin(); | |
| 651 it != mixedAudioSourcesMap.end(); ++it) { | |
| 652 if (it->second == *audio_source) { | |
| 653 isMixed = true; | |
| 654 break; | |
| 655 } | |
| 656 } | |
| 657 (*audio_source)->_mixHistory->SetIsMixed(isMixed); | |
| 658 } | |
| 659 } | |
| 660 | |
| 661 void NewAudioConferenceMixerImpl::ClearAudioFrameList( | |
| 662 AudioFrameList* audioFrameList) const { | |
| 663 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | |
| 664 "ClearAudioFrameList(audioFrameList)"); | |
| 665 audioFrameList->clear(); | |
| 666 } | |
| 667 | |
| 668 bool NewAudioConferenceMixerImpl::IsAudioSourceInList( | 538 bool NewAudioConferenceMixerImpl::IsAudioSourceInList( |
| 669 const MixerAudioSource& audio_source, | 539 const MixerAudioSource& audio_source, |
| 670 const MixerAudioSourceList& audioSourceList) const { | 540 const MixerAudioSourceList& audioSourceList) const { |
| 671 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 541 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
| 672 "IsAudioSourceInList(audio_source,audioSourceList)"); | 542 "IsAudioSourceInList(audio_source,audioSourceList)"); |
| 673 for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin(); | 543 for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin(); |
| 674 iter != audioSourceList.end(); ++iter) { | 544 iter != audioSourceList.end(); ++iter) { |
| 675 if (&audio_source == *iter) { | 545 if (&audio_source == *iter) { |
| 676 return true; | 546 return true; |
| 677 } | 547 } |
| (...skipping 115 matching lines...) |
| 793 | 663 |
| 794 if (error != _limiter->kNoError) { | 664 if (error != _limiter->kNoError) { |
| 795 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 665 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
| 796 "Error from AudioProcessing: %d", error); | 666 "Error from AudioProcessing: %d", error); |
| 797 RTC_NOTREACHED(); | 667 RTC_NOTREACHED(); |
| 798 return false; | 668 return false; |
| 799 } | 669 } |
| 800 return true; | 670 return true; |
| 801 } | 671 } |
| 802 } // namespace webrtc | 672 } // namespace webrtc |