Chromium Code Reviews

| OLD | NEW | 
|---|---|
| 1 /* | 1 /* | 
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 
| 3 * | 3 * | 
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license | 
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source | 
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found | 
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may | 
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. | 
| 9 */ | 9 */ | 
| 10 | 10 | 
| 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/source/new_audio_conference_mixer_impl.h" | 
| 12 | 12 | 
| 13 #include <algorithm> | 13 #include <algorithm> | 
| 14 #include <functional> | |
| 14 | 15 | 
| 15 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h " | 16 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h " | 
| 16 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" | 17 #include "webrtc/modules/audio_mixer/include/audio_mixer_defines.h" | 
| 17 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 18 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 
| 18 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 19 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 
| 19 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 20 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 
| 20 #include "webrtc/system_wrappers/include/trace.h" | 21 #include "webrtc/system_wrappers/include/trace.h" | 
| 21 | 22 | 
| 22 namespace webrtc { | 23 namespace webrtc { | 
| 23 namespace { | 24 namespace { | 
| 24 | 25 | 
| 25 struct AudioSourceWithFrame { | 26 class AudioSourceAndFrame { | 
| 26 AudioSourceWithFrame(MixerAudioSource* p, AudioFrame* a, bool m) | 27 public: | 
| 27 : audio_source(p), audio_frame(a), muted(m) {} | 28 AudioSourceAndFrame(MixerAudioSource* p, | 
| 28 MixerAudioSource* audio_source; | 29 AudioFrame* a, | 
| 29 AudioFrame* audio_frame; | 30 bool m, | 
| 30 bool muted; | 31 bool was_mixed_before) | 
| 32 : audio_source_(p), | |
| 33 audio_frame_(a), | |
| 34 muted_(m), | |
| 35 was_mixed_before_(was_mixed_before) { | |
| 36 if (!muted_) { | |
| 37 energy_ = CalculateEnergy(*a); | |
| 38 } | |
| 39 } | |
| 40 | |
| 41 // a.shouldMixBefore(b) is used to select mixer participants. | |
| 42 bool shouldMixBefore(const AudioSourceAndFrame& other) const { | |
| 43 if (muted_ != other.muted_) { | |
| 44 return other.muted_; | |
| 45 } | |
| 46 | |
| 47 auto our_activity = audio_frame_->vad_activity_; | |
| 48 auto other_activity = other.audio_frame_->vad_activity_; | |
| 49 | |
| 50 if (our_activity != other_activity) { | |
| 51 return (other_activity == AudioFrame::kVadPassive || | |
| 52 other_activity == AudioFrame::kVadUnknown) && | |
| 53 our_activity == AudioFrame::kVadActive; | |
| 54 } | |
| 55 | |
| 56 return energy_ > other.energy_; | |
| 57 } | |
| 58 | |
| 59 MixerAudioSource* audio_source_; | |
| 60 AudioFrame* audio_frame_; | |
| 61 bool muted_; | |
| 62 uint32_t energy_; | |
| 63 bool was_mixed_before_; | |
| 31 }; | 64 }; | 
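The comparator added above ranks candidates in three tiers: unmuted before muted, VAD-active before passive or unknown, then higher energy first. Below is a minimal standalone sketch of that ordering; the types and values are hypothetical stand-ins, and passive/unknown are collapsed into one rank so the comparison is a strict weak ordering, which `std::sort` requires.

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Illustrative stand-ins, not the WebRTC types.
enum class Vad { kActive, kPassive, kUnknown };

struct Candidate {
  bool muted;
  Vad vad;
  uint32_t energy;  // Precomputed when unmuted, as in the constructor above.
};

// Passive and unknown share a rank, making the comparison a strict weak
// ordering (required by std::sort).
int VadRank(Vad v) { return v == Vad::kActive ? 0 : 1; }

// Mirrors shouldMixBefore(): unmuted before muted, active before
// passive/unknown, then descending energy.
bool ShouldMixBefore(const Candidate& a, const Candidate& b) {
  if (a.muted != b.muted) return b.muted;
  if (VadRank(a.vad) != VadRank(b.vad)) return VadRank(a.vad) < VadRank(b.vad);
  return a.energy > b.energy;
}

int main() {
  std::vector<Candidate> list = {
      {true, Vad::kActive, 900},    // Muted: always sorts last.
      {false, Vad::kPassive, 800},  // Unmuted but passive.
      {false, Vad::kActive, 100},   // Active, low energy.
      {false, Vad::kActive, 500},   // Active, high energy: sorts first.
  };
  std::sort(list.begin(), list.end(), ShouldMixBefore);
  return 0;
}
```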
| 32 | 65 | 
| 33 typedef std::list<AudioSourceWithFrame*> AudioSourceWithFrameList; | 66 typedef std::list<AudioSourceAndFrame*> AudioSourceWithFrameList; | 
| 34 | 67 | 
| 35 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. | 68 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. | 
| 36 // These effects are applied to |frame| itself prior to mixing. Assumes that | 69 // These effects are applied to |frame| itself prior to mixing. Assumes that | 
| 37 // |mixed_frame| always has at least as many channels as |frame|. Supports | 70 // |mixed_frame| always has at least as many channels as |frame|. Supports | 
| 38 // stereo at most. | 71 // stereo at most. | 
| 39 // | 72 // | 
| 40 // TODO(andrew): consider not modifying |frame| here. | 73 // TODO(andrew): consider not modifying |frame| here. | 
| 41 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 74 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 
| 42 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); | 75 RTC_DCHECK_GE(mixed_frame->num_channels_, frame->num_channels_); | 
| 43 if (use_limiter) { | 76 if (use_limiter) { | 
| (...skipping 116 matching lines...) | | 
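The body of `MixFrames()` is elided in the hunk above. As a generic, hypothetical illustration of the "saturation protection" its comment promises (not the actual WebRTC implementation), samples can be summed in a wider type and clamped back to the int16 range:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical sketch of saturating mixing: widen to int32, add, clamp.
// The real MixFrames() body is elided above; this only shows the idea.
void MixWithSaturation(std::vector<int16_t>* mixed,
                       const std::vector<int16_t>& frame) {
  const size_t n = std::min(mixed->size(), frame.size());
  for (size_t i = 0; i < n; ++i) {
    const int32_t sum = static_cast<int32_t>((*mixed)[i]) + frame[i];
    (*mixed)[i] = static_cast<int16_t>(
        std::min<int32_t>(INT16_MAX, std::max<int32_t>(INT16_MIN, sum)));
  }
}
```

When `use_limiter` is true, the limiter applied later can restore level, so attenuating `|frame|` before a plain add is another common approach; the branch in the diff suggests the two paths differ in exactly this way, but the elided lines are not reproduced here.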
| 160 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) | 193 if (_limiter->gain_control()->Enable(true) != _limiter->kNoError) | 
| 161 return false; | 194 return false; | 
| 162 | 195 | 
| 163 return true; | 196 return true; | 
| 164 } | 197 } | 
| 165 | 198 | 
| 166 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | 199 void NewAudioConferenceMixerImpl::Mix(AudioFrame* audio_frame_for_mixing) { | 
| 167 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; | 200 size_t remainingAudioSourcesAllowedToMix = kMaximumAmountOfMixedAudioSources; | 
| 168 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 201 RTC_DCHECK(thread_checker_.CalledOnValidThread()); | 
| 169 AudioFrameList mixList; | 202 AudioFrameList mixList; | 
| 170 AudioFrameList rampOutList; | |
| 171 AudioFrameList additionalFramesList; | 203 AudioFrameList additionalFramesList; | 
| 172 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; | 204 std::map<int, MixerAudioSource*> mixedAudioSourcesMap; | 
| 173 { | 205 { | 
| 174 CriticalSectionScoped cs(_cbCrit.get()); | 206 CriticalSectionScoped cs(_cbCrit.get()); | 
| 175 | 207 | 
| 176 int32_t lowFreq = GetLowestMixingFrequency(); | 208 int32_t lowFreq = GetLowestMixingFrequency(); | 
| 177 // SILK can run at 12 kHz and 24 kHz. These frequencies are not | 209 // SILK can run at 12 kHz and 24 kHz. These frequencies are not | 
| 178 // supported so use the closest higher frequency to not lose any | 210 // supported so use the closest higher frequency to not lose any | 
| 179 // information. | 211 // information. | 
| 180 // TODO(aleloi): this is probably more appropriate to do in | 212 // TODO(aleloi): this is probably more appropriate to do in | 
| (...skipping 26 matching lines...) | | 
| 207 if (OutputFrequency() != kFbInHz) { | 239 if (OutputFrequency() != kFbInHz) { | 
| 208 SetOutputFrequency(kFbInHz); | 240 SetOutputFrequency(kFbInHz); | 
| 209 } | 241 } | 
| 210 break; | 242 break; | 
| 211 default: | 243 default: | 
| 212 RTC_NOTREACHED(); | 244 RTC_NOTREACHED(); | 
| 213 return; | 245 return; | 
| 214 } | 246 } | 
| 215 } | 247 } | 
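The switch above rounds the lowest required frequency up to the nearest rate the mixer supports, so (per the SILK comment) 12 kHz becomes 16 kHz and 24 kHz becomes 32 kHz, losing no information. A sketch of that rounding follows; the constants are illustrative of the narrowband/wideband/super-wideband/fullband cases, not quoted from the header.

```cpp
#include <cstdint>

// Round a required rate up to the nearest mixer-native rate. Illustrative
// only; the real code switches over named constants such as kFbInHz.
int32_t ClosestSupportedRateHz(int32_t required_hz) {
  if (required_hz <= 8000) return 8000;    // Narrowband.
  if (required_hz <= 16000) return 16000;  // Wideband (covers SILK 12 kHz).
  if (required_hz <= 32000) return 32000;  // Super-wideband (covers 24 kHz).
  return 48000;                            // Fullband.
}
```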
| 216 | 248 | 
| 217 UpdateToMix(&mixList, &rampOutList, &mixedAudioSourcesMap, | 249 mixList = UpdateToMix(remainingAudioSourcesAllowedToMix); | 
| 218 &remainingAudioSourcesAllowedToMix); | 250 remainingAudioSourcesAllowedToMix -= mixList.size(); | 
| 219 | |
| 220 GetAdditionalAudio(&additionalFramesList); | 251 GetAdditionalAudio(&additionalFramesList); | 
| 221 UpdateMixedStatus(mixedAudioSourcesMap); | |
| 222 } | 252 } | 
| 223 | 253 | 
| 224 // TODO(aleloi): it might be better to decide the number of channels | 254 // TODO(aleloi): it might be better to decide the number of channels | 
| 225 // with an API instead of dynamically. | 255 // with an API instead of dynamically. | 
| 226 | 256 | 
| 227 // Find the max channels over all mixing lists. | 257 // Find the max channels over all mixing lists. | 
| 228 const size_t num_mixed_channels = std::max( | 258 const size_t num_mixed_channels = | 
| 229 MaxNumChannels(&mixList), std::max(MaxNumChannels(&additionalFramesList), | 259 std::max(MaxNumChannels(&mixList), MaxNumChannels(&additionalFramesList)); | 
| 230 MaxNumChannels(&rampOutList))); | |
| 231 | 260 | 
| 232 audio_frame_for_mixing->UpdateFrame( | 261 audio_frame_for_mixing->UpdateFrame( | 
| 233 -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech, | 262 -1, _timeStamp, NULL, 0, _outputFrequency, AudioFrame::kNormalSpeech, | 
| 234 AudioFrame::kVadPassive, num_mixed_channels); | 263 AudioFrame::kVadPassive, num_mixed_channels); | 
| 235 | 264 | 
| 236 _timeStamp += static_cast<uint32_t>(_sampleSize); | 265 _timeStamp += static_cast<uint32_t>(_sampleSize); | 
| 237 | 266 | 
| 238 use_limiter_ = num_mixed_audio_sources_ > 1 && | 267 use_limiter_ = num_mixed_audio_sources_ > 1 && | 
| 239 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; | 268 _outputFrequency <= AudioProcessing::kMaxNativeSampleRateHz; | 
| 240 | 269 | 
| 241 // We only use the limiter if it supports the output sample rate and | 270 // We only use the limiter if it supports the output sample rate and | 
| 242 // we're actually mixing multiple streams. | 271 // we're actually mixing multiple streams. | 
| 243 MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_); | 272 MixFromList(audio_frame_for_mixing, mixList, _id, use_limiter_); | 
| 244 | 273 | 
| 245 { | 274 { | 
| 246 CriticalSectionScoped cs(_crit.get()); | 275 CriticalSectionScoped cs(_crit.get()); | 
| 247 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); | 276 MixAnonomouslyFromList(audio_frame_for_mixing, additionalFramesList); | 
| 248 MixAnonomouslyFromList(audio_frame_for_mixing, rampOutList); | |
| 249 | 277 | 
| 250 if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 278 if (audio_frame_for_mixing->samples_per_channel_ == 0) { | 
| 251 // Nothing was mixed, set the audio samples to silence. | 279 // Nothing was mixed, set the audio samples to silence. | 
| 252 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; | 280 audio_frame_for_mixing->samples_per_channel_ = _sampleSize; | 
| 253 audio_frame_for_mixing->Mute(); | 281 audio_frame_for_mixing->Mute(); | 
| 254 } else { | 282 } else { | 
| 255 // Only call the limiter if we have something to mix. | 283 // Only call the limiter if we have something to mix. | 
| 256 LimitMixedAudio(audio_frame_for_mixing); | 284 LimitMixedAudio(audio_frame_for_mixing); | 
| 257 } | 285 } | 
| 258 } | 286 } | 
| 259 | |
| 260 ClearAudioFrameList(&mixList); | |
| 261 ClearAudioFrameList(&rampOutList); | |
| 262 ClearAudioFrameList(&additionalFramesList); | |
| 263 return; | 287 return; | 
| 264 } | 288 } | 
| 265 | 289 | 
| 266 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 290 int32_t NewAudioConferenceMixerImpl::SetOutputFrequency( | 
| 267 const Frequency& frequency) { | 291 const Frequency& frequency) { | 
| 268 CriticalSectionScoped cs(_crit.get()); | 292 CriticalSectionScoped cs(_crit.get()); | 
| 269 | 293 | 
| 270 _outputFrequency = frequency; | 294 _outputFrequency = frequency; | 
| 271 _sampleSize = | 295 _sampleSize = | 
| 272 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); | 296 static_cast<size_t>((_outputFrequency * kProcessPeriodicityInMs) / 1000); | 
| (...skipping 146 matching lines...) | | 
| 419 for (MixerAudioSourceList::const_iterator iter = mixList.begin(); | 443 for (MixerAudioSourceList::const_iterator iter = mixList.begin(); | 
| 420 iter != mixList.end(); ++iter) { | 444 iter != mixList.end(); ++iter) { | 
| 421 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); | 445 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); | 
| 422 if (neededFrequency > highestFreq) { | 446 if (neededFrequency > highestFreq) { | 
| 423 highestFreq = neededFrequency; | 447 highestFreq = neededFrequency; | 
| 424 } | 448 } | 
| 425 } | 449 } | 
| 426 return highestFreq; | 450 return highestFreq; | 
| 427 } | 451 } | 
| 428 | 452 | 
| 429 void NewAudioConferenceMixerImpl::UpdateToMix( | 453 AudioFrameList NewAudioConferenceMixerImpl::UpdateToMix( | 
| 430 AudioFrameList* mixList, | 454 size_t maxAudioFrameCounter) const { | 
| 431 AudioFrameList* rampOutList, | 455 AudioFrameList result; | 
| 432 std::map<int, MixerAudioSource*>* mixAudioSourceList, | 456 std::vector<AudioSourceAndFrame> audioSourceMixingDataList; | 
| 433 size_t* maxAudioFrameCounter) const { | |
| 434 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | |
| 435 "UpdateToMix(mixList,rampOutList,mixAudioSourceList,%d)", | |
| 436 *maxAudioFrameCounter); | |
| 437 const size_t mixListStartSize = mixList->size(); | |
| 438 AudioFrameList activeList; | |
| 439 // Struct needed by the passive lists to keep track of which AudioFrame | |
| 440 // belongs to which MixerAudioSource. | |
| 441 AudioSourceWithFrameList passiveWasNotMixedList; | |
| 442 AudioSourceWithFrameList passiveWasMixedList; | |
| 443 for (MixerAudioSourceList::const_iterator audio_source = | |
| 444 audio_source_list_.begin(); | |
| 445 audio_source != audio_source_list_.end(); ++audio_source) { | |
| 446 // Stop keeping track of passive audioSources if there are already | |
| 447 // enough audio sources available (they wont be mixed anyway). | |
| 448 bool mustAddToPassiveList = | |
| 449 (*maxAudioFrameCounter > | |
| 450 (activeList.size() + passiveWasMixedList.size() + | |
| 451 passiveWasNotMixedList.size())); | |
| 452 | 457 | 
| 453 bool wasMixed = false; | 458 // Get audio source audio and put it in the struct vector. | 
| 454 wasMixed = (*audio_source)->_mixHistory->WasMixed(); | 459 for (MixerAudioSource* audio_source : audio_source_list_) { | 
| 460 auto audio_frame_with_info = audio_source->GetAudioFrameWithMuted( | |
| 461 _id, static_cast<int>(_outputFrequency)); | |
| 455 | 462 | 
| 456 auto audio_frame_with_info = | 463 auto audio_frame_info = audio_frame_with_info.audio_frame_info; | 
| 457 (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency); | 464 AudioFrame* audio_source_audio_frame = audio_frame_with_info.audio_frame; | 
| 458 auto ret = audio_frame_with_info.audio_frame_info; | 465 | 
| 459 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 466 if (audio_frame_info == MixerAudioSource::AudioFrameInfo::kError) { | 
| 460 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 467 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 
| 468 "failed to GetAudioFrameWithMuted() from participant"); | |
| 461 continue; | 469 continue; | 
| 462 } | 470 } | 
| 463 const bool muted = (ret == MixerAudioSource::AudioFrameInfo::kMuted); | 471 audioSourceMixingDataList.emplace_back( | 
| 464 if (audio_source_list_.size() != 1) { | 472 audio_source, audio_source_audio_frame, | 

> **ivoc** 2016/07/27 15:14:05: Does git cl format approve of this?
>
> **ivoc** 2016/07/28 09:34:25: So, does it?
>
> **aleloi** 2016/07/28 10:39:59: Yes it does (missed the question, sorry).

| 465 // TODO(wu): Issue 3390, add support for multiple audio sources case. | 473 audio_frame_info == MixerAudioSource::AudioFrameInfo::kMuted, | 
| 466 audio_frame->ntp_time_ms_ = -1; | 474 audio_source->_mixHistory->WasMixed()); | 
| 475 } | |
| 476 | |
| 477 // Sort frames by sorting function. | |
| 478 std::sort(audioSourceMixingDataList.begin(), audioSourceMixingDataList.end(), | |
| 479 std::mem_fn(&AudioSourceAndFrame::shouldMixBefore)); | |
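`std::sort` takes a binary predicate, while `shouldMixBefore()` is a one-argument member function; `std::mem_fn` (the reason for the new `<functional>` include at the top of the file) adapts it so that `cmp(a, b)` calls `a.shouldMixBefore(b)`. A small self-contained illustration of the same pattern:

```cpp
#include <algorithm>
#include <functional>
#include <vector>

struct Item {
  int v;
  // One-argument member predicate, like shouldMixBefore().
  bool Before(const Item& other) const { return v < other.v; }
};

int main() {
  std::vector<Item> items = {{3}, {1}, {2}};
  // std::mem_fn(&Item::Before) is callable as f(a, b) == a.Before(b).
  std::sort(items.begin(), items.end(), std::mem_fn(&Item::Before));
  return 0;  // items is now {1, 2, 3}.
}
```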
| 480 | |
| 481 // Go through list in order and put things in mixList. | |
| 482 for (AudioSourceAndFrame& p : audioSourceMixingDataList) { | |
| 483 // Filter muted. | |
| 484 if (p.muted_) { | |
| 485 p.audio_source_->_mixHistory->SetIsMixed(false); | |
| 486 continue; | |
| 467 } | 487 } | 
| 468 | 488 | 
| 469 // TODO(aleloi): this assert triggers in some test cases where SRTP is | 489 // Add frame to result vector for mixing. | 
| 470 // used which prevents NetEQ from making a VAD. Temporarily disable this | 490 bool is_mixed = false; | 
| 471 // assert until the problem is fixed on a higher level. | 491 if (maxAudioFrameCounter > 0) { | 
| 472 // RTC_DCHECK_NE(audio_frame->vad_activity_, AudioFrame::kVadUnknown); | 492 --maxAudioFrameCounter; | 
| 473 if (audio_frame->vad_activity_ == AudioFrame::kVadUnknown) { | 493 if (!p.was_mixed_before_) { | 
| 474 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 494 RampIn(*p.audio_frame_); | 
| 475 "invalid VAD state from audio source"); | 495 } | 
| 496 result.emplace_back(p.audio_frame_, false); | |
| 497 is_mixed = true; | |
| 476 } | 498 } | 
| 477 | 499 | 
| 478 if (audio_frame->vad_activity_ == AudioFrame::kVadActive) { | 500 // Ramp out unmuted. | 
| 479 if (!wasMixed && !muted) { | 501 if (p.was_mixed_before_ && !is_mixed) { | 
| 480 RampIn(*audio_frame); | 502 RampOut(*p.audio_frame_); | 
| 481 } | 503 result.emplace_back(p.audio_frame_, false); | 
| 504 } | |
| 482 | 505 | 
| 483 if (activeList.size() >= *maxAudioFrameCounter) { | 506 p.audio_source_->_mixHistory->SetIsMixed(is_mixed); | 
| 484 // There are already more active audio sources than should be | |
| 485 // mixed. Only keep the ones with the highest energy. | |
| 486 AudioFrameList::iterator replaceItem; | |
| 487 uint32_t lowestEnergy = muted ? 0 : CalculateEnergy(*audio_frame); | |
| 488 | |
| 489 bool found_replace_item = false; | |
| 490 for (AudioFrameList::iterator iter = activeList.begin(); | |
| 491 iter != activeList.end(); ++iter) { | |
| 492 const uint32_t energy = muted ? 0 : CalculateEnergy(*iter->frame); | |
| 493 if (energy < lowestEnergy) { | |
| 494 replaceItem = iter; | |
| 495 lowestEnergy = energy; | |
| 496 found_replace_item = true; | |
| 497 } | |
| 498 } | |
| 499 if (found_replace_item) { | |
| 500 RTC_DCHECK(!muted); // Cannot replace with a muted frame. | |
| 501 FrameAndMuteInfo replaceFrame = *replaceItem; | |
| 502 | |
| 503 bool replaceWasMixed = false; | |
| 504 std::map<int, MixerAudioSource*>::const_iterator it = | |
| 505 mixAudioSourceList->find(replaceFrame.frame->id_); | |
| 506 | |
| 507 // When a frame is pushed to |activeList| it is also pushed | |
| 508 // to mixAudioSourceList with the frame's id. This means | |
| 509 // that the Find call above should never fail. | |
| 510 RTC_DCHECK(it != mixAudioSourceList->end()); | |
| 511 replaceWasMixed = it->second->_mixHistory->WasMixed(); | |
| 512 | |
| 513 mixAudioSourceList->erase(replaceFrame.frame->id_); | |
| 514 activeList.erase(replaceItem); | |
| 515 | |
| 516 activeList.push_front(FrameAndMuteInfo(audio_frame, muted)); | |
| 517 (*mixAudioSourceList)[audio_frame->id_] = *audio_source; | |
| 518 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
| 519 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 520 | |
| 521 if (replaceWasMixed) { | |
| 522 if (!replaceFrame.muted) { | |
| 523 RampOut(*replaceFrame.frame); | |
| 524 } | |
| 525 rampOutList->push_back(replaceFrame); | |
| 526 RTC_DCHECK_LE( | |
| 527 rampOutList->size(), | |
| 528 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 529 } | |
| 530 } else { | |
| 531 if (wasMixed) { | |
| 532 if (!muted) { | |
| 533 RampOut(*audio_frame); | |
| 534 } | |
| 535 rampOutList->push_back(FrameAndMuteInfo(audio_frame, muted)); | |
| 536 RTC_DCHECK_LE( | |
| 537 rampOutList->size(), | |
| 538 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 539 } | |
| 540 } | |
| 541 } else { | |
| 542 activeList.push_front(FrameAndMuteInfo(audio_frame, muted)); | |
| 543 (*mixAudioSourceList)[audio_frame->id_] = *audio_source; | |
| 544 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
| 545 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 546 } | |
| 547 } else { | |
| 548 if (wasMixed) { | |
| 549 AudioSourceWithFrame* part_struct = | |
| 550 new AudioSourceWithFrame(*audio_source, audio_frame, muted); | |
| 551 passiveWasMixedList.push_back(part_struct); | |
| 552 } else if (mustAddToPassiveList) { | |
| 553 if (!muted) { | |
| 554 RampIn(*audio_frame); | |
| 555 } | |
| 556 AudioSourceWithFrame* part_struct = | |
| 557 new AudioSourceWithFrame(*audio_source, audio_frame, muted); | |
| 558 passiveWasNotMixedList.push_back(part_struct); | |
| 559 } | |
| 560 } | |
| 561 } | 507 } | 
| 562 RTC_DCHECK_LE(activeList.size(), *maxAudioFrameCounter); | 508 return result; | 
| 563 // At this point it is known which audio sources should be mixed. Transfer | |
| 564 // this information to this functions output parameters. | |
| 565 for (AudioFrameList::const_iterator iter = activeList.begin(); | |
| 566 iter != activeList.end(); ++iter) { | |
| 567 mixList->push_back(*iter); | |
| 568 } | |
| 569 activeList.clear(); | |
| 570 // Always mix a constant number of AudioFrames. If there aren't enough | |
| 571 // active audio sources, mix passive ones, starting with those that were | | 
| 572 // mixed last iteration. | | 
| 573 for (AudioSourceWithFrameList::const_iterator iter = | |
| 574 passiveWasMixedList.begin(); | |
| 575 iter != passiveWasMixedList.end(); ++iter) { | |
| 576 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | |
| 577 mixList->push_back( | |
| 578 FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted)); | |
| 579 (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source; | |
| 580 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
| 581 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 582 } | |
| 583 delete *iter; | |
| 584 } | |
| 585 // And finally the ones that have not been mixed for a while. | |
| 586 for (AudioSourceWithFrameList::const_iterator iter = | |
| 587 passiveWasNotMixedList.begin(); | |
| 588 iter != passiveWasNotMixedList.end(); ++iter) { | |
| 589 if (mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | |
| 590 mixList->push_back( | |
| 591 FrameAndMuteInfo((*iter)->audio_frame, (*iter)->muted)); | |
| 592 (*mixAudioSourceList)[(*iter)->audio_frame->id_] = (*iter)->audio_source; | |
| 593 RTC_DCHECK_LE(mixAudioSourceList->size(), | |
| 594 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 595 } | |
| 596 delete *iter; | |
| 597 } | |
| 598 RTC_DCHECK_GE(*maxAudioFrameCounter + mixListStartSize, mixList->size()); | |
| 599 *maxAudioFrameCounter += mixListStartSize - mixList->size(); | |
| 600 } | 509 } | 
| 601 | 510 | 
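The new `UpdateToMix()` replaces the old active/passive bookkeeping with a single sorted pass: muted sources are skipped, the first `maxAudioFrameCounter` survivors are mixed (ramped in if newly mixed), and previously mixed sources that lost their slot are ramped out for one final frame. A standalone sketch of that control flow, with hypothetical simplified types:

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Illustrative stand-in for a sorted AudioSourceAndFrame entry.
struct Source {
  std::string name;
  bool muted;
  bool was_mixed_before;
};

// Candidates must already be sorted by shouldMixBefore().
std::vector<std::string> SelectToMix(std::vector<Source> sorted,
                                     size_t max_mixed) {
  std::vector<std::string> result;
  for (Source& s : sorted) {
    if (s.muted) continue;  // Muted sources are never mixed.
    bool is_mixed = false;
    if (max_mixed > 0) {
      --max_mixed;
      if (!s.was_mixed_before) {
        // RampIn(frame) would be applied here to avoid a click.
      }
      result.push_back(s.name);
      is_mixed = true;
    }
    if (s.was_mixed_before && !is_mixed) {
      // RampOut(frame): mixed one last time so the source fades out.
      result.push_back(s.name + " (ramp-out)");
    }
    // The real code also records the outcome via SetIsMixed(is_mixed).
  }
  return result;
}
```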
| 602 void NewAudioConferenceMixerImpl::GetAdditionalAudio( | 511 void NewAudioConferenceMixerImpl::GetAdditionalAudio( | 
| 603 AudioFrameList* additionalFramesList) const { | 512 AudioFrameList* additionalFramesList) const { | 
| 604 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 513 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 
| 605 "GetAdditionalAudio(additionalFramesList)"); | 514 "GetAdditionalAudio(additionalFramesList)"); | 
| 606 // The GetAudioFrameWithMuted() callback may result in the audio source being | 515 // The GetAudioFrameWithMuted() callback may result in the audio source being | 
| 607 // removed from additionalAudioFramesList_. If that happens it will | 516 // removed from additionalAudioFramesList_. If that happens it will | 
| 608 // invalidate any iterators. Create a copy of the audio sources list such | 517 // invalidate any iterators. Create a copy of the audio sources list such | 
| 609 // that the list of participants can be traversed safely. | 518 // that the list of participants can be traversed safely. | 
| 610 MixerAudioSourceList additionalAudioSourceList; | 519 MixerAudioSourceList additionalAudioSourceList; | 
| 611 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), | 520 additionalAudioSourceList.insert(additionalAudioSourceList.begin(), | 
| 612 additional_audio_source_list_.begin(), | 521 additional_audio_source_list_.begin(), | 
| 613 additional_audio_source_list_.end()); | 522 additional_audio_source_list_.end()); | 
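The copy above exists because `GetAudioFrameWithMuted()` may remove the current source from the live list, invalidating iterators mid-loop; iterating over a snapshot sidesteps that. A minimal sketch of the pattern, with a hypothetical callback standing in for the WebRTC one:

```cpp
#include <list>

// Visit every element even if the callback erases elements from the live
// list. The snapshot keeps iteration valid; only *live* is mutated.
void VisitAll(std::list<int>* live, void (*callback)(std::list<int>*, int)) {
  const std::list<int> snapshot(live->begin(), live->end());
  for (int value : snapshot) {
    callback(live, value);  // May call live->remove(value) safely.
  }
}
```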
| 614 | 523 | 
| 615 for (MixerAudioSourceList::const_iterator audio_source = | 524 for (MixerAudioSourceList::const_iterator audio_source = | 
| 616 additionalAudioSourceList.begin(); | 525 additionalAudioSourceList.begin(); | 
| 617 audio_source != additionalAudioSourceList.end(); ++audio_source) { | 526 audio_source != additionalAudioSourceList.end(); ++audio_source) { | 
| 618 auto audio_frame_with_info = | 527 auto audio_frame_with_info = | 
| 619 (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency); | 528 (*audio_source)->GetAudioFrameWithMuted(_id, _outputFrequency); | 
| 620 auto ret = audio_frame_with_info.audio_frame_info; | 529 auto ret = audio_frame_with_info.audio_frame_info; | 
| 621 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 530 AudioFrame* audio_frame = audio_frame_with_info.audio_frame; | 
| 531 | |
| 622 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 532 if (ret == MixerAudioSource::AudioFrameInfo::kError) { | 
| 623 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 533 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, | 
| 624 "failed to GetAudioFrameWithMuted() from audio_source"); | 534 "failed to GetAudioFrameWithMuted() from audio_source"); | 
| 625 continue; | 535 continue; | 
| 626 } | 536 } | 
| 627 if (audio_frame->samples_per_channel_ == 0) { | 537 if (audio_frame->samples_per_channel_ == 0) { | 
| 628 // Empty frame. Don't use it. | 538 // Empty frame. Don't use it. | 
| 629 continue; | 539 continue; | 
| 630 } | 540 } | 
| 631 additionalFramesList->push_back(FrameAndMuteInfo( | 541 additionalFramesList->push_back(FrameAndMuteInfo( | 
| 632 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | 542 audio_frame, ret == MixerAudioSource::AudioFrameInfo::kMuted)); | 
| 633 } | 543 } | 
| 634 } | 544 } | 
| 635 | 545 | 
| 636 void NewAudioConferenceMixerImpl::UpdateMixedStatus( | |
| 637 const std::map<int, MixerAudioSource*>& mixedAudioSourcesMap) const { | |
| 638 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | |
| 639 "UpdateMixedStatus(mixedAudioSourcesMap)"); | |
| 640 RTC_DCHECK_LE(mixedAudioSourcesMap.size(), | |
| 641 static_cast<size_t>(kMaximumAmountOfMixedAudioSources)); | |
| 642 | |
| 643 // Loop through all audio_sources. If they are in the mix map they | |
| 644 // were mixed. | |
| 645 for (MixerAudioSourceList::const_iterator audio_source = | |
| 646 audio_source_list_.begin(); | |
| 647 audio_source != audio_source_list_.end(); ++audio_source) { | |
| 648 bool isMixed = false; | |
| 649 for (std::map<int, MixerAudioSource*>::const_iterator it = | |
| 650 mixedAudioSourcesMap.begin(); | |
| 651 it != mixedAudioSourcesMap.end(); ++it) { | |
| 652 if (it->second == *audio_source) { | |
| 653 isMixed = true; | |
| 654 break; | |
| 655 } | |
| 656 } | |
| 657 (*audio_source)->_mixHistory->SetIsMixed(isMixed); | |
| 658 } | |
| 659 } | |
| 660 | |
| 661 void NewAudioConferenceMixerImpl::ClearAudioFrameList( | |
| 662 AudioFrameList* audioFrameList) const { | |
| 663 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | |
| 664 "ClearAudioFrameList(audioFrameList)"); | |
| 665 audioFrameList->clear(); | |
| 666 } | |
| 667 | |
| 668 bool NewAudioConferenceMixerImpl::IsAudioSourceInList( | 546 bool NewAudioConferenceMixerImpl::IsAudioSourceInList( | 
| 669 const MixerAudioSource& audio_source, | 547 const MixerAudioSource& audio_source, | 
| 670 const MixerAudioSourceList& audioSourceList) const { | 548 const MixerAudioSourceList& audioSourceList) const { | 
| 671 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 549 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 
| 672 "IsAudioSourceInList(audio_source,audioSourceList)"); | 550 "IsAudioSourceInList(audio_source,audioSourceList)"); | 
| 673 for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin(); | 551 for (MixerAudioSourceList::const_iterator iter = audioSourceList.begin(); | 
| 674 iter != audioSourceList.end(); ++iter) { | 552 iter != audioSourceList.end(); ++iter) { | 
| 675 if (&audio_source == *iter) { | 553 if (&audio_source == *iter) { | 
| 676 return true; | 554 return true; | 
| 677 } | 555 } | 
| (...skipping 115 matching lines...) | | 
| 793 | 671 | 
| 794 if (error != _limiter->kNoError) { | 672 if (error != _limiter->kNoError) { | 
| 795 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 673 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 
| 796 "Error from AudioProcessing: %d", error); | 674 "Error from AudioProcessing: %d", error); | 
| 797 RTC_NOTREACHED(); | 675 RTC_NOTREACHED(); | 
| 798 return false; | 676 return false; | 
| 799 } | 677 } | 
| 800 return true; | 678 return true; | 
| 801 } | 679 } | 
| 802 } // namespace webrtc | 680 } // namespace webrtc | 