Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc

Issue 1311733003: Stylizing AudioConferenceMixer. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: refine two comments. Created 5 years, 3 months ago.
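
A note for readers skimming the diff below: the "stylizing" in this CL mostly replaces status-code-plus-out-parameter accessors with functions that return their value directly, and passes arguments that may be modified by pointer instead of by non-const reference. A minimal C++ sketch of that before/after pattern, paraphrased from the signatures visible in the diff (the struct names here are illustrative, not the actual WebRTC declarations):

    #include <cstdint>

    class MixerParticipant;  // Forward declaration; the real class lives in WebRTC.

    // Old style: an int32_t status code plus a bool& out-parameter, and a
    // non-const reference for an argument the mixer keeps a pointer to.
    struct OldStyleMixer {
      int32_t IsMixed(bool& mixed) const;
      int32_t SetMixabilityStatus(MixerParticipant& participant, bool mixable);
    };

    // New style after this CL: queries return their value directly, and
    // arguments that may be stored or modified are passed as pointers.
    struct NewStyleMixer {
      bool IsMixed() const;
      int32_t SetMixabilityStatus(MixerParticipant* participant, bool mixable);
    };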
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 52 matching lines...)
63 } // namespace 63 } // namespace
64 64
65 MixerParticipant::MixerParticipant() 65 MixerParticipant::MixerParticipant()
66 : _mixHistory(new MixHistory()) { 66 : _mixHistory(new MixHistory()) {
67 } 67 }
68 68
69 MixerParticipant::~MixerParticipant() { 69 MixerParticipant::~MixerParticipant() {
70 delete _mixHistory; 70 delete _mixHistory;
71 } 71 }
72 72
73 int32_t MixerParticipant::IsMixed(bool& mixed) const { 73 bool MixerParticipant::IsMixed() const {
74 return _mixHistory->IsMixed(mixed); 74 return _mixHistory->IsMixed();
75 } 75 }
76 76
77 MixHistory::MixHistory() 77 MixHistory::MixHistory()
78 : _isMixed(0) { 78 : _isMixed(0) {
79 } 79 }
80 80
81 MixHistory::~MixHistory() { 81 MixHistory::~MixHistory() {
82 } 82 }
83 83
84 int32_t MixHistory::IsMixed(bool& mixed) const { 84 bool MixHistory::IsMixed() const {
85 mixed = _isMixed; 85 return _isMixed;
86 return 0;
87 } 86 }
88 87
89 int32_t MixHistory::WasMixed(bool& wasMixed) const { 88 bool MixHistory::WasMixed() const {
90 // Was mixed is the same as is mixed depending on perspective. This function 89 // Was mixed is the same as is mixed depending on perspective. This function
91 // is for the perspective of AudioConferenceMixerImpl. 90 // is for the perspective of AudioConferenceMixerImpl.
92 return IsMixed(wasMixed); 91 return IsMixed();
93 } 92 }
94 93
95 int32_t MixHistory::SetIsMixed(const bool mixed) { 94 int32_t MixHistory::SetIsMixed(const bool mixed) {
96 _isMixed = mixed; 95 _isMixed = mixed;
97 return 0; 96 return 0;
98 } 97 }
99 98
100 void MixHistory::ResetMixedStatus() { 99 void MixHistory::ResetMixedStatus() {
101 _isMixed = false; 100 _isMixed = false;
102 } 101 }
(...skipping 146 matching lines...)
249 default: 248 default:
250 assert(false); 249 assert(false);
251 250
252 CriticalSectionScoped cs(_crit.get()); 251 CriticalSectionScoped cs(_crit.get());
253 _processCalls--; 252 _processCalls--;
254 return -1; 253 return -1;
255 } 254 }
256 } 255 }
257 256
258 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, 257 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
259 remainingParticipantsAllowedToMix); 258 &remainingParticipantsAllowedToMix);
260 259
261 GetAdditionalAudio(&additionalFramesList); 260 GetAdditionalAudio(&additionalFramesList);
262 UpdateMixedStatus(mixedParticipantsMap); 261 UpdateMixedStatus(mixedParticipantsMap);
263 } 262 }
264 263
265 // Get an AudioFrame for mixing from the memory pool. 264 // Get an AudioFrame for mixing from the memory pool.
266 AudioFrame* mixedAudio = NULL; 265 AudioFrame* mixedAudio = NULL;
267 if(_audioFramePool->PopMemory(mixedAudio) == -1) { 266 if(_audioFramePool->PopMemory(mixedAudio) == -1) {
268 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, 267 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
269 "failed PopMemory() call"); 268 "failed PopMemory() call");
(...skipping 17 matching lines...)
287 AudioFrame::kNormalSpeech, 286 AudioFrame::kNormalSpeech,
288 AudioFrame::kVadPassive, num_mixed_channels); 287 AudioFrame::kVadPassive, num_mixed_channels);
289 288
290 _timeStamp += static_cast<uint32_t>(_sampleSize); 289 _timeStamp += static_cast<uint32_t>(_sampleSize);
291 290
292 // We only use the limiter if it supports the output sample rate and 291 // We only use the limiter if it supports the output sample rate and
293 // we're actually mixing multiple streams. 292 // we're actually mixing multiple streams.
294 use_limiter_ = _numMixedParticipants > 1 && 293 use_limiter_ = _numMixedParticipants > 1 &&
295 _outputFrequency <= kAudioProcMaxNativeSampleRateHz; 294 _outputFrequency <= kAudioProcMaxNativeSampleRateHz;
296 295
297 MixFromList(*mixedAudio, &mixList); 296 MixFromList(mixedAudio, mixList);
298 MixAnonomouslyFromList(*mixedAudio, &additionalFramesList); 297 MixAnonomouslyFromList(mixedAudio, additionalFramesList);
299 MixAnonomouslyFromList(*mixedAudio, &rampOutList); 298 MixAnonomouslyFromList(mixedAudio, rampOutList);
300 299
301 if(mixedAudio->samples_per_channel_ == 0) { 300 if(mixedAudio->samples_per_channel_ == 0) {
302 // Nothing was mixed, set the audio samples to silence. 301 // Nothing was mixed, set the audio samples to silence.
303 mixedAudio->samples_per_channel_ = _sampleSize; 302 mixedAudio->samples_per_channel_ = _sampleSize;
304 mixedAudio->Mute(); 303 mixedAudio->Mute();
305 } else { 304 } else {
306 // Only call the limiter if we have something to mix. 305 // Only call the limiter if we have something to mix.
307 if(!LimitMixedAudio(*mixedAudio)) 306 if(!LimitMixedAudio(mixedAudio))
308 retval = -1; 307 retval = -1;
309 } 308 }
310 } 309 }
311 310
312 { 311 {
313 CriticalSectionScoped cs(_cbCrit.get()); 312 CriticalSectionScoped cs(_cbCrit.get());
314 if(_mixReceiver != NULL) { 313 if(_mixReceiver != NULL) {
315 const AudioFrame** dummy = NULL; 314 const AudioFrame** dummy = NULL;
316 _mixReceiver->NewMixedAudio( 315 _mixReceiver->NewMixedAudio(
317 _id, 316 _id,
318 *mixedAudio, 317 *mixedAudio,
319 dummy, 318 dummy,
320 0); 319 0);
321 } 320 }
322 } 321 }
323 322
324 // Reclaim all outstanding memory. 323 // Reclaim all outstanding memory.
325 _audioFramePool->PushMemory(mixedAudio); 324 _audioFramePool->PushMemory(mixedAudio);
326 ClearAudioFrameList(&mixList); 325 ClearAudioFrameList(&mixList);
327 ClearAudioFrameList(&rampOutList); 326 ClearAudioFrameList(&rampOutList);
328 ClearAudioFrameList(&additionalFramesList); 327 ClearAudioFrameList(&additionalFramesList);
329 { 328 {
330 CriticalSectionScoped cs(_crit.get()); 329 CriticalSectionScoped cs(_crit.get());
331 _processCalls--; 330 _processCalls--;
332 } 331 }
333 return retval; 332 return retval;
334 } 333 }
335 334
336 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback( 335 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
337 AudioMixerOutputReceiver& mixReceiver) { 336 AudioMixerOutputReceiver* mixReceiver) {
338 CriticalSectionScoped cs(_cbCrit.get()); 337 CriticalSectionScoped cs(_cbCrit.get());
339 if(_mixReceiver != NULL) { 338 if(_mixReceiver != NULL) {
340 return -1; 339 return -1;
341 } 340 }
342 _mixReceiver = &mixReceiver; 341 _mixReceiver = mixReceiver;
343 return 0; 342 return 0;
344 } 343 }
345 344
346 int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() { 345 int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
347 CriticalSectionScoped cs(_cbCrit.get()); 346 CriticalSectionScoped cs(_cbCrit.get());
348 if(_mixReceiver == NULL) { 347 if(_mixReceiver == NULL) {
349 return -1; 348 return -1;
350 } 349 }
351 _mixReceiver = NULL; 350 _mixReceiver = NULL;
352 return 0; 351 return 0;
353 } 352 }
354 353
355 int32_t AudioConferenceMixerImpl::SetOutputFrequency( 354 int32_t AudioConferenceMixerImpl::SetOutputFrequency(
356 const Frequency frequency) { 355 const Frequency& frequency) {
357 CriticalSectionScoped cs(_crit.get()); 356 CriticalSectionScoped cs(_crit.get());
358 357
359 _outputFrequency = frequency; 358 _outputFrequency = frequency;
360 _sampleSize = 359 _sampleSize =
361 static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000); 360 static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000);
362 361
363 return 0; 362 return 0;
364 } 363 }
365 364
366 AudioConferenceMixer::Frequency 365 AudioConferenceMixer::Frequency
367 AudioConferenceMixerImpl::OutputFrequency() const { 366 AudioConferenceMixerImpl::OutputFrequency() const {
368 CriticalSectionScoped cs(_crit.get()); 367 CriticalSectionScoped cs(_crit.get());
369 return _outputFrequency; 368 return _outputFrequency;
370 } 369 }
371 370
372 int32_t AudioConferenceMixerImpl::SetMixabilityStatus( 371 int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
373 MixerParticipant& participant, 372 MixerParticipant* participant, bool mixable) {
374 bool mixable) {
375 if (!mixable) { 373 if (!mixable) {
376 // Anonymous participants are in a separate list. Make sure that the 374 // Anonymous participants are in a separate list. Make sure that the
377 // participant is in the _participantList if it is being mixed. 375 // participant is in the _participantList if it is being mixed.
378 SetAnonymousMixabilityStatus(participant, false); 376 SetAnonymousMixabilityStatus(participant, false);
379 } 377 }
380 size_t numMixedParticipants; 378 size_t numMixedParticipants;
381 { 379 {
382 CriticalSectionScoped cs(_cbCrit.get()); 380 CriticalSectionScoped cs(_cbCrit.get());
383 const bool isMixed = 381 const bool isMixed =
384 IsParticipantInList(participant, &_participantList); 382 IsParticipantInList(*participant, _participantList);
385 // API must be called with a new state. 383 // API must be called with a new state.
386 if(!(mixable ^ isMixed)) { 384 if(!(mixable ^ isMixed)) {
387 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 385 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
388 "Mixable is aready %s", 386 "Mixable is aready %s",
389 isMixed ? "ON" : "off"); 387 isMixed ? "ON" : "off");
390 return -1; 388 return -1;
391 } 389 }
392 bool success = false; 390 bool success = false;
393 if(mixable) { 391 if(mixable) {
394 success = AddParticipantToList(participant, &_participantList); 392 success = AddParticipantToList(participant, &_participantList);
(...skipping 16 matching lines...)
411 numMixedNonAnonymous + _additionalParticipantList.size(); 409 numMixedNonAnonymous + _additionalParticipantList.size();
412 } 410 }
413 // A MixerParticipant was added or removed. Make sure the scratch 411 // A MixerParticipant was added or removed. Make sure the scratch
414 // buffer is updated if necessary. 412 // buffer is updated if necessary.
415 // Note: The scratch buffer may only be updated in Process(). 413 // Note: The scratch buffer may only be updated in Process().
416 CriticalSectionScoped cs(_crit.get()); 414 CriticalSectionScoped cs(_crit.get());
417 _numMixedParticipants = numMixedParticipants; 415 _numMixedParticipants = numMixedParticipants;
418 return 0; 416 return 0;
419 } 417 }
420 418
421 int32_t AudioConferenceMixerImpl::MixabilityStatus( 419 bool AudioConferenceMixerImpl::MixabilityStatus(
422 MixerParticipant& participant, 420 const MixerParticipant& participant) const {
423 bool& mixable) {
424 CriticalSectionScoped cs(_cbCrit.get()); 421 CriticalSectionScoped cs(_cbCrit.get());
425 mixable = IsParticipantInList(participant, &_participantList); 422 return IsParticipantInList(participant, _participantList);
426 return 0;
427 } 423 }
428 424
429 int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus( 425 int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
430 MixerParticipant& participant, const bool anonymous) { 426 MixerParticipant* participant, bool anonymous) {
431 CriticalSectionScoped cs(_cbCrit.get()); 427 CriticalSectionScoped cs(_cbCrit.get());
432 if(IsParticipantInList(participant, &_additionalParticipantList)) { 428 if(IsParticipantInList(*participant, _additionalParticipantList)) {
433 if(anonymous) { 429 if(anonymous) {
434 return 0; 430 return 0;
435 } 431 }
436 if(!RemoveParticipantFromList(participant, 432 if(!RemoveParticipantFromList(participant,
437 &_additionalParticipantList)) { 433 &_additionalParticipantList)) {
438 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 434 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
439 "unable to remove participant from anonymous list"); 435 "unable to remove participant from anonymous list");
440 assert(false); 436 assert(false);
441 return -1; 437 return -1;
442 } 438 }
(...skipping 11 matching lines...)
454 _id, 450 _id,
455 "participant must be registered before turning it into anonymous"); 451 "participant must be registered before turning it into anonymous");
456 // Setting anonymous status is only possible if MixerParticipant is 452 // Setting anonymous status is only possible if MixerParticipant is
457 // already registered. 453 // already registered.
458 return -1; 454 return -1;
459 } 455 }
460 return AddParticipantToList(participant, &_additionalParticipantList) ? 456 return AddParticipantToList(participant, &_additionalParticipantList) ?
461 0 : -1; 457 0 : -1;
462 } 458 }
463 459
464 int32_t AudioConferenceMixerImpl::AnonymousMixabilityStatus( 460 bool AudioConferenceMixerImpl::AnonymousMixabilityStatus(
465 MixerParticipant& participant, bool& mixable) { 461 const MixerParticipant& participant) const {
466 CriticalSectionScoped cs(_cbCrit.get()); 462 CriticalSectionScoped cs(_cbCrit.get());
467 mixable = IsParticipantInList(participant, 463 return IsParticipantInList(participant, _additionalParticipantList);
468 &_additionalParticipantList);
469 return 0;
470 } 464 }
471 465
472 int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency( 466 int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency(
473 Frequency freq) { 467 Frequency freq) {
474 // Make sure that only allowed sampling frequencies are used. Use closest 468 // Make sure that only allowed sampling frequencies are used. Use closest
475 // higher sampling frequency to avoid losing information. 469 // higher sampling frequency to avoid losing information.
476 if (static_cast<int>(freq) == 12000) { 470 if (static_cast<int>(freq) == 12000) {
477 freq = kWbInHz; 471 freq = kWbInHz;
478 } else if (static_cast<int>(freq) == 24000) { 472 } else if (static_cast<int>(freq) == 24000) {
479 freq = kSwbInHz; 473 freq = kSwbInHz;
480 } 474 }
481 475
482 if((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) || 476 if((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) ||
483 (freq == kLowestPossible)) { 477 (freq == kLowestPossible)) {
484 _minimumMixingFreq=freq; 478 _minimumMixingFreq=freq;
485 return 0; 479 return 0;
486 } else { 480 } else {
487 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 481 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
488 "SetMinimumMixingFrequency incorrect frequency: %i",freq); 482 "SetMinimumMixingFrequency incorrect frequency: %i",freq);
489 assert(false); 483 assert(false);
490 return -1; 484 return -1;
491 } 485 }
492 } 486 }
493 487
494 // Check all AudioFrames that are to be mixed. The highest sampling frequency 488 // Check all AudioFrames that are to be mixed. The highest sampling frequency
495 // found is the lowest that can be used without losing information. 489 // found is the lowest that can be used without losing information.
496 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() { 490 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() const {
497 const int participantListFrequency = 491 const int participantListFrequency =
498 GetLowestMixingFrequencyFromList(&_participantList); 492 GetLowestMixingFrequencyFromList(_participantList);
499 const int anonymousListFrequency = 493 const int anonymousListFrequency =
500 GetLowestMixingFrequencyFromList(&_additionalParticipantList); 494 GetLowestMixingFrequencyFromList(_additionalParticipantList);
501 const int highestFreq = 495 const int highestFreq =
502 (participantListFrequency > anonymousListFrequency) ? 496 (participantListFrequency > anonymousListFrequency) ?
503 participantListFrequency : anonymousListFrequency; 497 participantListFrequency : anonymousListFrequency;
504 // Check if the user specified a lowest mixing frequency. 498 // Check if the user specified a lowest mixing frequency.
505 if(_minimumMixingFreq != kLowestPossible) { 499 if(_minimumMixingFreq != kLowestPossible) {
506 if(_minimumMixingFreq > highestFreq) { 500 if(_minimumMixingFreq > highestFreq) {
507 return _minimumMixingFreq; 501 return _minimumMixingFreq;
508 } 502 }
509 } 503 }
510 return highestFreq; 504 return highestFreq;
511 } 505 }
512 506
513 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequencyFromList( 507 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
514 MixerParticipantList* mixList) { 508 const MixerParticipantList& mixList) const {
515 int32_t highestFreq = 8000; 509 int32_t highestFreq = 8000;
516 for (MixerParticipantList::iterator iter = mixList->begin(); 510 for (MixerParticipantList::const_iterator iter = mixList.begin();
517 iter != mixList->end(); 511 iter != mixList.end();
518 ++iter) { 512 ++iter) {
519 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); 513 const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
520 if(neededFrequency > highestFreq) { 514 if(neededFrequency > highestFreq) {
521 highestFreq = neededFrequency; 515 highestFreq = neededFrequency;
522 } 516 }
523 } 517 }
524 return highestFreq; 518 return highestFreq;
525 } 519 }
526 520
527 void AudioConferenceMixerImpl::UpdateToMix( 521 void AudioConferenceMixerImpl::UpdateToMix(
528 AudioFrameList* mixList, 522 AudioFrameList* mixList,
529 AudioFrameList* rampOutList, 523 AudioFrameList* rampOutList,
530 std::map<int, MixerParticipant*>* mixParticipantList, 524 std::map<int, MixerParticipant*>* mixParticipantList,
531 size_t& maxAudioFrameCounter) { 525 size_t* maxAudioFrameCounter) const {
532 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 526 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
533 "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)", 527 "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
534 maxAudioFrameCounter); 528 *maxAudioFrameCounter);
535 const size_t mixListStartSize = mixList->size(); 529 const size_t mixListStartSize = mixList->size();
536 AudioFrameList activeList; 530 AudioFrameList activeList;
537 // Struct needed by the passive lists to keep track of which AudioFrame 531 // Struct needed by the passive lists to keep track of which AudioFrame
538 // belongs to which MixerParticipant. 532 // belongs to which MixerParticipant.
539 ParticipantFramePairList passiveWasNotMixedList; 533 ParticipantFramePairList passiveWasNotMixedList;
540 ParticipantFramePairList passiveWasMixedList; 534 ParticipantFramePairList passiveWasMixedList;
541 for (MixerParticipantList::iterator participant = _participantList.begin(); 535 for (MixerParticipantList::const_iterator participant =
542 participant != _participantList.end(); 536 _participantList.begin(); participant != _participantList.end();
543 ++participant) { 537 ++participant) {
544 // Stop keeping track of passive participants if there are already 538 // Stop keeping track of passive participants if there are already
545 // enough participants available (they wont be mixed anyway). 539 // enough participants available (they wont be mixed anyway).
546 bool mustAddToPassiveList = (maxAudioFrameCounter > 540 bool mustAddToPassiveList = (*maxAudioFrameCounter >
547 (activeList.size() + 541 (activeList.size() +
548 passiveWasMixedList.size() + 542 passiveWasMixedList.size() +
549 passiveWasNotMixedList.size())); 543 passiveWasNotMixedList.size()));
550 544
551 bool wasMixed = false; 545 bool wasMixed = false;
552 (*participant)->_mixHistory->WasMixed(wasMixed); 546 wasMixed = (*participant)->_mixHistory->WasMixed();
553 AudioFrame* audioFrame = NULL; 547 AudioFrame* audioFrame = NULL;
554 if(_audioFramePool->PopMemory(audioFrame) == -1) { 548 if(_audioFramePool->PopMemory(audioFrame) == -1) {
555 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, 549 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
556 "failed PopMemory() call"); 550 "failed PopMemory() call");
557 assert(false); 551 assert(false);
558 return; 552 return;
559 } 553 }
560 audioFrame->sample_rate_hz_ = _outputFrequency; 554 audioFrame->sample_rate_hz_ = _outputFrequency;
561 555
562 if((*participant)->GetAudioFrame(_id,*audioFrame) != 0) { 556 if((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
563 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 557 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
564 "failed to GetAudioFrame() from participant"); 558 "failed to GetAudioFrame() from participant");
565 _audioFramePool->PushMemory(audioFrame); 559 _audioFramePool->PushMemory(audioFrame);
566 continue; 560 continue;
567 } 561 }
568 if (_participantList.size() != 1) { 562 if (_participantList.size() != 1) {
569 // TODO(wu): Issue 3390, add support for multiple participants case. 563 // TODO(wu): Issue 3390, add support for multiple participants case.
570 audioFrame->ntp_time_ms_ = -1; 564 audioFrame->ntp_time_ms_ = -1;
571 } 565 }
572 566
573 // TODO(henrike): this assert triggers in some test cases where SRTP is 567 // TODO(henrike): this assert triggers in some test cases where SRTP is
574 // used which prevents NetEQ from making a VAD. Temporarily disable this 568 // used which prevents NetEQ from making a VAD. Temporarily disable this
575 // assert until the problem is fixed on a higher level. 569 // assert until the problem is fixed on a higher level.
576 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown); 570 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
577 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) { 571 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
578 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 572 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
579 "invalid VAD state from participant"); 573 "invalid VAD state from participant");
580 } 574 }
581 575
582 if(audioFrame->vad_activity_ == AudioFrame::kVadActive) { 576 if(audioFrame->vad_activity_ == AudioFrame::kVadActive) {
583 if(!wasMixed) { 577 if(!wasMixed) {
584 RampIn(*audioFrame); 578 RampIn(*audioFrame);
585 } 579 }
586 580
587 if(activeList.size() >= maxAudioFrameCounter) { 581 if(activeList.size() >= *maxAudioFrameCounter) {
588 // There are already more active participants than should be 582 // There are already more active participants than should be
589 // mixed. Only keep the ones with the highest energy. 583 // mixed. Only keep the ones with the highest energy.
590 AudioFrameList::iterator replaceItem; 584 AudioFrameList::iterator replaceItem;
591 CalculateEnergy(*audioFrame); 585 CalculateEnergy(*audioFrame);
592 uint32_t lowestEnergy = audioFrame->energy_; 586 uint32_t lowestEnergy = audioFrame->energy_;
593 587
594 bool found_replace_item = false; 588 bool found_replace_item = false;
595 for (AudioFrameList::iterator iter = activeList.begin(); 589 for (AudioFrameList::iterator iter = activeList.begin();
596 iter != activeList.end(); 590 iter != activeList.end();
597 ++iter) { 591 ++iter) {
598 CalculateEnergy(**iter); 592 CalculateEnergy(**iter);
599 if((*iter)->energy_ < lowestEnergy) { 593 if((*iter)->energy_ < lowestEnergy) {
600 replaceItem = iter; 594 replaceItem = iter;
601 lowestEnergy = (*iter)->energy_; 595 lowestEnergy = (*iter)->energy_;
602 found_replace_item = true; 596 found_replace_item = true;
603 } 597 }
604 } 598 }
605 if(found_replace_item) { 599 if(found_replace_item) {
606 AudioFrame* replaceFrame = *replaceItem; 600 AudioFrame* replaceFrame = *replaceItem;
607 601
608 bool replaceWasMixed = false; 602 bool replaceWasMixed = false;
609 std::map<int, MixerParticipant*>::iterator it = 603 std::map<int, MixerParticipant*>::const_iterator it =
610 mixParticipantList->find(replaceFrame->id_); 604 mixParticipantList->find(replaceFrame->id_);
611 605
612 // When a frame is pushed to |activeList| it is also pushed 606 // When a frame is pushed to |activeList| it is also pushed
613 // to mixParticipantList with the frame's id. This means 607 // to mixParticipantList with the frame's id. This means
614 // that the Find call above should never fail. 608 // that the Find call above should never fail.
615 assert(it != mixParticipantList->end()); 609 assert(it != mixParticipantList->end());
616 it->second->_mixHistory->WasMixed(replaceWasMixed); 610 replaceWasMixed = it->second->_mixHistory->WasMixed();
617 611
618 mixParticipantList->erase(replaceFrame->id_); 612 mixParticipantList->erase(replaceFrame->id_);
619 activeList.erase(replaceItem); 613 activeList.erase(replaceItem);
620 614
621 activeList.push_front(audioFrame); 615 activeList.push_front(audioFrame);
622 (*mixParticipantList)[audioFrame->id_] = *participant; 616 (*mixParticipantList)[audioFrame->id_] = *participant;
623 assert(mixParticipantList->size() <= 617 assert(mixParticipantList->size() <=
624 kMaximumAmountOfMixedParticipants); 618 kMaximumAmountOfMixedParticipants);
625 619
626 if (replaceWasMixed) { 620 if (replaceWasMixed) {
(...skipping 30 matching lines...)
657 RampIn(*audioFrame); 651 RampIn(*audioFrame);
658 ParticipantFramePair* pair = new ParticipantFramePair; 652 ParticipantFramePair* pair = new ParticipantFramePair;
659 pair->audioFrame = audioFrame; 653 pair->audioFrame = audioFrame;
660 pair->participant = *participant; 654 pair->participant = *participant;
661 passiveWasNotMixedList.push_back(pair); 655 passiveWasNotMixedList.push_back(pair);
662 } else { 656 } else {
663 _audioFramePool->PushMemory(audioFrame); 657 _audioFramePool->PushMemory(audioFrame);
664 } 658 }
665 } 659 }
666 } 660 }
667 assert(activeList.size() <= maxAudioFrameCounter); 661 assert(activeList.size() <= *maxAudioFrameCounter);
668 // At this point it is known which participants should be mixed. Transfer 662 // At this point it is known which participants should be mixed. Transfer
669 // this information to this functions output parameters. 663 // this information to this functions output parameters.
670 for (AudioFrameList::iterator iter = activeList.begin(); 664 for (AudioFrameList::const_iterator iter = activeList.begin();
671 iter != activeList.end(); 665 iter != activeList.end();
672 ++iter) { 666 ++iter) {
673 mixList->push_back(*iter); 667 mixList->push_back(*iter);
674 } 668 }
675 activeList.clear(); 669 activeList.clear();
676 // Always mix a constant number of AudioFrames. If there aren't enough 670 // Always mix a constant number of AudioFrames. If there aren't enough
677 // active participants mix passive ones. Starting with those that was mixed 671 // active participants mix passive ones. Starting with those that was mixed
678 // last iteration. 672 // last iteration.
679 for (ParticipantFramePairList::iterator iter = passiveWasMixedList.begin(); 673 for (ParticipantFramePairList::const_iterator
680 iter != passiveWasMixedList.end(); 674 iter = passiveWasMixedList.begin(); iter != passiveWasMixedList.end();
681 ++iter) { 675 ++iter) {
682 if(mixList->size() < maxAudioFrameCounter + mixListStartSize) { 676 if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
683 mixList->push_back((*iter)->audioFrame); 677 mixList->push_back((*iter)->audioFrame);
684 (*mixParticipantList)[(*iter)->audioFrame->id_] = 678 (*mixParticipantList)[(*iter)->audioFrame->id_] =
685 (*iter)->participant; 679 (*iter)->participant;
686 assert(mixParticipantList->size() <= 680 assert(mixParticipantList->size() <=
687 kMaximumAmountOfMixedParticipants); 681 kMaximumAmountOfMixedParticipants);
688 } else { 682 } else {
689 _audioFramePool->PushMemory((*iter)->audioFrame); 683 _audioFramePool->PushMemory((*iter)->audioFrame);
690 } 684 }
691 delete *iter; 685 delete *iter;
692 } 686 }
693 // And finally the ones that have not been mixed for a while. 687 // And finally the ones that have not been mixed for a while.
694 for (ParticipantFramePairList::iterator iter = 688 for (ParticipantFramePairList::const_iterator iter =
695 passiveWasNotMixedList.begin(); 689 passiveWasNotMixedList.begin();
696 iter != passiveWasNotMixedList.end(); 690 iter != passiveWasNotMixedList.end();
697 ++iter) { 691 ++iter) {
698 if(mixList->size() < maxAudioFrameCounter + mixListStartSize) { 692 if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
699 mixList->push_back((*iter)->audioFrame); 693 mixList->push_back((*iter)->audioFrame);
700 (*mixParticipantList)[(*iter)->audioFrame->id_] = 694 (*mixParticipantList)[(*iter)->audioFrame->id_] =
701 (*iter)->participant; 695 (*iter)->participant;
702 assert(mixParticipantList->size() <= 696 assert(mixParticipantList->size() <=
703 kMaximumAmountOfMixedParticipants); 697 kMaximumAmountOfMixedParticipants);
704 } else { 698 } else {
705 _audioFramePool->PushMemory((*iter)->audioFrame); 699 _audioFramePool->PushMemory((*iter)->audioFrame);
706 } 700 }
707 delete *iter; 701 delete *iter;
708 } 702 }
709 assert(maxAudioFrameCounter + mixListStartSize >= mixList->size()); 703 assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
710 maxAudioFrameCounter += mixListStartSize - mixList->size(); 704 *maxAudioFrameCounter += mixListStartSize - mixList->size();
711 } 705 }
712 706
713 void AudioConferenceMixerImpl::GetAdditionalAudio( 707 void AudioConferenceMixerImpl::GetAdditionalAudio(
714 AudioFrameList* additionalFramesList) { 708 AudioFrameList* additionalFramesList) const {
715 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 709 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
716 "GetAdditionalAudio(additionalFramesList)"); 710 "GetAdditionalAudio(additionalFramesList)");
717 // The GetAudioFrame() callback may result in the participant being removed 711 // The GetAudioFrame() callback may result in the participant being removed
718 // from additionalParticipantList_. If that happens it will invalidate any 712 // from additionalParticipantList_. If that happens it will invalidate any
719 // iterators. Create a copy of the participants list such that the list of 713 // iterators. Create a copy of the participants list such that the list of
720 // participants can be traversed safely. 714 // participants can be traversed safely.
721 MixerParticipantList additionalParticipantList; 715 MixerParticipantList additionalParticipantList;
722 additionalParticipantList.insert(additionalParticipantList.begin(), 716 additionalParticipantList.insert(additionalParticipantList.begin(),
723 _additionalParticipantList.begin(), 717 _additionalParticipantList.begin(),
724 _additionalParticipantList.end()); 718 _additionalParticipantList.end());
725 719
726 for (MixerParticipantList::iterator participant = 720 for (MixerParticipantList::const_iterator participant =
727 additionalParticipantList.begin(); 721 additionalParticipantList.begin();
728 participant != additionalParticipantList.end(); 722 participant != additionalParticipantList.end();
729 ++participant) { 723 ++participant) {
730 AudioFrame* audioFrame = NULL; 724 AudioFrame* audioFrame = NULL;
731 if(_audioFramePool->PopMemory(audioFrame) == -1) { 725 if(_audioFramePool->PopMemory(audioFrame) == -1) {
732 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, 726 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
733 "failed PopMemory() call"); 727 "failed PopMemory() call");
734 assert(false); 728 assert(false);
735 return; 729 return;
736 } 730 }
737 audioFrame->sample_rate_hz_ = _outputFrequency; 731 audioFrame->sample_rate_hz_ = _outputFrequency;
738 if((*participant)->GetAudioFrame(_id, *audioFrame) != 0) { 732 if((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
739 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 733 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
740 "failed to GetAudioFrame() from participant"); 734 "failed to GetAudioFrame() from participant");
741 _audioFramePool->PushMemory(audioFrame); 735 _audioFramePool->PushMemory(audioFrame);
742 continue; 736 continue;
743 } 737 }
744 if(audioFrame->samples_per_channel_ == 0) { 738 if(audioFrame->samples_per_channel_ == 0) {
745 // Empty frame. Don't use it. 739 // Empty frame. Don't use it.
746 _audioFramePool->PushMemory(audioFrame); 740 _audioFramePool->PushMemory(audioFrame);
747 continue; 741 continue;
748 } 742 }
749 additionalFramesList->push_back(audioFrame); 743 additionalFramesList->push_back(audioFrame);
750 } 744 }
751 } 745 }
752 746
753 void AudioConferenceMixerImpl::UpdateMixedStatus( 747 void AudioConferenceMixerImpl::UpdateMixedStatus(
754 std::map<int, MixerParticipant*>& mixedParticipantsMap) { 748 const std::map<int, MixerParticipant*>& mixedParticipantsMap) const {
755 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 749 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
756 "UpdateMixedStatus(mixedParticipantsMap)"); 750 "UpdateMixedStatus(mixedParticipantsMap)");
757 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); 751 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
758 752
759 // Loop through all participants. If they are in the mix map they 753 // Loop through all participants. If they are in the mix map they
760 // were mixed. 754 // were mixed.
761 for (MixerParticipantList::iterator participant = _participantList.begin(); 755 for (MixerParticipantList::const_iterator
762 participant != _participantList.end(); 756 participant =_participantList.begin();
757 participant != _participantList.end();
763 ++participant) { 758 ++participant) {
764 bool isMixed = false; 759 bool isMixed = false;
765 for (std::map<int, MixerParticipant*>::iterator it = 760 for (std::map<int, MixerParticipant*>::const_iterator it =
766 mixedParticipantsMap.begin(); 761 mixedParticipantsMap.begin();
767 it != mixedParticipantsMap.end(); 762 it != mixedParticipantsMap.end();
768 ++it) { 763 ++it) {
769 if (it->second == *participant) { 764 if (it->second == *participant) {
770 isMixed = true; 765 isMixed = true;
771 break; 766 break;
772 } 767 }
773 } 768 }
774 (*participant)->_mixHistory->SetIsMixed(isMixed); 769 (*participant)->_mixHistory->SetIsMixed(isMixed);
775 } 770 }
776 } 771 }
777 772
778 void AudioConferenceMixerImpl::ClearAudioFrameList( 773 void AudioConferenceMixerImpl::ClearAudioFrameList(
779 AudioFrameList* audioFrameList) { 774 AudioFrameList* audioFrameList) const {
780 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 775 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
781 "ClearAudioFrameList(audioFrameList)"); 776 "ClearAudioFrameList(audioFrameList)");
782 for (AudioFrameList::iterator iter = audioFrameList->begin(); 777 for (AudioFrameList::iterator iter = audioFrameList->begin();
783 iter != audioFrameList->end(); 778 iter != audioFrameList->end();
784 ++iter) { 779 ++iter) {
785 _audioFramePool->PushMemory(*iter); 780 _audioFramePool->PushMemory(*iter);
786 } 781 }
787 audioFrameList->clear(); 782 audioFrameList->clear();
788 } 783 }
789 784
790 void AudioConferenceMixerImpl::UpdateVADPositiveParticipants( 785 void AudioConferenceMixerImpl::UpdateVADPositiveParticipants(
791 AudioFrameList* mixList) { 786 AudioFrameList* mixList) const {
792 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 787 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
793 "UpdateVADPositiveParticipants(mixList)"); 788 "UpdateVADPositiveParticipants(mixList)");
794 789
795 for (AudioFrameList::iterator iter = mixList->begin(); 790 for (AudioFrameList::const_iterator iter = mixList->begin();
796 iter != mixList->end(); 791 iter != mixList->end();
797 ++iter) { 792 ++iter) {
798 CalculateEnergy(**iter); 793 CalculateEnergy(**iter);
799 } 794 }
800 } 795 }
801 796
802 bool AudioConferenceMixerImpl::IsParticipantInList( 797 bool AudioConferenceMixerImpl::IsParticipantInList(
803 MixerParticipant& participant, 798 const MixerParticipant& participant,
804 MixerParticipantList* participantList) const { 799 const MixerParticipantList& participantList) const {
805 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 800 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
806 "IsParticipantInList(participant,participantList)"); 801 "IsParticipantInList(participant,participantList)");
807 for (MixerParticipantList::const_iterator iter = participantList->begin(); 802 for (MixerParticipantList::const_iterator iter = participantList.begin();
808 iter != participantList->end(); 803 iter != participantList.end();
809 ++iter) { 804 ++iter) {
810 if(&participant == *iter) { 805 if(&participant == *iter) {
811 return true; 806 return true;
812 } 807 }
813 } 808 }
814 return false; 809 return false;
815 } 810 }
816 811
817 bool AudioConferenceMixerImpl::AddParticipantToList( 812 bool AudioConferenceMixerImpl::AddParticipantToList(
818 MixerParticipant& participant, 813 MixerParticipant* participant,
819 MixerParticipantList* participantList) { 814 MixerParticipantList* participantList) const {
820 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 815 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
821 "AddParticipantToList(participant, participantList)"); 816 "AddParticipantToList(participant, participantList)");
822 participantList->push_back(&participant); 817 participantList->push_back(participant);
823 // Make sure that the mixed status is correct for new MixerParticipant. 818 // Make sure that the mixed status is correct for new MixerParticipant.
824 participant._mixHistory->ResetMixedStatus(); 819 participant->_mixHistory->ResetMixedStatus();
825 return true; 820 return true;
826 } 821 }
827 822
828 bool AudioConferenceMixerImpl::RemoveParticipantFromList( 823 bool AudioConferenceMixerImpl::RemoveParticipantFromList(
829 MixerParticipant& participant, 824 MixerParticipant* participant,
830 MixerParticipantList* participantList) { 825 MixerParticipantList* participantList) const {
831 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 826 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
832 "RemoveParticipantFromList(participant, participantList)"); 827 "RemoveParticipantFromList(participant, participantList)");
833 for (MixerParticipantList::iterator iter = participantList->begin(); 828 for (MixerParticipantList::iterator iter = participantList->begin();
834 iter != participantList->end(); 829 iter != participantList->end();
835 ++iter) { 830 ++iter) {
836 if(*iter == &participant) { 831 if(*iter == participant) {
837 participantList->erase(iter); 832 participantList->erase(iter);
838 // Participant is no longer mixed, reset to default. 833 // Participant is no longer mixed, reset to default.
839 participant._mixHistory->ResetMixedStatus(); 834 participant->_mixHistory->ResetMixedStatus();
840 return true; 835 return true;
841 } 836 }
842 } 837 }
843 return false; 838 return false;
844 } 839 }
845 840
846 int32_t AudioConferenceMixerImpl::MixFromList( 841 int32_t AudioConferenceMixerImpl::MixFromList(
847 AudioFrame& mixedAudio, 842 AudioFrame* mixedAudio,
848 const AudioFrameList* audioFrameList) { 843 const AudioFrameList& audioFrameList) const {
849 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 844 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
850 "MixFromList(mixedAudio, audioFrameList)"); 845 "MixFromList(mixedAudio, audioFrameList)");
851 if(audioFrameList->empty()) return 0; 846 if(audioFrameList.empty()) return 0;
852 847
853 uint32_t position = 0; 848 uint32_t position = 0;
854 849
855 if (_numMixedParticipants == 1) { 850 if (_numMixedParticipants == 1) {
856 mixedAudio.timestamp_ = audioFrameList->front()->timestamp_; 851 mixedAudio->timestamp_ = audioFrameList.front()->timestamp_;
857 mixedAudio.elapsed_time_ms_ = audioFrameList->front()->elapsed_time_ms_; 852 mixedAudio->elapsed_time_ms_ = audioFrameList.front()->elapsed_time_ms_;
858 } else { 853 } else {
859 // TODO(wu): Issue 3390. 854 // TODO(wu): Issue 3390.
860 // Audio frame timestamp is only supported in one channel case. 855 // Audio frame timestamp is only supported in one channel case.
861 mixedAudio.timestamp_ = 0; 856 mixedAudio->timestamp_ = 0;
862 mixedAudio.elapsed_time_ms_ = -1; 857 mixedAudio->elapsed_time_ms_ = -1;
863 } 858 }
864 859
865 for (AudioFrameList::const_iterator iter = audioFrameList->begin(); 860 for (AudioFrameList::const_iterator iter = audioFrameList.begin();
866 iter != audioFrameList->end(); 861 iter != audioFrameList.end();
867 ++iter) { 862 ++iter) {
868 if(position >= kMaximumAmountOfMixedParticipants) { 863 if(position >= kMaximumAmountOfMixedParticipants) {
869 WEBRTC_TRACE( 864 WEBRTC_TRACE(
870 kTraceMemory, 865 kTraceMemory,
871 kTraceAudioMixerServer, 866 kTraceAudioMixerServer,
872 _id, 867 _id,
873 "Trying to mix more than max amount of mixed participants:%d!", 868 "Trying to mix more than max amount of mixed participants:%d!",
874 kMaximumAmountOfMixedParticipants); 869 kMaximumAmountOfMixedParticipants);
875 // Assert and avoid crash 870 // Assert and avoid crash
876 assert(false); 871 assert(false);
877 position = 0; 872 position = 0;
878 } 873 }
879 MixFrames(&mixedAudio, (*iter), use_limiter_); 874 MixFrames(mixedAudio, (*iter), use_limiter_);
880 875
881 position++; 876 position++;
882 } 877 }
883 878
884 return 0; 879 return 0;
885 } 880 }
886 881
887 // TODO(andrew): consolidate this function with MixFromList. 882 // TODO(andrew): consolidate this function with MixFromList.
888 int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList( 883 int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList(
889 AudioFrame& mixedAudio, 884 AudioFrame* mixedAudio,
890 const AudioFrameList* audioFrameList) { 885 const AudioFrameList& audioFrameList) const {
891 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 886 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
892 "MixAnonomouslyFromList(mixedAudio, audioFrameList)"); 887 "MixAnonomouslyFromList(mixedAudio, audioFrameList)");
893 888
894 if(audioFrameList->empty()) return 0; 889 if(audioFrameList.empty()) return 0;
895 890
896 for (AudioFrameList::const_iterator iter = audioFrameList->begin(); 891 for (AudioFrameList::const_iterator iter = audioFrameList.begin();
897 iter != audioFrameList->end(); 892 iter != audioFrameList.end();
898 ++iter) { 893 ++iter) {
899 MixFrames(&mixedAudio, *iter, use_limiter_); 894 MixFrames(mixedAudio, *iter, use_limiter_);
900 } 895 }
901 return 0; 896 return 0;
902 } 897 }
903 898
904 bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame& mixedAudio) { 899 bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
905 if (!use_limiter_) { 900 if (!use_limiter_) {
906 return true; 901 return true;
907 } 902 }
908 903
909 // Smoothly limit the mixed frame. 904 // Smoothly limit the mixed frame.
910 const int error = _limiter->ProcessStream(&mixedAudio); 905 const int error = _limiter->ProcessStream(mixedAudio);
911 906
912 // And now we can safely restore the level. This procedure results in 907 // And now we can safely restore the level. This procedure results in
913 // some loss of resolution, deemed acceptable. 908 // some loss of resolution, deemed acceptable.
914 // 909 //
915 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS 910 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
916 // and compression gain of 6 dB). However, in the transition frame when this 911 // and compression gain of 6 dB). However, in the transition frame when this
917 // is enabled (moving from one to two participants) it has the potential to 912 // is enabled (moving from one to two participants) it has the potential to
918 // create discontinuities in the mixed frame. 913 // create discontinuities in the mixed frame.
919 // 914 //
920 // Instead we double the frame (with addition since left-shifting a 915 // Instead we double the frame (with addition since left-shifting a
921 // negative value is undefined). 916 // negative value is undefined).
922 mixedAudio += mixedAudio; 917 *mixedAudio += *mixedAudio;
923 918
924 if(error != _limiter->kNoError) { 919 if(error != _limiter->kNoError) {
925 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 920 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
926 "Error from AudioProcessing: %d", error); 921 "Error from AudioProcessing: %d", error);
927 assert(false); 922 assert(false);
928 return false; 923 return false;
929 } 924 }
930 return true; 925 return true;
931 } 926 }
932 } // namespace webrtc 927 } // namespace webrtc
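
A side note on the level-restore step in LimitMixedAudio() above: the in-code comment explains that the mixed frame is doubled with addition rather than a left shift, because left-shifting a negative value is undefined behavior. The real code does this through AudioFrame's += operator (`*mixedAudio += *mixedAudio;`). A self-contained sketch of the same idea on a raw sample buffer, assuming 16-bit samples and a hypothetical helper name:

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Doubles each sample by adding it to itself, saturating to the int16_t
    // range. This mirrors the "double the frame (with addition ...)" step in
    // LimitMixedAudio(); the shift-free formulation avoids undefined behavior
    // on negative samples. Saturation here is this sketch's own choice.
    void DoubleWithSaturation(int16_t* samples, size_t num_samples) {
      for (size_t i = 0; i < num_samples; ++i) {
        const int32_t doubled =
            static_cast<int32_t>(samples[i]) + static_cast<int32_t>(samples[i]);
        if (doubled > std::numeric_limits<int16_t>::max()) {
          samples[i] = std::numeric_limits<int16_t>::max();
        } else if (doubled < std::numeric_limits<int16_t>::min()) {
          samples[i] = std::numeric_limits<int16_t>::min();
        } else {
          samples[i] = static_cast<int16_t>(doubled);
        }
      }
    }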