Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc

Issue 1311733003: Stylizing AudioConferenceMixer. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 5 years, 3 months ago
OLD | NEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 52 matching lines...)
63 } // namespace 63 } // namespace
64 64
65 MixerParticipant::MixerParticipant() 65 MixerParticipant::MixerParticipant()
66 : _mixHistory(new MixHistory()) { 66 : _mixHistory(new MixHistory()) {
67 } 67 }
68 68
69 MixerParticipant::~MixerParticipant() { 69 MixerParticipant::~MixerParticipant() {
70 delete _mixHistory; 70 delete _mixHistory;
71 } 71 }
72 72
73 int32_t MixerParticipant::IsMixed(bool& mixed) const { 73 int32_t MixerParticipant::IsMixed(bool* mixed) const {
74 return _mixHistory->IsMixed(mixed); 74 return _mixHistory->IsMixed(mixed);
75 } 75 }
76 76
77 MixHistory::MixHistory() 77 MixHistory::MixHistory()
78 : _isMixed(0) { 78 : _isMixed(0) {
79 } 79 }
80 80
81 MixHistory::~MixHistory() { 81 MixHistory::~MixHistory() {
82 } 82 }
83 83
84 int32_t MixHistory::IsMixed(bool& mixed) const { 84 int32_t MixHistory::IsMixed(bool* mixed) const {
85 mixed = _isMixed; 85 *mixed = _isMixed;
86 return 0; 86 return 0;
87 } 87 }
88 88
89 int32_t MixHistory::WasMixed(bool& wasMixed) const { 89 int32_t MixHistory::WasMixed(bool* wasMixed) const {
90 // Was mixed is the same as is mixed depending on perspective. This function 90 // Was mixed is the same as is mixed depending on perspective. This function
91 // is for the perspective of AudioConferenceMixerImpl. 91 // is for the perspective of AudioConferenceMixerImpl.
92 return IsMixed(wasMixed); 92 return IsMixed(wasMixed);
93 } 93 }
94 94
95 int32_t MixHistory::SetIsMixed(const bool mixed) { 95 int32_t MixHistory::SetIsMixed(const bool mixed) {
96 _isMixed = mixed; 96 _isMixed = mixed;
97 return 0; 97 return 0;
98 } 98 }
99 99
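
The MixerParticipant/MixHistory hunks above show the core pattern of this CL: non-const reference out-parameters (bool& mixed) become pointer parameters (bool* mixed), following the style-guide preference that output arguments be pointers so mutation is visible at the call site. A minimal self-contained sketch of the before/after calling convention; MixHistoryStub is a hypothetical stand-in, not the real class:

#include <cassert>

// Hypothetical stand-in for MixHistory, showing only the signature change.
class MixHistoryStub {
 public:
  // Old style: output through a non-const reference.
  int32_t IsMixedOld(bool& mixed) const { mixed = _isMixed; return 0; }
  // New style (this CL): output through a pointer.
  int32_t IsMixedNew(bool* mixed) const { *mixed = _isMixed; return 0; }

 private:
  bool _isMixed = true;
};

int main() {
  MixHistoryStub history;
  bool mixed = false;
  history.IsMixedOld(mixed);   // nothing at the call site shows that |mixed| is written
  mixed = false;
  history.IsMixedNew(&mixed);  // the explicit & makes the out-parameter visible
  assert(mixed);
  return 0;
}
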
(...skipping 149 matching lines...)
249 default: 249 default:
250 assert(false); 250 assert(false);
251 251
252 CriticalSectionScoped cs(_crit.get()); 252 CriticalSectionScoped cs(_crit.get());
253 _processCalls--; 253 _processCalls--;
254 return -1; 254 return -1;
255 } 255 }
256 } 256 }
257 257
258 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap, 258 UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
259 remainingParticipantsAllowedToMix); 259 &remainingParticipantsAllowedToMix);
260 260
261 GetAdditionalAudio(&additionalFramesList); 261 GetAdditionalAudio(&additionalFramesList);
262 UpdateMixedStatus(mixedParticipantsMap); 262 UpdateMixedStatus(mixedParticipantsMap);
263 } 263 }
264 264
265 // Get an AudioFrame for mixing from the memory pool. 265 // Get an AudioFrame for mixing from the memory pool.
266 AudioFrame* mixedAudio = NULL; 266 AudioFrame* mixedAudio = NULL;
267 if(_audioFramePool->PopMemory(mixedAudio) == -1) { 267 if(_audioFramePool->PopMemory(mixedAudio) == -1) {
268 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, 268 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
269 "failed PopMemory() call"); 269 "failed PopMemory() call");
(...skipping 17 matching lines...)
287 AudioFrame::kNormalSpeech, 287 AudioFrame::kNormalSpeech,
288 AudioFrame::kVadPassive, num_mixed_channels); 288 AudioFrame::kVadPassive, num_mixed_channels);
289 289
290 _timeStamp += static_cast<uint32_t>(_sampleSize); 290 _timeStamp += static_cast<uint32_t>(_sampleSize);
291 291
292 // We only use the limiter if it supports the output sample rate and 292 // We only use the limiter if it supports the output sample rate and
293 // we're actually mixing multiple streams. 293 // we're actually mixing multiple streams.
294 use_limiter_ = _numMixedParticipants > 1 && 294 use_limiter_ = _numMixedParticipants > 1 &&
295 _outputFrequency <= kAudioProcMaxNativeSampleRateHz; 295 _outputFrequency <= kAudioProcMaxNativeSampleRateHz;
296 296
297 MixFromList(*mixedAudio, &mixList); 297 MixFromList(mixedAudio, &mixList);
298 MixAnonomouslyFromList(*mixedAudio, &additionalFramesList); 298 MixAnonomouslyFromList(mixedAudio, &additionalFramesList);
299 MixAnonomouslyFromList(*mixedAudio, &rampOutList); 299 MixAnonomouslyFromList(mixedAudio, &rampOutList);
300 300
301 if(mixedAudio->samples_per_channel_ == 0) { 301 if(mixedAudio->samples_per_channel_ == 0) {
302 // Nothing was mixed, set the audio samples to silence. 302 // Nothing was mixed, set the audio samples to silence.
303 mixedAudio->samples_per_channel_ = _sampleSize; 303 mixedAudio->samples_per_channel_ = _sampleSize;
304 mixedAudio->Mute(); 304 mixedAudio->Mute();
305 } else { 305 } else {
306 // Only call the limiter if we have something to mix. 306 // Only call the limiter if we have something to mix.
307 if(!LimitMixedAudio(*mixedAudio)) 307 if(!LimitMixedAudio(mixedAudio))
308 retval = -1; 308 retval = -1;
309 } 309 }
310 } 310 }
311 311
312 { 312 {
313 CriticalSectionScoped cs(_cbCrit.get()); 313 CriticalSectionScoped cs(_cbCrit.get());
314 if(_mixReceiver != NULL) { 314 if(_mixReceiver != NULL) {
315 const AudioFrame** dummy = NULL; 315 const AudioFrame** dummy = NULL;
316 _mixReceiver->NewMixedAudio( 316 _mixReceiver->NewMixedAudio(
317 _id, 317 _id,
318 *mixedAudio, 318 *mixedAudio,
319 dummy, 319 dummy,
320 0); 320 0);
321 } 321 }
322 } 322 }
323 323
324 // Reclaim all outstanding memory. 324 // Reclaim all outstanding memory.
325 _audioFramePool->PushMemory(mixedAudio); 325 _audioFramePool->PushMemory(mixedAudio);
326 ClearAudioFrameList(&mixList); 326 ClearAudioFrameList(&mixList);
327 ClearAudioFrameList(&rampOutList); 327 ClearAudioFrameList(&rampOutList);
328 ClearAudioFrameList(&additionalFramesList); 328 ClearAudioFrameList(&additionalFramesList);
329 { 329 {
330 CriticalSectionScoped cs(_crit.get()); 330 CriticalSectionScoped cs(_crit.get());
331 _processCalls--; 331 _processCalls--;
332 } 332 }
333 return retval; 333 return retval;
334 } 334 }
335 335
336 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback( 336 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
337 AudioMixerOutputReceiver& mixReceiver) { 337 AudioMixerOutputReceiver* mixReceiver) {
338 CriticalSectionScoped cs(_cbCrit.get()); 338 CriticalSectionScoped cs(_cbCrit.get());
339 if(_mixReceiver != NULL) { 339 if(_mixReceiver != NULL) {
340 return -1; 340 return -1;
341 } 341 }
342 _mixReceiver = &mixReceiver; 342 _mixReceiver = mixReceiver;
343 return 0; 343 return 0;
344 } 344 }
345 345
346 int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() { 346 int32_t AudioConferenceMixerImpl::UnRegisterMixedStreamCallback() {
347 CriticalSectionScoped cs(_cbCrit.get()); 347 CriticalSectionScoped cs(_cbCrit.get());
348 if(_mixReceiver == NULL) { 348 if(_mixReceiver == NULL) {
349 return -1; 349 return -1;
350 } 350 }
351 _mixReceiver = NULL; 351 _mixReceiver = NULL;
352 return 0; 352 return 0;
353 } 353 }
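
RegisterMixedStreamCallback likewise now takes the receiver by pointer, and the diff keeps the existing contract: registering while a receiver is already set fails with -1, as does unregistering when none is set. A self-contained sketch of that contract using stand-in types (MixerStub and ReceiverStub are illustrations only, not the WebRTC classes):

#include <cassert>
#include <cstdint>

// Stand-in for AudioMixerOutputReceiver; illustration only.
struct ReceiverStub {};

// Stand-in mirroring the registration logic in the diff above.
class MixerStub {
 public:
  int32_t RegisterMixedStreamCallback(ReceiverStub* mixReceiver) {
    if (_mixReceiver != nullptr) {
      return -1;  // a receiver is already registered
    }
    _mixReceiver = mixReceiver;
    return 0;
  }
  int32_t UnRegisterMixedStreamCallback() {
    if (_mixReceiver == nullptr) {
      return -1;  // nothing to unregister
    }
    _mixReceiver = nullptr;
    return 0;
  }

 private:
  ReceiverStub* _mixReceiver = nullptr;
};

int main() {
  MixerStub mixer;
  ReceiverStub receiver;
  assert(mixer.RegisterMixedStreamCallback(&receiver) == 0);
  assert(mixer.RegisterMixedStreamCallback(&receiver) == -1);  // double registration fails
  assert(mixer.UnRegisterMixedStreamCallback() == 0);
  assert(mixer.UnRegisterMixedStreamCallback() == -1);         // nothing left to remove
  return 0;
}
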
354 354
355 int32_t AudioConferenceMixerImpl::SetOutputFrequency( 355 int32_t AudioConferenceMixerImpl::SetOutputFrequency(
356 const Frequency frequency) { 356 const Frequency& frequency) {
357 CriticalSectionScoped cs(_crit.get()); 357 CriticalSectionScoped cs(_crit.get());
358 358
359 _outputFrequency = frequency; 359 _outputFrequency = frequency;
360 _sampleSize = 360 _sampleSize =
361 static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000); 361 static_cast<size_t>((_outputFrequency*kProcessPeriodicityInMs) / 1000);
362 362
363 return 0; 363 return 0;
364 } 364 }
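
SetOutputFrequency derives _sampleSize as frequency * kProcessPeriodicityInMs / 1000, i.e. the number of samples per channel in one processing period. Assuming the module's usual 10 ms period (the constant's value is defined outside this hunk), a quick check of the arithmetic:

#include <cstddef>
#include <cstdio>
#include <initializer_list>

int main() {
  // Assumed value of kProcessPeriodicityInMs; the real constant lives
  // elsewhere in the audio_conference_mixer sources.
  const int kProcessPeriodicityInMs = 10;
  for (int frequency : {8000, 16000, 32000, 48000}) {
    const size_t sampleSize =
        static_cast<size_t>((frequency * kProcessPeriodicityInMs) / 1000);
    std::printf("%d Hz -> %zu samples per channel per frame\n",
                frequency, sampleSize);  // 80, 160, 320, 480
  }
  return 0;
}
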
365 365
366 AudioConferenceMixer::Frequency 366 AudioConferenceMixer::Frequency
367 AudioConferenceMixerImpl::OutputFrequency() const { 367 AudioConferenceMixerImpl::OutputFrequency() const {
368 CriticalSectionScoped cs(_crit.get()); 368 CriticalSectionScoped cs(_crit.get());
369 return _outputFrequency; 369 return _outputFrequency;
370 } 370 }
371 371
372 int32_t AudioConferenceMixerImpl::SetMixabilityStatus( 372 int32_t AudioConferenceMixerImpl::SetMixabilityStatus(
373 MixerParticipant& participant, 373 MixerParticipant* participant, bool mixable) {
374 bool mixable) {
375 if (!mixable) { 374 if (!mixable) {
376 // Anonymous participants are in a separate list. Make sure that the 375 // Anonymous participants are in a separate list. Make sure that the
377 // participant is in the _participantList if it is being mixed. 376 // participant is in the _participantList if it is being mixed.
378 SetAnonymousMixabilityStatus(participant, false); 377 SetAnonymousMixabilityStatus(participant, false);
379 } 378 }
380 size_t numMixedParticipants; 379 size_t numMixedParticipants;
381 { 380 {
382 CriticalSectionScoped cs(_cbCrit.get()); 381 CriticalSectionScoped cs(_cbCrit.get());
383 const bool isMixed = 382 const bool isMixed =
384 IsParticipantInList(participant, &_participantList); 383 IsParticipantInList(*participant, &_participantList);
385 // API must be called with a new state. 384 // API must be called with a new state.
386 if(!(mixable ^ isMixed)) { 385 if(!(mixable ^ isMixed)) {
387 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 386 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
388 "Mixable is aready %s", 387 "Mixable is aready %s",
389 isMixed ? "ON" : "off"); 388 isMixed ? "ON" : "off");
390 return -1; 389 return -1;
391 } 390 }
392 bool success = false; 391 bool success = false;
393 if(mixable) { 392 if(mixable) {
394 success = AddParticipantToList(participant, &_participantList); 393 success = AddParticipantToList(participant, &_participantList);
(...skipping 17 matching lines...)
412 } 411 }
413 // A MixerParticipant was added or removed. Make sure the scratch 412 // A MixerParticipant was added or removed. Make sure the scratch
414 // buffer is updated if necessary. 413 // buffer is updated if necessary.
415 // Note: The scratch buffer may only be updated in Process(). 414 // Note: The scratch buffer may only be updated in Process().
416 CriticalSectionScoped cs(_crit.get()); 415 CriticalSectionScoped cs(_crit.get());
417 _numMixedParticipants = numMixedParticipants; 416 _numMixedParticipants = numMixedParticipants;
418 return 0; 417 return 0;
419 } 418 }
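
The !(mixable ^ isMixed) guard inside SetMixabilityStatus is how the "API must be called with a new state" comment is enforced: XOR of the two flags is false exactly when the requested state equals the current one, and such no-op calls return -1. A tiny illustration:

#include <cassert>

// The mixer requires a state change: asking for the value that is already set
// is rejected (SetMixabilityStatus returns -1).
bool IsNoOpRequest(bool mixable, bool isMixed) {
  return !(mixable ^ isMixed);
}

int main() {
  assert(IsNoOpRequest(true, true));    // already mixable -> rejected
  assert(IsNoOpRequest(false, false));  // already not mixable -> rejected
  assert(!IsNoOpRequest(true, false));  // genuine change -> accepted
  assert(!IsNoOpRequest(false, true));  // genuine change -> accepted
  return 0;
}
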
420 419
421 int32_t AudioConferenceMixerImpl::MixabilityStatus( 420 int32_t AudioConferenceMixerImpl::MixabilityStatus(
422 MixerParticipant& participant, 421 const MixerParticipant& participant, bool* mixable) const {
423 bool& mixable) {
424 CriticalSectionScoped cs(_cbCrit.get()); 422 CriticalSectionScoped cs(_cbCrit.get());
425 mixable = IsParticipantInList(participant, &_participantList); 423 *mixable = IsParticipantInList(participant, &_participantList);
426 return 0; 424 return 0;
427 } 425 }
428 426
429 int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus( 427 int32_t AudioConferenceMixerImpl::SetAnonymousMixabilityStatus(
430 MixerParticipant& participant, const bool anonymous) { 428 MixerParticipant* participant, bool anonymous) {
431 CriticalSectionScoped cs(_cbCrit.get()); 429 CriticalSectionScoped cs(_cbCrit.get());
432 if(IsParticipantInList(participant, &_additionalParticipantList)) { 430 if(IsParticipantInList(*participant, &_additionalParticipantList)) {
433 if(anonymous) { 431 if(anonymous) {
434 return 0; 432 return 0;
435 } 433 }
436 if(!RemoveParticipantFromList(participant, 434 if(!RemoveParticipantFromList(participant,
437 &_additionalParticipantList)) { 435 &_additionalParticipantList)) {
438 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 436 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
439 "unable to remove participant from anonymous list"); 437 "unable to remove participant from anonymous list");
440 assert(false); 438 assert(false);
441 return -1; 439 return -1;
442 } 440 }
(...skipping 12 matching lines...)
455 "participant must be registered before turning it into anonymous"); 453 "participant must be registered before turning it into anonymous");
456 // Setting anonymous status is only possible if MixerParticipant is 454 // Setting anonymous status is only possible if MixerParticipant is
457 // already registered. 455 // already registered.
458 return -1; 456 return -1;
459 } 457 }
460 return AddParticipantToList(participant, &_additionalParticipantList) ? 458 return AddParticipantToList(participant, &_additionalParticipantList) ?
461 0 : -1; 459 0 : -1;
462 } 460 }
463 461
464 int32_t AudioConferenceMixerImpl::AnonymousMixabilityStatus( 462 int32_t AudioConferenceMixerImpl::AnonymousMixabilityStatus(
465 MixerParticipant& participant, bool& mixable) { 463 const MixerParticipant& participant, bool* mixable) const {
466 CriticalSectionScoped cs(_cbCrit.get()); 464 CriticalSectionScoped cs(_cbCrit.get());
467 mixable = IsParticipantInList(participant, 465 *mixable = IsParticipantInList(participant,
468 &_additionalParticipantList); 466 &_additionalParticipantList);
469 return 0; 467 return 0;
470 } 468 }
471 469
472 int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency( 470 int32_t AudioConferenceMixerImpl::SetMinimumMixingFrequency(
473 Frequency freq) { 471 Frequency freq) {
474 // Make sure that only allowed sampling frequencies are used. Use closest 472 // Make sure that only allowed sampling frequencies are used. Use closest
475 // higher sampling frequency to avoid losing information. 473 // higher sampling frequency to avoid losing information.
476 if (static_cast<int>(freq) == 12000) { 474 if (static_cast<int>(freq) == 12000) {
477 freq = kWbInHz; 475 freq = kWbInHz;
478 } else if (static_cast<int>(freq) == 24000) { 476 } else if (static_cast<int>(freq) == 24000) {
479 freq = kSwbInHz; 477 freq = kSwbInHz;
480 } 478 }
481 479
482 if((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) || 480 if((freq == kNbInHz) || (freq == kWbInHz) || (freq == kSwbInHz) ||
483 (freq == kLowestPossible)) { 481 (freq == kLowestPossible)) {
484 _minimumMixingFreq=freq; 482 _minimumMixingFreq=freq;
485 return 0; 483 return 0;
486 } else { 484 } else {
487 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 485 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
488 "SetMinimumMixingFrequency incorrect frequency: %i",freq); 486 "SetMinimumMixingFrequency incorrect frequency: %i",freq);
489 assert(false); 487 assert(false);
490 return -1; 488 return -1;
491 } 489 }
492 } 490 }
493 491
494 // Check all AudioFrames that are to be mixed. The highest sampling frequency 492 // Check all AudioFrames that are to be mixed. The highest sampling frequency
495 // found is the lowest that can be used without losing information. 493 // found is the lowest that can be used without losing information.
496 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() { 494 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequency() const {
497 const int participantListFrequency = 495 const int participantListFrequency =
498 GetLowestMixingFrequencyFromList(&_participantList); 496 GetLowestMixingFrequencyFromList(&_participantList);
499 const int anonymousListFrequency = 497 const int anonymousListFrequency =
500 GetLowestMixingFrequencyFromList(&_additionalParticipantList); 498 GetLowestMixingFrequencyFromList(&_additionalParticipantList);
501 const int highestFreq = 499 const int highestFreq =
502 (participantListFrequency > anonymousListFrequency) ? 500 (participantListFrequency > anonymousListFrequency) ?
503 participantListFrequency : anonymousListFrequency; 501 participantListFrequency : anonymousListFrequency;
504 // Check if the user specified a lowest mixing frequency. 502 // Check if the user specified a lowest mixing frequency.
505 if(_minimumMixingFreq != kLowestPossible) { 503 if(_minimumMixingFreq != kLowestPossible) {
506 if(_minimumMixingFreq > highestFreq) { 504 if(_minimumMixingFreq > highestFreq) {
507 return _minimumMixingFreq; 505 return _minimumMixingFreq;
508 } 506 }
509 } 507 }
510 return highestFreq; 508 return highestFreq;
511 } 509 }
512 510
513 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequencyFromList( 511 int32_t AudioConferenceMixerImpl::GetLowestMixingFrequencyFromList(
514 MixerParticipantList* mixList) { 512 const MixerParticipantList* mixList) const {
515 int32_t highestFreq = 8000; 513 int32_t highestFreq = 8000;
516 for (MixerParticipantList::iterator iter = mixList->begin(); 514 for (MixerParticipantList::const_iterator iter = mixList->begin();
Andrew MacDonald 2015/08/26 15:56:01 You should be able to use range based for loops in
Andrew MacDonald 2015/08/28 18:33:20 No change necessary, just wanted to make sure you
517 iter != mixList->end(); 515 iter != mixList->end();
518 ++iter) { 516 ++iter) {
519 const int32_t neededFrequency = (*iter)->NeededFrequency(_id); 517 const int32_t neededFrequency = (*iter)->NeededFrequency(_id);
520 if(neededFrequency > highestFreq) { 518 if(neededFrequency > highestFreq) {
521 highestFreq = neededFrequency; 519 highestFreq = neededFrequency;
522 } 520 }
523 } 521 }
524 return highestFreq; 522 return highestFreq;
525 } 523 }
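
Regarding Andrew MacDonald's comment above about range-based for loops: the const_iterator loop in GetLowestMixingFrequencyFromList could be written with a range-based for if C++11 is acceptable in this part of the tree (an assumption; no such change is part of this CL). A self-contained sketch with stand-in types showing the shape of that rewrite:

#include <cstdint>
#include <list>

// Stand-ins for MixerParticipant and MixerParticipantList, only to make the
// loop shape concrete; this is an illustration, not a change in this CL.
struct ParticipantStub {
  int32_t needed_frequency;
  int32_t NeededFrequency(int32_t /*id*/) const { return needed_frequency; }
};
typedef std::list<ParticipantStub*> ParticipantListStub;

// Range-based-for version of the loop in GetLowestMixingFrequencyFromList.
int32_t LowestMixingFrequency(const ParticipantListStub& mixList, int32_t id) {
  int32_t highestFreq = 8000;
  for (const ParticipantStub* participant : mixList) {
    const int32_t neededFrequency = participant->NeededFrequency(id);
    if (neededFrequency > highestFreq) {
      highestFreq = neededFrequency;
    }
  }
  return highestFreq;
}

int main() {
  ParticipantStub wideband = {16000};
  ParticipantStub superwideband = {32000};
  ParticipantListStub list;
  list.push_back(&wideband);
  list.push_back(&superwideband);
  return LowestMixingFrequency(list, 0) == 32000 ? 0 : 1;
}
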
526 524
527 void AudioConferenceMixerImpl::UpdateToMix( 525 void AudioConferenceMixerImpl::UpdateToMix(
528 AudioFrameList* mixList, 526 AudioFrameList* mixList,
529 AudioFrameList* rampOutList, 527 AudioFrameList* rampOutList,
530 std::map<int, MixerParticipant*>* mixParticipantList, 528 std::map<int, MixerParticipant*>* mixParticipantList,
531 size_t& maxAudioFrameCounter) { 529 size_t* maxAudioFrameCounter) const {
532 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 530 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
533 "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)", 531 "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)",
534 maxAudioFrameCounter); 532 *maxAudioFrameCounter);
535 const size_t mixListStartSize = mixList->size(); 533 const size_t mixListStartSize = mixList->size();
536 AudioFrameList activeList; 534 AudioFrameList activeList;
537 // Struct needed by the passive lists to keep track of which AudioFrame 535 // Struct needed by the passive lists to keep track of which AudioFrame
538 // belongs to which MixerParticipant. 536 // belongs to which MixerParticipant.
539 ParticipantFramePairList passiveWasNotMixedList; 537 ParticipantFramePairList passiveWasNotMixedList;
540 ParticipantFramePairList passiveWasMixedList; 538 ParticipantFramePairList passiveWasMixedList;
541 for (MixerParticipantList::iterator participant = _participantList.begin(); 539 for (MixerParticipantList::const_iterator participant =
542 participant != _participantList.end(); 540 _participantList.begin(); participant != _participantList.end();
543 ++participant) { 541 ++participant) {
544 // Stop keeping track of passive participants if there are already 542 // Stop keeping track of passive participants if there are already
545 // enough participants available (they wont be mixed anyway). 543 // enough participants available (they wont be mixed anyway).
546 bool mustAddToPassiveList = (maxAudioFrameCounter > 544 bool mustAddToPassiveList = (*maxAudioFrameCounter >
547 (activeList.size() + 545 (activeList.size() +
548 passiveWasMixedList.size() + 546 passiveWasMixedList.size() +
549 passiveWasNotMixedList.size())); 547 passiveWasNotMixedList.size()));
550 548
551 bool wasMixed = false; 549 bool wasMixed = false;
552 (*participant)->_mixHistory->WasMixed(wasMixed); 550 (*participant)->_mixHistory->WasMixed(&wasMixed);
553 AudioFrame* audioFrame = NULL; 551 AudioFrame* audioFrame = NULL;
554 if(_audioFramePool->PopMemory(audioFrame) == -1) { 552 if(_audioFramePool->PopMemory(audioFrame) == -1) {
555 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, 553 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
556 "failed PopMemory() call"); 554 "failed PopMemory() call");
557 assert(false); 555 assert(false);
558 return; 556 return;
559 } 557 }
560 audioFrame->sample_rate_hz_ = _outputFrequency; 558 audioFrame->sample_rate_hz_ = _outputFrequency;
561 559
562 if((*participant)->GetAudioFrame(_id,*audioFrame) != 0) { 560 if((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
563 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 561 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
564 "failed to GetAudioFrame() from participant"); 562 "failed to GetAudioFrame() from participant");
565 _audioFramePool->PushMemory(audioFrame); 563 _audioFramePool->PushMemory(audioFrame);
566 continue; 564 continue;
567 } 565 }
568 if (_participantList.size() != 1) { 566 if (_participantList.size() != 1) {
569 // TODO(wu): Issue 3390, add support for multiple participants case. 567 // TODO(wu): Issue 3390, add support for multiple participants case.
570 audioFrame->ntp_time_ms_ = -1; 568 audioFrame->ntp_time_ms_ = -1;
571 } 569 }
572 570
573 // TODO(henrike): this assert triggers in some test cases where SRTP is 571 // TODO(henrike): this assert triggers in some test cases where SRTP is
574 // used which prevents NetEQ from making a VAD. Temporarily disable this 572 // used which prevents NetEQ from making a VAD. Temporarily disable this
575 // assert until the problem is fixed on a higher level. 573 // assert until the problem is fixed on a higher level.
576 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown); 574 // assert(audioFrame->vad_activity_ != AudioFrame::kVadUnknown);
577 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) { 575 if (audioFrame->vad_activity_ == AudioFrame::kVadUnknown) {
578 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 576 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
579 "invalid VAD state from participant"); 577 "invalid VAD state from participant");
580 } 578 }
581 579
582 if(audioFrame->vad_activity_ == AudioFrame::kVadActive) { 580 if(audioFrame->vad_activity_ == AudioFrame::kVadActive) {
583 if(!wasMixed) { 581 if(!wasMixed) {
584 RampIn(*audioFrame); 582 RampIn(*audioFrame);
585 } 583 }
586 584
587 if(activeList.size() >= maxAudioFrameCounter) { 585 if(activeList.size() >= *maxAudioFrameCounter) {
588 // There are already more active participants than should be 586 // There are already more active participants than should be
589 // mixed. Only keep the ones with the highest energy. 587 // mixed. Only keep the ones with the highest energy.
590 AudioFrameList::iterator replaceItem; 588 AudioFrameList::iterator replaceItem;
591 CalculateEnergy(*audioFrame); 589 CalculateEnergy(*audioFrame);
592 uint32_t lowestEnergy = audioFrame->energy_; 590 uint32_t lowestEnergy = audioFrame->energy_;
593 591
594 bool found_replace_item = false; 592 bool found_replace_item = false;
595 for (AudioFrameList::iterator iter = activeList.begin(); 593 for (AudioFrameList::iterator iter = activeList.begin();
596 iter != activeList.end(); 594 iter != activeList.end();
597 ++iter) { 595 ++iter) {
598 CalculateEnergy(**iter); 596 CalculateEnergy(**iter);
599 if((*iter)->energy_ < lowestEnergy) { 597 if((*iter)->energy_ < lowestEnergy) {
600 replaceItem = iter; 598 replaceItem = iter;
601 lowestEnergy = (*iter)->energy_; 599 lowestEnergy = (*iter)->energy_;
602 found_replace_item = true; 600 found_replace_item = true;
603 } 601 }
604 } 602 }
605 if(found_replace_item) { 603 if(found_replace_item) {
606 AudioFrame* replaceFrame = *replaceItem; 604 AudioFrame* replaceFrame = *replaceItem;
607 605
608 bool replaceWasMixed = false; 606 bool replaceWasMixed = false;
609 std::map<int, MixerParticipant*>::iterator it = 607 std::map<int, MixerParticipant*>::const_iterator it =
610 mixParticipantList->find(replaceFrame->id_); 608 mixParticipantList->find(replaceFrame->id_);
611 609
612 // When a frame is pushed to |activeList| it is also pushed 610 // When a frame is pushed to |activeList| it is also pushed
613 // to mixParticipantList with the frame's id. This means 611 // to mixParticipantList with the frame's id. This means
614 // that the Find call above should never fail. 612 // that the Find call above should never fail.
615 assert(it != mixParticipantList->end()); 613 assert(it != mixParticipantList->end());
616 it->second->_mixHistory->WasMixed(replaceWasMixed); 614 it->second->_mixHistory->WasMixed(&replaceWasMixed);
617 615
618 mixParticipantList->erase(replaceFrame->id_); 616 mixParticipantList->erase(replaceFrame->id_);
619 activeList.erase(replaceItem); 617 activeList.erase(replaceItem);
620 618
621 activeList.push_front(audioFrame); 619 activeList.push_front(audioFrame);
622 (*mixParticipantList)[audioFrame->id_] = *participant; 620 (*mixParticipantList)[audioFrame->id_] = *participant;
623 assert(mixParticipantList->size() <= 621 assert(mixParticipantList->size() <=
624 kMaximumAmountOfMixedParticipants); 622 kMaximumAmountOfMixedParticipants);
625 623
626 if (replaceWasMixed) { 624 if (replaceWasMixed) {
(...skipping 30 matching lines...)
657 RampIn(*audioFrame); 655 RampIn(*audioFrame);
658 ParticipantFramePair* pair = new ParticipantFramePair; 656 ParticipantFramePair* pair = new ParticipantFramePair;
659 pair->audioFrame = audioFrame; 657 pair->audioFrame = audioFrame;
660 pair->participant = *participant; 658 pair->participant = *participant;
661 passiveWasNotMixedList.push_back(pair); 659 passiveWasNotMixedList.push_back(pair);
662 } else { 660 } else {
663 _audioFramePool->PushMemory(audioFrame); 661 _audioFramePool->PushMemory(audioFrame);
664 } 662 }
665 } 663 }
666 } 664 }
667 assert(activeList.size() <= maxAudioFrameCounter); 665 assert(activeList.size() <= *maxAudioFrameCounter);
668 // At this point it is known which participants should be mixed. Transfer 666 // At this point it is known which participants should be mixed. Transfer
669 // this information to this functions output parameters. 667 // this information to this functions output parameters.
670 for (AudioFrameList::iterator iter = activeList.begin(); 668 for (AudioFrameList::const_iterator iter = activeList.begin();
671 iter != activeList.end(); 669 iter != activeList.end();
672 ++iter) { 670 ++iter) {
673 mixList->push_back(*iter); 671 mixList->push_back(*iter);
674 } 672 }
675 activeList.clear(); 673 activeList.clear();
676 // Always mix a constant number of AudioFrames. If there aren't enough 674 // Always mix a constant number of AudioFrames. If there aren't enough
677 // active participants mix passive ones. Starting with those that was mixed 675 // active participants mix passive ones. Starting with those that was mixed
678 // last iteration. 676 // last iteration.
679 for (ParticipantFramePairList::iterator iter = passiveWasMixedList.begin(); 677 for (ParticipantFramePairList::const_iterator
680 iter != passiveWasMixedList.end(); 678 iter = passiveWasMixedList.begin(); iter != passiveWasMixedList.end();
681 ++iter) { 679 ++iter) {
682 if(mixList->size() < maxAudioFrameCounter + mixListStartSize) { 680 if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
683 mixList->push_back((*iter)->audioFrame); 681 mixList->push_back((*iter)->audioFrame);
684 (*mixParticipantList)[(*iter)->audioFrame->id_] = 682 (*mixParticipantList)[(*iter)->audioFrame->id_] =
685 (*iter)->participant; 683 (*iter)->participant;
686 assert(mixParticipantList->size() <= 684 assert(mixParticipantList->size() <=
687 kMaximumAmountOfMixedParticipants); 685 kMaximumAmountOfMixedParticipants);
688 } else { 686 } else {
689 _audioFramePool->PushMemory((*iter)->audioFrame); 687 _audioFramePool->PushMemory((*iter)->audioFrame);
690 } 688 }
691 delete *iter; 689 delete *iter;
692 } 690 }
693 // And finally the ones that have not been mixed for a while. 691 // And finally the ones that have not been mixed for a while.
694 for (ParticipantFramePairList::iterator iter = 692 for (ParticipantFramePairList::const_iterator iter =
695 passiveWasNotMixedList.begin(); 693 passiveWasNotMixedList.begin();
696 iter != passiveWasNotMixedList.end(); 694 iter != passiveWasNotMixedList.end();
697 ++iter) { 695 ++iter) {
698 if(mixList->size() < maxAudioFrameCounter + mixListStartSize) { 696 if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) {
699 mixList->push_back((*iter)->audioFrame); 697 mixList->push_back((*iter)->audioFrame);
700 (*mixParticipantList)[(*iter)->audioFrame->id_] = 698 (*mixParticipantList)[(*iter)->audioFrame->id_] =
701 (*iter)->participant; 699 (*iter)->participant;
702 assert(mixParticipantList->size() <= 700 assert(mixParticipantList->size() <=
703 kMaximumAmountOfMixedParticipants); 701 kMaximumAmountOfMixedParticipants);
704 } else { 702 } else {
705 _audioFramePool->PushMemory((*iter)->audioFrame); 703 _audioFramePool->PushMemory((*iter)->audioFrame);
706 } 704 }
707 delete *iter; 705 delete *iter;
708 } 706 }
709 assert(maxAudioFrameCounter + mixListStartSize >= mixList->size()); 707 assert(*maxAudioFrameCounter + mixListStartSize >= mixList->size());
710 maxAudioFrameCounter += mixListStartSize - mixList->size(); 708 *maxAudioFrameCounter += mixListStartSize - mixList->size();
711 } 709 }
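
The replacement logic in UpdateToMix keeps only the loudest frames once the active list is full: it finds the lowest-energy frame already in the list and evicts it only if the incoming frame has strictly higher energy, returning the evicted frame to the memory pool. A self-contained sketch of that selection rule (FrameStub and its energy field stand in for AudioFrame and the result of CalculateEnergy):

#include <cstddef>
#include <cstdint>
#include <list>

// Stand-in for AudioFrame; |energy| plays the role of AudioFrame::energy_.
struct FrameStub {
  uint32_t energy;
};

// Keep at most |maxFrames| frames in |activeList|, preferring the loudest.
// When the list is full, the lowest-energy frame is evicted only if the
// incoming frame is strictly louder. The evicted frame (or the rejected
// incoming frame) is returned so the caller can recycle it, mirroring how the
// mixer pushes dropped frames back to its memory pool.
FrameStub* AddKeepingLoudest(std::list<FrameStub*>* activeList,
                             FrameStub* frame,
                             size_t maxFrames) {
  if (activeList->size() < maxFrames) {
    activeList->push_front(frame);
    return nullptr;
  }
  std::list<FrameStub*>::iterator quietest = activeList->begin();
  for (std::list<FrameStub*>::iterator it = activeList->begin();
       it != activeList->end(); ++it) {
    if ((*it)->energy < (*quietest)->energy) {
      quietest = it;
    }
  }
  if ((*quietest)->energy >= frame->energy) {
    return frame;  // the incoming frame is the quietest; drop it instead
  }
  FrameStub* evicted = *quietest;
  activeList->erase(quietest);
  activeList->push_front(frame);
  return evicted;
}

int main() {
  FrameStub quiet = {10};
  FrameStub loud = {30};
  FrameStub medium = {20};
  std::list<FrameStub*> activeList;
  AddKeepingLoudest(&activeList, &quiet, 2);
  AddKeepingLoudest(&activeList, &loud, 2);
  FrameStub* dropped = AddKeepingLoudest(&activeList, &medium, 2);  // evicts |quiet|
  return dropped == &quiet ? 0 : 1;
}
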
712 710
713 void AudioConferenceMixerImpl::GetAdditionalAudio( 711 void AudioConferenceMixerImpl::GetAdditionalAudio(
714 AudioFrameList* additionalFramesList) { 712 AudioFrameList* additionalFramesList) const {
715 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 713 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
716 "GetAdditionalAudio(additionalFramesList)"); 714 "GetAdditionalAudio(additionalFramesList)");
717 // The GetAudioFrame() callback may result in the participant being removed 715 // The GetAudioFrame() callback may result in the participant being removed
718 // from additionalParticipantList_. If that happens it will invalidate any 716 // from additionalParticipantList_. If that happens it will invalidate any
719 // iterators. Create a copy of the participants list such that the list of 717 // iterators. Create a copy of the participants list such that the list of
720 // participants can be traversed safely. 718 // participants can be traversed safely.
721 MixerParticipantList additionalParticipantList; 719 MixerParticipantList additionalParticipantList;
722 additionalParticipantList.insert(additionalParticipantList.begin(), 720 additionalParticipantList.insert(additionalParticipantList.begin(),
723 _additionalParticipantList.begin(), 721 _additionalParticipantList.begin(),
724 _additionalParticipantList.end()); 722 _additionalParticipantList.end());
725 723
726 for (MixerParticipantList::iterator participant = 724 for (MixerParticipantList::const_iterator participant =
727 additionalParticipantList.begin(); 725 additionalParticipantList.begin();
728 participant != additionalParticipantList.end(); 726 participant != additionalParticipantList.end();
729 ++participant) { 727 ++participant) {
730 AudioFrame* audioFrame = NULL; 728 AudioFrame* audioFrame = NULL;
731 if(_audioFramePool->PopMemory(audioFrame) == -1) { 729 if(_audioFramePool->PopMemory(audioFrame) == -1) {
732 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id, 730 WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
733 "failed PopMemory() call"); 731 "failed PopMemory() call");
734 assert(false); 732 assert(false);
735 return; 733 return;
736 } 734 }
737 audioFrame->sample_rate_hz_ = _outputFrequency; 735 audioFrame->sample_rate_hz_ = _outputFrequency;
738 if((*participant)->GetAudioFrame(_id, *audioFrame) != 0) { 736 if((*participant)->GetAudioFrame(_id, audioFrame) != 0) {
739 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id, 737 WEBRTC_TRACE(kTraceWarning, kTraceAudioMixerServer, _id,
740 "failed to GetAudioFrame() from participant"); 738 "failed to GetAudioFrame() from participant");
741 _audioFramePool->PushMemory(audioFrame); 739 _audioFramePool->PushMemory(audioFrame);
742 continue; 740 continue;
743 } 741 }
744 if(audioFrame->samples_per_channel_ == 0) { 742 if(audioFrame->samples_per_channel_ == 0) {
745 // Empty frame. Don't use it. 743 // Empty frame. Don't use it.
746 _audioFramePool->PushMemory(audioFrame); 744 _audioFramePool->PushMemory(audioFrame);
747 continue; 745 continue;
748 } 746 }
749 additionalFramesList->push_back(audioFrame); 747 additionalFramesList->push_back(audioFrame);
750 } 748 }
751 } 749 }
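
GetAdditionalAudio copies _additionalParticipantList before walking it because GetAudioFrame() may call back into the mixer and remove the participant, invalidating the iterator currently in use. A minimal illustration of that defensive-copy idiom; the MaybeUnregister callback is an assumption for demonstration only:

#include <list>

// A callback that may remove entries from the registry it was invoked for,
// similar to a participant unregistering itself from inside GetAudioFrame().
void MaybeUnregister(int participant, std::list<int>* registry) {
  if (participant % 2 == 0) {
    registry->remove(participant);
  }
}

int main() {
  std::list<int> registry = {1, 2, 3, 4};
  // Iterating |registry| directly while MaybeUnregister() erases the current
  // element would invalidate the iterator in use. Walking a snapshot is safe.
  const std::list<int> snapshot(registry.begin(), registry.end());
  for (int participant : snapshot) {
    MaybeUnregister(participant, &registry);
  }
  return registry.size() == 2 ? 0 : 1;  // participants 1 and 3 remain
}
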
752 750
753 void AudioConferenceMixerImpl::UpdateMixedStatus( 751 void AudioConferenceMixerImpl::UpdateMixedStatus(
754 std::map<int, MixerParticipant*>& mixedParticipantsMap) { 752 const std::map<int, MixerParticipant*>& mixedParticipantsMap) const {
755 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 753 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
756 "UpdateMixedStatus(mixedParticipantsMap)"); 754 "UpdateMixedStatus(mixedParticipantsMap)");
757 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants); 755 assert(mixedParticipantsMap.size() <= kMaximumAmountOfMixedParticipants);
758 756
759 // Loop through all participants. If they are in the mix map they 757 // Loop through all participants. If they are in the mix map they
760 // were mixed. 758 // were mixed.
761 for (MixerParticipantList::iterator participant = _participantList.begin(); 759 for (MixerParticipantList::const_iterator
762 participant != _participantList.end(); 760 participant =_participantList.begin();
761 participant != _participantList.end();
763 ++participant) { 762 ++participant) {
764 bool isMixed = false; 763 bool isMixed = false;
765 for (std::map<int, MixerParticipant*>::iterator it = 764 for (std::map<int, MixerParticipant*>::const_iterator it =
766 mixedParticipantsMap.begin(); 765 mixedParticipantsMap.begin();
767 it != mixedParticipantsMap.end(); 766 it != mixedParticipantsMap.end();
768 ++it) { 767 ++it) {
769 if (it->second == *participant) { 768 if (it->second == *participant) {
770 isMixed = true; 769 isMixed = true;
771 break; 770 break;
772 } 771 }
773 } 772 }
774 (*participant)->_mixHistory->SetIsMixed(isMixed); 773 (*participant)->_mixHistory->SetIsMixed(isMixed);
775 } 774 }
776 } 775 }
777 776
778 void AudioConferenceMixerImpl::ClearAudioFrameList( 777 void AudioConferenceMixerImpl::ClearAudioFrameList(
779 AudioFrameList* audioFrameList) { 778 AudioFrameList* audioFrameList) const {
780 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 779 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
781 "ClearAudioFrameList(audioFrameList)"); 780 "ClearAudioFrameList(audioFrameList)");
782 for (AudioFrameList::iterator iter = audioFrameList->begin(); 781 for (AudioFrameList::iterator iter = audioFrameList->begin();
783 iter != audioFrameList->end(); 782 iter != audioFrameList->end();
784 ++iter) { 783 ++iter) {
785 _audioFramePool->PushMemory(*iter); 784 _audioFramePool->PushMemory(*iter);
786 } 785 }
787 audioFrameList->clear(); 786 audioFrameList->clear();
788 } 787 }
789 788
790 void AudioConferenceMixerImpl::UpdateVADPositiveParticipants( 789 void AudioConferenceMixerImpl::UpdateVADPositiveParticipants(
791 AudioFrameList* mixList) { 790 AudioFrameList* mixList) const {
792 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 791 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
793 "UpdateVADPositiveParticipants(mixList)"); 792 "UpdateVADPositiveParticipants(mixList)");
794 793
795 for (AudioFrameList::iterator iter = mixList->begin(); 794 for (AudioFrameList::const_iterator iter = mixList->begin();
796 iter != mixList->end(); 795 iter != mixList->end();
797 ++iter) { 796 ++iter) {
798 CalculateEnergy(**iter); 797 CalculateEnergy(**iter);
799 } 798 }
800 } 799 }
801 800
802 bool AudioConferenceMixerImpl::IsParticipantInList( 801 bool AudioConferenceMixerImpl::IsParticipantInList(
803 MixerParticipant& participant, 802 const MixerParticipant& participant,
804 MixerParticipantList* participantList) const { 803 const MixerParticipantList* participantList) const {
805 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 804 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
806 "IsParticipantInList(participant,participantList)"); 805 "IsParticipantInList(participant,participantList)");
807 for (MixerParticipantList::const_iterator iter = participantList->begin(); 806 for (MixerParticipantList::const_iterator iter = participantList->begin();
808 iter != participantList->end(); 807 iter != participantList->end();
809 ++iter) { 808 ++iter) {
810 if(&participant == *iter) { 809 if(&participant == *iter) {
811 return true; 810 return true;
812 } 811 }
813 } 812 }
814 return false; 813 return false;
815 } 814 }
816 815
817 bool AudioConferenceMixerImpl::AddParticipantToList( 816 bool AudioConferenceMixerImpl::AddParticipantToList(
818 MixerParticipant& participant, 817 MixerParticipant* participant,
819 MixerParticipantList* participantList) { 818 MixerParticipantList* participantList) const {
820 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 819 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
821 "AddParticipantToList(participant, participantList)"); 820 "AddParticipantToList(participant, participantList)");
822 participantList->push_back(&participant); 821 participantList->push_back(participant);
823 // Make sure that the mixed status is correct for new MixerParticipant. 822 // Make sure that the mixed status is correct for new MixerParticipant.
824 participant._mixHistory->ResetMixedStatus(); 823 participant->_mixHistory->ResetMixedStatus();
825 return true; 824 return true;
826 } 825 }
827 826
828 bool AudioConferenceMixerImpl::RemoveParticipantFromList( 827 bool AudioConferenceMixerImpl::RemoveParticipantFromList(
829 MixerParticipant& participant, 828 MixerParticipant* participant,
830 MixerParticipantList* participantList) { 829 MixerParticipantList* participantList) const {
831 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 830 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
832 "RemoveParticipantFromList(participant, participantList)"); 831 "RemoveParticipantFromList(participant, participantList)");
833 for (MixerParticipantList::iterator iter = participantList->begin(); 832 for (MixerParticipantList::iterator iter = participantList->begin();
834 iter != participantList->end(); 833 iter != participantList->end();
835 ++iter) { 834 ++iter) {
836 if(*iter == &participant) { 835 if(*iter == participant) {
837 participantList->erase(iter); 836 participantList->erase(iter);
838 // Participant is no longer mixed, reset to default. 837 // Participant is no longer mixed, reset to default.
839 participant._mixHistory->ResetMixedStatus(); 838 participant->_mixHistory->ResetMixedStatus();
840 return true; 839 return true;
841 } 840 }
842 } 841 }
843 return false; 842 return false;
844 } 843 }
845 844
846 int32_t AudioConferenceMixerImpl::MixFromList( 845 int32_t AudioConferenceMixerImpl::MixFromList(
847 AudioFrame& mixedAudio, 846 AudioFrame* mixedAudio,
848 const AudioFrameList* audioFrameList) { 847 const AudioFrameList* audioFrameList) const {
849 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 848 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
850 "MixFromList(mixedAudio, audioFrameList)"); 849 "MixFromList(mixedAudio, audioFrameList)");
851 if(audioFrameList->empty()) return 0; 850 if(audioFrameList->empty()) return 0;
852 851
853 uint32_t position = 0; 852 uint32_t position = 0;
854 853
855 if (_numMixedParticipants == 1) { 854 if (_numMixedParticipants == 1) {
856 mixedAudio.timestamp_ = audioFrameList->front()->timestamp_; 855 mixedAudio->timestamp_ = audioFrameList->front()->timestamp_;
857 mixedAudio.elapsed_time_ms_ = audioFrameList->front()->elapsed_time_ms_; 856 mixedAudio->elapsed_time_ms_ = audioFrameList->front()->elapsed_time_ms_;
858 } else { 857 } else {
859 // TODO(wu): Issue 3390. 858 // TODO(wu): Issue 3390.
860 // Audio frame timestamp is only supported in one channel case. 859 // Audio frame timestamp is only supported in one channel case.
861 mixedAudio.timestamp_ = 0; 860 mixedAudio->timestamp_ = 0;
862 mixedAudio.elapsed_time_ms_ = -1; 861 mixedAudio->elapsed_time_ms_ = -1;
863 } 862 }
864 863
865 for (AudioFrameList::const_iterator iter = audioFrameList->begin(); 864 for (AudioFrameList::const_iterator iter = audioFrameList->begin();
866 iter != audioFrameList->end(); 865 iter != audioFrameList->end();
867 ++iter) { 866 ++iter) {
868 if(position >= kMaximumAmountOfMixedParticipants) { 867 if(position >= kMaximumAmountOfMixedParticipants) {
869 WEBRTC_TRACE( 868 WEBRTC_TRACE(
870 kTraceMemory, 869 kTraceMemory,
871 kTraceAudioMixerServer, 870 kTraceAudioMixerServer,
872 _id, 871 _id,
873 "Trying to mix more than max amount of mixed participants:%d!", 872 "Trying to mix more than max amount of mixed participants:%d!",
874 kMaximumAmountOfMixedParticipants); 873 kMaximumAmountOfMixedParticipants);
875 // Assert and avoid crash 874 // Assert and avoid crash
876 assert(false); 875 assert(false);
877 position = 0; 876 position = 0;
878 } 877 }
879 MixFrames(&mixedAudio, (*iter), use_limiter_); 878 MixFrames(mixedAudio, (*iter), use_limiter_);
880 879
881 position++; 880 position++;
882 } 881 }
883 882
884 return 0; 883 return 0;
885 } 884 }
886 885
887 // TODO(andrew): consolidate this function with MixFromList. 886 // TODO(andrew): consolidate this function with MixFromList.
888 int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList( 887 int32_t AudioConferenceMixerImpl::MixAnonomouslyFromList(
889 AudioFrame& mixedAudio, 888 AudioFrame* mixedAudio,
890 const AudioFrameList* audioFrameList) { 889 const AudioFrameList* audioFrameList) const {
891 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, 890 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id,
892 "MixAnonomouslyFromList(mixedAudio, audioFrameList)"); 891 "MixAnonomouslyFromList(mixedAudio, audioFrameList)");
893 892
894 if(audioFrameList->empty()) return 0; 893 if(audioFrameList->empty()) return 0;
895 894
896 for (AudioFrameList::const_iterator iter = audioFrameList->begin(); 895 for (AudioFrameList::const_iterator iter = audioFrameList->begin();
897 iter != audioFrameList->end(); 896 iter != audioFrameList->end();
898 ++iter) { 897 ++iter) {
899 MixFrames(&mixedAudio, *iter, use_limiter_); 898 MixFrames(mixedAudio, *iter, use_limiter_);
900 } 899 }
901 return 0; 900 return 0;
902 } 901 }
903 902
904 bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame& mixedAudio) { 903 bool AudioConferenceMixerImpl::LimitMixedAudio(AudioFrame* mixedAudio) const {
Andrew MacDonald 2015/08/26 15:56:01 Even though you can, I don't think you should mark
minyue-webrtc 2015/08/27 11:31:15 const means that the class members are unaltered.
Andrew MacDonald 2015/08/28 18:33:20 Right, I know what const methods are. There's the
905 if (!use_limiter_) { 904 if (!use_limiter_) {
906 return true; 905 return true;
907 } 906 }
908 907
909 // Smoothly limit the mixed frame. 908 // Smoothly limit the mixed frame.
910 const int error = _limiter->ProcessStream(&mixedAudio); 909 const int error = _limiter->ProcessStream(mixedAudio);
911 910
912 // And now we can safely restore the level. This procedure results in 911 // And now we can safely restore the level. This procedure results in
913 // some loss of resolution, deemed acceptable. 912 // some loss of resolution, deemed acceptable.
914 // 913 //
915 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS 914 // It's possible to apply the gain in the AGC (with a target level of 0 dbFS
916 // and compression gain of 6 dB). However, in the transition frame when this 915 // and compression gain of 6 dB). However, in the transition frame when this
917 // is enabled (moving from one to two participants) it has the potential to 916 // is enabled (moving from one to two participants) it has the potential to
918 // create discontinuities in the mixed frame. 917 // create discontinuities in the mixed frame.
919 // 918 //
920 // Instead we double the frame (with addition since left-shifting a 919 // Instead we double the frame (with addition since left-shifting a
921 // negative value is undefined). 920 // negative value is undefined).
922 mixedAudio += mixedAudio; 921 *mixedAudio += *mixedAudio;
923 922
924 if(error != _limiter->kNoError) { 923 if(error != _limiter->kNoError) {
925 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, 924 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
926 "Error from AudioProcessing: %d", error); 925 "Error from AudioProcessing: %d", error);
927 assert(false); 926 assert(false);
928 return false; 927 return false;
929 } 928 }
930 return true; 929 return true;
931 } 930 }
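
The closing comments in LimitMixedAudio explain the level handling: the frames are mixed with some attenuation so the limiter has headroom (handled in MixFrames, outside this hunk), and the level is then restored by adding the frame to itself rather than left-shifting, since shifting a negative signed value is undefined behavior; AudioFrame::operator+= also saturates. A stand-alone sketch of self-addition with int16 saturation (the saturation helper is an assumed simplification of that operator, not the real implementation):

#include <cstdint>
#include <cstdio>
#include <limits>

// Saturating add standing in for the clamping that AudioFrame::operator+=
// performs.
int16_t SaturatedAdd(int16_t a, int16_t b) {
  const int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b);
  if (sum > std::numeric_limits<int16_t>::max())
    return std::numeric_limits<int16_t>::max();
  if (sum < std::numeric_limits<int16_t>::min())
    return std::numeric_limits<int16_t>::min();
  return static_cast<int16_t>(sum);
}

int main() {
  // Samples that were mixed at reduced level; self-addition restores the gain.
  const int16_t attenuated[] = {1000, -1000, 16000, -17000};
  for (int16_t sample : attenuated) {
    // sample << 1 would be undefined for negative sample values;
    // sample + sample (with saturation) is well defined.
    std::printf("%d -> %d\n", sample, SaturatedAdd(sample, sample));
  }
  return 0;
}
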
932 } // namespace webrtc 931 } // namespace webrtc