OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h" | 11 #include "webrtc/modules/audio_conference_mixer/include/audio_conference_mixer_defines.h" |
12 #include "webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h" | 12 #include "webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.h" |
13 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h" | 13 #include "webrtc/modules/audio_conference_mixer/source/audio_frame_manipulator.h" |
14 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 14 #include "webrtc/modules/audio_processing/include/audio_processing.h" |
15 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 15 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
16 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | 16 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" |
17 #include "webrtc/system_wrappers/include/trace.h" | 17 #include "webrtc/system_wrappers/include/trace.h" |
18 | 18 |
19 namespace webrtc { | 19 namespace webrtc { |
20 namespace { | 20 namespace { |
21 | 21 |
22 struct ParticipantFramePair { | 22 struct ParticipantFrameStruct { |
23 ParticipantFrameStruct(MixerParticipant* p, AudioFrame* a, bool m) | |
24 : participant(p), audioFrame(a), muted(m) {} | |
23 MixerParticipant* participant; | 25 MixerParticipant* participant; |
24 AudioFrame* audioFrame; | 26 AudioFrame* audioFrame; |
minyue-webrtc
2016/05/17 15:27:30
I know it is late to ask this, but would it be pos
hlundin-webrtc
2016/05/17 18:09:08
We did consider that. I had discussions with solen
| |
27 bool muted; | |
25 }; | 28 }; |
26 | 29 |
27 typedef std::list<ParticipantFramePair*> ParticipantFramePairList; | 30 typedef std::list<ParticipantFrameStruct*> ParticipantFrameStructList; |
28 | 31 |
29 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. | 32 // Mix |frame| into |mixed_frame|, with saturation protection and upmixing. |
30 // These effects are applied to |frame| itself prior to mixing. Assumes that | 33 // These effects are applied to |frame| itself prior to mixing. Assumes that |
31 // |mixed_frame| always has at least as many channels as |frame|. Supports | 34 // |mixed_frame| always has at least as many channels as |frame|. Supports |
32 // stereo at most. | 35 // stereo at most. |
33 // | 36 // |
34 // TODO(andrew): consider not modifying |frame| here. | 37 // TODO(andrew): consider not modifying |frame| here. |
35 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { | 38 void MixFrames(AudioFrame* mixed_frame, AudioFrame* frame, bool use_limiter) { |
36 assert(mixed_frame->num_channels_ >= frame->num_channels_); | 39 assert(mixed_frame->num_channels_ >= frame->num_channels_); |
37 if (use_limiter) { | 40 if (use_limiter) { |
(...skipping 484 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
522 AudioFrameList* rampOutList, | 525 AudioFrameList* rampOutList, |
523 std::map<int, MixerParticipant*>* mixParticipantList, | 526 std::map<int, MixerParticipant*>* mixParticipantList, |
524 size_t* maxAudioFrameCounter) const { | 527 size_t* maxAudioFrameCounter) const { |
525 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, | 528 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, _id, |
526 "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)", | 529 "UpdateToMix(mixList,rampOutList,mixParticipantList,%d)", |
527 *maxAudioFrameCounter); | 530 *maxAudioFrameCounter); |
528 const size_t mixListStartSize = mixList->size(); | 531 const size_t mixListStartSize = mixList->size(); |
529 AudioFrameList activeList; | 532 AudioFrameList activeList; |
530 // Struct needed by the passive lists to keep track of which AudioFrame | 533 // Struct needed by the passive lists to keep track of which AudioFrame |
531 // belongs to which MixerParticipant. | 534 // belongs to which MixerParticipant. |
532 ParticipantFramePairList passiveWasNotMixedList; | 535 ParticipantFrameStructList passiveWasNotMixedList; |
533 ParticipantFramePairList passiveWasMixedList; | 536 ParticipantFrameStructList passiveWasMixedList; |
534 for (MixerParticipantList::const_iterator participant = | 537 for (MixerParticipantList::const_iterator participant = |
535 _participantList.begin(); participant != _participantList.end(); | 538 _participantList.begin(); participant != _participantList.end(); |
536 ++participant) { | 539 ++participant) { |
537 // Stop keeping track of passive participants if there are already | 540 // Stop keeping track of passive participants if there are already |
538 // enough participants available (they wont be mixed anyway). | 541 // enough participants available (they wont be mixed anyway). |
539 bool mustAddToPassiveList = (*maxAudioFrameCounter > | 542 bool mustAddToPassiveList = (*maxAudioFrameCounter > |
540 (activeList.size() + | 543 (activeList.size() + |
541 passiveWasMixedList.size() + | 544 passiveWasMixedList.size() + |
542 passiveWasNotMixedList.size())); | 545 passiveWasNotMixedList.size())); |
543 | 546 |
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
634 } | 637 } |
635 } | 638 } |
636 } else { | 639 } else { |
637 activeList.push_front(audioFrame); | 640 activeList.push_front(audioFrame); |
638 (*mixParticipantList)[audioFrame->id_] = *participant; | 641 (*mixParticipantList)[audioFrame->id_] = *participant; |
639 assert(mixParticipantList->size() <= | 642 assert(mixParticipantList->size() <= |
640 kMaximumAmountOfMixedParticipants); | 643 kMaximumAmountOfMixedParticipants); |
641 } | 644 } |
642 } else { | 645 } else { |
643 if(wasMixed) { | 646 if(wasMixed) { |
644 ParticipantFramePair* pair = new ParticipantFramePair; | 647 ParticipantFrameStruct* part_struct = |
645 pair->audioFrame = audioFrame; | 648 new ParticipantFrameStruct(*participant, audioFrame, false); |
646 pair->participant = *participant; | 649 passiveWasMixedList.push_back(part_struct); |
647 passiveWasMixedList.push_back(pair); | |
648 } else if(mustAddToPassiveList) { | 650 } else if(mustAddToPassiveList) { |
649 RampIn(*audioFrame); | 651 RampIn(*audioFrame); |
650 ParticipantFramePair* pair = new ParticipantFramePair; | 652 ParticipantFrameStruct* part_struct = |
651 pair->audioFrame = audioFrame; | 653 new ParticipantFrameStruct(*participant, audioFrame, false); |
652 pair->participant = *participant; | 654 passiveWasNotMixedList.push_back(part_struct); |
653 passiveWasNotMixedList.push_back(pair); | |
654 } else { | 655 } else { |
655 _audioFramePool->PushMemory(audioFrame); | 656 _audioFramePool->PushMemory(audioFrame); |
656 } | 657 } |
657 } | 658 } |
658 } | 659 } |
659 assert(activeList.size() <= *maxAudioFrameCounter); | 660 assert(activeList.size() <= *maxAudioFrameCounter); |
660 // At this point it is known which participants should be mixed. Transfer | 661 // At this point it is known which participants should be mixed. Transfer |
661 // this information to this functions output parameters. | 662 // this information to this functions output parameters. |
662 for (AudioFrameList::const_iterator iter = activeList.begin(); | 663 for (AudioFrameList::const_iterator iter = activeList.begin(); |
663 iter != activeList.end(); | 664 iter != activeList.end(); |
664 ++iter) { | 665 ++iter) { |
665 mixList->push_back(*iter); | 666 mixList->push_back(*iter); |
666 } | 667 } |
667 activeList.clear(); | 668 activeList.clear(); |
668 // Always mix a constant number of AudioFrames. If there aren't enough | 669 // Always mix a constant number of AudioFrames. If there aren't enough |
669 // active participants mix passive ones. Starting with those that was mixed | 670 // active participants mix passive ones. Starting with those that was mixed |
670 // last iteration. | 671 // last iteration. |
671 for (ParticipantFramePairList::const_iterator | 672 for (ParticipantFrameStructList::const_iterator |
672 iter = passiveWasMixedList.begin(); iter != passiveWasMixedList.end(); | 673 iter = passiveWasMixedList.begin(); iter != passiveWasMixedList.end(); |
673 ++iter) { | 674 ++iter) { |
674 if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | 675 if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
675 mixList->push_back((*iter)->audioFrame); | 676 mixList->push_back((*iter)->audioFrame); |
676 (*mixParticipantList)[(*iter)->audioFrame->id_] = | 677 (*mixParticipantList)[(*iter)->audioFrame->id_] = |
677 (*iter)->participant; | 678 (*iter)->participant; |
678 assert(mixParticipantList->size() <= | 679 assert(mixParticipantList->size() <= |
679 kMaximumAmountOfMixedParticipants); | 680 kMaximumAmountOfMixedParticipants); |
680 } else { | 681 } else { |
681 _audioFramePool->PushMemory((*iter)->audioFrame); | 682 _audioFramePool->PushMemory((*iter)->audioFrame); |
682 } | 683 } |
683 delete *iter; | 684 delete *iter; |
684 } | 685 } |
685 // And finally the ones that have not been mixed for a while. | 686 // And finally the ones that have not been mixed for a while. |
686 for (ParticipantFramePairList::const_iterator iter = | 687 for (ParticipantFrameStructList::const_iterator iter = |
687 passiveWasNotMixedList.begin(); | 688 passiveWasNotMixedList.begin(); |
688 iter != passiveWasNotMixedList.end(); | 689 iter != passiveWasNotMixedList.end(); |
689 ++iter) { | 690 ++iter) { |
690 if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) { | 691 if(mixList->size() < *maxAudioFrameCounter + mixListStartSize) { |
691 mixList->push_back((*iter)->audioFrame); | 692 mixList->push_back((*iter)->audioFrame); |
692 (*mixParticipantList)[(*iter)->audioFrame->id_] = | 693 (*mixParticipantList)[(*iter)->audioFrame->id_] = |
693 (*iter)->participant; | 694 (*iter)->participant; |
694 assert(mixParticipantList->size() <= | 695 assert(mixParticipantList->size() <= |
695 kMaximumAmountOfMixedParticipants); | 696 kMaximumAmountOfMixedParticipants); |
696 } else { | 697 } else { |
(...skipping 207 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
904 | 905 |
905 if(error != _limiter->kNoError) { | 906 if(error != _limiter->kNoError) { |
906 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, | 907 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id, |
907 "Error from AudioProcessing: %d", error); | 908 "Error from AudioProcessing: %d", error); |
908 assert(false); | 909 assert(false); |
909 return false; | 910 return false; |
910 } | 911 } |
911 return true; | 912 return true; |
912 } | 913 } |
913 } // namespace webrtc | 914 } // namespace webrtc |
OLD | NEW |