Chromium Code Reviews

Unified Diff: voice_engine/channel.cc

Issue 3015553002: Remove voe::OutputMixer and AudioConferenceMixer. (Closed)
Patch Set: rebase Created 3 years, 3 months ago
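
For orientation before the diff: this change deletes the channel's dependency on voe::OutputMixer and its MixerParticipant callback (GetAudioFrameWithMuted), leaving Channel to serve audio through the AudioMixer::Source interface instead. A rough sketch of that interface, reconstructed from the signatures visible in this patch; the authoritative declaration lives in api/audio/audio_mixer.h:

    // Sketch only, not the verbatim header.
    class AudioMixer : public rtc::RefCountInterface {
     public:
      class Source {
       public:
        enum class AudioFrameInfo { kNormal, kMuted, kError };
        // Fill |audio_frame| with 10 ms of audio at |sample_rate_hz|.
        virtual AudioFrameInfo GetAudioFrameWithInfo(int sample_rate_hz,
                                                     AudioFrame* audio_frame) = 0;
        // The rate at which this source would prefer to deliver audio.
        virtual int PreferredSampleRate() const = 0;
        virtual ~Source() {}
      };
      virtual bool AddSource(Source* audio_source) = 0;
      virtual void RemoveSource(Source* audio_source) = 0;
      // ...
    };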
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 21 matching lines...)

 #include "rtc_base/criticalsection.h"
 #include "rtc_base/format_macros.h"
 #include "rtc_base/location.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/rate_limiter.h"
 #include "rtc_base/task_queue.h"
 #include "rtc_base/thread_checker.h"
 #include "rtc_base/timeutils.h"
 #include "system_wrappers/include/field_trial.h"
 #include "system_wrappers/include/trace.h"
-#include "voice_engine/output_mixer.h"
 #include "voice_engine/statistics.h"
 #include "voice_engine/utility.h"

 namespace webrtc {
 namespace voe {

 namespace {

 constexpr double kAudioSampleDurationSeconds = 0.01;
 constexpr int64_t kMaxRetransmissionWindowMs = 1000;

(...skipping 559 matching lines...)

                                      "IncomingPacket invalid RTP header");
     return false;
   }
   header.payload_type_frequency =
       rtp_payload_registry_->GetPayloadTypeFrequency(header.payloadType);
   if (header.payload_type_frequency < 0)
     return false;
   return ReceivePacket(rtp_packet, rtp_packet_length, header, false);
 }

-MixerParticipant::AudioFrameInfo Channel::GetAudioFrameWithMuted(
-    int32_t id,
-    AudioFrame* audioFrame) {
+AudioMixer::Source::AudioFrameInfo Channel::GetAudioFrameWithInfo(
+    int sample_rate_hz,
+    AudioFrame* audio_frame) {
+  audio_frame->sample_rate_hz_ = sample_rate_hz;
+
   unsigned int ssrc;
   RTC_CHECK_EQ(GetRemoteSSRC(ssrc), 0);
   event_log_proxy_->LogAudioPlayout(ssrc);
   // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
   bool muted;
-  if (audio_coding_->PlayoutData10Ms(audioFrame->sample_rate_hz_, audioFrame,
+  if (audio_coding_->PlayoutData10Ms(audio_frame->sample_rate_hz_, audio_frame,
                                      &muted) == -1) {
     WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
                  "Channel::GetAudioFrame() PlayoutData10Ms() failed!");
     // In all likelihood, the audio in this frame is garbage. We return an
     // error so that the audio mixer module doesn't add it to the mix. As
     // a result, it won't be played out and the actions skipped here are
     // irrelevant.
-    return MixerParticipant::AudioFrameInfo::kError;
+    return AudioMixer::Source::AudioFrameInfo::kError;
   }

   if (muted) {
     // TODO(henrik.lundin): We should be able to do better than this. But we
     // will have to go through all the cases below where the audio samples may
     // be used, and handle the muted case in some way.
-    AudioFrameOperations::Mute(audioFrame);
+    AudioFrameOperations::Mute(audio_frame);
   }

   // Convert module ID to internal VoE channel ID
-  audioFrame->id_ = VoEChannelId(audioFrame->id_);
+  audio_frame->id_ = VoEChannelId(audio_frame->id_);
hlundin-webrtc 2017/09/22 09:17:44: Out of curiosity: is this ID ever used for anything?
the sun 2017/09/22 10:49:00: I think you're right about that.
   // Store speech type for dead-or-alive detection
-  _outputSpeechType = audioFrame->speech_type_;
+  _outputSpeechType = audio_frame->speech_type_;

   {
     // Pass the audio buffers to an optional sink callback, before applying
     // scaling/panning, as that applies to the mix operation.
     // External recipients of the audio (e.g. via AudioTrack), will do their
     // own mixing/dynamic processing.
     rtc::CritScope cs(&_callbackCritSect);
     if (audio_sink_) {
       AudioSinkInterface::Data data(
-          audioFrame->data(), audioFrame->samples_per_channel_,
-          audioFrame->sample_rate_hz_, audioFrame->num_channels_,
-          audioFrame->timestamp_);
+          audio_frame->data(), audio_frame->samples_per_channel_,
+          audio_frame->sample_rate_hz_, audio_frame->num_channels_,
+          audio_frame->timestamp_);
       audio_sink_->OnData(data);
     }
   }

   float output_gain = 1.0f;
   {
     rtc::CritScope cs(&volume_settings_critsect_);
     output_gain = _outputGain;
   }

   // Output volume scaling
   if (output_gain < 0.99f || output_gain > 1.01f) {
     // TODO(solenberg): Combine with mute state - this can cause clicks!
-    AudioFrameOperations::ScaleWithSat(output_gain, audioFrame);
+    AudioFrameOperations::ScaleWithSat(output_gain, audio_frame);
   }

   // Measure audio level (0-9)
   // TODO(henrik.lundin) Use the |muted| information here too.
   // TODO(deadbeef): Use RmsLevel for |_outputAudioLevel| (see
   // https://crbug.com/webrtc/7517).
-  _outputAudioLevel.ComputeLevel(*audioFrame, kAudioSampleDurationSeconds);
+  _outputAudioLevel.ComputeLevel(*audio_frame, kAudioSampleDurationSeconds);

-  if (capture_start_rtp_time_stamp_ < 0 && audioFrame->timestamp_ != 0) {
+  if (capture_start_rtp_time_stamp_ < 0 && audio_frame->timestamp_ != 0) {
     // The first frame with a valid rtp timestamp.
-    capture_start_rtp_time_stamp_ = audioFrame->timestamp_;
+    capture_start_rtp_time_stamp_ = audio_frame->timestamp_;
   }

   if (capture_start_rtp_time_stamp_ >= 0) {
-    // audioFrame.timestamp_ should be valid from now on.
+    // audio_frame.timestamp_ should be valid from now on.

     // Compute elapsed time.
     int64_t unwrap_timestamp =
-        rtp_ts_wraparound_handler_->Unwrap(audioFrame->timestamp_);
-    audioFrame->elapsed_time_ms_ =
+        rtp_ts_wraparound_handler_->Unwrap(audio_frame->timestamp_);
+    audio_frame->elapsed_time_ms_ =
         (unwrap_timestamp - capture_start_rtp_time_stamp_) /
         (GetRtpTimestampRateHz() / 1000);

     {
       rtc::CritScope lock(&ts_stats_lock_);
       // Compute ntp time.
-      audioFrame->ntp_time_ms_ =
-          ntp_estimator_.Estimate(audioFrame->timestamp_);
+      audio_frame->ntp_time_ms_ =
+          ntp_estimator_.Estimate(audio_frame->timestamp_);
       // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
-      if (audioFrame->ntp_time_ms_ > 0) {
+      if (audio_frame->ntp_time_ms_ > 0) {
         // Compute |capture_start_ntp_time_ms_| so that
         // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
         capture_start_ntp_time_ms_ =
-            audioFrame->ntp_time_ms_ - audioFrame->elapsed_time_ms_;
+            audio_frame->ntp_time_ms_ - audio_frame->elapsed_time_ms_;
       }
     }
   }

-  return muted ? MixerParticipant::AudioFrameInfo::kMuted
-               : MixerParticipant::AudioFrameInfo::kNormal;
+  return muted ? AudioMixer::Source::AudioFrameInfo::kMuted
+               : AudioMixer::Source::AudioFrameInfo::kNormal;
 }

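Note the new contract above: the mixer passes the rate it wants and the channel stamps it on the frame before calling PlayoutData10Ms(), which resamples inside the ACM. A minimal sketch of how a mixer-side caller might consume the three AudioFrameInfo results (illustrative; not the actual AudioMixerImpl code):

    webrtc::AudioFrame frame;
    const auto info = source->GetAudioFrameWithInfo(48000, &frame);
    if (info == webrtc::AudioMixer::Source::AudioFrameInfo::kError) {
      // Per the comment in the function: the frame is likely garbage,
      // so keep it out of the mix entirely.
    } else if (info == webrtc::AudioMixer::Source::AudioFrameInfo::kMuted) {
      // Samples were zeroed via AudioFrameOperations::Mute(); the mixer
      // may skip or down-weight this source.
    } else {
      // kNormal: mix the samples as-is.
    }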
-AudioMixer::Source::AudioFrameInfo Channel::GetAudioFrameWithInfo(
-    int sample_rate_hz,
-    AudioFrame* audio_frame) {
-  audio_frame->sample_rate_hz_ = sample_rate_hz;
-
-  const auto frame_info = GetAudioFrameWithMuted(-1, audio_frame);
-
-  using FrameInfo = AudioMixer::Source::AudioFrameInfo;
-  FrameInfo new_audio_frame_info = FrameInfo::kError;
-  switch (frame_info) {
-    case MixerParticipant::AudioFrameInfo::kNormal:
-      new_audio_frame_info = FrameInfo::kNormal;
-      break;
-    case MixerParticipant::AudioFrameInfo::kMuted:
-      new_audio_frame_info = FrameInfo::kMuted;
-      break;
-    case MixerParticipant::AudioFrameInfo::kError:
-      new_audio_frame_info = FrameInfo::kError;
-      break;
-  }
-  return new_audio_frame_info;
-}
-
-int32_t Channel::NeededFrequency(int32_t id) const {
-  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
-               "Channel::NeededFrequency(id=%d)", id);
-
-  int highestNeeded = 0;
-
-  // Determine highest needed receive frequency
-  int32_t receiveFrequency = audio_coding_->ReceiveFrequency();
-
+int Channel::PreferredSampleRate() const {
   // Return the bigger of playout and receive frequency in the ACM.
-  if (audio_coding_->PlayoutFrequency() > receiveFrequency) {
-    highestNeeded = audio_coding_->PlayoutFrequency();
-  } else {
-    highestNeeded = receiveFrequency;
-  }
-
-  return highestNeeded;
+  return std::max(audio_coding_->ReceiveFrequency(),
+                  audio_coding_->PlayoutFrequency());
 }
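PreferredSampleRate() is the Source-interface replacement for NeededFrequency(): the same max of receive and playout frequency, minus the unused id argument and the tracing. One hypothetical way a mixer could use it to pick a shared output rate (an assumption for illustration; not AudioMixerImpl's actual rate policy):

    #include <algorithm>
    #include <vector>

    // Hypothetical helper: take the highest preferred rate so that no
    // source has to be upsampled by the mixer.
    int PickMixingRate(const std::vector<webrtc::AudioMixer::Source*>& sources) {
      int rate = 8000;  // Lower bound: never mix below narrowband.
      for (auto* source : sources)
        rate = std::max(rate, source->PreferredSampleRate());
      return rate;
    }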

 int32_t Channel::CreateChannel(Channel*& channel,
                                int32_t channelId,
                                uint32_t instanceId,
                                const VoEBase::ChannelConfig& config) {
   WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId),
                "Channel::CreateChannel(channelId=%d, instanceId=%d)", channelId,
                instanceId);

(...skipping 29 matching lines...)

       _timeStamp(0),  // This is just an offset, RTP module will add it's own
                       // random offset
       ntp_estimator_(Clock::GetRealTimeClock()),
       playout_timestamp_rtp_(0),
       playout_delay_ms_(0),
       send_sequence_number_(0),
       rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
       capture_start_rtp_time_stamp_(-1),
       capture_start_ntp_time_ms_(-1),
       _engineStatisticsPtr(NULL),
-      _outputMixerPtr(NULL),
       _moduleProcessThreadPtr(NULL),
       _audioDeviceModulePtr(NULL),
       _voiceEngineObserverPtr(NULL),
       _callbackCritSectPtr(NULL),
       _transportPtr(NULL),
       input_mute_(false),
       previous_frame_muted_(false),
       _outputGain(1.0f),
       _includeAudioLevelIndication(false),
       transport_overhead_per_packet_(0),

(...skipping 156 matching lines...)

   }

   // De-register modules in process thread
   if (_moduleProcessThreadPtr)
     _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());

   // End of modules shutdown
 }

 int32_t Channel::SetEngineInformation(Statistics& engineStatistics,
-                                      OutputMixer& outputMixer,
                                       ProcessThread& moduleProcessThread,
                                       AudioDeviceModule& audioDeviceModule,
                                       VoiceEngineObserver* voiceEngineObserver,
                                       rtc::CriticalSection* callbackCritSect,
                                       rtc::TaskQueue* encoder_queue) {
   RTC_DCHECK(encoder_queue);
   RTC_DCHECK(!encoder_queue_);
   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
                "Channel::SetEngineInformation()");
   _engineStatisticsPtr = &engineStatistics;
-  _outputMixerPtr = &outputMixer;
   _moduleProcessThreadPtr = &moduleProcessThread;
   _audioDeviceModulePtr = &audioDeviceModule;
   _voiceEngineObserverPtr = voiceEngineObserver;
   _callbackCritSectPtr = callbackCritSect;
   encoder_queue_ = encoder_queue;
   return 0;
 }

 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) {
   rtc::CritScope cs(&_callbackCritSect);
   audio_sink_ = std::move(sink);
 }

 const rtc::scoped_refptr<AudioDecoderFactory>&
 Channel::GetAudioDecoderFactory() const {
   return decoder_factory_;
 }

 int32_t Channel::StartPlayout() {
   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
                "Channel::StartPlayout()");
   if (channel_state_.Get().playing) {
     return 0;
   }

-  // Add participant as candidates for mixing.
-  if (_outputMixerPtr->SetMixabilityStatus(*this, true) != 0) {
-    _engineStatisticsPtr->SetLastError(
-        VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
-        "StartPlayout() failed to add participant to mixer");
-    return -1;
-  }
-
   channel_state_.SetPlaying(true);

   return 0;
 }

 int32_t Channel::StopPlayout() {
   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
                "Channel::StopPlayout()");
   if (!channel_state_.Get().playing) {
     return 0;
   }

-  // Remove participant as candidates for mixing
-  if (_outputMixerPtr->SetMixabilityStatus(*this, false) != 0) {
-    _engineStatisticsPtr->SetLastError(
-        VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError,
-        "StopPlayout() failed to remove participant from mixer");
-    return -1;
-  }
-
   channel_state_.SetPlaying(false);
   _outputAudioLevel.Clear();

   return 0;
 }
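With OutputMixer gone, StartPlayout()/StopPlayout() above no longer add or remove the channel as a mixing candidate; attaching the channel to a mixer becomes the owner's job via the AudioMixer interface. A sketch of that wiring, assuming a mixer created elsewhere (e.g. by AudioMixerImpl::Create()); the actual call sites belong to other files in this CL:

    // Channel implements AudioMixer::Source, so it can be added directly.
    rtc::scoped_refptr<webrtc::AudioMixer> mixer = /* created at setup */;
    mixer->AddSource(channel);     // replaces SetMixabilityStatus(*this, true)
    // ... playout runs ...
    mixer->RemoveSource(channel);  // replaces SetMixabilityStatus(*this, false)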

 int32_t Channel::StartSend() {
   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
                "Channel::StartSend()");
   if (channel_state_.Get().sending) {

(...skipping 1022 matching lines...)

   int64_t min_rtt = 0;
   if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) !=
       0) {
     return 0;
   }
   return rtt;
 }

 }  // namespace voe
 }  // namespace webrtc