| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 628 matching lines...) |
| 639 return AudioMixer::Source::AudioFrameInfo::kError; | 639 return AudioMixer::Source::AudioFrameInfo::kError; |
| 640 } | 640 } |
| 641 | 641 |
| 642 if (muted) { | 642 if (muted) { |
| 643 // TODO(henrik.lundin): We should be able to do better than this. But we | 643 // TODO(henrik.lundin): We should be able to do better than this. But we |
| 644 // will have to go through all the cases below where the audio samples may | 644 // will have to go through all the cases below where the audio samples may |
| 645 // be used, and handle the muted case in some way. | 645 // be used, and handle the muted case in some way. |
| 646 AudioFrameOperations::Mute(audio_frame); | 646 AudioFrameOperations::Mute(audio_frame); |
| 647 } | 647 } |
| 648 | 648 |
| 649 // Convert module ID to internal VoE channel ID | |
| 650 audio_frame->id_ = VoEChannelId(audio_frame->id_); | |
| 651 // Store speech type for dead-or-alive detection | 649 // Store speech type for dead-or-alive detection |
| 652 _outputSpeechType = audio_frame->speech_type_; | 650 _outputSpeechType = audio_frame->speech_type_; |
| 653 | 651 |
| 654 { | 652 { |
| 655 // Pass the audio buffers to an optional sink callback, before applying | 653 // Pass the audio buffers to an optional sink callback, before applying |
| 656 // scaling/panning, as that applies to the mix operation. | 654 // scaling/panning, as that applies to the mix operation. |
| 657 // External recipients of the audio (e.g. via AudioTrack), will do their | 655 // External recipients of the audio (e.g. via AudioTrack), will do their |
| 658 // own mixing/dynamic processing. | 656 // own mixing/dynamic processing. |
| 659 rtc::CritScope cs(&_callbackCritSect); | 657 rtc::CritScope cs(&_callbackCritSect); |
| 660 if (audio_sink_) { | 658 if (audio_sink_) { |
| (...skipping 129 matching lines...) |
| 790 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), | 788 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), |
| 791 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()), | 789 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()), |
| 792 retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(), | 790 retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(), |
| 793 kMaxRetransmissionWindowMs)), | 791 kMaxRetransmissionWindowMs)), |
| 794 decoder_factory_(config.acm_config.decoder_factory), | 792 decoder_factory_(config.acm_config.decoder_factory), |
| 795 use_twcc_plr_for_ana_( | 793 use_twcc_plr_for_ana_( |
| 796 webrtc::field_trial::FindFullName("UseTwccPlrForAna") == "Enabled") { | 794 webrtc::field_trial::FindFullName("UseTwccPlrForAna") == "Enabled") { |
| 797 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId), | 795 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId), |
| 798 "Channel::Channel() - ctor"); | 796 "Channel::Channel() - ctor"); |
| 799 AudioCodingModule::Config acm_config(config.acm_config); | 797 AudioCodingModule::Config acm_config(config.acm_config); |
| 800 acm_config.id = VoEModuleId(instanceId, channelId); | |
| 801 acm_config.neteq_config.enable_muted_state = true; | 798 acm_config.neteq_config.enable_muted_state = true; |
| 802 audio_coding_.reset(AudioCodingModule::Create(acm_config)); | 799 audio_coding_.reset(AudioCodingModule::Create(acm_config)); |
| 803 | 800 |
| 804 _outputAudioLevel.Clear(); | 801 _outputAudioLevel.Clear(); |
| 805 | 802 |
| 806 RtpRtcp::Configuration configuration; | 803 RtpRtcp::Configuration configuration; |
| 807 configuration.audio = true; | 804 configuration.audio = true; |
| 808 configuration.outgoing_transport = this; | 805 configuration.outgoing_transport = this; |
| 809 configuration.overhead_observer = this; | 806 configuration.overhead_observer = this; |
| 810 configuration.receive_statistics = rtp_receive_statistics_.get(); | 807 configuration.receive_statistics = rtp_receive_statistics_.get(); |
| (...skipping 897 matching lines...) |
| 1708 void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) { | 1705 void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) { |
| 1709 // Avoid posting any new tasks if sending was already stopped in StopSend(). | 1706 // Avoid posting any new tasks if sending was already stopped in StopSend(). |
| 1710 rtc::CritScope cs(&encoder_queue_lock_); | 1707 rtc::CritScope cs(&encoder_queue_lock_); |
| 1711 if (!encoder_queue_is_active_) { | 1708 if (!encoder_queue_is_active_) { |
| 1712 return; | 1709 return; |
| 1713 } | 1710 } |
| 1714 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame()); | 1711 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame()); |
| 1715 // TODO(henrika): try to avoid copying by moving ownership of audio frame | 1712 // TODO(henrika): try to avoid copying by moving ownership of audio frame |
| 1716 // either into pool of frames or into the task itself. | 1713 // either into pool of frames or into the task itself. |
| 1717 audio_frame->CopyFrom(audio_input); | 1714 audio_frame->CopyFrom(audio_input); |
| 1718 audio_frame->id_ = ChannelId(); | |
| 1719 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( | 1715 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( |
| 1720 new ProcessAndEncodeAudioTask(std::move(audio_frame), this))); | 1716 new ProcessAndEncodeAudioTask(std::move(audio_frame), this))); |
| 1721 } | 1717 } |
| 1722 | 1718 |
| 1723 void Channel::ProcessAndEncodeAudio(const int16_t* audio_data, | 1719 void Channel::ProcessAndEncodeAudio(const int16_t* audio_data, |
| 1724 int sample_rate, | 1720 int sample_rate, |
| 1725 size_t number_of_frames, | 1721 size_t number_of_frames, |
| 1726 size_t number_of_channels) { | 1722 size_t number_of_channels) { |
| 1727 // Avoid posting any new tasks if sending was already stopped in StopSend(). | 1723 // Avoid posting any new tasks if sending was already stopped in StopSend(). |
| 1728 rtc::CritScope cs(&encoder_queue_lock_); | 1724 rtc::CritScope cs(&encoder_queue_lock_); |
| 1729 if (!encoder_queue_is_active_) { | 1725 if (!encoder_queue_is_active_) { |
| 1730 return; | 1726 return; |
| 1731 } | 1727 } |
| 1732 CodecInst codec; | 1728 CodecInst codec; |
| 1733 const int result = GetSendCodec(codec); | 1729 const int result = GetSendCodec(codec); |
| 1734 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame()); | 1730 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame()); |
| 1735 audio_frame->id_ = ChannelId(); | |
| 1736 // TODO(ossu): Investigate how this could happen. b/62909493 | 1731 // TODO(ossu): Investigate how this could happen. b/62909493 |
| 1737 if (result == 0) { | 1732 if (result == 0) { |
| 1738 audio_frame->sample_rate_hz_ = std::min(codec.plfreq, sample_rate); | 1733 audio_frame->sample_rate_hz_ = std::min(codec.plfreq, sample_rate); |
| 1739 audio_frame->num_channels_ = std::min(number_of_channels, codec.channels); | 1734 audio_frame->num_channels_ = std::min(number_of_channels, codec.channels); |
| 1740 } else { | 1735 } else { |
| 1741 audio_frame->sample_rate_hz_ = sample_rate; | 1736 audio_frame->sample_rate_hz_ = sample_rate; |
| 1742 audio_frame->num_channels_ = number_of_channels; | 1737 audio_frame->num_channels_ = number_of_channels; |
| 1743 LOG(LS_WARNING) << "Unable to get send codec for channel " << ChannelId(); | 1738 LOG(LS_WARNING) << "Unable to get send codec for channel " << ChannelId(); |
| 1744 RTC_NOTREACHED(); | 1739 RTC_NOTREACHED(); |
| 1745 } | 1740 } |
| 1746 RemixAndResample(audio_data, number_of_frames, number_of_channels, | 1741 RemixAndResample(audio_data, number_of_frames, number_of_channels, |
| 1747 sample_rate, &input_resampler_, audio_frame.get()); | 1742 sample_rate, &input_resampler_, audio_frame.get()); |
| 1748 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( | 1743 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( |
| 1749 new ProcessAndEncodeAudioTask(std::move(audio_frame), this))); | 1744 new ProcessAndEncodeAudioTask(std::move(audio_frame), this))); |
| 1750 } | 1745 } |
| 1751 | 1746 |
| 1752 void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) { | 1747 void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) { |
| 1753 RTC_DCHECK_RUN_ON(encoder_queue_); | 1748 RTC_DCHECK_RUN_ON(encoder_queue_); |
| 1754 RTC_DCHECK_GT(audio_input->samples_per_channel_, 0); | 1749 RTC_DCHECK_GT(audio_input->samples_per_channel_, 0); |
| 1755 RTC_DCHECK_LE(audio_input->num_channels_, 2); | 1750 RTC_DCHECK_LE(audio_input->num_channels_, 2); |
| 1756 RTC_DCHECK_EQ(audio_input->id_, ChannelId()); | |
| 1757 | 1751 |
| 1758 bool is_muted = InputMute(); | 1752 bool is_muted = InputMute(); |
| 1759 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted); | 1753 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted); |
| 1760 | 1754 |
| 1761 if (_includeAudioLevelIndication) { | 1755 if (_includeAudioLevelIndication) { |
| 1762 size_t length = | 1756 size_t length = |
| 1763 audio_input->samples_per_channel_ * audio_input->num_channels_; | 1757 audio_input->samples_per_channel_ * audio_input->num_channels_; |
| 1764 RTC_CHECK_LE(length, AudioFrame::kMaxDataSizeBytes); | 1758 RTC_CHECK_LE(length, AudioFrame::kMaxDataSizeBytes); |
| 1765 if (is_muted && previous_frame_muted_) { | 1759 if (is_muted && previous_frame_muted_) { |
| 1766 rms_level_.AnalyzeMuted(length); | 1760 rms_level_.AnalyzeMuted(length); |
| (...skipping 262 matching lines...) |
| 2029 int64_t min_rtt = 0; | 2023 int64_t min_rtt = 0; |
| 2030 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != | 2024 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != |
| 2031 0) { | 2025 0) { |
| 2032 return 0; | 2026 return 0; |
| 2033 } | 2027 } |
| 2034 return rtt; | 2028 return rtt; |
| 2035 } | 2029 } |
| 2036 | 2030 |
| 2037 } // namespace voe | 2031 } // namespace voe |
| 2038 } // namespace webrtc | 2032 } // namespace webrtc |
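Context for the two `ProcessAndEncodeAudio()` overloads above (new lines 1705-1744): the capture-side frame is copied into a heap-allocated `AudioFrame`, ownership of the copy is handed to a task posted on the encoder task queue, and a lock-guarded `encoder_queue_is_active_` flag lets `StopSend()` prevent any further posts. Below is a minimal, self-contained C++ sketch of that hand-off pattern. The names (`PcmFrame`, `TaskQueueStub`, `EncoderChannel`) are illustrative stand-ins, not WebRTC's real `AudioFrame`/`rtc::TaskQueue`/`rtc::QueuedTask` classes, and `std::function` plus `std::shared_ptr` stand in for the move-only `rtc::QueuedTask` used in the actual code.

```cpp
#include <cstdint>
#include <functional>
#include <memory>
#include <mutex>
#include <vector>

// Stand-in for webrtc::AudioFrame: one 10 ms block of interleaved PCM.
struct PcmFrame {
  int sample_rate_hz = 48000;
  size_t num_channels = 1;
  std::vector<int16_t> samples;
};

// Stand-in for rtc::TaskQueue: runs posted closures on a dedicated thread.
class TaskQueueStub {
 public:
  void PostTask(std::function<void()> task) {
    (void)task;  // Enqueue for a worker thread; elided in this sketch.
  }
};

class EncoderChannel {
 public:
  // Mirrors Channel::ProcessAndEncodeAudio(): copy the capture frame and
  // hand the copy to the encoder queue, unless StopSend() already ran.
  void ProcessAndEncodeAudio(const PcmFrame& capture) {
    std::lock_guard<std::mutex> lock(queue_lock_);
    if (!queue_is_active_) {
      return;  // Sending stopped; drop the frame instead of posting work.
    }
    // The real code moves a std::unique_ptr into an rtc::QueuedTask; a
    // shared_ptr is used here only because std::function must be copyable.
    auto copy = std::make_shared<PcmFrame>(capture);
    encoder_queue_.PostTask([this, copy]() { EncodeOnQueue(*copy); });
  }

  void StopSend() {
    std::lock_guard<std::mutex> lock(queue_lock_);
    queue_is_active_ = false;  // Later ProcessAndEncodeAudio() calls no-op.
  }

 private:
  void EncodeOnQueue(const PcmFrame& frame) {
    (void)frame;  // Resample, encode, and send; elided in this sketch.
  }

  std::mutex queue_lock_;
  bool queue_is_active_ = true;
  TaskQueueStub encoder_queue_;
};

int main() {
  EncoderChannel channel;
  PcmFrame frame;
  frame.samples.assign(480, 0);           // 10 ms of mono 48 kHz silence.
  channel.ProcessAndEncodeAudio(frame);   // Posts one encode task.
  channel.StopSend();
  channel.ProcessAndEncodeAudio(frame);   // Dropped: queue no longer active.
  return 0;
}
```

The copy exists so the audio device callback can return immediately while encoding proceeds asynchronously on the queue; the TODO(henrika) in the diff notes that the copy could eventually be avoided by moving frame ownership from a frame pool into the task itself.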