| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 633 matching lines...) |
| 644 // will have to go through all the cases below where the audio samples may | 644 // will have to go through all the cases below where the audio samples may |
| 645 // be used, and handle the muted case in some way. | 645 // be used, and handle the muted case in some way. |
| 646 AudioFrameOperations::Mute(audioFrame); | 646 AudioFrameOperations::Mute(audioFrame); |
| 647 } | 647 } |
| 648 | 648 |
| 649 // Convert module ID to internal VoE channel ID | 649 // Convert module ID to internal VoE channel ID |
| 650 audioFrame->id_ = VoEChannelId(audioFrame->id_); | 650 audioFrame->id_ = VoEChannelId(audioFrame->id_); |
| 651 // Store speech type for dead-or-alive detection | 651 // Store speech type for dead-or-alive detection |
| 652 _outputSpeechType = audioFrame->speech_type_; | 652 _outputSpeechType = audioFrame->speech_type_; |
| 653 | 653 |
| 654 ChannelState::State state = channel_state_.Get(); | |
| 655 | |
| 656 { | 654 { |
| 657 // Pass the audio buffers to an optional sink callback, before applying | 655 // Pass the audio buffers to an optional sink callback, before applying |
| 658 // scaling/panning, as that applies to the mix operation. | 656 // scaling/panning, as that applies to the mix operation. |
| 659 // External recipients of the audio (e.g. via AudioTrack), will do their | 657 // External recipients of the audio (e.g. via AudioTrack), will do their |
| 660 // own mixing/dynamic processing. | 658 // own mixing/dynamic processing. |
| 661 rtc::CritScope cs(&_callbackCritSect); | 659 rtc::CritScope cs(&_callbackCritSect); |
| 662 if (audio_sink_) { | 660 if (audio_sink_) { |
| 663 AudioSinkInterface::Data data( | 661 AudioSinkInterface::Data data( |
| 664 audioFrame->data(), audioFrame->samples_per_channel_, | 662 audioFrame->data(), audioFrame->samples_per_channel_, |
| 665 audioFrame->sample_rate_hz_, audioFrame->num_channels_, | 663 audioFrame->sample_rate_hz_, audioFrame->num_channels_, |
| 666 audioFrame->timestamp_); | 664 audioFrame->timestamp_); |
| 667 audio_sink_->OnData(data); | 665 audio_sink_->OnData(data); |
| 668 } | 666 } |
| 669 } | 667 } |
| 670 | 668 |
| 671 float output_gain = 1.0f; | 669 float output_gain = 1.0f; |
| 672 { | 670 { |
| 673 rtc::CritScope cs(&volume_settings_critsect_); | 671 rtc::CritScope cs(&volume_settings_critsect_); |
| 674 output_gain = _outputGain; | 672 output_gain = _outputGain; |
| 675 } | 673 } |
| 676 | 674 |
| 677 // Output volume scaling | 675 // Output volume scaling |
| 678 if (output_gain < 0.99f || output_gain > 1.01f) { | 676 if (output_gain < 0.99f || output_gain > 1.01f) { |
| 679 // TODO(solenberg): Combine with mute state - this can cause clicks! | 677 // TODO(solenberg): Combine with mute state - this can cause clicks! |
| 680 AudioFrameOperations::ScaleWithSat(output_gain, audioFrame); | 678 AudioFrameOperations::ScaleWithSat(output_gain, audioFrame); |
| 681 } | 679 } |
| 682 | 680 |
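The scaling step above only touches the samples when the gain is more than about 1% away from unity, and the scaling saturates rather than wraps on overflow. A minimal sketch of that kind of saturating gain scale, assuming int16 PCM samples (hypothetical standalone helper, not WebRTC's AudioFrameOperations::ScaleWithSat):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical helper: scale int16 PCM samples by |gain|, clamping the result
// to the int16 range instead of letting it wrap around on overflow.
void ScaleWithSaturation(float gain, std::vector<int16_t>* samples) {
  for (int16_t& s : *samples) {
    const float scaled = gain * static_cast<float>(s);
    s = static_cast<int16_t>(std::max(-32768.0f, std::min(32767.0f, scaled)));
  }
}
```

The 0.99/1.01 window simply avoids doing this per-sample work when the configured volume is effectively 1.0.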
| 683 // Mix decoded PCM output with file if file mixing is enabled | |
| 684 if (state.output_file_playing) { | |
| 685 MixAudioWithFile(*audioFrame, audioFrame->sample_rate_hz_); | |
| 686 muted = false; // We may have added non-zero samples. | |
| 687 } | |
| 688 | |
| 689 // Record playout if enabled | |
| 690 { | |
| 691 rtc::CritScope cs(&_fileCritSect); | |
| 692 | |
| 693 if (_outputFileRecording && output_file_recorder_) { | |
| 694 output_file_recorder_->RecordAudioToFile(*audioFrame); | |
| 695 } | |
| 696 } | |
| 697 | |
| 698 // Measure audio level (0-9) | 681 // Measure audio level (0-9) |
| 699 // TODO(henrik.lundin) Use the |muted| information here too. | 682 // TODO(henrik.lundin) Use the |muted| information here too. |
| 700 // TODO(deadbeef): Use RmsLevel for |_outputAudioLevel| (see | 683 // TODO(deadbeef): Use RmsLevel for |_outputAudioLevel| (see |
| 701 // https://crbug.com/webrtc/7517). | 684 // https://crbug.com/webrtc/7517). |
| 702 _outputAudioLevel.ComputeLevel(*audioFrame, kAudioSampleDurationSeconds); | 685 _outputAudioLevel.ComputeLevel(*audioFrame, kAudioSampleDurationSeconds); |
| 703 | 686 |
| 704 if (capture_start_rtp_time_stamp_ < 0 && audioFrame->timestamp_ != 0) { | 687 if (capture_start_rtp_time_stamp_ < 0 && audioFrame->timestamp_ != 0) { |
| 705 // The first frame with a valid rtp timestamp. | 688 // The first frame with a valid rtp timestamp. |
| 706 capture_start_rtp_time_stamp_ = audioFrame->timestamp_; | 689 capture_start_rtp_time_stamp_ = audioFrame->timestamp_; |
| 707 } | 690 } |
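A few lines above, the decoded frame also feeds a coarse 0-9 output level, and one of the TODOs suggests switching |_outputAudioLevel| to an RMS-based measure. As a hedged illustration of what an RMS level computation looks like for int16 samples (not the exact mapping used by WebRTC's RmsLevel or AudioLevel classes):

```cpp
#include <cmath>
#include <cstddef>
#include <cstdint>

// Hypothetical helper: RMS level of int16 PCM samples in dB relative to
// full scale (dBFS). Returns a floor value for an empty buffer.
double RmsDbfs(const int16_t* samples, size_t count) {
  if (count == 0) return -120.0;
  double sum_squares = 0.0;
  for (size_t i = 0; i < count; ++i) {
    const double s = samples[i] / 32768.0;
    sum_squares += s * s;
  }
  return 20.0 * std::log10(std::sqrt(sum_squares / count) + 1e-12);
}
```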
| (...skipping 59 matching lines...) |
| 767 // Determine highest needed receive frequency | 750 // Determine highest needed receive frequency |
| 768 int32_t receiveFrequency = audio_coding_->ReceiveFrequency(); | 751 int32_t receiveFrequency = audio_coding_->ReceiveFrequency(); |
| 769 | 752 |
| 770 // Return the bigger of playout and receive frequency in the ACM. | 753 // Return the bigger of playout and receive frequency in the ACM. |
| 771 if (audio_coding_->PlayoutFrequency() > receiveFrequency) { | 754 if (audio_coding_->PlayoutFrequency() > receiveFrequency) { |
| 772 highestNeeded = audio_coding_->PlayoutFrequency(); | 755 highestNeeded = audio_coding_->PlayoutFrequency(); |
| 773 } else { | 756 } else { |
| 774 highestNeeded = receiveFrequency; | 757 highestNeeded = receiveFrequency; |
| 775 } | 758 } |
| 776 | 759 |
| 777 // Special case, if we're playing a file on the playout side | 760 return highestNeeded; |
| 778 // we take that frequency into consideration as well | |
| 779 // This is not needed on sending side, since the codec will | |
| 780 // limit the spectrum anyway. | |
| 781 if (channel_state_.Get().output_file_playing) { | |
| 782 rtc::CritScope cs(&_fileCritSect); | |
| 783 if (output_file_player_) { | |
| 784 if (output_file_player_->Frequency() > highestNeeded) { | |
| 785 highestNeeded = output_file_player_->Frequency(); | |
| 786 } | |
| 787 } | |
| 788 } | |
| 789 | |
| 790 return (highestNeeded); | |
| 791 } | 761 } |
| 792 | 762 |
| 793 int32_t Channel::CreateChannel(Channel*& channel, | 763 int32_t Channel::CreateChannel(Channel*& channel, |
| 794 int32_t channelId, | 764 int32_t channelId, |
| 795 uint32_t instanceId, | 765 uint32_t instanceId, |
| 796 const VoEBase::ChannelConfig& config) { | 766 const VoEBase::ChannelConfig& config) { |
| 797 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), | 767 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), |
| 798 "Channel::CreateChannel(channelId=%d, instanceId=%d)", channelId, | 768 "Channel::CreateChannel(channelId=%d, instanceId=%d)", channelId, |
| 799 instanceId); | 769 instanceId); |
| 800 | 770 |
| 801 channel = new Channel(channelId, instanceId, config); | 771 channel = new Channel(channelId, instanceId, config); |
| 802 if (channel == NULL) { | 772 if (channel == NULL) { |
| 803 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), | 773 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), |
| 804 "Channel::CreateChannel() unable to allocate memory for" | 774 "Channel::CreateChannel() unable to allocate memory for" |
| 805 " channel"); | 775 " channel"); |
| 806 return -1; | 776 return -1; |
| 807 } | 777 } |
| 808 return 0; | 778 return 0; |
| 809 } | 779 } |
| 810 | 780 |
| 811 void Channel::PlayNotification(int32_t id, uint32_t durationMs) { | |
| 812 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 813 "Channel::PlayNotification(id=%d, durationMs=%d)", id, | |
| 814 durationMs); | |
| 815 | |
| 816 // Not implemented yet | |
| 817 } | |
| 818 | |
| 819 void Channel::RecordNotification(int32_t id, uint32_t durationMs) { | |
| 820 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 821 "Channel::RecordNotification(id=%d, durationMs=%d)", id, | |
| 822 durationMs); | |
| 823 | |
| 825 // Not implemented yet | |
| 825 } | |
| 826 | |
| 827 void Channel::PlayFileEnded(int32_t id) { | |
| 828 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 829 "Channel::PlayFileEnded(id=%d)", id); | |
| 830 | |
| 831 if (id == _inputFilePlayerId) { | |
| 832 channel_state_.SetInputFilePlaying(false); | |
| 833 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 834 "Channel::PlayFileEnded() => input file player module is" | |
| 835 " shutdown"); | |
| 836 } else if (id == _outputFilePlayerId) { | |
| 837 channel_state_.SetOutputFilePlaying(false); | |
| 838 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 839 "Channel::PlayFileEnded() => output file player module is" | |
| 840 " shutdown"); | |
| 841 } | |
| 842 } | |
| 843 | |
| 844 void Channel::RecordFileEnded(int32_t id) { | |
| 845 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 846 "Channel::RecordFileEnded(id=%d)", id); | |
| 847 | |
| 848 assert(id == _outputFileRecorderId); | |
| 849 | |
| 850 rtc::CritScope cs(&_fileCritSect); | |
| 851 | |
| 852 _outputFileRecording = false; | |
| 853 WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 854 "Channel::RecordFileEnded() => output file recorder module is" | |
| 855 " shutdown"); | |
| 856 } | |
| 857 | |
| 858 Channel::Channel(int32_t channelId, | 781 Channel::Channel(int32_t channelId, |
| 859 uint32_t instanceId, | 782 uint32_t instanceId, |
| 860 const VoEBase::ChannelConfig& config) | 783 const VoEBase::ChannelConfig& config) |
| 861 : _instanceId(instanceId), | 784 : _instanceId(instanceId), |
| 862 _channelId(channelId), | 785 _channelId(channelId), |
| 863 event_log_proxy_(new RtcEventLogProxy()), | 786 event_log_proxy_(new RtcEventLogProxy()), |
| 864 rtcp_rtt_stats_proxy_(new RtcpRttStatsProxy()), | 787 rtcp_rtt_stats_proxy_(new RtcpRttStatsProxy()), |
| 865 rtp_header_parser_(RtpHeaderParser::Create()), | 788 rtp_header_parser_(RtpHeaderParser::Create()), |
| 866 rtp_payload_registry_(new RTPPayloadRegistry()), | 789 rtp_payload_registry_(new RTPPayloadRegistry()), |
| 867 rtp_receive_statistics_( | 790 rtp_receive_statistics_( |
| 868 ReceiveStatistics::Create(Clock::GetRealTimeClock())), | 791 ReceiveStatistics::Create(Clock::GetRealTimeClock())), |
| 869 rtp_receiver_( | 792 rtp_receiver_( |
| 870 RtpReceiver::CreateAudioReceiver(Clock::GetRealTimeClock(), | 793 RtpReceiver::CreateAudioReceiver(Clock::GetRealTimeClock(), |
| 871 this, | 794 this, |
| 872 this, | 795 this, |
| 873 rtp_payload_registry_.get())), | 796 rtp_payload_registry_.get())), |
| 874 telephone_event_handler_(rtp_receiver_->GetTelephoneEventHandler()), | 797 telephone_event_handler_(rtp_receiver_->GetTelephoneEventHandler()), |
| 875 _outputAudioLevel(), | 798 _outputAudioLevel(), |
| 876 _externalTransport(false), | 799 _externalTransport(false), |
| 877 // Avoid conflict with other channels by adding 1024 - 1026, | |
| 878 // won't use as many as 1024 channels. | |
| 879 _inputFilePlayerId(VoEModuleId(instanceId, channelId) + 1024), | |
| 880 _outputFilePlayerId(VoEModuleId(instanceId, channelId) + 1025), | |
| 881 _outputFileRecorderId(VoEModuleId(instanceId, channelId) + 1026), | |
| 882 _outputFileRecording(false), | |
| 883 _timeStamp(0), // This is just an offset, RTP module will add its own | 800 _timeStamp(0), // This is just an offset, RTP module will add its own |
| 884 // random offset | 801 // random offset |
| 885 ntp_estimator_(Clock::GetRealTimeClock()), | 802 ntp_estimator_(Clock::GetRealTimeClock()), |
| 886 playout_timestamp_rtp_(0), | 803 playout_timestamp_rtp_(0), |
| 887 playout_delay_ms_(0), | 804 playout_delay_ms_(0), |
| 888 send_sequence_number_(0), | 805 send_sequence_number_(0), |
| 889 rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()), | 806 rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()), |
| 890 capture_start_rtp_time_stamp_(-1), | 807 capture_start_rtp_time_stamp_(-1), |
| 891 capture_start_ntp_time_ms_(-1), | 808 capture_start_ntp_time_ms_(-1), |
| 892 _engineStatisticsPtr(NULL), | 809 _engineStatisticsPtr(NULL), |
| 893 _outputMixerPtr(NULL), | 810 _outputMixerPtr(NULL), |
| 894 _moduleProcessThreadPtr(NULL), | 811 _moduleProcessThreadPtr(NULL), |
| 895 _audioDeviceModulePtr(NULL), | 812 _audioDeviceModulePtr(NULL), |
| 896 _voiceEngineObserverPtr(NULL), | 813 _voiceEngineObserverPtr(NULL), |
| 897 _callbackCritSectPtr(NULL), | 814 _callbackCritSectPtr(NULL), |
| 898 _transportPtr(NULL), | 815 _transportPtr(NULL), |
| 899 input_mute_(false), | 816 input_mute_(false), |
| 900 previous_frame_muted_(false), | 817 previous_frame_muted_(false), |
| 901 _outputGain(1.0f), | 818 _outputGain(1.0f), |
| 902 _mixFileWithMicrophone(false), | |
| 903 _includeAudioLevelIndication(false), | 819 _includeAudioLevelIndication(false), |
| 904 transport_overhead_per_packet_(0), | 820 transport_overhead_per_packet_(0), |
| 905 rtp_overhead_per_packet_(0), | 821 rtp_overhead_per_packet_(0), |
| 906 _outputSpeechType(AudioFrame::kNormalSpeech), | 822 _outputSpeechType(AudioFrame::kNormalSpeech), |
| 907 rtcp_observer_(new VoERtcpObserver(this)), | 823 rtcp_observer_(new VoERtcpObserver(this)), |
| 908 associate_send_channel_(ChannelOwner(nullptr)), | 824 associate_send_channel_(ChannelOwner(nullptr)), |
| 909 pacing_enabled_(config.enable_voice_pacing), | 825 pacing_enabled_(config.enable_voice_pacing), |
| 910 feedback_observer_proxy_(new TransportFeedbackProxy()), | 826 feedback_observer_proxy_(new TransportFeedbackProxy()), |
| 911 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), | 827 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), |
| 912 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()), | 828 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()), |
| (...skipping 168 matching lines...) |
| 1081 RTC_DCHECK(construction_thread_.CalledOnValidThread()); | 997 RTC_DCHECK(construction_thread_.CalledOnValidThread()); |
| 1082 // Must be called on the same thread as Init(). | 998 // Must be called on the same thread as Init(). |
| 1083 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId), | 999 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId), |
| 1084 "Channel::Terminate"); | 1000 "Channel::Terminate"); |
| 1085 | 1001 |
| 1086 rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL); | 1002 rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL); |
| 1087 | 1003 |
| 1088 StopSend(); | 1004 StopSend(); |
| 1089 StopPlayout(); | 1005 StopPlayout(); |
| 1090 | 1006 |
| 1091 { | |
| 1092 rtc::CritScope cs(&_fileCritSect); | |
| 1093 if (input_file_player_) { | |
| 1094 input_file_player_->RegisterModuleFileCallback(NULL); | |
| 1095 input_file_player_->StopPlayingFile(); | |
| 1096 } | |
| 1097 if (output_file_player_) { | |
| 1098 output_file_player_->RegisterModuleFileCallback(NULL); | |
| 1099 output_file_player_->StopPlayingFile(); | |
| 1100 } | |
| 1101 if (output_file_recorder_) { | |
| 1102 output_file_recorder_->RegisterModuleFileCallback(NULL); | |
| 1103 output_file_recorder_->StopRecording(); | |
| 1104 } | |
| 1105 } | |
| 1106 | |
| 1107 // The order to safely shutdown modules in a channel is: | 1007 // The order to safely shutdown modules in a channel is: |
| 1108 // 1. De-register callbacks in modules | 1008 // 1. De-register callbacks in modules |
| 1109 // 2. De-register modules in process thread | 1009 // 2. De-register modules in process thread |
| 1110 // 3. Destroy modules | 1010 // 3. Destroy modules |
| 1111 if (audio_coding_->RegisterTransportCallback(NULL) == -1) { | 1011 if (audio_coding_->RegisterTransportCallback(NULL) == -1) { |
| 1112 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | 1012 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), |
| 1113 "Terminate() failed to de-register transport callback" | 1013 "Terminate() failed to de-register transport callback" |
| 1114 " (Audio coding module)"); | 1014 " (Audio coding module)"); |
| 1115 } | 1015 } |
| 1116 | 1016 |
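The comment above spells out the teardown order: de-register callbacks first, then de-register the modules from the process thread, and only then destroy them, so nothing can call into a half-destroyed module. A simplified sketch of that pattern with hypothetical Module/ProcessThread types (not the actual VoE interfaces):

```cpp
#include <memory>

// Hypothetical types, only to illustrate the ordering described above.
struct Module {
  void* callback = nullptr;
  void RegisterCallback(void* cb) { callback = cb; }  // nullptr de-registers.
};

struct ProcessThread {
  void DeRegisterModule(Module*) { /* stop ticking the module */ }
};

void ShutdownModule(std::unique_ptr<Module> module, ProcessThread* thread) {
  module->RegisterCallback(nullptr);       // 1. De-register callbacks.
  thread->DeRegisterModule(module.get());  // 2. De-register from the thread.
  module.reset();                          // 3. Destroy the module last.
}
```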
| (...skipping 50 matching lines...) |
| 1167 | 1067 |
| 1168 // Add participant as candidates for mixing. | 1068 // Add participant as candidates for mixing. |
| 1169 if (_outputMixerPtr->SetMixabilityStatus(*this, true) != 0) { | 1069 if (_outputMixerPtr->SetMixabilityStatus(*this, true) != 0) { |
| 1170 _engineStatisticsPtr->SetLastError( | 1070 _engineStatisticsPtr->SetLastError( |
| 1171 VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError, | 1071 VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError, |
| 1172 "StartPlayout() failed to add participant to mixer"); | 1072 "StartPlayout() failed to add participant to mixer"); |
| 1173 return -1; | 1073 return -1; |
| 1174 } | 1074 } |
| 1175 | 1075 |
| 1176 channel_state_.SetPlaying(true); | 1076 channel_state_.SetPlaying(true); |
| 1177 if (RegisterFilePlayingToMixer() != 0) | |
| 1178 return -1; | |
| 1179 | 1077 |
| 1180 return 0; | 1078 return 0; |
| 1181 } | 1079 } |
| 1182 | 1080 |
| 1183 int32_t Channel::StopPlayout() { | 1081 int32_t Channel::StopPlayout() { |
| 1184 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | 1082 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), |
| 1185 "Channel::StopPlayout()"); | 1083 "Channel::StopPlayout()"); |
| 1186 if (!channel_state_.Get().playing) { | 1084 if (!channel_state_.Get().playing) { |
| 1187 return 0; | 1085 return 0; |
| 1188 } | 1086 } |
| (...skipping 627 matching lines...) |
| 1816 return 0; | 1714 return 0; |
| 1817 } | 1715 } |
| 1818 | 1716 |
| 1819 { | 1717 { |
| 1820 rtc::CritScope lock(&ts_stats_lock_); | 1718 rtc::CritScope lock(&ts_stats_lock_); |
| 1821 ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp); | 1719 ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp); |
| 1822 } | 1720 } |
| 1823 return 0; | 1721 return 0; |
| 1824 } | 1722 } |
| 1825 | 1723 |
| 1826 int Channel::StartPlayingFileLocally(const char* fileName, | |
| 1827 bool loop, | |
| 1828 FileFormats format, | |
| 1829 int startPosition, | |
| 1830 float volumeScaling, | |
| 1831 int stopPosition, | |
| 1832 const CodecInst* codecInst) { | |
| 1833 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 1834 "Channel::StartPlayingFileLocally(fileNameUTF8[]=%s, loop=%d," | |
| 1835 " format=%d, volumeScaling=%5.3f, startPosition=%d, " | |
| 1836 "stopPosition=%d)", | |
| 1837 fileName, loop, format, volumeScaling, startPosition, | |
| 1838 stopPosition); | |
| 1839 | |
| 1840 if (channel_state_.Get().output_file_playing) { | |
| 1841 _engineStatisticsPtr->SetLastError( | |
| 1842 VE_ALREADY_PLAYING, kTraceError, | |
| 1843 "StartPlayingFileLocally() is already playing"); | |
| 1844 return -1; | |
| 1845 } | |
| 1846 | |
| 1847 { | |
| 1848 rtc::CritScope cs(&_fileCritSect); | |
| 1849 | |
| 1850 if (output_file_player_) { | |
| 1851 output_file_player_->RegisterModuleFileCallback(NULL); | |
| 1852 output_file_player_.reset(); | |
| 1853 } | |
| 1854 | |
| 1855 output_file_player_ = FilePlayer::CreateFilePlayer( | |
| 1856 _outputFilePlayerId, (const FileFormats)format); | |
| 1857 | |
| 1858 if (!output_file_player_) { | |
| 1859 _engineStatisticsPtr->SetLastError( | |
| 1860 VE_INVALID_ARGUMENT, kTraceError, | |
| 1861 "StartPlayingFileLocally() filePlayer format is not correct"); | |
| 1862 return -1; | |
| 1863 } | |
| 1864 | |
| 1865 const uint32_t notificationTime(0); | |
| 1866 | |
| 1867 if (output_file_player_->StartPlayingFile( | |
| 1868 fileName, loop, startPosition, volumeScaling, notificationTime, | |
| 1869 stopPosition, (const CodecInst*)codecInst) != 0) { | |
| 1870 _engineStatisticsPtr->SetLastError( | |
| 1871 VE_BAD_FILE, kTraceError, | |
| 1872 "StartPlayingFile() failed to start file playout"); | |
| 1873 output_file_player_->StopPlayingFile(); | |
| 1874 output_file_player_.reset(); | |
| 1875 return -1; | |
| 1876 } | |
| 1877 output_file_player_->RegisterModuleFileCallback(this); | |
| 1878 channel_state_.SetOutputFilePlaying(true); | |
| 1879 } | |
| 1880 | |
| 1881 if (RegisterFilePlayingToMixer() != 0) | |
| 1882 return -1; | |
| 1883 | |
| 1884 return 0; | |
| 1885 } | |
| 1886 | |
| 1887 int Channel::StartPlayingFileLocally(InStream* stream, | |
| 1888 FileFormats format, | |
| 1889 int startPosition, | |
| 1890 float volumeScaling, | |
| 1891 int stopPosition, | |
| 1892 const CodecInst* codecInst) { | |
| 1893 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 1894 "Channel::StartPlayingFileLocally(format=%d," | |
| 1895 " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)", | |
| 1896 format, volumeScaling, startPosition, stopPosition); | |
| 1897 | |
| 1898 if (stream == NULL) { | |
| 1899 _engineStatisticsPtr->SetLastError( | |
| 1900 VE_BAD_FILE, kTraceError, | |
| 1901 "StartPlayingFileLocally() NULL as input stream"); | |
| 1902 return -1; | |
| 1903 } | |
| 1904 | |
| 1905 if (channel_state_.Get().output_file_playing) { | |
| 1906 _engineStatisticsPtr->SetLastError( | |
| 1907 VE_ALREADY_PLAYING, kTraceError, | |
| 1908 "StartPlayingFileLocally() is already playing"); | |
| 1909 return -1; | |
| 1910 } | |
| 1911 | |
| 1912 { | |
| 1913 rtc::CritScope cs(&_fileCritSect); | |
| 1914 | |
| 1915 // Destroy the old instance | |
| 1916 if (output_file_player_) { | |
| 1917 output_file_player_->RegisterModuleFileCallback(NULL); | |
| 1918 output_file_player_.reset(); | |
| 1919 } | |
| 1920 | |
| 1921 // Create the instance | |
| 1922 output_file_player_ = FilePlayer::CreateFilePlayer( | |
| 1923 _outputFilePlayerId, (const FileFormats)format); | |
| 1924 | |
| 1925 if (!output_file_player_) { | |
| 1926 _engineStatisticsPtr->SetLastError( | |
| 1927 VE_INVALID_ARGUMENT, kTraceError, | |
| 1928 "StartPlayingFileLocally() filePlayer format isnot correct"); | |
| 1929 return -1; | |
| 1930 } | |
| 1931 | |
| 1932 const uint32_t notificationTime(0); | |
| 1933 | |
| 1934 if (output_file_player_->StartPlayingFile(stream, startPosition, | |
| 1935 volumeScaling, notificationTime, | |
| 1936 stopPosition, codecInst) != 0) { | |
| 1937 _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError, | |
| 1938 "StartPlayingFile() failed to " | |
| 1939 "start file playout"); | |
| 1940 output_file_player_->StopPlayingFile(); | |
| 1941 output_file_player_.reset(); | |
| 1942 return -1; | |
| 1943 } | |
| 1944 output_file_player_->RegisterModuleFileCallback(this); | |
| 1945 channel_state_.SetOutputFilePlaying(true); | |
| 1946 } | |
| 1947 | |
| 1948 if (RegisterFilePlayingToMixer() != 0) | |
| 1949 return -1; | |
| 1950 | |
| 1951 return 0; | |
| 1952 } | |
| 1953 | |
| 1954 int Channel::StopPlayingFileLocally() { | |
| 1955 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 1956 "Channel::StopPlayingFileLocally()"); | |
| 1957 | |
| 1958 if (!channel_state_.Get().output_file_playing) { | |
| 1959 return 0; | |
| 1960 } | |
| 1961 | |
| 1962 { | |
| 1963 rtc::CritScope cs(&_fileCritSect); | |
| 1964 | |
| 1965 if (output_file_player_->StopPlayingFile() != 0) { | |
| 1966 _engineStatisticsPtr->SetLastError( | |
| 1967 VE_STOP_RECORDING_FAILED, kTraceError, | |
| 1968 "StopPlayingFile() could not stop playing"); | |
| 1969 return -1; | |
| 1970 } | |
| 1971 output_file_player_->RegisterModuleFileCallback(NULL); | |
| 1972 output_file_player_.reset(); | |
| 1973 channel_state_.SetOutputFilePlaying(false); | |
| 1974 } | |
| 1975 // _fileCritSect cannot be taken while calling | |
| 1976 // SetAnonymousMixabilityStatus. Refer to comments in | |
| 1977 // StartPlayingFileLocally(const char* ...) for more details. | |
| 1978 if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, false) != 0) { | |
| 1979 _engineStatisticsPtr->SetLastError( | |
| 1980 VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError, | |
| 1981 "StopPlayingFile() failed to stop participant from playing as" | |
| 1982 "file in the mixer"); | |
| 1983 return -1; | |
| 1984 } | |
| 1985 | |
| 1986 return 0; | |
| 1987 } | |
| 1988 | |
| 1989 int Channel::IsPlayingFileLocally() const { | |
| 1990 return channel_state_.Get().output_file_playing; | |
| 1991 } | |
| 1992 | |
| 1993 int Channel::RegisterFilePlayingToMixer() { | |
| 1994 // Return success for not registering for file playing to mixer if: | |
| 1995 // 1. playing file before playout is started on that channel. | |
| 1996 // 2. starting playout without file playing on that channel. | |
| 1997 if (!channel_state_.Get().playing || | |
| 1998 !channel_state_.Get().output_file_playing) { | |
| 1999 return 0; | |
| 2000 } | |
| 2001 | |
| 2002 // |_fileCritSect| cannot be taken while calling | |
| 2003 // SetAnonymousMixabilityStatus() since as soon as the participant is added | |
| 2004 // frames can be pulled by the mixer. Since the frames are generated from | |
| 2005 // the file, _fileCritSect will be taken. This would result in a deadlock. | |
| 2006 if (_outputMixerPtr->SetAnonymousMixabilityStatus(*this, true) != 0) { | |
| 2007 channel_state_.SetOutputFilePlaying(false); | |
| 2008 rtc::CritScope cs(&_fileCritSect); | |
| 2009 _engineStatisticsPtr->SetLastError( | |
| 2010 VE_AUDIO_CONF_MIX_MODULE_ERROR, kTraceError, | |
| 2011 "StartPlayingFile() failed to add participant as file to mixer"); | |
| 2012 output_file_player_->StopPlayingFile(); | |
| 2013 output_file_player_.reset(); | |
| 2014 return -1; | |
| 2015 } | |
| 2016 | |
| 2017 return 0; | |
| 2018 } | |
| 2019 | |
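The deleted RegisterFilePlayingToMixer() above documents why |_fileCritSect| must not be held across SetAnonymousMixabilityStatus(): the mixer may immediately start pulling frames, and producing those frames needs the same lock. A small sketch of that hazard and the safe shape, using std::mutex in place of rtc::CritScope (hypothetical function names, for illustration only):

```cpp
#include <mutex>

std::mutex file_mutex;  // Stands in for |_fileCritSect| in this sketch.

// The mixer can call this as soon as the participant is registered; it needs
// file_mutex to pull 10 ms of audio from the file player.
void PullFileAudio() {
  std::lock_guard<std::mutex> lock(file_mutex);
  // ... read audio from the file ...
}

void RegisterWithMixerDeadlocks() {
  std::lock_guard<std::mutex> lock(file_mutex);
  // Registering with the mixer here can synchronously reach PullFileAudio(),
  // which blocks on file_mutex -> deadlock.
}

void RegisterWithMixerSafe() {
  {
    std::lock_guard<std::mutex> lock(file_mutex);
    // Do only the file-player bookkeeping under the lock ...
  }
  // ... and register with the mixer after the lock is released, which is why
  // the code above takes |_fileCritSect| only in the error-handling path.
}
```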
| 2020 int Channel::StartPlayingFileAsMicrophone(const char* fileName, | |
| 2021 bool loop, | |
| 2022 FileFormats format, | |
| 2023 int startPosition, | |
| 2024 float volumeScaling, | |
| 2025 int stopPosition, | |
| 2026 const CodecInst* codecInst) { | |
| 2027 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2028 "Channel::StartPlayingFileAsMicrophone(fileNameUTF8[]=%s, " | |
| 2029 "loop=%d, format=%d, volumeScaling=%5.3f, startPosition=%d, " | |
| 2030 "stopPosition=%d)", | |
| 2031 fileName, loop, format, volumeScaling, startPosition, | |
| 2032 stopPosition); | |
| 2033 | |
| 2034 rtc::CritScope cs(&_fileCritSect); | |
| 2035 | |
| 2036 if (channel_state_.Get().input_file_playing) { | |
| 2037 _engineStatisticsPtr->SetLastError( | |
| 2038 VE_ALREADY_PLAYING, kTraceWarning, | |
| 2039 "StartPlayingFileAsMicrophone() filePlayer is playing"); | |
| 2040 return 0; | |
| 2041 } | |
| 2042 | |
| 2043 // Destroy the old instance | |
| 2044 if (input_file_player_) { | |
| 2045 input_file_player_->RegisterModuleFileCallback(NULL); | |
| 2046 input_file_player_.reset(); | |
| 2047 } | |
| 2048 | |
| 2049 // Create the instance | |
| 2050 input_file_player_ = FilePlayer::CreateFilePlayer(_inputFilePlayerId, | |
| 2051 (const FileFormats)format); | |
| 2052 | |
| 2053 if (!input_file_player_) { | |
| 2054 _engineStatisticsPtr->SetLastError( | |
| 2055 VE_INVALID_ARGUMENT, kTraceError, | |
| 2056 "StartPlayingFileAsMicrophone() filePlayer format isnot correct"); | |
| 2057 return -1; | |
| 2058 } | |
| 2059 | |
| 2060 const uint32_t notificationTime(0); | |
| 2061 | |
| 2062 if (input_file_player_->StartPlayingFile( | |
| 2063 fileName, loop, startPosition, volumeScaling, notificationTime, | |
| 2064 stopPosition, (const CodecInst*)codecInst) != 0) { | |
| 2065 _engineStatisticsPtr->SetLastError( | |
| 2066 VE_BAD_FILE, kTraceError, | |
| 2067 "StartPlayingFile() failed to start file playout"); | |
| 2068 input_file_player_->StopPlayingFile(); | |
| 2069 input_file_player_.reset(); | |
| 2070 return -1; | |
| 2071 } | |
| 2072 input_file_player_->RegisterModuleFileCallback(this); | |
| 2073 channel_state_.SetInputFilePlaying(true); | |
| 2074 | |
| 2075 return 0; | |
| 2076 } | |
| 2077 | |
| 2078 int Channel::StartPlayingFileAsMicrophone(InStream* stream, | |
| 2079 FileFormats format, | |
| 2080 int startPosition, | |
| 2081 float volumeScaling, | |
| 2082 int stopPosition, | |
| 2083 const CodecInst* codecInst) { | |
| 2084 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2085 "Channel::StartPlayingFileAsMicrophone(format=%d, " | |
| 2086 "volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)", | |
| 2087 format, volumeScaling, startPosition, stopPosition); | |
| 2088 | |
| 2089 if (stream == NULL) { | |
| 2090 _engineStatisticsPtr->SetLastError( | |
| 2091 VE_BAD_FILE, kTraceError, | |
| 2092 "StartPlayingFileAsMicrophone NULL as input stream"); | |
| 2093 return -1; | |
| 2094 } | |
| 2095 | |
| 2096 rtc::CritScope cs(&_fileCritSect); | |
| 2097 | |
| 2098 if (channel_state_.Get().input_file_playing) { | |
| 2099 _engineStatisticsPtr->SetLastError( | |
| 2100 VE_ALREADY_PLAYING, kTraceWarning, | |
| 2101 "StartPlayingFileAsMicrophone() is playing"); | |
| 2102 return 0; | |
| 2103 } | |
| 2104 | |
| 2105 // Destroy the old instance | |
| 2106 if (input_file_player_) { | |
| 2107 input_file_player_->RegisterModuleFileCallback(NULL); | |
| 2108 input_file_player_.reset(); | |
| 2109 } | |
| 2110 | |
| 2111 // Create the instance | |
| 2112 input_file_player_ = FilePlayer::CreateFilePlayer(_inputFilePlayerId, | |
| 2113 (const FileFormats)format); | |
| 2114 | |
| 2115 if (!input_file_player_) { | |
| 2116 _engineStatisticsPtr->SetLastError( | |
| 2117 VE_INVALID_ARGUMENT, kTraceError, | |
| 2118 "StartPlayingInputFile() filePlayer format isnot correct"); | |
| 2119 return -1; | |
| 2120 } | |
| 2121 | |
| 2122 const uint32_t notificationTime(0); | |
| 2123 | |
| 2124 if (input_file_player_->StartPlayingFile(stream, startPosition, volumeScaling, | |
| 2125 notificationTime, stopPosition, | |
| 2126 codecInst) != 0) { | |
| 2127 _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError, | |
| 2128 "StartPlayingFile() failed to start " | |
| 2129 "file playout"); | |
| 2130 input_file_player_->StopPlayingFile(); | |
| 2131 input_file_player_.reset(); | |
| 2132 return -1; | |
| 2133 } | |
| 2134 | |
| 2135 input_file_player_->RegisterModuleFileCallback(this); | |
| 2136 channel_state_.SetInputFilePlaying(true); | |
| 2137 | |
| 2138 return 0; | |
| 2139 } | |
| 2140 | |
| 2141 int Channel::StopPlayingFileAsMicrophone() { | |
| 2142 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2143 "Channel::StopPlayingFileAsMicrophone()"); | |
| 2144 | |
| 2145 rtc::CritScope cs(&_fileCritSect); | |
| 2146 | |
| 2147 if (!channel_state_.Get().input_file_playing) { | |
| 2148 return 0; | |
| 2149 } | |
| 2150 | |
| 2151 if (input_file_player_->StopPlayingFile() != 0) { | |
| 2152 _engineStatisticsPtr->SetLastError( | |
| 2153 VE_STOP_RECORDING_FAILED, kTraceError, | |
| 2154 "StopPlayingFile() could not stop playing"); | |
| 2155 return -1; | |
| 2156 } | |
| 2157 input_file_player_->RegisterModuleFileCallback(NULL); | |
| 2158 input_file_player_.reset(); | |
| 2159 channel_state_.SetInputFilePlaying(false); | |
| 2160 | |
| 2161 return 0; | |
| 2162 } | |
| 2163 | |
| 2164 int Channel::IsPlayingFileAsMicrophone() const { | |
| 2165 return channel_state_.Get().input_file_playing; | |
| 2166 } | |
| 2167 | |
| 2168 int Channel::StartRecordingPlayout(const char* fileName, | |
| 2169 const CodecInst* codecInst) { | |
| 2170 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2171 "Channel::StartRecordingPlayout(fileName=%s)", fileName); | |
| 2172 | |
| 2173 if (_outputFileRecording) { | |
| 2174 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1), | |
| 2175 "StartRecordingPlayout() is already recording"); | |
| 2176 return 0; | |
| 2177 } | |
| 2178 | |
| 2179 FileFormats format; | |
| 2180 const uint32_t notificationTime(0); // Not supported in VoE | |
| 2181 CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000}; | |
| 2182 | |
| 2183 if ((codecInst != NULL) && | |
| 2184 ((codecInst->channels < 1) || (codecInst->channels > 2))) { | |
| 2185 _engineStatisticsPtr->SetLastError( | |
| 2186 VE_BAD_ARGUMENT, kTraceError, | |
| 2187 "StartRecordingPlayout() invalid compression"); | |
| 2188 return (-1); | |
| 2189 } | |
| 2190 if (codecInst == NULL) { | |
| 2191 format = kFileFormatPcm16kHzFile; | |
| 2192 codecInst = &dummyCodec; | |
| 2193 } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) || | |
| 2194 (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) || | |
| 2195 (STR_CASE_CMP(codecInst->plname, "PCMA") == 0)) { | |
| 2196 format = kFileFormatWavFile; | |
| 2197 } else { | |
| 2198 format = kFileFormatCompressedFile; | |
| 2199 } | |
| 2200 | |
| 2201 rtc::CritScope cs(&_fileCritSect); | |
| 2202 | |
| 2203 // Destroy the old instance | |
| 2204 if (output_file_recorder_) { | |
| 2205 output_file_recorder_->RegisterModuleFileCallback(NULL); | |
| 2206 output_file_recorder_.reset(); | |
| 2207 } | |
| 2208 | |
| 2209 output_file_recorder_ = FileRecorder::CreateFileRecorder( | |
| 2210 _outputFileRecorderId, (const FileFormats)format); | |
| 2211 if (!output_file_recorder_) { | |
| 2212 _engineStatisticsPtr->SetLastError( | |
| 2213 VE_INVALID_ARGUMENT, kTraceError, | |
| 2214 "StartRecordingPlayout() fileRecorder format isnot correct"); | |
| 2215 return -1; | |
| 2216 } | |
| 2217 | |
| 2218 if (output_file_recorder_->StartRecordingAudioFile( | |
| 2219 fileName, (const CodecInst&)*codecInst, notificationTime) != 0) { | |
| 2220 _engineStatisticsPtr->SetLastError( | |
| 2221 VE_BAD_FILE, kTraceError, | |
| 2222 "StartRecordingAudioFile() failed to start file recording"); | |
| 2223 output_file_recorder_->StopRecording(); | |
| 2224 output_file_recorder_.reset(); | |
| 2225 return -1; | |
| 2226 } | |
| 2227 output_file_recorder_->RegisterModuleFileCallback(this); | |
| 2228 _outputFileRecording = true; | |
| 2229 | |
| 2230 return 0; | |
| 2231 } | |
| 2232 | |
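StartRecordingPlayout() above (and its OutStream overload below) picks a container from the requested codec: no codec defaults to 16 kHz linear PCM, L16/PCMU/PCMA go to WAV, and anything else goes to the compressed-file format. A hedged standalone restatement of that mapping, with a hypothetical enum and helper standing in for FileFormats and STR_CASE_CMP:

```cpp
#include <cctype>
#include <cstring>

enum FileFormat { kPcm16kHzFile, kWavFile, kCompressedFile };

// Case-insensitive string comparison, standing in for STR_CASE_CMP above.
bool EqualsIgnoreCase(const char* a, const char* b) {
  if (std::strlen(a) != std::strlen(b)) return false;
  for (; *a != '\0'; ++a, ++b) {
    if (std::tolower(static_cast<unsigned char>(*a)) !=
        std::tolower(static_cast<unsigned char>(*b))) {
      return false;
    }
  }
  return true;
}

// Hypothetical restatement of the codec-to-container choice made above.
FileFormat ChooseRecordingFormat(const char* codec_name) {
  if (codec_name == nullptr) return kPcm16kHzFile;  // Mirrors the dummy L16 default.
  if (EqualsIgnoreCase(codec_name, "L16") ||
      EqualsIgnoreCase(codec_name, "PCMU") ||
      EqualsIgnoreCase(codec_name, "PCMA")) {
    return kWavFile;
  }
  return kCompressedFile;
}
```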
| 2233 int Channel::StartRecordingPlayout(OutStream* stream, | |
| 2234 const CodecInst* codecInst) { | |
| 2235 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2236 "Channel::StartRecordingPlayout()"); | |
| 2237 | |
| 2238 if (_outputFileRecording) { | |
| 2239 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1), | |
| 2240 "StartRecordingPlayout() is already recording"); | |
| 2241 return 0; | |
| 2242 } | |
| 2243 | |
| 2244 FileFormats format; | |
| 2245 const uint32_t notificationTime(0); // Not supported in VoE | |
| 2246 CodecInst dummyCodec = {100, "L16", 16000, 320, 1, 320000}; | |
| 2247 | |
| 2248 if (codecInst != NULL && codecInst->channels != 1) { | |
| 2249 _engineStatisticsPtr->SetLastError( | |
| 2250 VE_BAD_ARGUMENT, kTraceError, | |
| 2251 "StartRecordingPlayout() invalid compression"); | |
| 2252 return (-1); | |
| 2253 } | |
| 2254 if (codecInst == NULL) { | |
| 2255 format = kFileFormatPcm16kHzFile; | |
| 2256 codecInst = &dummyCodec; | |
| 2257 } else if ((STR_CASE_CMP(codecInst->plname, "L16") == 0) || | |
| 2258 (STR_CASE_CMP(codecInst->plname, "PCMU") == 0) || | |
| 2259 (STR_CASE_CMP(codecInst->plname, "PCMA") == 0)) { | |
| 2260 format = kFileFormatWavFile; | |
| 2261 } else { | |
| 2262 format = kFileFormatCompressedFile; | |
| 2263 } | |
| 2264 | |
| 2265 rtc::CritScope cs(&_fileCritSect); | |
| 2266 | |
| 2267 // Destroy the old instance | |
| 2268 if (output_file_recorder_) { | |
| 2269 output_file_recorder_->RegisterModuleFileCallback(NULL); | |
| 2270 output_file_recorder_.reset(); | |
| 2271 } | |
| 2272 | |
| 2273 output_file_recorder_ = FileRecorder::CreateFileRecorder( | |
| 2274 _outputFileRecorderId, (const FileFormats)format); | |
| 2275 if (!output_file_recorder_) { | |
| 2276 _engineStatisticsPtr->SetLastError( | |
| 2277 VE_INVALID_ARGUMENT, kTraceError, | |
| 2278 "StartRecordingPlayout() fileRecorder format isnot correct"); | |
| 2279 return -1; | |
| 2280 } | |
| 2281 | |
| 2282 if (output_file_recorder_->StartRecordingAudioFile(stream, *codecInst, | |
| 2283 notificationTime) != 0) { | |
| 2284 _engineStatisticsPtr->SetLastError(VE_BAD_FILE, kTraceError, | |
| 2285 "StartRecordingPlayout() failed to " | |
| 2286 "start file recording"); | |
| 2287 output_file_recorder_->StopRecording(); | |
| 2288 output_file_recorder_.reset(); | |
| 2289 return -1; | |
| 2290 } | |
| 2291 | |
| 2292 output_file_recorder_->RegisterModuleFileCallback(this); | |
| 2293 _outputFileRecording = true; | |
| 2294 | |
| 2295 return 0; | |
| 2296 } | |
| 2297 | |
| 2298 int Channel::StopRecordingPlayout() { | |
| 2299 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1), | |
| 2300 "Channel::StopRecordingPlayout()"); | |
| 2301 | |
| 2302 if (!_outputFileRecording) { | |
| 2303 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1), | |
| 2304 "StopRecordingPlayout() isnot recording"); | |
| 2305 return -1; | |
| 2306 } | |
| 2307 | |
| 2308 rtc::CritScope cs(&_fileCritSect); | |
| 2309 | |
| 2310 if (output_file_recorder_->StopRecording() != 0) { | |
| 2311 _engineStatisticsPtr->SetLastError( | |
| 2312 VE_STOP_RECORDING_FAILED, kTraceError, | |
| 2313 "StopRecording() could not stop recording"); | |
| 2314 return (-1); | |
| 2315 } | |
| 2316 output_file_recorder_->RegisterModuleFileCallback(NULL); | |
| 2317 output_file_recorder_.reset(); | |
| 2318 _outputFileRecording = false; | |
| 2319 | |
| 2320 return 0; | |
| 2321 } | |
| 2322 | |
| 2323 void Channel::SetMixWithMicStatus(bool mix) { | |
| 2324 rtc::CritScope cs(&_fileCritSect); | |
| 2325 _mixFileWithMicrophone = mix; | |
| 2326 } | |
| 2327 | |
| 2328 int Channel::GetSpeechOutputLevel() const { | 1724 int Channel::GetSpeechOutputLevel() const { |
| 2329 return _outputAudioLevel.Level(); | 1725 return _outputAudioLevel.Level(); |
| 2330 } | 1726 } |
| 2331 | 1727 |
| 2332 int Channel::GetSpeechOutputLevelFullRange() const { | 1728 int Channel::GetSpeechOutputLevelFullRange() const { |
| 2333 return _outputAudioLevel.LevelFullRange(); | 1729 return _outputAudioLevel.LevelFullRange(); |
| 2334 } | 1730 } |
| 2335 | 1731 |
| 2336 double Channel::GetTotalOutputEnergy() const { | 1732 double Channel::GetTotalOutputEnergy() const { |
| 2337 return _outputAudioLevel.TotalEnergy(); | 1733 return _outputAudioLevel.TotalEnergy(); |
| (...skipping 415 matching lines...) |
| 2753 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( | 2149 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( |
| 2754 new ProcessAndEncodeAudioTask(std::move(audio_frame), this))); | 2150 new ProcessAndEncodeAudioTask(std::move(audio_frame), this))); |
| 2755 } | 2151 } |
| 2756 | 2152 |
| 2757 void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) { | 2153 void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) { |
| 2758 RTC_DCHECK_RUN_ON(encoder_queue_); | 2154 RTC_DCHECK_RUN_ON(encoder_queue_); |
| 2759 RTC_DCHECK_GT(audio_input->samples_per_channel_, 0); | 2155 RTC_DCHECK_GT(audio_input->samples_per_channel_, 0); |
| 2760 RTC_DCHECK_LE(audio_input->num_channels_, 2); | 2156 RTC_DCHECK_LE(audio_input->num_channels_, 2); |
| 2761 RTC_DCHECK_EQ(audio_input->id_, ChannelId()); | 2157 RTC_DCHECK_EQ(audio_input->id_, ChannelId()); |
| 2762 | 2158 |
| 2763 if (channel_state_.Get().input_file_playing) { | |
| 2764 MixOrReplaceAudioWithFile(audio_input); | |
| 2765 } | |
| 2766 | |
| 2767 bool is_muted = InputMute(); | 2159 bool is_muted = InputMute(); |
| 2768 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted); | 2160 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted); |
| 2769 | 2161 |
| 2770 if (_includeAudioLevelIndication) { | 2162 if (_includeAudioLevelIndication) { |
| 2771 size_t length = | 2163 size_t length = |
| 2772 audio_input->samples_per_channel_ * audio_input->num_channels_; | 2164 audio_input->samples_per_channel_ * audio_input->num_channels_; |
| 2773 RTC_CHECK_LE(length, AudioFrame::kMaxDataSizeBytes); | 2165 RTC_CHECK_LE(length, AudioFrame::kMaxDataSizeBytes); |
| 2774 if (is_muted && previous_frame_muted_) { | 2166 if (is_muted && previous_frame_muted_) { |
| 2775 rms_level_.AnalyzeMuted(length); | 2167 rms_level_.AnalyzeMuted(length); |
| 2776 } else { | 2168 } else { |
| (...skipping 119 matching lines...) |
| 2896 return 0; | 2288 return 0; |
| 2897 } | 2289 } |
| 2898 | 2290 |
| 2899 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, | 2291 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, |
| 2900 RtpReceiver** rtp_receiver) const { | 2292 RtpReceiver** rtp_receiver) const { |
| 2901 *rtpRtcpModule = _rtpRtcpModule.get(); | 2293 *rtpRtcpModule = _rtpRtcpModule.get(); |
| 2902 *rtp_receiver = rtp_receiver_.get(); | 2294 *rtp_receiver = rtp_receiver_.get(); |
| 2903 return 0; | 2295 return 0; |
| 2904 } | 2296 } |
| 2905 | 2297 |
| 2906 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use | |
| 2907 // a shared helper. | |
| 2908 int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) { | |
| 2909 RTC_DCHECK_RUN_ON(encoder_queue_); | |
| 2910 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]); | |
| 2911 size_t fileSamples(0); | |
| 2912 const int mixingFrequency = audio_input->sample_rate_hz_; | |
| 2913 { | |
| 2914 rtc::CritScope cs(&_fileCritSect); | |
| 2915 | |
| 2916 if (!input_file_player_) { | |
| 2917 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2918 "Channel::MixOrReplaceAudioWithFile() fileplayer" | |
| 2919 " doesnt exist"); | |
| 2920 return -1; | |
| 2921 } | |
| 2922 | |
| 2923 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples, | |
| 2924 mixingFrequency) == -1) { | |
| 2925 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2926 "Channel::MixOrReplaceAudioWithFile() file mixing " | |
| 2927 "failed"); | |
| 2928 return -1; | |
| 2929 } | |
| 2930 if (fileSamples == 0) { | |
| 2931 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2932 "Channel::MixOrReplaceAudioWithFile() file is ended"); | |
| 2933 return 0; | |
| 2934 } | |
| 2935 } | |
| 2936 | |
| 2937 RTC_DCHECK_EQ(audio_input->samples_per_channel_, fileSamples); | |
| 2938 | |
| 2939 if (_mixFileWithMicrophone) { | |
| 2940 // Currently file stream is always mono. | |
| 2941 // TODO(xians): Change the code when FilePlayer supports real stereo. | |
| 2942 MixWithSat(audio_input->mutable_data(), audio_input->num_channels_, | |
| 2943 fileBuffer.get(), 1, fileSamples); | |
| 2944 } else { | |
| 2945 // Replace ACM audio with file. | |
| 2946 // Currently file stream is always mono. | |
| 2947 // TODO(xians): Change the code when FilePlayer supports real stereo. | |
| 2948 audio_input->UpdateFrame( | |
| 2949 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, | |
| 2950 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); | |
| 2951 } | |
| 2952 return 0; | |
| 2953 } | |
| 2954 | |
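MixOrReplaceAudioWithFile() above either replaces the captured frame with file audio or saturating-adds the (currently mono) file samples into each channel of the frame. A minimal sketch of that saturating mix, assuming an interleaved int16 destination (hypothetical helper, not the MixWithSat used by the code above):

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

// Hypothetical helper: add one mono int16 source into an interleaved
// destination with |num_channels| channels, clamping to the int16 range.
void MixMonoWithSaturation(int16_t* dest,
                           size_t num_channels,
                           const int16_t* mono_src,
                           size_t samples_per_channel) {
  for (size_t i = 0; i < samples_per_channel; ++i) {
    for (size_t ch = 0; ch < num_channels; ++ch) {
      const int32_t sum = static_cast<int32_t>(dest[i * num_channels + ch]) +
                          static_cast<int32_t>(mono_src[i]);
      dest[i * num_channels + ch] = static_cast<int16_t>(
          std::max<int32_t>(-32768, std::min<int32_t>(32767, sum)));
    }
  }
}
```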
| 2955 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) { | |
| 2956 assert(mixingFrequency <= 48000); | |
| 2957 | |
| 2958 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]); | |
| 2959 size_t fileSamples(0); | |
| 2960 | |
| 2961 { | |
| 2962 rtc::CritScope cs(&_fileCritSect); | |
| 2963 | |
| 2964 if (!output_file_player_) { | |
| 2965 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2966 "Channel::MixAudioWithFile() file mixing failed"); | |
| 2967 return -1; | |
| 2968 } | |
| 2969 | |
| 2970 // We should get the frequency we ask for. | |
| 2971 if (output_file_player_->Get10msAudioFromFile( | |
| 2972 fileBuffer.get(), &fileSamples, mixingFrequency) == -1) { | |
| 2973 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2974 "Channel::MixAudioWithFile() file mixing failed"); | |
| 2975 return -1; | |
| 2976 } | |
| 2977 } | |
| 2978 | |
| 2979 if (audioFrame.samples_per_channel_ == fileSamples) { | |
| 2980 // Currently file stream is always mono. | |
| 2981 // TODO(xians): Change the code when FilePlayer supports real stereo. | |
| 2982 MixWithSat(audioFrame.mutable_data(), audioFrame.num_channels_, | |
| 2983 fileBuffer.get(), 1, fileSamples); | |
| 2984 } else { | |
| 2985 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2986 "Channel::MixAudioWithFile() samples_per_channel_(%" PRIuS | |
| 2987 ") != " | |
| 2988 "fileSamples(%" PRIuS ")", | |
| 2989 audioFrame.samples_per_channel_, fileSamples); | |
| 2990 return -1; | |
| 2991 } | |
| 2992 | |
| 2993 return 0; | |
| 2994 } | |
| 2995 | |
| 2996 void Channel::UpdatePlayoutTimestamp(bool rtcp) { | 2298 void Channel::UpdatePlayoutTimestamp(bool rtcp) { |
| 2997 jitter_buffer_playout_timestamp_ = audio_coding_->PlayoutTimestamp(); | 2299 jitter_buffer_playout_timestamp_ = audio_coding_->PlayoutTimestamp(); |
| 2998 | 2300 |
| 2999 if (!jitter_buffer_playout_timestamp_) { | 2301 if (!jitter_buffer_playout_timestamp_) { |
| 3000 // This can happen if this channel has not received any RTP packets. In | 2302 // This can happen if this channel has not received any RTP packets. In |
| 3001 // this case, NetEq is not capable of computing a playout timestamp. | 2303 // this case, NetEq is not capable of computing a playout timestamp. |
| 3002 return; | 2304 return; |
| 3003 } | 2305 } |
| 3004 | 2306 |
| 3005 uint16_t delay_ms = 0; | 2307 uint16_t delay_ms = 0; |
| (...skipping 122 matching lines...) |
| 3128 int64_t min_rtt = 0; | 2430 int64_t min_rtt = 0; |
| 3129 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != | 2431 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != |
| 3130 0) { | 2432 0) { |
| 3131 return 0; | 2433 return 0; |
| 3132 } | 2434 } |
| 3133 return rtt; | 2435 return rtt; |
| 3134 } | 2436 } |
| 3135 | 2437 |
| 3136 } // namespace voe | 2438 } // namespace voe |
| 3137 } // namespace webrtc | 2439 } // namespace webrtc |