| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/voice_engine/channel.h" | 11 #include "webrtc/voice_engine/channel.h" |
| 12 | 12 |
| 13 #include <algorithm> | 13 #include <algorithm> |
| 14 #include <utility> | 14 #include <utility> |
| 15 | 15 |
| 16 #include "webrtc/audio/utility/audio_frame_operations.h" | 16 #include "webrtc/audio/utility/audio_frame_operations.h" |
| 17 #include "webrtc/base/array_view.h" | 17 #include "webrtc/base/array_view.h" |
| 18 #include "webrtc/base/checks.h" | 18 #include "webrtc/base/checks.h" |
| 19 #include "webrtc/base/criticalsection.h" | 19 #include "webrtc/base/criticalsection.h" |
| 20 #include "webrtc/base/format_macros.h" | 20 #include "webrtc/base/format_macros.h" |
| 21 #include "webrtc/base/location.h" | 21 #include "webrtc/base/location.h" |
| 22 #include "webrtc/base/logging.h" | 22 #include "webrtc/base/logging.h" |
| 23 #include "webrtc/base/rate_limiter.h" | 23 #include "webrtc/base/rate_limiter.h" |
| 24 #include "webrtc/base/task_queue.h" |
| 25 #include "webrtc/base/thread_checker.h" |
| 24 #include "webrtc/base/timeutils.h" | 26 #include "webrtc/base/timeutils.h" |
| 25 #include "webrtc/call/rtp_transport_controller_send.h" | 27 #include "webrtc/call/rtp_transport_controller_send.h" |
| 26 #include "webrtc/config.h" | 28 #include "webrtc/config.h" |
| 27 #include "webrtc/logging/rtc_event_log/rtc_event_log.h" | 29 #include "webrtc/logging/rtc_event_log/rtc_event_log.h" |
| 28 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h" | 30 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h" |
| 29 #include "webrtc/modules/audio_device/include/audio_device.h" | 31 #include "webrtc/modules/audio_device/include/audio_device.h" |
| 30 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 32 #include "webrtc/modules/audio_processing/include/audio_processing.h" |
| 31 #include "webrtc/modules/include/module_common_types.h" | 33 #include "webrtc/modules/include/module_common_types.h" |
| 32 #include "webrtc/modules/pacing/packet_router.h" | 34 #include "webrtc/modules/pacing/packet_router.h" |
| 33 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h" | 35 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h" |
| (...skipping 368 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 402 } | 404 } |
| 403 | 405 |
| 404 private: | 406 private: |
| 405 Channel* owner_; | 407 Channel* owner_; |
| 406 // Maps remote side ssrc to extended highest sequence number received. | 408 // Maps remote side ssrc to extended highest sequence number received. |
| 407 std::map<uint32_t, uint32_t> extended_max_sequence_number_; | 409 std::map<uint32_t, uint32_t> extended_max_sequence_number_; |
| 408 rtc::CriticalSection crit_; | 410 rtc::CriticalSection crit_; |
| 409 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_); | 411 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_); |
| 410 }; | 412 }; |
| 411 | 413 |
| 414 class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask { |
| 415 public: |
| 416 ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_frame, |
| 417 Channel* channel) |
| 418 : audio_frame_(std::move(audio_frame)), channel_(channel) { |
| 419 RTC_DCHECK(channel_); |
| 420 } |
| 421 |
| 422 private: |
| 423 bool Run() override { |
| 424 RTC_DCHECK_RUN_ON(channel_->encoder_queue_); |
| 425 channel_->ProcessAndEncodeAudioOnTaskQueue(audio_frame_.get()); |
| 426 return true; |
| 427 } |
| 428 |
| 429 std::unique_ptr<AudioFrame> audio_frame_; |
| 430 Channel* const channel_; |
| 431 }; |
| 432 |
| 412 int32_t Channel::SendData(FrameType frameType, | 433 int32_t Channel::SendData(FrameType frameType, |
| 413 uint8_t payloadType, | 434 uint8_t payloadType, |
| 414 uint32_t timeStamp, | 435 uint32_t timeStamp, |
| 415 const uint8_t* payloadData, | 436 const uint8_t* payloadData, |
| 416 size_t payloadSize, | 437 size_t payloadSize, |
| 417 const RTPFragmentationHeader* fragmentation) { | 438 const RTPFragmentationHeader* fragmentation) { |
| 439 RTC_DCHECK_RUN_ON(encoder_queue_); |
| 418 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | 440 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), |
| 419 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u," | 441 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u," |
| 420 " payloadSize=%" PRIuS ", fragmentation=0x%x)", | 442 " payloadSize=%" PRIuS ", fragmentation=0x%x)", |
| 421 frameType, payloadType, timeStamp, payloadSize, fragmentation); | 443 frameType, payloadType, timeStamp, payloadSize, fragmentation); |
| 422 | 444 |
| 423 if (_includeAudioLevelIndication) { | 445 if (_includeAudioLevelIndication) { |
| 424 // Store current audio level in the RTP/RTCP module. | 446 // Store current audio level in the RTP/RTCP module. |
| 425 // The level will be used in combination with voice-activity state | 447 // The level will be used in combination with voice-activity state |
| 426 // (frameType) to add an RTP header extension | 448 // (frameType) to add an RTP header extension |
| 427 _rtpRtcpModule->SetAudioLevel(rms_level_.Average()); | 449 _rtpRtcpModule->SetAudioLevel(rms_level_.Average()); |
| 428 } | 450 } |
| 429 | 451 |
| 430 // Push data from ACM to RTP/RTCP-module to deliver audio frame for | 452 // Push data from ACM to RTP/RTCP-module to deliver audio frame for |
| 431 // packetization. | 453 // packetization. |
| 432 // This call will trigger Transport::SendPacket() from the RTP/RTCP module. | 454 // This call will trigger Transport::SendPacket() from the RTP/RTCP module. |
| 433 if (!_rtpRtcpModule->SendOutgoingData( | 455 if (!_rtpRtcpModule->SendOutgoingData( |
| 434 (FrameType&)frameType, payloadType, timeStamp, | 456 (FrameType&)frameType, payloadType, timeStamp, |
| 435 // Leaving the time when this frame was | 457 // Leaving the time when this frame was |
| 436 // received from the capture device as | 458 // received from the capture device as |
| 437 // undefined for voice for now. | 459 // undefined for voice for now. |
| 438 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) { | 460 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) { |
| 439 _engineStatisticsPtr->SetLastError( | 461 _engineStatisticsPtr->SetLastError( |
| 440 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, | 462 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, |
| 441 "Channel::SendData() failed to send data to RTP/RTCP module"); | 463 "Channel::SendData() failed to send data to RTP/RTCP module"); |
| 442 return -1; | 464 return -1; |
| 443 } | 465 } |
| 444 | 466 |
| 445 _lastLocalTimeStamp = timeStamp; | |
| 446 _lastPayloadType = payloadType; | |
| 447 | |
| 448 return 0; | 467 return 0; |
| 449 } | 468 } |
| 450 | 469 |
| 451 bool Channel::SendRtp(const uint8_t* data, | 470 bool Channel::SendRtp(const uint8_t* data, |
| 452 size_t len, | 471 size_t len, |
| 453 const PacketOptions& options) { | 472 const PacketOptions& options) { |
| 454 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | 473 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), |
| 455 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len); | 474 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len); |
| 456 | 475 |
| 457 rtc::CritScope cs(&_callbackCritSect); | 476 rtc::CritScope cs(&_callbackCritSect); |
| (...skipping 314 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 772 if (output_file_player_) { | 791 if (output_file_player_) { |
| 773 if (output_file_player_->Frequency() > highestNeeded) { | 792 if (output_file_player_->Frequency() > highestNeeded) { |
| 774 highestNeeded = output_file_player_->Frequency(); | 793 highestNeeded = output_file_player_->Frequency(); |
| 775 } | 794 } |
| 776 } | 795 } |
| 777 } | 796 } |
| 778 | 797 |
| 779 return (highestNeeded); | 798 return (highestNeeded); |
| 780 } | 799 } |
| 781 | 800 |
| 782 int32_t Channel::CreateChannel( | 801 int32_t Channel::CreateChannel(Channel*& channel, |
| 783 Channel*& channel, | 802 int32_t channelId, |
| 784 int32_t channelId, | 803 uint32_t instanceId, |
| 785 uint32_t instanceId, | 804 const VoEBase::ChannelConfig& config) { |
| 786 const VoEBase::ChannelConfig& config) { | |
| 787 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), | 805 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), |
| 788 "Channel::CreateChannel(channelId=%d, instanceId=%d)", channelId, | 806 "Channel::CreateChannel(channelId=%d, instanceId=%d)", channelId, |
| 789 instanceId); | 807 instanceId); |
| 790 | 808 |
| 791 channel = new Channel(channelId, instanceId, config); | 809 channel = new Channel(channelId, instanceId, config); |
| 792 if (channel == NULL) { | 810 if (channel == NULL) { |
| 793 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), | 811 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), |
| 794 "Channel::CreateChannel() unable to allocate memory for" | 812 "Channel::CreateChannel() unable to allocate memory for" |
| 795 " channel"); | 813 " channel"); |
| 796 return -1; | 814 return -1; |
| (...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 883 _outputMixerPtr(NULL), | 901 _outputMixerPtr(NULL), |
| 884 _moduleProcessThreadPtr(NULL), | 902 _moduleProcessThreadPtr(NULL), |
| 885 _audioDeviceModulePtr(NULL), | 903 _audioDeviceModulePtr(NULL), |
| 886 _voiceEngineObserverPtr(NULL), | 904 _voiceEngineObserverPtr(NULL), |
| 887 _callbackCritSectPtr(NULL), | 905 _callbackCritSectPtr(NULL), |
| 888 _transportPtr(NULL), | 906 _transportPtr(NULL), |
| 889 input_mute_(false), | 907 input_mute_(false), |
| 890 previous_frame_muted_(false), | 908 previous_frame_muted_(false), |
| 891 _outputGain(1.0f), | 909 _outputGain(1.0f), |
| 892 _mixFileWithMicrophone(false), | 910 _mixFileWithMicrophone(false), |
| 893 _lastLocalTimeStamp(0), | |
| 894 _lastPayloadType(0), | |
| 895 _includeAudioLevelIndication(false), | 911 _includeAudioLevelIndication(false), |
| 896 transport_overhead_per_packet_(0), | 912 transport_overhead_per_packet_(0), |
| 897 rtp_overhead_per_packet_(0), | 913 rtp_overhead_per_packet_(0), |
| 898 _outputSpeechType(AudioFrame::kNormalSpeech), | 914 _outputSpeechType(AudioFrame::kNormalSpeech), |
| 899 restored_packet_in_use_(false), | 915 restored_packet_in_use_(false), |
| 900 rtcp_observer_(new VoERtcpObserver(this)), | 916 rtcp_observer_(new VoERtcpObserver(this)), |
| 901 associate_send_channel_(ChannelOwner(nullptr)), | 917 associate_send_channel_(ChannelOwner(nullptr)), |
| 902 pacing_enabled_(config.enable_voice_pacing), | 918 pacing_enabled_(config.enable_voice_pacing), |
| 903 feedback_observer_proxy_(new TransportFeedbackProxy()), | 919 feedback_observer_proxy_(new TransportFeedbackProxy()), |
| 904 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), | 920 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), |
| (...skipping 213 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1118 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()); | 1134 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()); |
| 1119 | 1135 |
| 1120 // End of modules shutdown | 1136 // End of modules shutdown |
| 1121 } | 1137 } |
| 1122 | 1138 |
| 1123 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, | 1139 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, |
| 1124 OutputMixer& outputMixer, | 1140 OutputMixer& outputMixer, |
| 1125 ProcessThread& moduleProcessThread, | 1141 ProcessThread& moduleProcessThread, |
| 1126 AudioDeviceModule& audioDeviceModule, | 1142 AudioDeviceModule& audioDeviceModule, |
| 1127 VoiceEngineObserver* voiceEngineObserver, | 1143 VoiceEngineObserver* voiceEngineObserver, |
| 1128 rtc::CriticalSection* callbackCritSect) { | 1144 rtc::CriticalSection* callbackCritSect, |
| 1145 rtc::TaskQueue* encoder_queue) { |
| 1146 RTC_DCHECK(encoder_queue); |
| 1147 RTC_DCHECK(!encoder_queue_); |
| 1129 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | 1148 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), |
| 1130 "Channel::SetEngineInformation()"); | 1149 "Channel::SetEngineInformation()"); |
| 1131 _engineStatisticsPtr = &engineStatistics; | 1150 _engineStatisticsPtr = &engineStatistics; |
| 1132 _outputMixerPtr = &outputMixer; | 1151 _outputMixerPtr = &outputMixer; |
| 1133 _moduleProcessThreadPtr = &moduleProcessThread; | 1152 _moduleProcessThreadPtr = &moduleProcessThread; |
| 1134 _audioDeviceModulePtr = &audioDeviceModule; | 1153 _audioDeviceModulePtr = &audioDeviceModule; |
| 1135 _voiceEngineObserverPtr = voiceEngineObserver; | 1154 _voiceEngineObserverPtr = voiceEngineObserver; |
| 1136 _callbackCritSectPtr = callbackCritSect; | 1155 _callbackCritSectPtr = callbackCritSect; |
| 1156 encoder_queue_ = encoder_queue; |
| 1137 return 0; | 1157 return 0; |
| 1138 } | 1158 } |
| 1139 | 1159 |
| 1140 int32_t Channel::UpdateLocalTimeStamp() { | |
| 1141 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_); | |
| 1142 return 0; | |
| 1143 } | |
| 1144 | |
| 1145 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) { | 1160 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) { |
| 1146 rtc::CritScope cs(&_callbackCritSect); | 1161 rtc::CritScope cs(&_callbackCritSect); |
| 1147 audio_sink_ = std::move(sink); | 1162 audio_sink_ = std::move(sink); |
| 1148 } | 1163 } |
| 1149 | 1164 |
| 1150 const rtc::scoped_refptr<AudioDecoderFactory>& | 1165 const rtc::scoped_refptr<AudioDecoderFactory>& |
| 1151 Channel::GetAudioDecoderFactory() const { | 1166 Channel::GetAudioDecoderFactory() const { |
| 1152 return decoder_factory_; | 1167 return decoder_factory_; |
| 1153 } | 1168 } |
| 1154 | 1169 |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1215 "StartSend() RTP/RTCP failed to start sending"); | 1230 "StartSend() RTP/RTCP failed to start sending"); |
| 1216 _rtpRtcpModule->SetSendingMediaStatus(false); | 1231 _rtpRtcpModule->SetSendingMediaStatus(false); |
| 1217 rtc::CritScope cs(&_callbackCritSect); | 1232 rtc::CritScope cs(&_callbackCritSect); |
| 1218 channel_state_.SetSending(false); | 1233 channel_state_.SetSending(false); |
| 1219 return -1; | 1234 return -1; |
| 1220 } | 1235 } |
| 1221 | 1236 |
| 1222 return 0; | 1237 return 0; |
| 1223 } | 1238 } |
| 1224 | 1239 |
| 1225 int32_t Channel::StopSend() { | 1240 void Channel::StopSend() { |
| 1226 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | 1241 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), |
| 1227 "Channel::StopSend()"); | 1242 "Channel::StopSend()"); |
| 1228 if (!channel_state_.Get().sending) { | 1243 if (!channel_state_.Get().sending) { |
| 1229 return 0; | 1244 return; |
| 1230 } | 1245 } |
| 1231 channel_state_.SetSending(false); | 1246 channel_state_.SetSending(false); |
| 1232 | 1247 |
| 1248 // Post a task to the encoder thread which sets an event when the task is |
| 1249 // executed. We know that no more encoding tasks will be added to the task |
| 1250 // queue for this channel since sending is now deactivated. It means that, |
| 1251 // if we wait for the event to be set, we know that no more pending tasks |
| 1252 // exist and it is therefore guaranteed that the task queue will never try |
| 1253 // to access an invalid channel object. |
| 1254 RTC_DCHECK(encoder_queue_); |
| 1255 rtc::Event flush(false, false); |
| 1256 encoder_queue_->PostTask([&flush]() { flush.Set(); }); |
| 1257 flush.Wait(rtc::Event::kForever); |
| 1258 |
| 1233 // Store the sequence number to be able to pick up the same sequence for | 1259 // Store the sequence number to be able to pick up the same sequence for |
| 1234 // the next StartSend(). This is needed for restarting device, otherwise | 1260 // the next StartSend(). This is needed for restarting device, otherwise |
| 1235 // it might cause libSRTP to complain about packets being replayed. | 1261 // it might cause libSRTP to complain about packets being replayed. |
| 1236 // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring | 1262 // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring |
| 1237 // CL is landed. See issue | 1263 // CL is landed. See issue |
| 1238 // https://code.google.com/p/webrtc/issues/detail?id=2111 . | 1264 // https://code.google.com/p/webrtc/issues/detail?id=2111 . |
| 1239 send_sequence_number_ = _rtpRtcpModule->SequenceNumber(); | 1265 send_sequence_number_ = _rtpRtcpModule->SequenceNumber(); |
| 1240 | 1266 |
| 1241 // Reset sending SSRC and sequence number and triggers direct transmission | 1267 // Reset sending SSRC and sequence number and triggers direct transmission |
| 1242 // of RTCP BYE | 1268 // of RTCP BYE |
| 1243 if (_rtpRtcpModule->SetSendingStatus(false) == -1) { | 1269 if (_rtpRtcpModule->SetSendingStatus(false) == -1) { |
| 1244 _engineStatisticsPtr->SetLastError( | 1270 _engineStatisticsPtr->SetLastError( |
| 1245 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, | 1271 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, |
| 1246 "StartSend() RTP/RTCP failed to stop sending"); | 1272 "StartSend() RTP/RTCP failed to stop sending"); |
| 1247 } | 1273 } |
| 1248 _rtpRtcpModule->SetSendingMediaStatus(false); | 1274 _rtpRtcpModule->SetSendingMediaStatus(false); |
| 1249 | |
| 1250 return 0; | |
| 1251 } | 1275 } |
| 1252 | 1276 |
| 1253 int32_t Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) { | 1277 int32_t Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) { |
| 1254 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | 1278 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), |
| 1255 "Channel::RegisterVoiceEngineObserver()"); | 1279 "Channel::RegisterVoiceEngineObserver()"); |
| 1256 rtc::CritScope cs(&_callbackCritSect); | 1280 rtc::CritScope cs(&_callbackCritSect); |
| 1257 | 1281 |
| 1258 if (_voiceEngineObserverPtr) { | 1282 if (_voiceEngineObserverPtr) { |
| 1259 _engineStatisticsPtr->SetLastError( | 1283 _engineStatisticsPtr->SetLastError( |
| 1260 VE_INVALID_OPERATION, kTraceError, | 1284 VE_INVALID_OPERATION, kTraceError, |
| (...skipping 1380 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2641 audio_coding_->EnableNack(maxNumberOfPackets); | 2665 audio_coding_->EnableNack(maxNumberOfPackets); |
| 2642 else | 2666 else |
| 2643 audio_coding_->DisableNack(); | 2667 audio_coding_->DisableNack(); |
| 2644 } | 2668 } |
| 2645 | 2669 |
| 2646 // Called when we are missing one or more packets. | 2670 // Called when we are missing one or more packets. |
| 2647 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) { | 2671 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) { |
| 2648 return _rtpRtcpModule->SendNACK(sequence_numbers, length); | 2672 return _rtpRtcpModule->SendNACK(sequence_numbers, length); |
| 2649 } | 2673 } |
| 2650 | 2674 |
| 2651 uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) { | 2675 void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) { |
| 2652 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | 2676 RTC_DCHECK(channel_state_.Get().sending); |
| 2653 "Channel::Demultiplex()"); | 2677 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame()); |
| 2654 _audioFrame.CopyFrom(audioFrame); | 2678 // TODO(henrika): try to avoid copying by moving ownership of audio frame |
| 2655 _audioFrame.id_ = _channelId; | 2679 // either into pool of frames or into the task itself. |
| 2656 return 0; | 2680 audio_frame->CopyFrom(audio_input); |
| 2681 audio_frame->id_ = ChannelId(); |
| 2682 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( |
| 2683 new ProcessAndEncodeAudioTask(std::move(audio_frame), this))); |
| 2657 } | 2684 } |
| 2658 | 2685 |
| 2659 void Channel::Demultiplex(const int16_t* audio_data, | 2686 void Channel::ProcessAndEncodeAudio(const int16_t* audio_data, |
| 2660 int sample_rate, | 2687 int sample_rate, |
| 2661 size_t number_of_frames, | 2688 size_t number_of_frames, |
| 2662 size_t number_of_channels) { | 2689 size_t number_of_channels) { |
| 2690 RTC_DCHECK(channel_state_.Get().sending); |
| 2663 CodecInst codec; | 2691 CodecInst codec; |
| 2664 GetSendCodec(codec); | 2692 GetSendCodec(codec); |
| 2665 | 2693 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame()); |
| 2666 // Never upsample or upmix the capture signal here. This should be done at the | 2694 audio_frame->id_ = ChannelId(); |
| 2667 // end of the send chain. | 2695 audio_frame->sample_rate_hz_ = std::min(codec.plfreq, sample_rate); |
| 2668 _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate); | 2696 audio_frame->num_channels_ = std::min(number_of_channels, codec.channels); |
| 2669 _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels); | |
| 2670 RemixAndResample(audio_data, number_of_frames, number_of_channels, | 2697 RemixAndResample(audio_data, number_of_frames, number_of_channels, |
| 2671 sample_rate, &input_resampler_, &_audioFrame); | 2698 sample_rate, &input_resampler_, audio_frame.get()); |
| 2699 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( |
| 2700 new ProcessAndEncodeAudioTask(std::move(audio_frame), this))); |
| 2672 } | 2701 } |
| 2673 | 2702 |
| 2674 uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) { | 2703 void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) { |
| 2675 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | 2704 RTC_DCHECK_RUN_ON(encoder_queue_); |
| 2676 "Channel::PrepareEncodeAndSend()"); | 2705 RTC_DCHECK_GT(audio_input->samples_per_channel_, 0); |
| 2706 RTC_DCHECK_LE(audio_input->num_channels_, 2); |
| 2707 RTC_DCHECK_EQ(audio_input->id_, ChannelId()); |
| 2677 | 2708 |
| 2678 if (_audioFrame.samples_per_channel_ == 0) { | 2709 if (channel_state_.Get().input_file_playing) { |
| 2679 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | 2710 MixOrReplaceAudioWithFile(audio_input); |
| 2680 "Channel::PrepareEncodeAndSend() invalid audio frame"); | |
| 2681 return 0xFFFFFFFF; | |
| 2682 } | 2711 } |
| 2683 | 2712 |
| 2684 if (channel_state_.Get().input_file_playing) { | 2713 bool is_muted = InputMute(); |
| 2685 MixOrReplaceAudioWithFile(mixingFrequency); | 2714 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted); |
| 2686 } | |
| 2687 | |
| 2688 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock. | |
| 2689 AudioFrameOperations::Mute(&_audioFrame, previous_frame_muted_, is_muted); | |
| 2690 | 2715 |
| 2691 if (_includeAudioLevelIndication) { | 2716 if (_includeAudioLevelIndication) { |
| 2692 size_t length = | 2717 size_t length = |
| 2693 _audioFrame.samples_per_channel_ * _audioFrame.num_channels_; | 2718 audio_input->samples_per_channel_ * audio_input->num_channels_; |
| 2694 RTC_CHECK_LE(length, sizeof(_audioFrame.data_)); | 2719 RTC_CHECK_LE(length, sizeof(audio_input->data_)); |
| 2695 if (is_muted && previous_frame_muted_) { | 2720 if (is_muted && previous_frame_muted_) { |
| 2696 rms_level_.AnalyzeMuted(length); | 2721 rms_level_.AnalyzeMuted(length); |
| 2697 } else { | 2722 } else { |
| 2698 rms_level_.Analyze( | 2723 rms_level_.Analyze( |
| 2699 rtc::ArrayView<const int16_t>(_audioFrame.data_, length)); | 2724 rtc::ArrayView<const int16_t>(audio_input->data_, length)); |
| 2700 } | 2725 } |
| 2701 } | 2726 } |
| 2702 previous_frame_muted_ = is_muted; | 2727 previous_frame_muted_ = is_muted; |
| 2703 | 2728 |
| 2704 return 0; | 2729 // Add 10ms of raw (PCM) audio data to the encoder @ 32kHz. |
| 2705 } | |
| 2706 | |
| 2707 uint32_t Channel::EncodeAndSend() { | |
| 2708 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2709 "Channel::EncodeAndSend()"); | |
| 2710 | |
| 2711 assert(_audioFrame.num_channels_ <= 2); | |
| 2712 if (_audioFrame.samples_per_channel_ == 0) { | |
| 2713 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
| 2714 "Channel::EncodeAndSend() invalid audio frame"); | |
| 2715 return 0xFFFFFFFF; | |
| 2716 } | |
| 2717 | |
| 2718 _audioFrame.id_ = _channelId; | |
| 2719 | |
| 2720 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz. | |
| 2721 | 2730 |
| 2722 // The ACM resamples internally. | 2731 // The ACM resamples internally. |
| 2723 _audioFrame.timestamp_ = _timeStamp; | 2732 audio_input->timestamp_ = _timeStamp; |
| 2724 // This call will trigger AudioPacketizationCallback::SendData if encoding | 2733 // This call will trigger AudioPacketizationCallback::SendData if encoding |
| 2725 // is done and payload is ready for packetization and transmission. | 2734 // is done and payload is ready for packetization and transmission. |
| 2726 // Otherwise, it will return without invoking the callback. | 2735 // Otherwise, it will return without invoking the callback. |
| 2727 if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) { | 2736 if (audio_coding_->Add10MsData(*audio_input) < 0) { |
| 2728 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId), | 2737 LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId; |
| 2729 "Channel::EncodeAndSend() ACM encoding failed"); | 2738 return; |
| 2730 return 0xFFFFFFFF; | |
| 2731 } | 2739 } |
| 2732 | 2740 |
| 2733 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_); | 2741 _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_); |
| 2734 return 0; | |
| 2735 } | 2742 } |
| 2736 | 2743 |
| 2737 void Channel::set_associate_send_channel(const ChannelOwner& channel) { | 2744 void Channel::set_associate_send_channel(const ChannelOwner& channel) { |
| 2738 RTC_DCHECK(!channel.channel() || | 2745 RTC_DCHECK(!channel.channel() || |
| 2739 channel.channel()->ChannelId() != _channelId); | 2746 channel.channel()->ChannelId() != _channelId); |
| 2740 rtc::CritScope lock(&assoc_send_channel_lock_); | 2747 rtc::CritScope lock(&assoc_send_channel_lock_); |
| 2741 associate_send_channel_ = channel; | 2748 associate_send_channel_ = channel; |
| 2742 } | 2749 } |
| 2743 | 2750 |
| 2744 void Channel::DisassociateSendChannel(int channel_id) { | 2751 void Channel::DisassociateSendChannel(int channel_id) { |
| (...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2833 | 2840 |
| 2834 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, | 2841 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, |
| 2835 RtpReceiver** rtp_receiver) const { | 2842 RtpReceiver** rtp_receiver) const { |
| 2836 *rtpRtcpModule = _rtpRtcpModule.get(); | 2843 *rtpRtcpModule = _rtpRtcpModule.get(); |
| 2837 *rtp_receiver = rtp_receiver_.get(); | 2844 *rtp_receiver = rtp_receiver_.get(); |
| 2838 return 0; | 2845 return 0; |
| 2839 } | 2846 } |
| 2840 | 2847 |
| 2841 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use | 2848 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use |
| 2842 // a shared helper. | 2849 // a shared helper. |
| 2843 int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) { | 2850 int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) { |
| 2851 RTC_DCHECK_RUN_ON(encoder_queue_); |
| 2844 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]); | 2852 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]); |
| 2845 size_t fileSamples(0); | 2853 size_t fileSamples(0); |
| 2846 | 2854 const int mixingFrequency = audio_input->sample_rate_hz_; |
| 2847 { | 2855 { |
| 2848 rtc::CritScope cs(&_fileCritSect); | 2856 rtc::CritScope cs(&_fileCritSect); |
| 2849 | 2857 |
| 2850 if (!input_file_player_) { | 2858 if (!input_file_player_) { |
| 2851 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | 2859 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), |
| 2852 "Channel::MixOrReplaceAudioWithFile() fileplayer" | 2860 "Channel::MixOrReplaceAudioWithFile() fileplayer" |
| 2853 " doesnt exist"); | 2861 " doesnt exist"); |
| 2854 return -1; | 2862 return -1; |
| 2855 } | 2863 } |
| 2856 | 2864 |
| 2857 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples, | 2865 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples, |
| 2858 mixingFrequency) == -1) { | 2866 mixingFrequency) == -1) { |
| 2859 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | 2867 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), |
| 2860 "Channel::MixOrReplaceAudioWithFile() file mixing " | 2868 "Channel::MixOrReplaceAudioWithFile() file mixing " |
| 2861 "failed"); | 2869 "failed"); |
| 2862 return -1; | 2870 return -1; |
| 2863 } | 2871 } |
| 2864 if (fileSamples == 0) { | 2872 if (fileSamples == 0) { |
| 2865 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | 2873 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), |
| 2866 "Channel::MixOrReplaceAudioWithFile() file is ended"); | 2874 "Channel::MixOrReplaceAudioWithFile() file is ended"); |
| 2867 return 0; | 2875 return 0; |
| 2868 } | 2876 } |
| 2869 } | 2877 } |
| 2870 | 2878 |
| 2871 assert(_audioFrame.samples_per_channel_ == fileSamples); | 2879 RTC_DCHECK_EQ(audio_input->samples_per_channel_, fileSamples); |
| 2872 | 2880 |
| 2873 if (_mixFileWithMicrophone) { | 2881 if (_mixFileWithMicrophone) { |
| 2874 // Currently file stream is always mono. | 2882 // Currently file stream is always mono. |
| 2875 // TODO(xians): Change the code when FilePlayer supports real stereo. | 2883 // TODO(xians): Change the code when FilePlayer supports real stereo. |
| 2876 MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(), | 2884 MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(), |
| 2877 1, fileSamples); | 2885 1, fileSamples); |
| 2878 } else { | 2886 } else { |
| 2879 // Replace ACM audio with file. | 2887 // Replace ACM audio with file. |
| 2880 // Currently file stream is always mono. | 2888 // Currently file stream is always mono. |
| 2881 // TODO(xians): Change the code when FilePlayer supports real stereo. | 2889 // TODO(xians): Change the code when FilePlayer supports real stereo. |
| 2882 _audioFrame.UpdateFrame( | 2890 audio_input->UpdateFrame( |
| 2883 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, | 2891 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, |
| 2884 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); | 2892 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); |
| 2885 } | 2893 } |
| 2886 return 0; | 2894 return 0; |
| 2887 } | 2895 } |
| 2888 | 2896 |
| 2889 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) { | 2897 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) { |
| 2890 assert(mixingFrequency <= 48000); | 2898 assert(mixingFrequency <= 48000); |
| 2891 | 2899 |
| 2892 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]); | 2900 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]); |
| (...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3062 int64_t min_rtt = 0; | 3070 int64_t min_rtt = 0; |
| 3063 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != | 3071 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != |
| 3064 0) { | 3072 0) { |
| 3065 return 0; | 3073 return 0; |
| 3066 } | 3074 } |
| 3067 return rtt; | 3075 return rtt; |
| 3068 } | 3076 } |
| 3069 | 3077 |
| 3070 } // namespace voe | 3078 } // namespace voe |
| 3071 } // namespace webrtc | 3079 } // namespace webrtc |
| OLD | NEW |