OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/voice_engine/channel.h" | 11 #include "webrtc/voice_engine/channel.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <utility> | 14 #include <utility> |
15 | 15 |
16 #include "webrtc/audio/utility/audio_frame_operations.h" | 16 #include "webrtc/audio/utility/audio_frame_operations.h" |
17 #include "webrtc/base/array_view.h" | 17 #include "webrtc/base/array_view.h" |
18 #include "webrtc/base/checks.h" | 18 #include "webrtc/base/checks.h" |
19 #include "webrtc/base/criticalsection.h" | 19 #include "webrtc/base/criticalsection.h" |
20 #include "webrtc/base/format_macros.h" | 20 #include "webrtc/base/format_macros.h" |
21 #include "webrtc/base/location.h" | 21 #include "webrtc/base/location.h" |
22 #include "webrtc/base/logging.h" | 22 #include "webrtc/base/logging.h" |
23 #include "webrtc/base/rate_limiter.h" | 23 #include "webrtc/base/rate_limiter.h" |
24 #include "webrtc/base/task_queue.h" | |
25 #include "webrtc/base/thread_checker.h" | |
24 #include "webrtc/base/timeutils.h" | 26 #include "webrtc/base/timeutils.h" |
25 #include "webrtc/call/rtp_transport_controller_send.h" | 27 #include "webrtc/call/rtp_transport_controller_send.h" |
26 #include "webrtc/config.h" | 28 #include "webrtc/config.h" |
27 #include "webrtc/logging/rtc_event_log/rtc_event_log.h" | 29 #include "webrtc/logging/rtc_event_log/rtc_event_log.h" |
28 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h" | 30 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h" |
29 #include "webrtc/modules/audio_device/include/audio_device.h" | 31 #include "webrtc/modules/audio_device/include/audio_device.h" |
30 #include "webrtc/modules/audio_processing/include/audio_processing.h" | 32 #include "webrtc/modules/audio_processing/include/audio_processing.h" |
31 #include "webrtc/modules/include/module_common_types.h" | 33 #include "webrtc/modules/include/module_common_types.h" |
32 #include "webrtc/modules/pacing/packet_router.h" | 34 #include "webrtc/modules/pacing/packet_router.h" |
33 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h" | 35 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h" |
(...skipping 368 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
402 } | 404 } |
403 | 405 |
404 private: | 406 private: |
405 Channel* owner_; | 407 Channel* owner_; |
406 // Maps remote side ssrc to extended highest sequence number received. | 408 // Maps remote side ssrc to extended highest sequence number received. |
407 std::map<uint32_t, uint32_t> extended_max_sequence_number_; | 409 std::map<uint32_t, uint32_t> extended_max_sequence_number_; |
408 rtc::CriticalSection crit_; | 410 rtc::CriticalSection crit_; |
409 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_); | 411 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_); |
410 }; | 412 }; |
411 | 413 |
414 class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask { | |
415 public: | |
416 ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_frame, | |
417 Channel* channel) | |
418 : audio_frame_(std::move(audio_frame)), channel_(channel) {} | |
the sun
2017/03/30 10:13:32
super nit: you should DCHECK(channel) here instead
henrika_webrtc
2017/03/30 11:16:31
Done.
| |
419 | |
420 private: | |
421 bool Run() override { | |
422 RTC_DCHECK_RUN_ON(channel_->encoder_queue_); | |
423 RTC_DCHECK(channel_); | |
424 channel_->ProcessAndEncodeAudioOnTaskQueue(audio_frame_.get()); | |
425 return true; | |
426 } | |
427 | |
428 std::unique_ptr<AudioFrame> audio_frame_; | |
429 Channel* const channel_; | |
430 }; | |
431 | |
412 int32_t Channel::SendData(FrameType frameType, | 432 int32_t Channel::SendData(FrameType frameType, |
413 uint8_t payloadType, | 433 uint8_t payloadType, |
414 uint32_t timeStamp, | 434 uint32_t timeStamp, |
415 const uint8_t* payloadData, | 435 const uint8_t* payloadData, |
416 size_t payloadSize, | 436 size_t payloadSize, |
417 const RTPFragmentationHeader* fragmentation) { | 437 const RTPFragmentationHeader* fragmentation) { |
438 RTC_DCHECK_RUN_ON(encoder_queue_); | |
418 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | 439 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), |
419 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u," | 440 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u," |
420 " payloadSize=%" PRIuS ", fragmentation=0x%x)", | 441 " payloadSize=%" PRIuS ", fragmentation=0x%x)", |
421 frameType, payloadType, timeStamp, payloadSize, fragmentation); | 442 frameType, payloadType, timeStamp, payloadSize, fragmentation); |
422 | 443 |
423 if (_includeAudioLevelIndication) { | 444 if (_includeAudioLevelIndication) { |
424 // Store current audio level in the RTP/RTCP module. | 445 // Store current audio level in the RTP/RTCP module. |
425 // The level will be used in combination with voice-activity state | 446 // The level will be used in combination with voice-activity state |
426 // (frameType) to add an RTP header extension | 447 // (frameType) to add an RTP header extension |
427 _rtpRtcpModule->SetAudioLevel(rms_level_.Average()); | 448 _rtpRtcpModule->SetAudioLevel(rms_level_.Average()); |
428 } | 449 } |
429 | 450 |
430 // Push data from ACM to RTP/RTCP-module to deliver audio frame for | 451 // Push data from ACM to RTP/RTCP-module to deliver audio frame for |
431 // packetization. | 452 // packetization. |
432 // This call will trigger Transport::SendPacket() from the RTP/RTCP module. | 453 // This call will trigger Transport::SendPacket() from the RTP/RTCP module. |
433 if (!_rtpRtcpModule->SendOutgoingData( | 454 if (!_rtpRtcpModule->SendOutgoingData( |
434 (FrameType&)frameType, payloadType, timeStamp, | 455 (FrameType&)frameType, payloadType, timeStamp, |
435 // Leaving the time when this frame was | 456 // Leaving the time when this frame was |
436 // received from the capture device as | 457 // received from the capture device as |
437 // undefined for voice for now. | 458 // undefined for voice for now. |
438 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) { | 459 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) { |
439 _engineStatisticsPtr->SetLastError( | 460 _engineStatisticsPtr->SetLastError( |
440 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, | 461 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, |
441 "Channel::SendData() failed to send data to RTP/RTCP module"); | 462 "Channel::SendData() failed to send data to RTP/RTCP module"); |
442 return -1; | 463 return -1; |
443 } | 464 } |
444 | 465 |
445 _lastLocalTimeStamp = timeStamp; | |
446 _lastPayloadType = payloadType; | |
447 | |
448 return 0; | 466 return 0; |
449 } | 467 } |
450 | 468 |
451 bool Channel::SendRtp(const uint8_t* data, | 469 bool Channel::SendRtp(const uint8_t* data, |
452 size_t len, | 470 size_t len, |
453 const PacketOptions& options) { | 471 const PacketOptions& options) { |
454 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | 472 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), |
455 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len); | 473 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len); |
456 | 474 |
457 rtc::CritScope cs(&_callbackCritSect); | 475 rtc::CritScope cs(&_callbackCritSect); |
(...skipping 314 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
772 if (output_file_player_) { | 790 if (output_file_player_) { |
773 if (output_file_player_->Frequency() > highestNeeded) { | 791 if (output_file_player_->Frequency() > highestNeeded) { |
774 highestNeeded = output_file_player_->Frequency(); | 792 highestNeeded = output_file_player_->Frequency(); |
775 } | 793 } |
776 } | 794 } |
777 } | 795 } |
778 | 796 |
779 return (highestNeeded); | 797 return (highestNeeded); |
780 } | 798 } |
781 | 799 |
782 int32_t Channel::CreateChannel( | 800 int32_t Channel::CreateChannel(Channel*& channel, |
783 Channel*& channel, | 801 int32_t channelId, |
784 int32_t channelId, | 802 uint32_t instanceId, |
785 uint32_t instanceId, | 803 const VoEBase::ChannelConfig& config) { |
786 const VoEBase::ChannelConfig& config) { | |
787 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), | 804 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), |
788 "Channel::CreateChannel(channelId=%d, instanceId=%d)", channelId, | 805 "Channel::CreateChannel(channelId=%d, instanceId=%d)", channelId, |
789 instanceId); | 806 instanceId); |
790 | 807 |
791 channel = new Channel(channelId, instanceId, config); | 808 channel = new Channel(channelId, instanceId, config); |
792 if (channel == NULL) { | 809 if (channel == NULL) { |
793 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), | 810 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), |
794 "Channel::CreateChannel() unable to allocate memory for" | 811 "Channel::CreateChannel() unable to allocate memory for" |
795 " channel"); | 812 " channel"); |
796 return -1; | 813 return -1; |
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
883 _outputMixerPtr(NULL), | 900 _outputMixerPtr(NULL), |
884 _moduleProcessThreadPtr(NULL), | 901 _moduleProcessThreadPtr(NULL), |
885 _audioDeviceModulePtr(NULL), | 902 _audioDeviceModulePtr(NULL), |
886 _voiceEngineObserverPtr(NULL), | 903 _voiceEngineObserverPtr(NULL), |
887 _callbackCritSectPtr(NULL), | 904 _callbackCritSectPtr(NULL), |
888 _transportPtr(NULL), | 905 _transportPtr(NULL), |
889 input_mute_(false), | 906 input_mute_(false), |
890 previous_frame_muted_(false), | 907 previous_frame_muted_(false), |
891 _outputGain(1.0f), | 908 _outputGain(1.0f), |
892 _mixFileWithMicrophone(false), | 909 _mixFileWithMicrophone(false), |
893 _lastLocalTimeStamp(0), | |
894 _lastPayloadType(0), | |
895 _includeAudioLevelIndication(false), | 910 _includeAudioLevelIndication(false), |
896 transport_overhead_per_packet_(0), | 911 transport_overhead_per_packet_(0), |
897 rtp_overhead_per_packet_(0), | 912 rtp_overhead_per_packet_(0), |
898 _outputSpeechType(AudioFrame::kNormalSpeech), | 913 _outputSpeechType(AudioFrame::kNormalSpeech), |
899 restored_packet_in_use_(false), | 914 restored_packet_in_use_(false), |
900 rtcp_observer_(new VoERtcpObserver(this)), | 915 rtcp_observer_(new VoERtcpObserver(this)), |
901 associate_send_channel_(ChannelOwner(nullptr)), | 916 associate_send_channel_(ChannelOwner(nullptr)), |
902 pacing_enabled_(config.enable_voice_pacing), | 917 pacing_enabled_(config.enable_voice_pacing), |
903 feedback_observer_proxy_(new TransportFeedbackProxy()), | 918 feedback_observer_proxy_(new TransportFeedbackProxy()), |
904 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), | 919 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), |
(...skipping 213 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1118 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()); | 1133 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()); |
1119 | 1134 |
1120 // End of modules shutdown | 1135 // End of modules shutdown |
1121 } | 1136 } |
1122 | 1137 |
1123 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, | 1138 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, |
1124 OutputMixer& outputMixer, | 1139 OutputMixer& outputMixer, |
1125 ProcessThread& moduleProcessThread, | 1140 ProcessThread& moduleProcessThread, |
1126 AudioDeviceModule& audioDeviceModule, | 1141 AudioDeviceModule& audioDeviceModule, |
1127 VoiceEngineObserver* voiceEngineObserver, | 1142 VoiceEngineObserver* voiceEngineObserver, |
1128 rtc::CriticalSection* callbackCritSect) { | 1143 rtc::CriticalSection* callbackCritSect, |
1144 rtc::TaskQueue* encoder_queue) { | |
1145 RTC_DCHECK(encoder_queue); | |
1129 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | 1146 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), |
1130 "Channel::SetEngineInformation()"); | 1147 "Channel::SetEngineInformation()"); |
1131 _engineStatisticsPtr = &engineStatistics; | 1148 _engineStatisticsPtr = &engineStatistics; |
1132 _outputMixerPtr = &outputMixer; | 1149 _outputMixerPtr = &outputMixer; |
1133 _moduleProcessThreadPtr = &moduleProcessThread; | 1150 _moduleProcessThreadPtr = &moduleProcessThread; |
1134 _audioDeviceModulePtr = &audioDeviceModule; | 1151 _audioDeviceModulePtr = &audioDeviceModule; |
1135 _voiceEngineObserverPtr = voiceEngineObserver; | 1152 _voiceEngineObserverPtr = voiceEngineObserver; |
1136 _callbackCritSectPtr = callbackCritSect; | 1153 _callbackCritSectPtr = callbackCritSect; |
1154 encoder_queue_ = encoder_queue; | |
1137 return 0; | 1155 return 0; |
1138 } | 1156 } |
1139 | 1157 |
1140 int32_t Channel::UpdateLocalTimeStamp() { | |
1141 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_); | |
1142 return 0; | |
1143 } | |
1144 | |
1145 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) { | 1158 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) { |
1146 rtc::CritScope cs(&_callbackCritSect); | 1159 rtc::CritScope cs(&_callbackCritSect); |
1147 audio_sink_ = std::move(sink); | 1160 audio_sink_ = std::move(sink); |
1148 } | 1161 } |
1149 | 1162 |
1150 const rtc::scoped_refptr<AudioDecoderFactory>& | 1163 const rtc::scoped_refptr<AudioDecoderFactory>& |
1151 Channel::GetAudioDecoderFactory() const { | 1164 Channel::GetAudioDecoderFactory() const { |
1152 return decoder_factory_; | 1165 return decoder_factory_; |
1153 } | 1166 } |
1154 | 1167 |
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1215 "StartSend() RTP/RTCP failed to start sending"); | 1228 "StartSend() RTP/RTCP failed to start sending"); |
1216 _rtpRtcpModule->SetSendingMediaStatus(false); | 1229 _rtpRtcpModule->SetSendingMediaStatus(false); |
1217 rtc::CritScope cs(&_callbackCritSect); | 1230 rtc::CritScope cs(&_callbackCritSect); |
1218 channel_state_.SetSending(false); | 1231 channel_state_.SetSending(false); |
1219 return -1; | 1232 return -1; |
1220 } | 1233 } |
1221 | 1234 |
1222 return 0; | 1235 return 0; |
1223 } | 1236 } |
1224 | 1237 |
1225 int32_t Channel::StopSend() { | 1238 void Channel::StopSend() { |
1226 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | 1239 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), |
1227 "Channel::StopSend()"); | 1240 "Channel::StopSend()"); |
1228 if (!channel_state_.Get().sending) { | 1241 if (!channel_state_.Get().sending) { |
1229 return 0; | 1242 return; |
1230 } | 1243 } |
1231 channel_state_.SetSending(false); | 1244 channel_state_.SetSending(false); |
1232 | 1245 |
1246 // Post a task to the encoder thread which sets an event when the task is | |
1247 // executed. We know that no more encoding tasks will be added to the task | |
1248 // queue for this channel since sending is now deactivated. It means that, | |
1249 // if we wait for the event to be set, we know that no more pending tasks | |
1250 // exist and it is therefore guaranteed that the task queue will never try | |
1251 // to access an invalid channel object. | |
1252 RTC_DCHECK(encoder_queue_); | |
1253 rtc::Event flush(false, false); | |
1254 encoder_queue_->PostTask([&flush](){ flush.Set(); }); | |
1255 flush.Wait(rtc::Event::kForever); | |
1256 | |
1233 // Store the sequence number to be able to pick up the same sequence for | 1257 // Store the sequence number to be able to pick up the same sequence for |
1234 // the next StartSend(). This is needed for restarting device, otherwise | 1258 // the next StartSend(). This is needed for restarting device, otherwise |
1235 // it might cause libSRTP to complain about packets being replayed. | 1259 // it might cause libSRTP to complain about packets being replayed. |
1236 // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring | 1260 // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring |
1237 // CL is landed. See issue | 1261 // CL is landed. See issue |
1238 // https://code.google.com/p/webrtc/issues/detail?id=2111 . | 1262 // https://code.google.com/p/webrtc/issues/detail?id=2111 . |
1239 send_sequence_number_ = _rtpRtcpModule->SequenceNumber(); | 1263 send_sequence_number_ = _rtpRtcpModule->SequenceNumber(); |
1240 | 1264 |
1241 // Reset sending SSRC and sequence number and triggers direct transmission | 1265 // Reset sending SSRC and sequence number and triggers direct transmission |
1242 // of RTCP BYE | 1266 // of RTCP BYE |
1243 if (_rtpRtcpModule->SetSendingStatus(false) == -1) { | 1267 if (_rtpRtcpModule->SetSendingStatus(false) == -1) { |
1244 _engineStatisticsPtr->SetLastError( | 1268 _engineStatisticsPtr->SetLastError( |
1245 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, | 1269 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, |
1246 "StartSend() RTP/RTCP failed to stop sending"); | 1270 "StartSend() RTP/RTCP failed to stop sending"); |
1247 } | 1271 } |
1248 _rtpRtcpModule->SetSendingMediaStatus(false); | 1272 _rtpRtcpModule->SetSendingMediaStatus(false); |
1249 | |
1250 return 0; | |
1251 } | 1273 } |
1252 | 1274 |
1253 int32_t Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) { | 1275 int32_t Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) { |
1254 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | 1276 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), |
1255 "Channel::RegisterVoiceEngineObserver()"); | 1277 "Channel::RegisterVoiceEngineObserver()"); |
1256 rtc::CritScope cs(&_callbackCritSect); | 1278 rtc::CritScope cs(&_callbackCritSect); |
1257 | 1279 |
1258 if (_voiceEngineObserverPtr) { | 1280 if (_voiceEngineObserverPtr) { |
1259 _engineStatisticsPtr->SetLastError( | 1281 _engineStatisticsPtr->SetLastError( |
1260 VE_INVALID_OPERATION, kTraceError, | 1282 VE_INVALID_OPERATION, kTraceError, |
(...skipping 1380 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2641 audio_coding_->EnableNack(maxNumberOfPackets); | 2663 audio_coding_->EnableNack(maxNumberOfPackets); |
2642 else | 2664 else |
2643 audio_coding_->DisableNack(); | 2665 audio_coding_->DisableNack(); |
2644 } | 2666 } |
2645 | 2667 |
2646 // Called when we are missing one or more packets. | 2668 // Called when we are missing one or more packets. |
2647 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) { | 2669 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) { |
2648 return _rtpRtcpModule->SendNACK(sequence_numbers, length); | 2670 return _rtpRtcpModule->SendNACK(sequence_numbers, length); |
2649 } | 2671 } |
2650 | 2672 |
2651 uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) { | 2673 void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) { |
2652 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | 2674 RTC_DCHECK(encoder_queue_); |
the sun
2017/03/30 10:13:33
Also
RTC_DCHECK(channel_state_.Get().sending);
henrika_webrtc
2017/03/30 11:16:31
Done.
| |
2653 "Channel::Demultiplex()"); | 2675 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame()); |
2654 _audioFrame.CopyFrom(audioFrame); | 2676 audio_frame->CopyFrom(audio_input); |
2655 _audioFrame.id_ = _channelId; | 2677 audio_frame->id_ = ChannelId(); |
2656 return 0; | 2678 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( |
2679 new ProcessAndEncodeAudioTask(std::move(audio_frame), this))); | |
2657 } | 2680 } |
2658 | 2681 |
2659 void Channel::Demultiplex(const int16_t* audio_data, | 2682 void Channel::ProcessAndEncodeAudio(const int16_t* audio_data, |
2660 int sample_rate, | 2683 int sample_rate, |
2661 size_t number_of_frames, | 2684 size_t number_of_frames, |
2662 size_t number_of_channels) { | 2685 size_t number_of_channels) { |
2686 RTC_DCHECK(encoder_queue_); | |
the sun
2017/03/30 10:13:33
Here too:
RTC_DCHECK(channel_state_.Get().sending)
henrika_webrtc
2017/03/30 11:16:31
Done.
| |
2663 CodecInst codec; | 2687 CodecInst codec; |
2664 GetSendCodec(codec); | 2688 GetSendCodec(codec); |
2665 | 2689 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame()); |
2666 // Never upsample or upmix the capture signal here. This should be done at the | 2690 audio_frame->id_ = ChannelId(); |
2667 // end of the send chain. | 2691 audio_frame->sample_rate_hz_ = std::min(codec.plfreq, sample_rate); |
2668 _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate); | 2692 audio_frame->num_channels_ = std::min(number_of_channels, codec.channels); |
2669 _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels); | |
2670 RemixAndResample(audio_data, number_of_frames, number_of_channels, | 2693 RemixAndResample(audio_data, number_of_frames, number_of_channels, |
2671 sample_rate, &input_resampler_, &_audioFrame); | 2694 sample_rate, &input_resampler_, audio_frame.get()); |
2695 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>( | |
2696 new ProcessAndEncodeAudioTask(std::move(audio_frame), this))); | |
2672 } | 2697 } |
2673 | 2698 |
2674 uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) { | 2699 void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) { |
2675 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | 2700 RTC_DCHECK_RUN_ON(encoder_queue_); |
2676 "Channel::PrepareEncodeAndSend()"); | 2701 RTC_DCHECK_GT(audio_input->samples_per_channel_, 0); |
2702 RTC_DCHECK_LE(audio_input->num_channels_, 2); | |
2703 RTC_DCHECK_EQ(audio_input->id_, ChannelId()); | |
2677 | 2704 |
2678 if (_audioFrame.samples_per_channel_ == 0) { | 2705 if (channel_state_.Get().input_file_playing) { |
2679 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | 2706 MixOrReplaceAudioWithFile(audio_input); |
2680 "Channel::PrepareEncodeAndSend() invalid audio frame"); | |
2681 return 0xFFFFFFFF; | |
2682 } | 2707 } |
2683 | 2708 |
2684 if (channel_state_.Get().input_file_playing) { | 2709 bool is_muted = InputMute(); |
2685 MixOrReplaceAudioWithFile(mixingFrequency); | 2710 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted); |
2686 } | |
2687 | |
2688 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock. | |
2689 AudioFrameOperations::Mute(&_audioFrame, previous_frame_muted_, is_muted); | |
2690 | 2711 |
2691 if (_includeAudioLevelIndication) { | 2712 if (_includeAudioLevelIndication) { |
2692 size_t length = | 2713 size_t length = |
2693 _audioFrame.samples_per_channel_ * _audioFrame.num_channels_; | 2714 audio_input->samples_per_channel_ * audio_input->num_channels_; |
2694 RTC_CHECK_LE(length, sizeof(_audioFrame.data_)); | 2715 RTC_CHECK_LE(length, sizeof(audio_input->data_)); |
2695 if (is_muted && previous_frame_muted_) { | 2716 if (is_muted && previous_frame_muted_) { |
2696 rms_level_.AnalyzeMuted(length); | 2717 rms_level_.AnalyzeMuted(length); |
2697 } else { | 2718 } else { |
2698 rms_level_.Analyze( | 2719 rms_level_.Analyze( |
2699 rtc::ArrayView<const int16_t>(_audioFrame.data_, length)); | 2720 rtc::ArrayView<const int16_t>(audio_input->data_, length)); |
2700 } | 2721 } |
2701 } | 2722 } |
2702 previous_frame_muted_ = is_muted; | 2723 previous_frame_muted_ = is_muted; |
2703 | 2724 |
2704 return 0; | 2725 // Add 10ms of raw (PCM) audio data to the encoder @ 32kHz. |
2705 } | |
2706 | |
2707 uint32_t Channel::EncodeAndSend() { | |
2708 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | |
2709 "Channel::EncodeAndSend()"); | |
2710 | |
2711 assert(_audioFrame.num_channels_ <= 2); | |
2712 if (_audioFrame.samples_per_channel_ == 0) { | |
2713 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
2714 "Channel::EncodeAndSend() invalid audio frame"); | |
2715 return 0xFFFFFFFF; | |
2716 } | |
2717 | |
2718 _audioFrame.id_ = _channelId; | |
2719 | |
2720 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz. | |
2721 | 2726 |
2722 // The ACM resamples internally. | 2727 // The ACM resamples internally. |
2723 _audioFrame.timestamp_ = _timeStamp; | 2728 audio_input->timestamp_ = _timeStamp; |
2724 // This call will trigger AudioPacketizationCallback::SendData if encoding | 2729 // This call will trigger AudioPacketizationCallback::SendData if encoding |
2725 // is done and payload is ready for packetization and transmission. | 2730 // is done and payload is ready for packetization and transmission. |
2726 // Otherwise, it will return without invoking the callback. | 2731 // Otherwise, it will return without invoking the callback. |
2727 if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) { | 2732 if (audio_coding_->Add10MsData(*audio_input) < 0) { |
2728 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId), | 2733 LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId; |
2729 "Channel::EncodeAndSend() ACM encoding failed"); | 2734 return; |
2730 return 0xFFFFFFFF; | |
2731 } | 2735 } |
2732 | 2736 |
2733 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_); | 2737 _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_); |
2734 return 0; | |
2735 } | 2738 } |
2736 | 2739 |
2737 void Channel::set_associate_send_channel(const ChannelOwner& channel) { | 2740 void Channel::set_associate_send_channel(const ChannelOwner& channel) { |
2738 RTC_DCHECK(!channel.channel() || | 2741 RTC_DCHECK(!channel.channel() || |
2739 channel.channel()->ChannelId() != _channelId); | 2742 channel.channel()->ChannelId() != _channelId); |
2740 rtc::CritScope lock(&assoc_send_channel_lock_); | 2743 rtc::CritScope lock(&assoc_send_channel_lock_); |
2741 associate_send_channel_ = channel; | 2744 associate_send_channel_ = channel; |
2742 } | 2745 } |
2743 | 2746 |
2744 void Channel::DisassociateSendChannel(int channel_id) { | 2747 void Channel::DisassociateSendChannel(int channel_id) { |
(...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2833 | 2836 |
2834 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, | 2837 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, |
2835 RtpReceiver** rtp_receiver) const { | 2838 RtpReceiver** rtp_receiver) const { |
2836 *rtpRtcpModule = _rtpRtcpModule.get(); | 2839 *rtpRtcpModule = _rtpRtcpModule.get(); |
2837 *rtp_receiver = rtp_receiver_.get(); | 2840 *rtp_receiver = rtp_receiver_.get(); |
2838 return 0; | 2841 return 0; |
2839 } | 2842 } |
2840 | 2843 |
2841 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use | 2844 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use |
2842 // a shared helper. | 2845 // a shared helper. |
2843 int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) { | 2846 int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) { |
2847 RTC_DCHECK_RUN_ON(encoder_queue_); | |
2844 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]); | 2848 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]); |
2845 size_t fileSamples(0); | 2849 size_t fileSamples(0); |
2850 const int mixingFrequency = audio_input->sample_rate_hz_; | |
2846 | 2851 |
2847 { | 2852 rtc::CritScope cs(&_fileCritSect); |
the sun
2017/03/30 10:13:33
fyi: you're holding the lock for longer now. don't
henrika_webrtc
2017/03/30 11:16:31
Let me fix that.
| |
2848 rtc::CritScope cs(&_fileCritSect); | |
2849 | 2853 |
2850 if (!input_file_player_) { | 2854 if (!input_file_player_) { |
2851 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | 2855 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), |
2852 "Channel::MixOrReplaceAudioWithFile() fileplayer" | 2856 "Channel::MixOrReplaceAudioWithFile() fileplayer" |
2853 " doesnt exist"); | 2857 " doesnt exist"); |
2854 return -1; | 2858 return -1; |
2855 } | |
2856 | |
2857 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples, | |
2858 mixingFrequency) == -1) { | |
2859 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
2860 "Channel::MixOrReplaceAudioWithFile() file mixing " | |
2861 "failed"); | |
2862 return -1; | |
2863 } | |
2864 if (fileSamples == 0) { | |
2865 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
2866 "Channel::MixOrReplaceAudioWithFile() file is ended"); | |
2867 return 0; | |
2868 } | |
2869 } | 2859 } |
2870 | 2860 |
2871 assert(_audioFrame.samples_per_channel_ == fileSamples); | 2861 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples, |
2862 mixingFrequency) == -1) { | |
2863 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
2864 "Channel::MixOrReplaceAudioWithFile() file mixing " | |
2865 "failed"); | |
2866 return -1; | |
2867 } | |
2868 if (fileSamples == 0) { | |
2869 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | |
2870 "Channel::MixOrReplaceAudioWithFile() file is ended"); | |
2871 return 0; | |
2872 } | |
2873 | |
2874 RTC_DCHECK_EQ(audio_input->samples_per_channel_, fileSamples); | |
2872 | 2875 |
2873 if (_mixFileWithMicrophone) { | 2876 if (_mixFileWithMicrophone) { |
2874 // Currently file stream is always mono. | 2877 // Currently file stream is always mono. |
2875 // TODO(xians): Change the code when FilePlayer supports real stereo. | 2878 // TODO(xians): Change the code when FilePlayer supports real stereo. |
2876 MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(), | 2879 MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(), |
2877 1, fileSamples); | 2880 1, fileSamples); |
2878 } else { | 2881 } else { |
2879 // Replace ACM audio with file. | 2882 // Replace ACM audio with file. |
2880 // Currently file stream is always mono. | 2883 // Currently file stream is always mono. |
2881 // TODO(xians): Change the code when FilePlayer supports real stereo. | 2884 // TODO(xians): Change the code when FilePlayer supports real stereo. |
2882 _audioFrame.UpdateFrame( | 2885 audio_input->UpdateFrame( |
2883 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, | 2886 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, |
2884 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); | 2887 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); |
2885 } | 2888 } |
2886 return 0; | 2889 return 0; |
2887 } | 2890 } |
2888 | 2891 |
2889 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) { | 2892 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) { |
2890 assert(mixingFrequency <= 48000); | 2893 assert(mixingFrequency <= 48000); |
2891 | 2894 |
2892 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]); | 2895 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]); |
(...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
3062 int64_t min_rtt = 0; | 3065 int64_t min_rtt = 0; |
3063 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != | 3066 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != |
3064 0) { | 3067 0) { |
3065 return 0; | 3068 return 0; |
3066 } | 3069 } |
3067 return rtt; | 3070 return rtt; |
3068 } | 3071 } |
3069 | 3072 |
3070 } // namespace voe | 3073 } // namespace voe |
3071 } // namespace webrtc | 3074 } // namespace webrtc |
OLD | NEW |