Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(316)

Side by Side Diff: webrtc/voice_engine/channel.cc

Issue 2665693002: Moves channel-dependent audio input processing to separate encoder task queue (Closed)
Patch Set: Fixed dependency Created 3 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/voice_engine/channel.h" 11 #include "webrtc/voice_engine/channel.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 #include <utility> 14 #include <utility>
15 15
16 #include "webrtc/audio/utility/audio_frame_operations.h" 16 #include "webrtc/audio/utility/audio_frame_operations.h"
17 #include "webrtc/base/array_view.h" 17 #include "webrtc/base/array_view.h"
18 #include "webrtc/base/checks.h" 18 #include "webrtc/base/checks.h"
19 #include "webrtc/base/criticalsection.h" 19 #include "webrtc/base/criticalsection.h"
20 #include "webrtc/base/format_macros.h" 20 #include "webrtc/base/format_macros.h"
21 #include "webrtc/base/location.h" 21 #include "webrtc/base/location.h"
22 #include "webrtc/base/logging.h" 22 #include "webrtc/base/logging.h"
23 #include "webrtc/base/rate_limiter.h" 23 #include "webrtc/base/rate_limiter.h"
24 #include "webrtc/base/task_queue.h"
25 #include "webrtc/base/thread_checker.h"
24 #include "webrtc/base/timeutils.h" 26 #include "webrtc/base/timeutils.h"
25 #include "webrtc/call/rtp_transport_controller_send.h" 27 #include "webrtc/call/rtp_transport_controller_send.h"
26 #include "webrtc/config.h" 28 #include "webrtc/config.h"
27 #include "webrtc/logging/rtc_event_log/rtc_event_log.h" 29 #include "webrtc/logging/rtc_event_log/rtc_event_log.h"
28 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h" 30 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h"
29 #include "webrtc/modules/audio_device/include/audio_device.h" 31 #include "webrtc/modules/audio_device/include/audio_device.h"
30 #include "webrtc/modules/audio_processing/include/audio_processing.h" 32 #include "webrtc/modules/audio_processing/include/audio_processing.h"
31 #include "webrtc/modules/include/module_common_types.h" 33 #include "webrtc/modules/include/module_common_types.h"
32 #include "webrtc/modules/pacing/packet_router.h" 34 #include "webrtc/modules/pacing/packet_router.h"
33 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h" 35 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
(...skipping 367 matching lines...) Expand 10 before | Expand all | Expand 10 after
401 } 403 }
402 404
403 private: 405 private:
404 Channel* owner_; 406 Channel* owner_;
405 // Maps remote side ssrc to extended highest sequence number received. 407 // Maps remote side ssrc to extended highest sequence number received.
406 std::map<uint32_t, uint32_t> extended_max_sequence_number_; 408 std::map<uint32_t, uint32_t> extended_max_sequence_number_;
407 rtc::CriticalSection crit_; 409 rtc::CriticalSection crit_;
408 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_); 410 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_);
409 }; 411 };
410 412
413 class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
414 public:
415 ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_frame,
416 Channel* channel)
417 : audio_frame_(std::move(audio_frame)), channel_(channel) {}
418
419 private:
420 bool Run() override {
the sun 2017/03/28 12:57:50 Given the simplicity, could you even use a lambda
tommi 2017/03/28 13:47:20 fyi - there's one difference between this class an
the sun 2017/03/28 23:05:40 Ah, good point.
henrika_webrtc 2017/03/29 10:35:11 Given input from Tommi I would like to skip using
henrika_webrtc 2017/03/29 10:35:11 Done.
henrika_webrtc 2017/03/29 10:35:12 Acknowledged.
421 RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
422 RTC_DCHECK(channel_);
423 channel_->ProcessAndEncodeAudioOnTaskQueue(audio_frame_.get());
424 return true;
425 }
426
427 std::unique_ptr<AudioFrame> audio_frame_;
428 Channel* const channel_;
429 };
430
411 int32_t Channel::SendData(FrameType frameType, 431 int32_t Channel::SendData(FrameType frameType,
412 uint8_t payloadType, 432 uint8_t payloadType,
413 uint32_t timeStamp, 433 uint32_t timeStamp,
414 const uint8_t* payloadData, 434 const uint8_t* payloadData,
415 size_t payloadSize, 435 size_t payloadSize,
416 const RTPFragmentationHeader* fragmentation) { 436 const RTPFragmentationHeader* fragmentation) {
437 RTC_DCHECK_RUN_ON(encoder_queue_);
417 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 438 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
418 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u," 439 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
419 " payloadSize=%" PRIuS ", fragmentation=0x%x)", 440 " payloadSize=%" PRIuS ", fragmentation=0x%x)",
420 frameType, payloadType, timeStamp, payloadSize, fragmentation); 441 frameType, payloadType, timeStamp, payloadSize, fragmentation);
421 442
422 if (_includeAudioLevelIndication) { 443 if (_includeAudioLevelIndication) {
423 // Store current audio level in the RTP/RTCP module. 444 // Store current audio level in the RTP/RTCP module.
424 // The level will be used in combination with voice-activity state 445 // The level will be used in combination with voice-activity state
425 // (frameType) to add an RTP header extension 446 // (frameType) to add an RTP header extension
426 _rtpRtcpModule->SetAudioLevel(rms_level_.Average()); 447 _rtpRtcpModule->SetAudioLevel(rms_level_.Average());
427 } 448 }
428 449
429 // Push data from ACM to RTP/RTCP-module to deliver audio frame for 450 // Push data from ACM to RTP/RTCP-module to deliver audio frame for
430 // packetization. 451 // packetization.
431 // This call will trigger Transport::SendPacket() from the RTP/RTCP module. 452 // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
432 if (!_rtpRtcpModule->SendOutgoingData( 453 if (!_rtpRtcpModule->SendOutgoingData(
433 (FrameType&)frameType, payloadType, timeStamp, 454 (FrameType&)frameType, payloadType, timeStamp,
434 // Leaving the time when this frame was 455 // Leaving the time when this frame was
435 // received from the capture device as 456 // received from the capture device as
436 // undefined for voice for now. 457 // undefined for voice for now.
437 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) { 458 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) {
438 _engineStatisticsPtr->SetLastError( 459 _engineStatisticsPtr->SetLastError(
439 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, 460 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
440 "Channel::SendData() failed to send data to RTP/RTCP module"); 461 "Channel::SendData() failed to send data to RTP/RTCP module");
441 return -1; 462 return -1;
442 } 463 }
443 464
444 _lastLocalTimeStamp = timeStamp; 465 _lastLocalTimeStamp = timeStamp;
the sun 2017/03/28 13:28:29 Appears unused - remove.
henrika_webrtc 2017/03/29 10:35:12 Nice. Thanks!
445 _lastPayloadType = payloadType; 466 _lastPayloadType = payloadType;
the sun 2017/03/28 13:28:29 Appears unused - remove.
henrika_webrtc 2017/03/29 10:35:12 Done.
446
447 return 0; 467 return 0;
448 } 468 }
449 469
450 bool Channel::SendRtp(const uint8_t* data, 470 bool Channel::SendRtp(const uint8_t* data,
451 size_t len, 471 size_t len,
452 const PacketOptions& options) { 472 const PacketOptions& options) {
453 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 473 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
454 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len); 474 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len);
455 475
456 rtc::CritScope cs(&_callbackCritSect); 476 rtc::CritScope cs(&_callbackCritSect);
(...skipping 420 matching lines...) Expand 10 before | Expand all | Expand 10 after
877 send_sequence_number_(0), 897 send_sequence_number_(0),
878 rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()), 898 rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
879 capture_start_rtp_time_stamp_(-1), 899 capture_start_rtp_time_stamp_(-1),
880 capture_start_ntp_time_ms_(-1), 900 capture_start_ntp_time_ms_(-1),
881 _engineStatisticsPtr(NULL), 901 _engineStatisticsPtr(NULL),
882 _outputMixerPtr(NULL), 902 _outputMixerPtr(NULL),
883 _moduleProcessThreadPtr(NULL), 903 _moduleProcessThreadPtr(NULL),
884 _audioDeviceModulePtr(NULL), 904 _audioDeviceModulePtr(NULL),
885 _voiceEngineObserverPtr(NULL), 905 _voiceEngineObserverPtr(NULL),
886 _callbackCritSectPtr(NULL), 906 _callbackCritSectPtr(NULL),
907 encoder_queue_(nullptr),
887 _transportPtr(NULL), 908 _transportPtr(NULL),
888 input_mute_(false), 909 input_mute_(false),
889 previous_frame_muted_(false), 910 previous_frame_muted_(false),
890 _outputGain(1.0f), 911 _outputGain(1.0f),
891 _mixFileWithMicrophone(false), 912 _mixFileWithMicrophone(false),
892 _lastLocalTimeStamp(0), 913 _lastLocalTimeStamp(0),
893 _lastPayloadType(0), 914 _lastPayloadType(0),
894 _includeAudioLevelIndication(false), 915 _includeAudioLevelIndication(false),
895 transport_overhead_per_packet_(0), 916 transport_overhead_per_packet_(0),
896 rtp_overhead_per_packet_(0), 917 rtp_overhead_per_packet_(0),
897 _outputSpeechType(AudioFrame::kNormalSpeech), 918 _outputSpeechType(AudioFrame::kNormalSpeech),
898 restored_packet_in_use_(false), 919 restored_packet_in_use_(false),
899 rtcp_observer_(new VoERtcpObserver(this)), 920 rtcp_observer_(new VoERtcpObserver(this)),
900 associate_send_channel_(ChannelOwner(nullptr)), 921 associate_send_channel_(ChannelOwner(nullptr)),
901 pacing_enabled_(config.enable_voice_pacing), 922 pacing_enabled_(config.enable_voice_pacing),
902 feedback_observer_proxy_(new TransportFeedbackProxy()), 923 feedback_observer_proxy_(new TransportFeedbackProxy()),
903 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), 924 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()),
904 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()), 925 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()),
905 retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(), 926 retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(),
906 kMaxRetransmissionWindowMs)), 927 kMaxRetransmissionWindowMs)),
907 decoder_factory_(config.acm_config.decoder_factory), 928 decoder_factory_(config.acm_config.decoder_factory),
908 // TODO(elad.alon): Subsequent CL experiments with PLR source. 929 // TODO(elad.alon): Subsequent CL experiments with PLR source.
909 use_twcc_plr_for_ana_(false) { 930 use_twcc_plr_for_ana_(false),
931 stop_send_event_(true /* manual_reset */, false) {
910 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId), 932 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId),
911 "Channel::Channel() - ctor"); 933 "Channel::Channel() - ctor");
912 AudioCodingModule::Config acm_config(config.acm_config); 934 AudioCodingModule::Config acm_config(config.acm_config);
913 acm_config.id = VoEModuleId(instanceId, channelId); 935 acm_config.id = VoEModuleId(instanceId, channelId);
914 acm_config.neteq_config.enable_muted_state = true; 936 acm_config.neteq_config.enable_muted_state = true;
915 audio_coding_.reset(AudioCodingModule::Create(acm_config)); 937 audio_coding_.reset(AudioCodingModule::Create(acm_config));
916 938
917 _outputAudioLevel.Clear(); 939 _outputAudioLevel.Clear();
918 940
919 RtpRtcp::Configuration configuration; 941 RtpRtcp::Configuration configuration;
(...skipping 11 matching lines...) Expand all
931 configuration.event_log = &(*event_log_proxy_); 953 configuration.event_log = &(*event_log_proxy_);
932 configuration.rtt_stats = &(*rtcp_rtt_stats_proxy_); 954 configuration.rtt_stats = &(*rtcp_rtt_stats_proxy_);
933 configuration.retransmission_rate_limiter = 955 configuration.retransmission_rate_limiter =
934 retransmission_rate_limiter_.get(); 956 retransmission_rate_limiter_.get();
935 957
936 _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration)); 958 _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
937 _rtpRtcpModule->SetSendingMediaStatus(false); 959 _rtpRtcpModule->SetSendingMediaStatus(false);
938 } 960 }
939 961
940 Channel::~Channel() { 962 Channel::~Channel() {
963 // If sending ever has been activated, ensure that StopSend() has been called
964 // to flush out any pending tasks in the encoder queue.
965 if (channel_state_.Get().sending_has_been_activated) {
tommi 2017/03/28 13:47:20 calling Get() here grabs a lock and could actually
henrika_webrtc 2017/03/29 10:35:11 Did changes. Please check again.
966 RTC_DCHECK(stop_send_event_.Wait(0))
967 << "Must call StopSend() before destruction to clean up pending tasks";
968 }
941 RTC_DCHECK(!channel_state_.Get().sending); 969 RTC_DCHECK(!channel_state_.Get().sending);
942 RTC_DCHECK(!channel_state_.Get().playing); 970 RTC_DCHECK(!channel_state_.Get().playing);
943 } 971 }
944 972
945 int32_t Channel::Init() { 973 int32_t Channel::Init() {
946 RTC_DCHECK(construction_thread_.CalledOnValidThread()); 974 RTC_DCHECK(construction_thread_.CalledOnValidThread());
947 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 975 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
948 "Channel::Init()"); 976 "Channel::Init()");
949 977
950 channel_state_.Reset(); 978 channel_state_.Reset();
(...skipping 166 matching lines...) Expand 10 before | Expand all | Expand 10 after
1117 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()); 1145 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
1118 1146
1119 // End of modules shutdown 1147 // End of modules shutdown
1120 } 1148 }
1121 1149
1122 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, 1150 int32_t Channel::SetEngineInformation(Statistics& engineStatistics,
1123 OutputMixer& outputMixer, 1151 OutputMixer& outputMixer,
1124 ProcessThread& moduleProcessThread, 1152 ProcessThread& moduleProcessThread,
1125 AudioDeviceModule& audioDeviceModule, 1153 AudioDeviceModule& audioDeviceModule,
1126 VoiceEngineObserver* voiceEngineObserver, 1154 VoiceEngineObserver* voiceEngineObserver,
1127 rtc::CriticalSection* callbackCritSect) { 1155 rtc::CriticalSection* callbackCritSect,
1156 rtc::TaskQueue* encoder_queue) {
1128 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 1157 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1129 "Channel::SetEngineInformation()"); 1158 "Channel::SetEngineInformation()");
1159 RTC_DCHECK(encoder_queue);
1130 _engineStatisticsPtr = &engineStatistics; 1160 _engineStatisticsPtr = &engineStatistics;
1131 _outputMixerPtr = &outputMixer; 1161 _outputMixerPtr = &outputMixer;
1132 _moduleProcessThreadPtr = &moduleProcessThread; 1162 _moduleProcessThreadPtr = &moduleProcessThread;
1133 _audioDeviceModulePtr = &audioDeviceModule; 1163 _audioDeviceModulePtr = &audioDeviceModule;
1134 _voiceEngineObserverPtr = voiceEngineObserver; 1164 _voiceEngineObserverPtr = voiceEngineObserver;
1135 _callbackCritSectPtr = callbackCritSect; 1165 _callbackCritSectPtr = callbackCritSect;
1166 encoder_queue_ = encoder_queue;
1136 return 0; 1167 return 0;
1137 } 1168 }
1138 1169
1139 int32_t Channel::UpdateLocalTimeStamp() {
1140 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
1141 return 0;
1142 }
1143
1144 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) { 1170 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) {
1145 rtc::CritScope cs(&_callbackCritSect); 1171 rtc::CritScope cs(&_callbackCritSect);
1146 audio_sink_ = std::move(sink); 1172 audio_sink_ = std::move(sink);
1147 } 1173 }
1148 1174
1149 const rtc::scoped_refptr<AudioDecoderFactory>& 1175 const rtc::scoped_refptr<AudioDecoderFactory>&
1150 Channel::GetAudioDecoderFactory() const { 1176 Channel::GetAudioDecoderFactory() const {
1151 return decoder_factory_; 1177 return decoder_factory_;
1152 } 1178 }
1153 1179
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after
1222 } 1248 }
1223 1249
1224 int32_t Channel::StopSend() { 1250 int32_t Channel::StopSend() {
1225 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 1251 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1226 "Channel::StopSend()"); 1252 "Channel::StopSend()");
1227 if (!channel_state_.Get().sending) { 1253 if (!channel_state_.Get().sending) {
1228 return 0; 1254 return 0;
1229 } 1255 }
1230 channel_state_.SetSending(false); 1256 channel_state_.SetSending(false);
1231 1257
1258 // Post a task to the encoder thread which sets an event when the task is
1259 // executed. We know that no more encoding tasks will be added to the task
1260 // queue for this channel since sending is now deactivated. It means that,
1261 // if we wait for the event to be set, we know that no more pending tasks
1262 // exist and it is therefore guaranteed that the task queue will never try
1263 // to access an invalid channel object.
1264 encoder_queue_->PostTask([this] {
1265 RTC_DCHECK_RUN_ON(encoder_queue_);
tommi 2017/03/28 13:47:19 nit: I don't think this is necessary. hmm... I do
henrika_webrtc 2017/03/29 10:35:11 Please let me wait with this larger task until the
henrika_webrtc 2017/03/29 11:36:20 I actually failed to do what was suggested since I
1266 stop_send_event_.Set();
1267 });
1268 stop_send_event_.Wait(rtc::Event::kForever);
1269
1232 // Store the sequence number to be able to pick up the same sequence for 1270 // Store the sequence number to be able to pick up the same sequence for
1233 // the next StartSend(). This is needed for restarting device, otherwise 1271 // the next StartSend(). This is needed for restarting device, otherwise
1234 // it might cause libSRTP to complain about packets being replayed. 1272 // it might cause libSRTP to complain about packets being replayed.
1235 // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring 1273 // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring
1236 // CL is landed. See issue 1274 // CL is landed. See issue
1237 // https://code.google.com/p/webrtc/issues/detail?id=2111 . 1275 // https://code.google.com/p/webrtc/issues/detail?id=2111 .
tommi 2017/03/28 13:47:19 fredrik - looks like this might be ok to delete no
the sun 2017/03/28 23:05:40 It might - outside the scope of this CL though.
henrika_webrtc 2017/03/29 10:35:11 No action from my side in this CL.
henrika_webrtc 2017/03/29 10:35:11 Will wait for verification from Fredrik before mak
1238 send_sequence_number_ = _rtpRtcpModule->SequenceNumber(); 1276 send_sequence_number_ = _rtpRtcpModule->SequenceNumber();
1239 1277
1240 // Reset sending SSRC and sequence number and triggers direct transmission 1278 // Reset sending SSRC and sequence number and triggers direct transmission
1241 // of RTCP BYE 1279 // of RTCP BYE
1242 if (_rtpRtcpModule->SetSendingStatus(false) == -1) { 1280 if (_rtpRtcpModule->SetSendingStatus(false) == -1) {
1243 _engineStatisticsPtr->SetLastError( 1281 _engineStatisticsPtr->SetLastError(
1244 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, 1282 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
1245 "StartSend() RTP/RTCP failed to stop sending"); 1283 "StartSend() RTP/RTCP failed to stop sending");
1246 } 1284 }
1247 _rtpRtcpModule->SetSendingMediaStatus(false); 1285 _rtpRtcpModule->SetSendingMediaStatus(false);
(...skipping 1392 matching lines...) Expand 10 before | Expand all | Expand 10 after
2640 audio_coding_->EnableNack(maxNumberOfPackets); 2678 audio_coding_->EnableNack(maxNumberOfPackets);
2641 else 2679 else
2642 audio_coding_->DisableNack(); 2680 audio_coding_->DisableNack();
2643 } 2681 }
2644 2682
2645 // Called when we are missing one or more packets. 2683 // Called when we are missing one or more packets.
2646 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) { 2684 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
2647 return _rtpRtcpModule->SendNACK(sequence_numbers, length); 2685 return _rtpRtcpModule->SendNACK(sequence_numbers, length);
2648 } 2686 }
2649 2687
2650 uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) { 2688 void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
2651 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2689 RTC_DCHECK(encoder_queue_);
tommi 2017/03/28 13:47:20 did you intend to dcheck that you're running on th
henrika_webrtc 2017/03/29 10:35:11 Yes, since it is not set at ctor. See comments bel
2652 "Channel::Demultiplex()"); 2690 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
2653 _audioFrame.CopyFrom(audioFrame); 2691 audio_frame->CopyFrom(audio_input);
2654 _audioFrame.id_ = _channelId; 2692 audio_frame->id_ = ChannelId();
2655 return 0; 2693 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
2694 new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
2656 } 2695 }
2657 2696
2658 void Channel::Demultiplex(const int16_t* audio_data, 2697 void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
2659 int sample_rate, 2698 int sample_rate,
2660 size_t number_of_frames, 2699 size_t number_of_frames,
2661 size_t number_of_channels) { 2700 size_t number_of_channels) {
2701 RTC_DCHECK(encoder_queue_);
tommi 2017/03/28 13:47:20 same here... might want to do a search/replace for
henrika_webrtc 2017/03/29 10:35:11 Actually, looking at the TSAN issues I realize tha
2662 CodecInst codec; 2702 CodecInst codec;
2663 GetSendCodec(codec); 2703 GetSendCodec(codec);
2664 2704 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
2665 // Never upsample or upmix the capture signal here. This should be done at the 2705 audio_frame->id_ = ChannelId();
2666 // end of the send chain. 2706 audio_frame->sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
2667 _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate); 2707 audio_frame->num_channels_ = std::min(number_of_channels, codec.channels);
2668 _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels);
2669 RemixAndResample(audio_data, number_of_frames, number_of_channels, 2708 RemixAndResample(audio_data, number_of_frames, number_of_channels,
2670 sample_rate, &input_resampler_, &_audioFrame); 2709 sample_rate, &input_resampler_, audio_frame.get());
2710 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
2711 new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
2671 } 2712 }
2672 2713
2673 uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) { 2714 void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
2674 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2715 RTC_DCHECK_RUN_ON(encoder_queue_);
2675 "Channel::PrepareEncodeAndSend()"); 2716 PrepareEncodeAndSend(audio_input);
2717 EncodeAndSend(audio_input);
2718 }
2676 2719
2677 if (_audioFrame.samples_per_channel_ == 0) { 2720 uint32_t Channel::PrepareEncodeAndSend(AudioFrame* audio_input) {
the sun 2017/03/28 12:57:50 Could you fuse PrepareEncodeAndSend with EncodeAnd
henrika_webrtc 2017/03/29 10:35:11 Done.
2678 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2721 RTC_DCHECK_RUN_ON(encoder_queue_);
2679 "Channel::PrepareEncodeAndSend() invalid audio frame"); 2722 RTC_DCHECK(audio_input->samples_per_channel_);
2680 return 0xFFFFFFFF;
2681 }
2682 2723
2683 if (channel_state_.Get().input_file_playing) { 2724 if (channel_state_.Get().input_file_playing) {
2684 MixOrReplaceAudioWithFile(mixingFrequency); 2725 MixOrReplaceAudioWithFile(audio_input);
2685 } 2726 }
2686 2727
2687 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock. 2728 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock.
2688 AudioFrameOperations::Mute(&_audioFrame, previous_frame_muted_, is_muted); 2729 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);
the sun 2017/03/28 13:28:29 Declare ACCESS_ON for previous_frame_muted_
henrika_webrtc 2017/03/29 10:35:11 Done.
2689 2730
2690 if (_includeAudioLevelIndication) { 2731 if (_includeAudioLevelIndication) {
the sun 2017/03/28 13:28:29 _includeAudioLevelIndication is now potentially ra
henrika_webrtc 2017/03/29 10:35:11 Yes, discussed offline. No action here. Adding tod
2691 size_t length = 2732 size_t length =
2692 _audioFrame.samples_per_channel_ * _audioFrame.num_channels_; 2733 audio_input->samples_per_channel_ * audio_input->num_channels_;
2693 RTC_CHECK_LE(length, sizeof(_audioFrame.data_)); 2734 RTC_CHECK_LE(length, sizeof(audio_input->data_));
2694 if (is_muted && previous_frame_muted_) { 2735 if (is_muted && previous_frame_muted_) {
2695 rms_level_.AnalyzeMuted(length); 2736 rms_level_.AnalyzeMuted(length);
the sun 2017/03/28 13:28:29 Declare ACCESS_ON for rms_level_
henrika_webrtc 2017/03/29 10:35:11 Done.
2696 } else { 2737 } else {
2697 rms_level_.Analyze( 2738 rms_level_.Analyze(
2698 rtc::ArrayView<const int16_t>(_audioFrame.data_, length)); 2739 rtc::ArrayView<const int16_t>(audio_input->data_, length));
2699 } 2740 }
2700 } 2741 }
2701 previous_frame_muted_ = is_muted; 2742 previous_frame_muted_ = is_muted;
2702
2703 return 0; 2743 return 0;
2704 } 2744 }
2705 2745
2706 uint32_t Channel::EncodeAndSend() { 2746 uint32_t Channel::EncodeAndSend(AudioFrame* audio_input) {
the sun 2017/03/28 12:57:50 Remove return value - it is unused.
henrika_webrtc 2017/03/29 10:35:11 This method is now removed.
2707 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2747 RTC_DCHECK_RUN_ON(encoder_queue_);
2708 "Channel::EncodeAndSend()"); 2748 RTC_DCHECK_LE(audio_input->num_channels_, 2);
2749 RTC_DCHECK(audio_input->samples_per_channel_);
the sun 2017/03/28 12:57:50 _GT(..., 0);
henrika_webrtc 2017/03/29 10:35:11 Done.
2709 2750
2710 assert(_audioFrame.num_channels_ <= 2); 2751 audio_input->id_ = _channelId;
the sun 2017/03/28 12:57:50 Already did that in Channel::ProcessAndEncodeAudio
tommi 2017/03/28 13:47:20 change to a dcheck_eq?
henrika_webrtc 2017/03/29 10:35:11 Done.
henrika_webrtc 2017/03/29 10:35:11 Acknowledged.
2711 if (_audioFrame.samples_per_channel_ == 0) {
2712 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2713 "Channel::EncodeAndSend() invalid audio frame");
2714 return 0xFFFFFFFF;
2715 }
2716
2717 _audioFrame.id_ = _channelId;
2718 2752
2719 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz. 2753 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
2720 2754
2721 // The ACM resamples internally. 2755 // The ACM resamples internally.
2722 _audioFrame.timestamp_ = _timeStamp; 2756 audio_input->timestamp_ = _timeStamp;
2723 // This call will trigger AudioPacketizationCallback::SendData if encoding 2757 // This call will trigger AudioPacketizationCallback::SendData if encoding
2724 // is done and payload is ready for packetization and transmission. 2758 // is done and payload is ready for packetization and transmission.
2725 // Otherwise, it will return without invoking the callback. 2759 // Otherwise, it will return without invoking the callback.
2726 if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) { 2760 if (audio_coding_->Add10MsData(*audio_input) < 0) {
2727 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId), 2761 LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId;
2728 "Channel::EncodeAndSend() ACM encoding failed");
2729 return 0xFFFFFFFF; 2762 return 0xFFFFFFFF;
2730 } 2763 }
2731 2764
2732 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_); 2765 _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_);
the sun 2017/03/28 12:57:50 Can you use ACCESS_ON() in the .h, when _timeStamp
henrika_webrtc 2017/03/29 10:35:11 Should work. Yes.
2733 return 0; 2766 return 0;
2734 } 2767 }
2735 2768
2736 void Channel::set_associate_send_channel(const ChannelOwner& channel) { 2769 void Channel::set_associate_send_channel(const ChannelOwner& channel) {
2737 RTC_DCHECK(!channel.channel() || 2770 RTC_DCHECK(!channel.channel() ||
2738 channel.channel()->ChannelId() != _channelId); 2771 channel.channel()->ChannelId() != _channelId);
2739 rtc::CritScope lock(&assoc_send_channel_lock_); 2772 rtc::CritScope lock(&assoc_send_channel_lock_);
2740 associate_send_channel_ = channel; 2773 associate_send_channel_ = channel;
2741 } 2774 }
2742 2775
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after
2832 2865
2833 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, 2866 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule,
2834 RtpReceiver** rtp_receiver) const { 2867 RtpReceiver** rtp_receiver) const {
2835 *rtpRtcpModule = _rtpRtcpModule.get(); 2868 *rtpRtcpModule = _rtpRtcpModule.get();
2836 *rtp_receiver = rtp_receiver_.get(); 2869 *rtp_receiver = rtp_receiver_.get();
2837 return 0; 2870 return 0;
2838 } 2871 }
2839 2872
2840 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use 2873 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
2841 // a shared helper. 2874 // a shared helper.
2842 int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) { 2875 int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) {
2876 RTC_DCHECK_RUN_ON(encoder_queue_);
2843 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]); 2877 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]);
2844 size_t fileSamples(0); 2878 size_t fileSamples(0);
2879 const int mixingFrequency = audio_input->sample_rate_hz_;
2845 2880
2846 { 2881 if (!input_file_player_) {
2847 rtc::CritScope cs(&_fileCritSect); 2882 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2848 2883 "Channel::MixOrReplaceAudioWithFile() fileplayer"
2849 if (!input_file_player_) { 2884 " doesnt exist");
2850 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2885 return -1;
2851 "Channel::MixOrReplaceAudioWithFile() fileplayer"
2852 " doesnt exist");
2853 return -1;
2854 }
2855
2856 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
2857 mixingFrequency) == -1) {
2858 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2859 "Channel::MixOrReplaceAudioWithFile() file mixing "
2860 "failed");
2861 return -1;
2862 }
2863 if (fileSamples == 0) {
2864 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2865 "Channel::MixOrReplaceAudioWithFile() file is ended");
2866 return 0;
2867 }
2868 } 2886 }
2869 2887
2870 assert(_audioFrame.samples_per_channel_ == fileSamples); 2888 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
the sun 2017/03/28 12:57:50 We must still hold _fileCritSect while making this
tommi 2017/03/28 13:47:19 Alternatively, we could change input_file_player_
the sun 2017/03/28 23:05:40 This code is being stripped away with the VoEFile
henrika_webrtc 2017/03/29 10:35:11 Added _fileCritSect. Lots of changes required to
2889 mixingFrequency) == -1) {
2890 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2891 "Channel::MixOrReplaceAudioWithFile() file mixing "
2892 "failed");
2893 return -1;
2894 }
2895 if (fileSamples == 0) {
2896 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2897 "Channel::MixOrReplaceAudioWithFile() file is ended");
2898 return 0;
2899 }
2900
2901 assert(audio_input->samples_per_channel_ == fileSamples);
the sun 2017/03/28 12:57:50 RTC_DCHECK
henrika_webrtc 2017/03/29 10:35:11 Done.
2871 2902
2872 if (_mixFileWithMicrophone) { 2903 if (_mixFileWithMicrophone) {
2873 // Currently file stream is always mono. 2904 // Currently file stream is always mono.
2874 // TODO(xians): Change the code when FilePlayer supports real stereo. 2905 // TODO(xians): Change the code when FilePlayer supports real stereo.
2875 MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(), 2906 MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(),
2876 1, fileSamples); 2907 1, fileSamples);
2877 } else { 2908 } else {
2878 // Replace ACM audio with file. 2909 // Replace ACM audio with file.
2879 // Currently file stream is always mono. 2910 // Currently file stream is always mono.
2880 // TODO(xians): Change the code when FilePlayer supports real stereo. 2911 // TODO(xians): Change the code when FilePlayer supports real stereo.
2881 _audioFrame.UpdateFrame( 2912 audio_input->UpdateFrame(
2882 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, 2913 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency,
2883 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); 2914 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1);
2884 } 2915 }
2885 return 0; 2916 return 0;
2886 } 2917 }
2887 2918
2888 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) { 2919 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) {
2889 assert(mixingFrequency <= 48000); 2920 assert(mixingFrequency <= 48000);
2890 2921
2891 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]); 2922 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]);
(...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after
3061 int64_t min_rtt = 0; 3092 int64_t min_rtt = 0;
3062 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != 3093 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) !=
3063 0) { 3094 0) {
3064 return 0; 3095 return 0;
3065 } 3096 }
3066 return rtt; 3097 return rtt;
3067 } 3098 }
3068 3099
3069 } // namespace voe 3100 } // namespace voe
3070 } // namespace webrtc 3101 } // namespace webrtc
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698