Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(170)

Side by Side Diff: webrtc/voice_engine/channel.cc

Issue 2665693002: Moves channel-dependent audio input processing to separate encoder task queue (Closed)
Patch Set: nit Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/voice_engine/channel.h" 11 #include "webrtc/voice_engine/channel.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 #include <utility> 14 #include <utility>
15 15
16 #include "webrtc/audio/utility/audio_frame_operations.h" 16 #include "webrtc/audio/utility/audio_frame_operations.h"
17 #include "webrtc/base/array_view.h" 17 #include "webrtc/base/array_view.h"
18 #include "webrtc/base/checks.h" 18 #include "webrtc/base/checks.h"
19 #include "webrtc/base/criticalsection.h" 19 #include "webrtc/base/criticalsection.h"
20 #include "webrtc/base/format_macros.h" 20 #include "webrtc/base/format_macros.h"
21 #include "webrtc/base/location.h" 21 #include "webrtc/base/location.h"
22 #include "webrtc/base/logging.h" 22 #include "webrtc/base/logging.h"
23 #include "webrtc/base/rate_limiter.h" 23 #include "webrtc/base/rate_limiter.h"
24 #include "webrtc/base/task_queue.h"
25 #include "webrtc/base/thread_checker.h"
24 #include "webrtc/base/timeutils.h" 26 #include "webrtc/base/timeutils.h"
25 #include "webrtc/config.h" 27 #include "webrtc/config.h"
26 #include "webrtc/logging/rtc_event_log/rtc_event_log.h" 28 #include "webrtc/logging/rtc_event_log/rtc_event_log.h"
27 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h" 29 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h"
28 #include "webrtc/modules/audio_device/include/audio_device.h" 30 #include "webrtc/modules/audio_device/include/audio_device.h"
29 #include "webrtc/modules/audio_processing/include/audio_processing.h" 31 #include "webrtc/modules/audio_processing/include/audio_processing.h"
30 #include "webrtc/modules/include/module_common_types.h" 32 #include "webrtc/modules/include/module_common_types.h"
31 #include "webrtc/modules/pacing/packet_router.h" 33 #include "webrtc/modules/pacing/packet_router.h"
32 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h" 34 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
33 #include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h" 35 #include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
(...skipping 366 matching lines...) Expand 10 before | Expand all | Expand 10 after
400 } 402 }
401 403
402 private: 404 private:
403 Channel* owner_; 405 Channel* owner_;
404 // Maps remote side ssrc to extended highest sequence number received. 406 // Maps remote side ssrc to extended highest sequence number received.
405 std::map<uint32_t, uint32_t> extended_max_sequence_number_; 407 std::map<uint32_t, uint32_t> extended_max_sequence_number_;
406 rtc::CriticalSection crit_; 408 rtc::CriticalSection crit_;
407 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_); 409 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_);
408 }; 410 };
409 411
412 class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
413 public:
414 ProcessAndEncodeAudioTask(const AudioFrame& audio_input, Channel* channel)
415 : channel_(channel) {
416 audio_input_.CopyFrom(audio_input);
417 audio_input_.id_ = channel->ChannelId();
418 }
419
420 ProcessAndEncodeAudioTask(const int16_t* audio_data,
421 int sample_rate,
422 size_t number_of_frames,
423 size_t number_of_channels,
424 Channel* channel)
425 : channel_(channel) {
426 audio_input_.id_ = channel->ChannelId();
427 audio_input_.sample_rate_hz_ = sample_rate;
428 audio_input_.num_channels_ = number_of_channels;
429 RemixAndResample(audio_data, number_of_frames, number_of_channels,
aleloi 2017/03/24 16:59:50 Can resampling be done on the queue? E.g. copy the
henrika_webrtc 2017/03/24 17:09:36 My bad. That was actually my intention. I will rew
430 sample_rate, channel->input_resampler(),
431 &audio_input_);
432 }
433
434 private:
435 bool Run() override {
436 RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
437 RTC_DCHECK(channel_);
438 LOG(INFO) << "___Run";
439 channel_->ProcessAndEncodeAudioOnTaskQueue(&audio_input_);
440 return true;
441 }
442
443 AudioFrame audio_input_;
444 Channel* const channel_;
445 };
446
410 int32_t Channel::SendData(FrameType frameType, 447 int32_t Channel::SendData(FrameType frameType,
411 uint8_t payloadType, 448 uint8_t payloadType,
412 uint32_t timeStamp, 449 uint32_t timeStamp,
413 const uint8_t* payloadData, 450 const uint8_t* payloadData,
414 size_t payloadSize, 451 size_t payloadSize,
415 const RTPFragmentationHeader* fragmentation) { 452 const RTPFragmentationHeader* fragmentation) {
453 RTC_DCHECK_RUN_ON(encoder_queue_);
416 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 454 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
417 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u," 455 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
418 " payloadSize=%" PRIuS ", fragmentation=0x%x)", 456 " payloadSize=%" PRIuS ", fragmentation=0x%x)",
419 frameType, payloadType, timeStamp, payloadSize, fragmentation); 457 frameType, payloadType, timeStamp, payloadSize, fragmentation);
420 458
421 if (_includeAudioLevelIndication) { 459 if (_includeAudioLevelIndication) {
422 // Store current audio level in the RTP/RTCP module. 460 // Store current audio level in the RTP/RTCP module.
423 // The level will be used in combination with voice-activity state 461 // The level will be used in combination with voice-activity state
424 // (frameType) to add an RTP header extension 462 // (frameType) to add an RTP header extension
425 _rtpRtcpModule->SetAudioLevel(rms_level_.Average()); 463 _rtpRtcpModule->SetAudioLevel(rms_level_.Average());
426 } 464 }
427 465
428 // Push data from ACM to RTP/RTCP-module to deliver audio frame for 466 // Push data from ACM to RTP/RTCP-module to deliver audio frame for
429 // packetization. 467 // packetization.
430 // This call will trigger Transport::SendPacket() from the RTP/RTCP module. 468 // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
431 if (!_rtpRtcpModule->SendOutgoingData( 469 if (!_rtpRtcpModule->SendOutgoingData(
432 (FrameType&)frameType, payloadType, timeStamp, 470 (FrameType&)frameType, payloadType, timeStamp,
433 // Leaving the time when this frame was 471 // Leaving the time when this frame was
434 // received from the capture device as 472 // received from the capture device as
435 // undefined for voice for now. 473 // undefined for voice for now.
436 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) { 474 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) {
437 _engineStatisticsPtr->SetLastError( 475 _engineStatisticsPtr->SetLastError(
438 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, 476 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
439 "Channel::SendData() failed to send data to RTP/RTCP module"); 477 "Channel::SendData() failed to send data to RTP/RTCP module");
440 return -1; 478 return -1;
441 } 479 }
442 480
443 _lastLocalTimeStamp = timeStamp; 481 _lastLocalTimeStamp = timeStamp;
444 _lastPayloadType = payloadType; 482 _lastPayloadType = payloadType;
445
446 return 0; 483 return 0;
447 } 484 }
448 485
449 bool Channel::SendRtp(const uint8_t* data, 486 bool Channel::SendRtp(const uint8_t* data,
450 size_t len, 487 size_t len,
451 const PacketOptions& options) { 488 const PacketOptions& options) {
452 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 489 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
453 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len); 490 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len);
454 491
455 rtc::CritScope cs(&_callbackCritSect); 492 rtc::CritScope cs(&_callbackCritSect);
(...skipping 420 matching lines...) Expand 10 before | Expand all | Expand 10 after
876 send_sequence_number_(0), 913 send_sequence_number_(0),
877 rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()), 914 rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
878 capture_start_rtp_time_stamp_(-1), 915 capture_start_rtp_time_stamp_(-1),
879 capture_start_ntp_time_ms_(-1), 916 capture_start_ntp_time_ms_(-1),
880 _engineStatisticsPtr(NULL), 917 _engineStatisticsPtr(NULL),
881 _outputMixerPtr(NULL), 918 _outputMixerPtr(NULL),
882 _moduleProcessThreadPtr(NULL), 919 _moduleProcessThreadPtr(NULL),
883 _audioDeviceModulePtr(NULL), 920 _audioDeviceModulePtr(NULL),
884 _voiceEngineObserverPtr(NULL), 921 _voiceEngineObserverPtr(NULL),
885 _callbackCritSectPtr(NULL), 922 _callbackCritSectPtr(NULL),
923 encoder_queue_(nullptr),
886 _transportPtr(NULL), 924 _transportPtr(NULL),
887 input_mute_(false), 925 input_mute_(false),
888 previous_frame_muted_(false), 926 previous_frame_muted_(false),
889 _outputGain(1.0f), 927 _outputGain(1.0f),
890 _mixFileWithMicrophone(false), 928 _mixFileWithMicrophone(false),
891 _lastLocalTimeStamp(0), 929 _lastLocalTimeStamp(0),
892 _lastPayloadType(0), 930 _lastPayloadType(0),
893 _includeAudioLevelIndication(false), 931 _includeAudioLevelIndication(false),
894 transport_overhead_per_packet_(0), 932 transport_overhead_per_packet_(0),
895 rtp_overhead_per_packet_(0), 933 rtp_overhead_per_packet_(0),
896 _outputSpeechType(AudioFrame::kNormalSpeech), 934 _outputSpeechType(AudioFrame::kNormalSpeech),
897 restored_packet_in_use_(false), 935 restored_packet_in_use_(false),
898 rtcp_observer_(new VoERtcpObserver(this)), 936 rtcp_observer_(new VoERtcpObserver(this)),
899 associate_send_channel_(ChannelOwner(nullptr)), 937 associate_send_channel_(ChannelOwner(nullptr)),
900 pacing_enabled_(config.enable_voice_pacing), 938 pacing_enabled_(config.enable_voice_pacing),
901 feedback_observer_proxy_(new TransportFeedbackProxy()), 939 feedback_observer_proxy_(new TransportFeedbackProxy()),
902 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), 940 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()),
903 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()), 941 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()),
904 retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(), 942 retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(),
905 kMaxRetransmissionWindowMs)), 943 kMaxRetransmissionWindowMs)),
906 decoder_factory_(config.acm_config.decoder_factory), 944 decoder_factory_(config.acm_config.decoder_factory),
907 // TODO(elad.alon): Subsequent CL experiments with PLR source. 945 // TODO(elad.alon): Subsequent CL experiments with PLR source.
908 use_twcc_plr_for_ana_(false) { 946 use_twcc_plr_for_ana_(false),
947 stop_send_event_(true /* manual_reset */, false) {
948 LOG(INFO) << "___ctor: " << ChannelId();
909 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId), 949 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId),
910 "Channel::Channel() - ctor"); 950 "Channel::Channel() - ctor");
911 AudioCodingModule::Config acm_config(config.acm_config); 951 AudioCodingModule::Config acm_config(config.acm_config);
912 acm_config.id = VoEModuleId(instanceId, channelId); 952 acm_config.id = VoEModuleId(instanceId, channelId);
913 acm_config.neteq_config.enable_muted_state = true; 953 acm_config.neteq_config.enable_muted_state = true;
914 audio_coding_.reset(AudioCodingModule::Create(acm_config)); 954 audio_coding_.reset(AudioCodingModule::Create(acm_config));
915 955
916 _outputAudioLevel.Clear(); 956 _outputAudioLevel.Clear();
917 957
918 RtpRtcp::Configuration configuration; 958 RtpRtcp::Configuration configuration;
(...skipping 11 matching lines...) Expand all
930 configuration.event_log = &(*event_log_proxy_); 970 configuration.event_log = &(*event_log_proxy_);
931 configuration.rtt_stats = &(*rtcp_rtt_stats_proxy_); 971 configuration.rtt_stats = &(*rtcp_rtt_stats_proxy_);
932 configuration.retransmission_rate_limiter = 972 configuration.retransmission_rate_limiter =
933 retransmission_rate_limiter_.get(); 973 retransmission_rate_limiter_.get();
934 974
935 _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration)); 975 _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
936 _rtpRtcpModule->SetSendingMediaStatus(false); 976 _rtpRtcpModule->SetSendingMediaStatus(false);
937 } 977 }
938 978
939 Channel::~Channel() { 979 Channel::~Channel() {
980 LOG(INFO) << "___dtor: " << ChannelId();
981 // If sending ever has been activated, ensure that StopSend() has been called
982 // to flush out any pending tasks in the encoder queue.
983 if (channel_state_.Get().sending_has_been_activated) {
984 RTC_DCHECK(stop_send_event_.Wait(0))
985 << "Must call StopSend() before destruction to clean up pending tasks";
986 }
940 RTC_DCHECK(!channel_state_.Get().sending); 987 RTC_DCHECK(!channel_state_.Get().sending);
941 RTC_DCHECK(!channel_state_.Get().playing); 988 RTC_DCHECK(!channel_state_.Get().playing);
942 } 989 }
943 990
944 int32_t Channel::Init() { 991 int32_t Channel::Init() {
945 RTC_DCHECK(construction_thread_.CalledOnValidThread()); 992 RTC_DCHECK(construction_thread_.CalledOnValidThread());
946 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 993 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
947 "Channel::Init()"); 994 "Channel::Init()");
948 995
949 channel_state_.Reset(); 996 channel_state_.Reset();
(...skipping 142 matching lines...) Expand 10 before | Expand all | Expand 10 after
1092 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()); 1139 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
1093 1140
1094 // End of modules shutdown 1141 // End of modules shutdown
1095 } 1142 }
1096 1143
1097 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, 1144 int32_t Channel::SetEngineInformation(Statistics& engineStatistics,
1098 OutputMixer& outputMixer, 1145 OutputMixer& outputMixer,
1099 ProcessThread& moduleProcessThread, 1146 ProcessThread& moduleProcessThread,
1100 AudioDeviceModule& audioDeviceModule, 1147 AudioDeviceModule& audioDeviceModule,
1101 VoiceEngineObserver* voiceEngineObserver, 1148 VoiceEngineObserver* voiceEngineObserver,
1102 rtc::CriticalSection* callbackCritSect) { 1149 rtc::CriticalSection* callbackCritSect,
1150 rtc::TaskQueue* encoder_queue) {
1103 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 1151 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1104 "Channel::SetEngineInformation()"); 1152 "Channel::SetEngineInformation()");
1153 RTC_DCHECK(encoder_queue);
1105 _engineStatisticsPtr = &engineStatistics; 1154 _engineStatisticsPtr = &engineStatistics;
1106 _outputMixerPtr = &outputMixer; 1155 _outputMixerPtr = &outputMixer;
1107 _moduleProcessThreadPtr = &moduleProcessThread; 1156 _moduleProcessThreadPtr = &moduleProcessThread;
1108 _audioDeviceModulePtr = &audioDeviceModule; 1157 _audioDeviceModulePtr = &audioDeviceModule;
1109 _voiceEngineObserverPtr = voiceEngineObserver; 1158 _voiceEngineObserverPtr = voiceEngineObserver;
1110 _callbackCritSectPtr = callbackCritSect; 1159 _callbackCritSectPtr = callbackCritSect;
1160 encoder_queue_ = encoder_queue;
1111 return 0; 1161 return 0;
1112 } 1162 }
1113 1163
1114 int32_t Channel::UpdateLocalTimeStamp() {
1115 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
1116 return 0;
1117 }
1118
aleloi 2017/03/24 16:59:50 This is a cleanup of dead code, right?
henrika_webrtc 2017/03/24 17:09:36 Honestly don't know how it ended up in this CL. Mi
1119 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) { 1164 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) {
1120 rtc::CritScope cs(&_callbackCritSect); 1165 rtc::CritScope cs(&_callbackCritSect);
1121 audio_sink_ = std::move(sink); 1166 audio_sink_ = std::move(sink);
1122 } 1167 }
1123 1168
1124 const rtc::scoped_refptr<AudioDecoderFactory>& 1169 const rtc::scoped_refptr<AudioDecoderFactory>&
1125 Channel::GetAudioDecoderFactory() const { 1170 Channel::GetAudioDecoderFactory() const {
1126 return decoder_factory_; 1171 return decoder_factory_;
1127 } 1172 }
1128 1173
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after
1196 return 0; 1241 return 0;
1197 } 1242 }
1198 1243
1199 int32_t Channel::StopSend() { 1244 int32_t Channel::StopSend() {
1200 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 1245 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1201 "Channel::StopSend()"); 1246 "Channel::StopSend()");
1202 if (!channel_state_.Get().sending) { 1247 if (!channel_state_.Get().sending) {
1203 return 0; 1248 return 0;
1204 } 1249 }
1205 channel_state_.SetSending(false); 1250 channel_state_.SetSending(false);
1251 LOG(INFO) << "___StopSend: " << ChannelId();
1252
1253 // Post a task to the encoder thread which sets an event when the task is
1254 // executed. We know that no more encoding tasks will be added to the task
1255 // queue for this channel since sending is now deactivated. It means that,
 1256 // if we wait for the event to be set, we know that no more pending tasks
 1257 // exist and it is therefore guaranteed that the task queue will never try
 1258 // to access an invalid channel object.
1259 encoder_queue_->PostTask([this] {
1260 RTC_DCHECK_RUN_ON(encoder_queue_);
1261 stop_send_event_.Set();
1262 });
1263 stop_send_event_.Wait(rtc::Event::kForever);
1206 1264
1207 // Store the sequence number to be able to pick up the same sequence for 1265 // Store the sequence number to be able to pick up the same sequence for
1208 // the next StartSend(). This is needed for restarting device, otherwise 1266 // the next StartSend(). This is needed for restarting device, otherwise
1209 // it might cause libSRTP to complain about packets being replayed. 1267 // it might cause libSRTP to complain about packets being replayed.
1210 // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring 1268 // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring
1211 // CL is landed. See issue 1269 // CL is landed. See issue
1212 // https://code.google.com/p/webrtc/issues/detail?id=2111 . 1270 // https://code.google.com/p/webrtc/issues/detail?id=2111 .
1213 send_sequence_number_ = _rtpRtcpModule->SequenceNumber(); 1271 send_sequence_number_ = _rtpRtcpModule->SequenceNumber();
1214 1272
1215 // Reset sending SSRC and sequence number and triggers direct transmission 1273 // Reset sending SSRC and sequence number and triggers direct transmission
(...skipping 1396 matching lines...) Expand 10 before | Expand all | Expand 10 after
2612 audio_coding_->EnableNack(maxNumberOfPackets); 2670 audio_coding_->EnableNack(maxNumberOfPackets);
2613 else 2671 else
2614 audio_coding_->DisableNack(); 2672 audio_coding_->DisableNack();
2615 } 2673 }
2616 2674
2617 // Called when we are missing one or more packets. 2675 // Called when we are missing one or more packets.
2618 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) { 2676 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
2619 return _rtpRtcpModule->SendNACK(sequence_numbers, length); 2677 return _rtpRtcpModule->SendNACK(sequence_numbers, length);
2620 } 2678 }
2621 2679
2622 uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) { 2680 void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
2623 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2681 RTC_DCHECK(encoder_queue_);
2624 "Channel::Demultiplex()"); 2682 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
2625 _audioFrame.CopyFrom(audioFrame); 2683 new ProcessAndEncodeAudioTask(audio_input, this)));
2626 _audioFrame.id_ = _channelId;
2627 return 0;
2628 } 2684 }
2629 2685
2630 void Channel::Demultiplex(const int16_t* audio_data, 2686 void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
2631 int sample_rate, 2687 int sample_rate,
2632 size_t number_of_frames, 2688 size_t number_of_frames,
2633 size_t number_of_channels) { 2689 size_t number_of_channels) {
2690 RTC_DCHECK(encoder_queue_);
2634 CodecInst codec; 2691 CodecInst codec;
2635 GetSendCodec(codec); 2692 GetSendCodec(codec);
2636 2693 const int sample_rate_hz = std::min(codec.plfreq, sample_rate);
2637 // Never upsample or upmix the capture signal here. This should be done at the 2694 const size_t num_channels = std::min(number_of_channels, codec.channels);
2638 // end of the send chain. 2695 encoder_queue_->PostTask(
2639 _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate); 2696 std::unique_ptr<rtc::QueuedTask>(new ProcessAndEncodeAudioTask(
2640 _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels); 2697 audio_data, sample_rate_hz, number_of_frames, num_channels, this)));
2641 RemixAndResample(audio_data, number_of_frames, number_of_channels,
2642 sample_rate, &input_resampler_, &_audioFrame);
2643 } 2698 }
2644 2699
2645 uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) { 2700 void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
2646 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2701 RTC_DCHECK_RUN_ON(encoder_queue_);
2647 "Channel::PrepareEncodeAndSend()"); 2702 PrepareEncodeAndSend(audio_input);
2703 EncodeAndSend(audio_input);
2704 }
2648 2705
2649 if (_audioFrame.samples_per_channel_ == 0) { 2706 uint32_t Channel::PrepareEncodeAndSend(AudioFrame* audio_input) {
2650 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2707 RTC_DCHECK_RUN_ON(encoder_queue_);
2651 "Channel::PrepareEncodeAndSend() invalid audio frame"); 2708 RTC_DCHECK(audio_input->samples_per_channel_);
2652 return 0xFFFFFFFF;
2653 }
2654 2709
2655 if (channel_state_.Get().input_file_playing) { 2710 if (channel_state_.Get().input_file_playing) {
2656 MixOrReplaceAudioWithFile(mixingFrequency); 2711 MixOrReplaceAudioWithFile(audio_input);
2657 } 2712 }
2658 2713
2659 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock. 2714 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock.
2660 AudioFrameOperations::Mute(&_audioFrame, previous_frame_muted_, is_muted); 2715 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);
2661 2716
2662 if (_includeAudioLevelIndication) { 2717 if (_includeAudioLevelIndication) {
2663 size_t length = 2718 size_t length =
2664 _audioFrame.samples_per_channel_ * _audioFrame.num_channels_; 2719 audio_input->samples_per_channel_ * audio_input->num_channels_;
2665 RTC_CHECK_LE(length, sizeof(_audioFrame.data_)); 2720 RTC_CHECK_LE(length, sizeof(audio_input->data_));
2666 if (is_muted && previous_frame_muted_) { 2721 if (is_muted && previous_frame_muted_) {
2667 rms_level_.AnalyzeMuted(length); 2722 rms_level_.AnalyzeMuted(length);
2668 } else { 2723 } else {
2669 rms_level_.Analyze( 2724 rms_level_.Analyze(
2670 rtc::ArrayView<const int16_t>(_audioFrame.data_, length)); 2725 rtc::ArrayView<const int16_t>(audio_input->data_, length));
2671 } 2726 }
2672 } 2727 }
2673 previous_frame_muted_ = is_muted; 2728 previous_frame_muted_ = is_muted;
2674
2675 return 0; 2729 return 0;
2676 } 2730 }
2677 2731
2678 uint32_t Channel::EncodeAndSend() { 2732 uint32_t Channel::EncodeAndSend(AudioFrame* audio_input) {
2679 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2733 RTC_DCHECK_RUN_ON(encoder_queue_);
2680 "Channel::EncodeAndSend()"); 2734 RTC_DCHECK_LE(audio_input->num_channels_, 2);
2735 RTC_DCHECK(audio_input->samples_per_channel_);
2681 2736
2682 assert(_audioFrame.num_channels_ <= 2); 2737 audio_input->id_ = _channelId;
2683 if (_audioFrame.samples_per_channel_ == 0) {
2684 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2685 "Channel::EncodeAndSend() invalid audio frame");
2686 return 0xFFFFFFFF;
2687 }
2688
2689 _audioFrame.id_ = _channelId;
2690 2738
2691 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz. 2739 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
2692 2740
2693 // The ACM resamples internally. 2741 // The ACM resamples internally.
2694 _audioFrame.timestamp_ = _timeStamp; 2742 audio_input->timestamp_ = _timeStamp;
2695 // This call will trigger AudioPacketizationCallback::SendData if encoding 2743 // This call will trigger AudioPacketizationCallback::SendData if encoding
2696 // is done and payload is ready for packetization and transmission. 2744 // is done and payload is ready for packetization and transmission.
2697 // Otherwise, it will return without invoking the callback. 2745 // Otherwise, it will return without invoking the callback.
2698 if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) { 2746 if (audio_coding_->Add10MsData(*audio_input) < 0) {
2699 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId), 2747 LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId;
2700 "Channel::EncodeAndSend() ACM encoding failed");
2701 return 0xFFFFFFFF; 2748 return 0xFFFFFFFF;
2702 } 2749 }
2703 2750
2704 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_); 2751 _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_);
2705 return 0; 2752 return 0;
2706 } 2753 }
2707 2754
2708 void Channel::set_associate_send_channel(const ChannelOwner& channel) { 2755 void Channel::set_associate_send_channel(const ChannelOwner& channel) {
2709 RTC_DCHECK(!channel.channel() || 2756 RTC_DCHECK(!channel.channel() ||
2710 channel.channel()->ChannelId() != _channelId); 2757 channel.channel()->ChannelId() != _channelId);
2711 rtc::CritScope lock(&assoc_send_channel_lock_); 2758 rtc::CritScope lock(&assoc_send_channel_lock_);
2712 associate_send_channel_ = channel; 2759 associate_send_channel_ = channel;
2713 } 2760 }
2714 2761
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after
2804 2851
2805 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, 2852 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule,
2806 RtpReceiver** rtp_receiver) const { 2853 RtpReceiver** rtp_receiver) const {
2807 *rtpRtcpModule = _rtpRtcpModule.get(); 2854 *rtpRtcpModule = _rtpRtcpModule.get();
2808 *rtp_receiver = rtp_receiver_.get(); 2855 *rtp_receiver = rtp_receiver_.get();
2809 return 0; 2856 return 0;
2810 } 2857 }
2811 2858
2812 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use 2859 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
2813 // a shared helper. 2860 // a shared helper.
2814 int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) { 2861 int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) {
2862 RTC_DCHECK_RUN_ON(encoder_queue_);
2815 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]); 2863 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]);
2816 size_t fileSamples(0); 2864 size_t fileSamples(0);
2865 const int mixingFrequency = audio_input->sample_rate_hz_;
2817 2866
2818 { 2867 if (!input_file_player_) {
2819 rtc::CritScope cs(&_fileCritSect); 2868 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2820 2869 "Channel::MixOrReplaceAudioWithFile() fileplayer"
2821 if (!input_file_player_) { 2870 " doesnt exist");
2822 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2871 return -1;
2823 "Channel::MixOrReplaceAudioWithFile() fileplayer"
2824 " doesnt exist");
2825 return -1;
2826 }
2827
2828 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
2829 mixingFrequency) == -1) {
2830 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2831 "Channel::MixOrReplaceAudioWithFile() file mixing "
2832 "failed");
2833 return -1;
2834 }
2835 if (fileSamples == 0) {
2836 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2837 "Channel::MixOrReplaceAudioWithFile() file is ended");
2838 return 0;
2839 }
2840 } 2872 }
2841 2873
2842 assert(_audioFrame.samples_per_channel_ == fileSamples); 2874 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
2875 mixingFrequency) == -1) {
2876 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2877 "Channel::MixOrReplaceAudioWithFile() file mixing "
2878 "failed");
2879 return -1;
2880 }
2881 if (fileSamples == 0) {
2882 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2883 "Channel::MixOrReplaceAudioWithFile() file is ended");
2884 return 0;
2885 }
2886
2887 assert(audio_input->samples_per_channel_ == fileSamples);
2843 2888
2844 if (_mixFileWithMicrophone) { 2889 if (_mixFileWithMicrophone) {
2845 // Currently file stream is always mono. 2890 // Currently file stream is always mono.
2846 // TODO(xians): Change the code when FilePlayer supports real stereo. 2891 // TODO(xians): Change the code when FilePlayer supports real stereo.
2847 MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(), 2892 MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(),
2848 1, fileSamples); 2893 1, fileSamples);
2849 } else { 2894 } else {
2850 // Replace ACM audio with file. 2895 // Replace ACM audio with file.
2851 // Currently file stream is always mono. 2896 // Currently file stream is always mono.
2852 // TODO(xians): Change the code when FilePlayer supports real stereo. 2897 // TODO(xians): Change the code when FilePlayer supports real stereo.
2853 _audioFrame.UpdateFrame( 2898 audio_input->UpdateFrame(
2854 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, 2899 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency,
2855 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); 2900 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1);
2856 } 2901 }
2857 return 0; 2902 return 0;
2858 } 2903 }
2859 2904
2860 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) { 2905 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) {
2861 assert(mixingFrequency <= 48000); 2906 assert(mixingFrequency <= 48000);
2862 2907
2863 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]); 2908 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]);
(...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after
3033 int64_t min_rtt = 0; 3078 int64_t min_rtt = 0;
3034 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != 3079 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) !=
3035 0) { 3080 0) {
3036 return 0; 3081 return 0;
3037 } 3082 }
3038 return rtt; 3083 return rtt;
3039 } 3084 }
3040 3085
3041 } // namespace voe 3086 } // namespace voe
3042 } // namespace webrtc 3087 } // namespace webrtc
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698