Chromium Code Reviews

Side by Side Diff: webrtc/voice_engine/channel.cc

Issue 2665693002: Moves channel-dependent audio input processing to separate encoder task queue (Closed)
Patch Set: Removed debug logs in ADB Created 3 years, 8 months ago
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/voice_engine/channel.h" 11 #include "webrtc/voice_engine/channel.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 #include <utility> 14 #include <utility>
15 15
16 #include "webrtc/audio/utility/audio_frame_operations.h" 16 #include "webrtc/audio/utility/audio_frame_operations.h"
17 #include "webrtc/base/array_view.h" 17 #include "webrtc/base/array_view.h"
18 #include "webrtc/base/checks.h" 18 #include "webrtc/base/checks.h"
19 #include "webrtc/base/criticalsection.h" 19 #include "webrtc/base/criticalsection.h"
20 #include "webrtc/base/format_macros.h" 20 #include "webrtc/base/format_macros.h"
21 #include "webrtc/base/location.h" 21 #include "webrtc/base/location.h"
22 #include "webrtc/base/logging.h" 22 #include "webrtc/base/logging.h"
23 #include "webrtc/base/rate_limiter.h" 23 #include "webrtc/base/rate_limiter.h"
24 #include "webrtc/base/task_queue.h"
25 #include "webrtc/base/thread_checker.h"
24 #include "webrtc/base/timeutils.h" 26 #include "webrtc/base/timeutils.h"
25 #include "webrtc/call/rtp_transport_controller_send.h" 27 #include "webrtc/call/rtp_transport_controller_send.h"
26 #include "webrtc/config.h" 28 #include "webrtc/config.h"
27 #include "webrtc/logging/rtc_event_log/rtc_event_log.h" 29 #include "webrtc/logging/rtc_event_log/rtc_event_log.h"
28 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h" 30 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h"
29 #include "webrtc/modules/audio_device/include/audio_device.h" 31 #include "webrtc/modules/audio_device/include/audio_device.h"
30 #include "webrtc/modules/audio_processing/include/audio_processing.h" 32 #include "webrtc/modules/audio_processing/include/audio_processing.h"
31 #include "webrtc/modules/include/module_common_types.h" 33 #include "webrtc/modules/include/module_common_types.h"
32 #include "webrtc/modules/pacing/packet_router.h" 34 #include "webrtc/modules/pacing/packet_router.h"
33 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h" 35 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
(...skipping 368 matching lines...)
402 } 404 }
403 405
404 private: 406 private:
405 Channel* owner_; 407 Channel* owner_;
406 // Maps remote side ssrc to extended highest sequence number received. 408 // Maps remote side ssrc to extended highest sequence number received.
407 std::map<uint32_t, uint32_t> extended_max_sequence_number_; 409 std::map<uint32_t, uint32_t> extended_max_sequence_number_;
408 rtc::CriticalSection crit_; 410 rtc::CriticalSection crit_;
409 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_); 411 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_);
410 }; 412 };
411 413
414 class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
415 public:
416 ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_frame,
417 Channel* channel)
418 : audio_frame_(std::move(audio_frame)), channel_(channel) {
419 RTC_DCHECK(channel_);
420 }
421
422 private:
423 bool Run() override {
424 RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
425 channel_->ProcessAndEncodeAudioOnTaskQueue(audio_frame_.get());
426 return true;
427 }
428
429 std::unique_ptr<AudioFrame> audio_frame_;
430 Channel* const channel_;
431 };
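The ProcessAndEncodeAudioTask above follows the rtc::QueuedTask contract: Run() executes on the queue's thread, and returning true tells the queue to delete the task when it is done. A minimal sketch of that contract in isolation, using only the rtc::TaskQueue API this CL already relies on (ExampleTask and PostExample are illustrative names, not part of the patch):

#include <memory>

#include "webrtc/base/task_queue.h"

class ExampleTask : public rtc::QueuedTask {
 private:
  bool Run() override {
    // Executes on the task queue's thread. Returning true hands ownership
    // back to the queue, which deletes the task after Run() completes.
    return true;
  }
};

void PostExample(rtc::TaskQueue* queue) {
  // Ownership of the task transfers to the queue on posting.
  queue->PostTask(std::unique_ptr<rtc::QueuedTask>(new ExampleTask()));
}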
432
412 int32_t Channel::SendData(FrameType frameType, 433 int32_t Channel::SendData(FrameType frameType,
413 uint8_t payloadType, 434 uint8_t payloadType,
414 uint32_t timeStamp, 435 uint32_t timeStamp,
415 const uint8_t* payloadData, 436 const uint8_t* payloadData,
416 size_t payloadSize, 437 size_t payloadSize,
417 const RTPFragmentationHeader* fragmentation) { 438 const RTPFragmentationHeader* fragmentation) {
439 RTC_DCHECK_RUN_ON(encoder_queue_);
418 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 440 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
419 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u," 441 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
420 " payloadSize=%" PRIuS ", fragmentation=0x%x)", 442 " payloadSize=%" PRIuS ", fragmentation=0x%x)",
421 frameType, payloadType, timeStamp, payloadSize, fragmentation); 443 frameType, payloadType, timeStamp, payloadSize, fragmentation);
422 444
423 if (_includeAudioLevelIndication) { 445 if (_includeAudioLevelIndication) {
424 // Store current audio level in the RTP/RTCP module. 446 // Store current audio level in the RTP/RTCP module.
425 // The level will be used in combination with voice-activity state 447 // The level will be used in combination with voice-activity state
426 // (frameType) to add an RTP header extension 448 // (frameType) to add an RTP header extension
427 _rtpRtcpModule->SetAudioLevel(rms_level_.Average()); 449 _rtpRtcpModule->SetAudioLevel(rms_level_.Average());
428 } 450 }
429 451
430 // Push data from ACM to RTP/RTCP-module to deliver audio frame for 452 // Push data from ACM to RTP/RTCP-module to deliver audio frame for
431 // packetization. 453 // packetization.
432 // This call will trigger Transport::SendPacket() from the RTP/RTCP module. 454 // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
433 if (!_rtpRtcpModule->SendOutgoingData( 455 if (!_rtpRtcpModule->SendOutgoingData(
434 (FrameType&)frameType, payloadType, timeStamp, 456 (FrameType&)frameType, payloadType, timeStamp,
435 // Leaving the time when this frame was 457 // Leaving the time when this frame was
436 // received from the capture device as 458 // received from the capture device as
437 // undefined for voice for now. 459 // undefined for voice for now.
438 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) { 460 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) {
439 _engineStatisticsPtr->SetLastError( 461 _engineStatisticsPtr->SetLastError(
440 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, 462 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
441 "Channel::SendData() failed to send data to RTP/RTCP module"); 463 "Channel::SendData() failed to send data to RTP/RTCP module");
442 return -1; 464 return -1;
443 } 465 }
444 466
445 _lastLocalTimeStamp = timeStamp;
446 _lastPayloadType = payloadType;
447
448 return 0; 467 return 0;
449 } 468 }
450 469
451 bool Channel::SendRtp(const uint8_t* data, 470 bool Channel::SendRtp(const uint8_t* data,
452 size_t len, 471 size_t len,
453 const PacketOptions& options) { 472 const PacketOptions& options) {
454 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 473 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
455 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len); 474 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len);
456 475
457 rtc::CritScope cs(&_callbackCritSect); 476 rtc::CritScope cs(&_callbackCritSect);
(...skipping 314 matching lines...)
772 if (output_file_player_) { 791 if (output_file_player_) {
773 if (output_file_player_->Frequency() > highestNeeded) { 792 if (output_file_player_->Frequency() > highestNeeded) {
774 highestNeeded = output_file_player_->Frequency(); 793 highestNeeded = output_file_player_->Frequency();
775 } 794 }
776 } 795 }
777 } 796 }
778 797
779 return (highestNeeded); 798 return (highestNeeded);
780 } 799 }
781 800
782 int32_t Channel::CreateChannel( 801 int32_t Channel::CreateChannel(Channel*& channel,
783 Channel*& channel, 802 int32_t channelId,
784 int32_t channelId, 803 uint32_t instanceId,
785 uint32_t instanceId, 804 const VoEBase::ChannelConfig& config) {
786 const VoEBase::ChannelConfig& config) {
787 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), 805 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId),
788 "Channel::CreateChannel(channelId=%d, instanceId=%d)", channelId, 806 "Channel::CreateChannel(channelId=%d, instanceId=%d)", channelId,
789 instanceId); 807 instanceId);
790 808
791 channel = new Channel(channelId, instanceId, config); 809 channel = new Channel(channelId, instanceId, config);
792 if (channel == NULL) { 810 if (channel == NULL) {
793 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId), 811 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, channelId),
794 "Channel::CreateChannel() unable to allocate memory for" 812 "Channel::CreateChannel() unable to allocate memory for"
795 " channel"); 813 " channel");
796 return -1; 814 return -1;
(...skipping 86 matching lines...)
883 _outputMixerPtr(NULL), 901 _outputMixerPtr(NULL),
884 _moduleProcessThreadPtr(NULL), 902 _moduleProcessThreadPtr(NULL),
885 _audioDeviceModulePtr(NULL), 903 _audioDeviceModulePtr(NULL),
886 _voiceEngineObserverPtr(NULL), 904 _voiceEngineObserverPtr(NULL),
887 _callbackCritSectPtr(NULL), 905 _callbackCritSectPtr(NULL),
888 _transportPtr(NULL), 906 _transportPtr(NULL),
889 input_mute_(false), 907 input_mute_(false),
890 previous_frame_muted_(false), 908 previous_frame_muted_(false),
891 _outputGain(1.0f), 909 _outputGain(1.0f),
892 _mixFileWithMicrophone(false), 910 _mixFileWithMicrophone(false),
893 _lastLocalTimeStamp(0),
894 _lastPayloadType(0),
895 _includeAudioLevelIndication(false), 911 _includeAudioLevelIndication(false),
896 transport_overhead_per_packet_(0), 912 transport_overhead_per_packet_(0),
897 rtp_overhead_per_packet_(0), 913 rtp_overhead_per_packet_(0),
898 _outputSpeechType(AudioFrame::kNormalSpeech), 914 _outputSpeechType(AudioFrame::kNormalSpeech),
899 restored_packet_in_use_(false), 915 restored_packet_in_use_(false),
900 rtcp_observer_(new VoERtcpObserver(this)), 916 rtcp_observer_(new VoERtcpObserver(this)),
901 associate_send_channel_(ChannelOwner(nullptr)), 917 associate_send_channel_(ChannelOwner(nullptr)),
902 pacing_enabled_(config.enable_voice_pacing), 918 pacing_enabled_(config.enable_voice_pacing),
903 feedback_observer_proxy_(new TransportFeedbackProxy()), 919 feedback_observer_proxy_(new TransportFeedbackProxy()),
904 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), 920 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()),
(...skipping 213 matching lines...)
1118 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()); 1134 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
1119 1135
1120 // End of modules shutdown 1136 // End of modules shutdown
1121 } 1137 }
1122 1138
1123 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, 1139 int32_t Channel::SetEngineInformation(Statistics& engineStatistics,
1124 OutputMixer& outputMixer, 1140 OutputMixer& outputMixer,
1125 ProcessThread& moduleProcessThread, 1141 ProcessThread& moduleProcessThread,
1126 AudioDeviceModule& audioDeviceModule, 1142 AudioDeviceModule& audioDeviceModule,
1127 VoiceEngineObserver* voiceEngineObserver, 1143 VoiceEngineObserver* voiceEngineObserver,
1128 rtc::CriticalSection* callbackCritSect) { 1144 rtc::CriticalSection* callbackCritSect,
1145 rtc::TaskQueue* encoder_queue) {
1146 RTC_DCHECK(encoder_queue);
tommi 2017/03/31 09:44:10 should we also add: RTC_DCHECK(!encoder_queue_);
henrika_webrtc 2017/03/31 11:42:46 Wow, why not actually ;-)
1129 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 1147 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1130 "Channel::SetEngineInformation()"); 1148 "Channel::SetEngineInformation()");
1131 _engineStatisticsPtr = &engineStatistics; 1149 _engineStatisticsPtr = &engineStatistics;
1132 _outputMixerPtr = &outputMixer; 1150 _outputMixerPtr = &outputMixer;
1133 _moduleProcessThreadPtr = &moduleProcessThread; 1151 _moduleProcessThreadPtr = &moduleProcessThread;
1134 _audioDeviceModulePtr = &audioDeviceModule; 1152 _audioDeviceModulePtr = &audioDeviceModule;
1135 _voiceEngineObserverPtr = voiceEngineObserver; 1153 _voiceEngineObserverPtr = voiceEngineObserver;
1136 _callbackCritSectPtr = callbackCritSect; 1154 _callbackCritSectPtr = callbackCritSect;
1155 encoder_queue_ = encoder_queue;
1137 return 0; 1156 return 0;
1138 } 1157 }
1139 1158
1140 int32_t Channel::UpdateLocalTimeStamp() {
1141 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
1142 return 0;
1143 }
1144
1145 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) { 1159 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) {
1146 rtc::CritScope cs(&_callbackCritSect); 1160 rtc::CritScope cs(&_callbackCritSect);
1147 audio_sink_ = std::move(sink); 1161 audio_sink_ = std::move(sink);
1148 } 1162 }
1149 1163
1150 const rtc::scoped_refptr<AudioDecoderFactory>& 1164 const rtc::scoped_refptr<AudioDecoderFactory>&
1151 Channel::GetAudioDecoderFactory() const { 1165 Channel::GetAudioDecoderFactory() const {
1152 return decoder_factory_; 1166 return decoder_factory_;
1153 } 1167 }
1154 1168
(...skipping 60 matching lines...)
1215 "StartSend() RTP/RTCP failed to start sending"); 1229 "StartSend() RTP/RTCP failed to start sending");
1216 _rtpRtcpModule->SetSendingMediaStatus(false); 1230 _rtpRtcpModule->SetSendingMediaStatus(false);
1217 rtc::CritScope cs(&_callbackCritSect); 1231 rtc::CritScope cs(&_callbackCritSect);
1218 channel_state_.SetSending(false); 1232 channel_state_.SetSending(false);
1219 return -1; 1233 return -1;
1220 } 1234 }
1221 1235
1222 return 0; 1236 return 0;
1223 } 1237 }
1224 1238
1225 int32_t Channel::StopSend() { 1239 void Channel::StopSend() {
1226 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 1240 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1227 "Channel::StopSend()"); 1241 "Channel::StopSend()");
1228 if (!channel_state_.Get().sending) { 1242 if (!channel_state_.Get().sending) {
1229 return 0; 1243 return;
1230 } 1244 }
1231 channel_state_.SetSending(false); 1245 channel_state_.SetSending(false);
tommi 2017/03/31 09:44:10 It's too bad that these two methods, Get() and Set
the sun 2017/03/31 10:19:37 I totally agree that the current construct is... t
tommi 2017/03/31 10:58:52 Sounds good. My preference as far as the flag goe
the sun 2017/03/31 11:04:10 I believe your suspicion is correct...
1232 1246
1247 // Post a task to the encoder thread which sets an event when the task is
1248 // executed. We know that no more encoding tasks will be added to the task
1249 // queue for this channel since sending is now deactivated. It means that,
 1250 // if we wait for the event to be set, we know that no more pending tasks
 1251 // exist and it is therefore guaranteed that the task queue will never try
 1252 // to access an invalid channel object.
1253 RTC_DCHECK(encoder_queue_);
1254 rtc::Event flush(false, false);
1255 encoder_queue_->PostTask([&flush]() { flush.Set(); });
1256 flush.Wait(rtc::Event::kForever);
1257
1233 // Store the sequence number to be able to pick up the same sequence for 1258 // Store the sequence number to be able to pick up the same sequence for
1234 // the next StartSend(). This is needed for restarting device, otherwise 1259 // the next StartSend(). This is needed for restarting device, otherwise
1235 // it might cause libSRTP to complain about packets being replayed. 1260 // it might cause libSRTP to complain about packets being replayed.
1236 // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring 1261 // TODO(xians): Remove this workaround after RtpRtcpModule's refactoring
1237 // CL is landed. See issue 1262 // CL is landed. See issue
1238 // https://code.google.com/p/webrtc/issues/detail?id=2111 . 1263 // https://code.google.com/p/webrtc/issues/detail?id=2111 .
1239 send_sequence_number_ = _rtpRtcpModule->SequenceNumber(); 1264 send_sequence_number_ = _rtpRtcpModule->SequenceNumber();
1240 1265
1241 // Reset sending SSRC and sequence number and triggers direct transmission 1266 // Reset sending SSRC and sequence number and triggers direct transmission
1242 // of RTCP BYE 1267 // of RTCP BYE
1243 if (_rtpRtcpModule->SetSendingStatus(false) == -1) { 1268 if (_rtpRtcpModule->SetSendingStatus(false) == -1) {
1244 _engineStatisticsPtr->SetLastError( 1269 _engineStatisticsPtr->SetLastError(
1245 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, 1270 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
1246 "StartSend() RTP/RTCP failed to stop sending"); 1271 "StartSend() RTP/RTCP failed to stop sending");
1247 } 1272 }
1248 _rtpRtcpModule->SetSendingMediaStatus(false); 1273 _rtpRtcpModule->SetSendingMediaStatus(false);
1249
1250 return 0;
1251 } 1274 }
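The new StopSend() above drains the encoder queue with a marker task: tasks run in FIFO order, so once the posted lambda sets the event, every encoding task posted before it has already finished. The same idiom as a standalone sketch, assuming only the rtc::Event and rtc::TaskQueue APIs the CL itself uses (DrainQueue is an illustrative name):

#include "webrtc/base/event.h"
#include "webrtc/base/task_queue.h"

void DrainQueue(rtc::TaskQueue* queue) {
  rtc::Event done(false /* manual_reset */, false /* initially_signaled */);
  // The marker task only signals; every task posted earlier has run first.
  queue->PostTask([&done] { done.Set(); });
  // Capturing |done| by reference is safe because we block here until Set().
  done.Wait(rtc::Event::kForever);
}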
1252 1275
1253 int32_t Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) { 1276 int32_t Channel::RegisterVoiceEngineObserver(VoiceEngineObserver& observer) {
1254 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 1277 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1255 "Channel::RegisterVoiceEngineObserver()"); 1278 "Channel::RegisterVoiceEngineObserver()");
1256 rtc::CritScope cs(&_callbackCritSect); 1279 rtc::CritScope cs(&_callbackCritSect);
1257 1280
1258 if (_voiceEngineObserverPtr) { 1281 if (_voiceEngineObserverPtr) {
1259 _engineStatisticsPtr->SetLastError( 1282 _engineStatisticsPtr->SetLastError(
1260 VE_INVALID_OPERATION, kTraceError, 1283 VE_INVALID_OPERATION, kTraceError,
(...skipping 1380 matching lines...)
2641 audio_coding_->EnableNack(maxNumberOfPackets); 2664 audio_coding_->EnableNack(maxNumberOfPackets);
2642 else 2665 else
2643 audio_coding_->DisableNack(); 2666 audio_coding_->DisableNack();
2644 } 2667 }
2645 2668
2646 // Called when we are missing one or more packets. 2669 // Called when we are missing one or more packets.
2647 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) { 2670 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
2648 return _rtpRtcpModule->SendNACK(sequence_numbers, length); 2671 return _rtpRtcpModule->SendNACK(sequence_numbers, length);
2649 } 2672 }
2650 2673
2651 uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) { 2674 void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
2652 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2675 RTC_DCHECK(encoder_queue_);
tommi 2017/03/31 09:44:10 we don't DCHECK pointers that we dereference anywa
henrika_webrtc 2017/03/31 11:42:46 Acknowledged.
2653 "Channel::Demultiplex()"); 2676 RTC_DCHECK(channel_state_.Get().sending);
2654 _audioFrame.CopyFrom(audioFrame); 2677 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
2655 _audioFrame.id_ = _channelId; 2678 audio_frame->CopyFrom(audio_input);
tommi 2017/03/31 09:44:10 It feels like we should be able to avoid this. Can
the sun 2017/03/31 10:19:37 The allocation can easily be avoided by aggregatin
tommi 2017/03/31 10:58:52 yes, something like that should work. A TODO or so
henrika_webrtc 2017/03/31 11:42:46 Done.
2656 return 0; 2679 audio_frame->id_ = ChannelId();
2680 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
2681 new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
2657 } 2682 }
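On the allocation question raised in the comments above: one way to drop the per-10ms heap allocation of the AudioFrame is to aggregate the frame by value inside the task, so a single allocation covers both. This is only a sketch of that suggestion, not what this patch set lands (InlineFrameTask is an illustrative name, and a real version would need access to Channel's private members, e.g. by being a nested class like ProcessAndEncodeAudioTask):

class InlineFrameTask : public rtc::QueuedTask {
 public:
  InlineFrameTask(const AudioFrame& input, Channel* channel)
      : channel_(channel) {
    RTC_DCHECK(channel_);
    frame_.CopyFrom(input);  // Single copy into task-owned storage.
  }

 private:
  bool Run() override {
    channel_->ProcessAndEncodeAudioOnTaskQueue(&frame_);
    return true;
  }

  AudioFrame frame_;  // Aggregated by value; freed together with the task.
  Channel* const channel_;
};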
2658 2683
2659 void Channel::Demultiplex(const int16_t* audio_data, 2684 void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
2660 int sample_rate, 2685 int sample_rate,
2661 size_t number_of_frames, 2686 size_t number_of_frames,
2662 size_t number_of_channels) { 2687 size_t number_of_channels) {
2688 RTC_DCHECK(encoder_queue_);
tommi 2017/03/31 09:44:10 this pointer is used anyway below, so no need to d
henrika_webrtc 2017/03/31 11:42:47 Done.
2689 RTC_DCHECK(channel_state_.Get().sending);
2663 CodecInst codec; 2690 CodecInst codec;
2664 GetSendCodec(codec); 2691 GetSendCodec(codec);
2665 2692 std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
2666 // Never upsample or upmix the capture signal here. This should be done at the 2693 audio_frame->id_ = ChannelId();
2667 // end of the send chain. 2694 audio_frame->sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
2668 _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate); 2695 audio_frame->num_channels_ = std::min(number_of_channels, codec.channels);
2669 _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels);
2670 RemixAndResample(audio_data, number_of_frames, number_of_channels, 2696 RemixAndResample(audio_data, number_of_frames, number_of_channels,
2671 sample_rate, &input_resampler_, &_audioFrame); 2697 sample_rate, &input_resampler_, audio_frame.get());
2698 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
2699 new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
2672 } 2700 }
2673 2701
2674 uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) { 2702 void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
2675 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2703 RTC_DCHECK_RUN_ON(encoder_queue_);
2676 "Channel::PrepareEncodeAndSend()"); 2704 RTC_DCHECK_GT(audio_input->samples_per_channel_, 0);
2705 RTC_DCHECK_LE(audio_input->num_channels_, 2);
2706 RTC_DCHECK_EQ(audio_input->id_, ChannelId());
2677 2707
2678 if (_audioFrame.samples_per_channel_ == 0) { 2708 if (channel_state_.Get().input_file_playing) {
2679 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2709 MixOrReplaceAudioWithFile(audio_input);
2680 "Channel::PrepareEncodeAndSend() invalid audio frame");
2681 return 0xFFFFFFFF;
2682 } 2710 }
2683 2711
2684 if (channel_state_.Get().input_file_playing) { 2712 bool is_muted = InputMute();
2685 MixOrReplaceAudioWithFile(mixingFrequency); 2713 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);
2686 }
2687
2688 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock.
2689 AudioFrameOperations::Mute(&_audioFrame, previous_frame_muted_, is_muted);
2690 2714
2691 if (_includeAudioLevelIndication) { 2715 if (_includeAudioLevelIndication) {
2692 size_t length = 2716 size_t length =
2693 _audioFrame.samples_per_channel_ * _audioFrame.num_channels_; 2717 audio_input->samples_per_channel_ * audio_input->num_channels_;
2694 RTC_CHECK_LE(length, sizeof(_audioFrame.data_)); 2718 RTC_CHECK_LE(length, sizeof(audio_input->data_));
2695 if (is_muted && previous_frame_muted_) { 2719 if (is_muted && previous_frame_muted_) {
2696 rms_level_.AnalyzeMuted(length); 2720 rms_level_.AnalyzeMuted(length);
2697 } else { 2721 } else {
2698 rms_level_.Analyze( 2722 rms_level_.Analyze(
2699 rtc::ArrayView<const int16_t>(_audioFrame.data_, length)); 2723 rtc::ArrayView<const int16_t>(audio_input->data_, length));
2700 } 2724 }
2701 } 2725 }
2702 previous_frame_muted_ = is_muted; 2726 previous_frame_muted_ = is_muted;
2703 2727
2704 return 0; 2728 // Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
2705 }
2706
2707 uint32_t Channel::EncodeAndSend() {
2708 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
2709 "Channel::EncodeAndSend()");
2710
2711 assert(_audioFrame.num_channels_ <= 2);
2712 if (_audioFrame.samples_per_channel_ == 0) {
2713 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2714 "Channel::EncodeAndSend() invalid audio frame");
2715 return 0xFFFFFFFF;
2716 }
2717
2718 _audioFrame.id_ = _channelId;
2719
2720 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
2721 2729
2722 // The ACM resamples internally. 2730 // The ACM resamples internally.
2723 _audioFrame.timestamp_ = _timeStamp; 2731 audio_input->timestamp_ = _timeStamp;
2724 // This call will trigger AudioPacketizationCallback::SendData if encoding 2732 // This call will trigger AudioPacketizationCallback::SendData if encoding
2725 // is done and payload is ready for packetization and transmission. 2733 // is done and payload is ready for packetization and transmission.
2726 // Otherwise, it will return without invoking the callback. 2734 // Otherwise, it will return without invoking the callback.
2727 if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) { 2735 if (audio_coding_->Add10MsData(*audio_input) < 0) {
2728 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId), 2736 LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId;
2729 "Channel::EncodeAndSend() ACM encoding failed"); 2737 return;
2730 return 0xFFFFFFFF;
2731 } 2738 }
2732 2739
2733 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_); 2740 _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_);
2734 return 0;
2735 } 2741 }
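The final statement advances _timeStamp by the frame's per-channel sample count, which is how the outgoing RTP timestamp tracks capture time across 10 ms frames. A small worked example under an assumed 48 kHz capture rate:

// Assumes 10 ms frames at 48 kHz, i.e. 480 samples per channel per frame.
uint32_t timestamp = 0;
const int sample_rate_hz = 48000;
const size_t samples_per_channel = sample_rate_hz / 100;  // 480
for (int i = 0; i < 3; ++i) {
  timestamp += static_cast<uint32_t>(samples_per_channel);
}
// After three frames, timestamp == 1440.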
2736 2742
2737 void Channel::set_associate_send_channel(const ChannelOwner& channel) { 2743 void Channel::set_associate_send_channel(const ChannelOwner& channel) {
2738 RTC_DCHECK(!channel.channel() || 2744 RTC_DCHECK(!channel.channel() ||
2739 channel.channel()->ChannelId() != _channelId); 2745 channel.channel()->ChannelId() != _channelId);
2740 rtc::CritScope lock(&assoc_send_channel_lock_); 2746 rtc::CritScope lock(&assoc_send_channel_lock_);
2741 associate_send_channel_ = channel; 2747 associate_send_channel_ = channel;
2742 } 2748 }
2743 2749
2744 void Channel::DisassociateSendChannel(int channel_id) { 2750 void Channel::DisassociateSendChannel(int channel_id) {
(...skipping 88 matching lines...)
2833 2839
2834 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, 2840 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule,
2835 RtpReceiver** rtp_receiver) const { 2841 RtpReceiver** rtp_receiver) const {
2836 *rtpRtcpModule = _rtpRtcpModule.get(); 2842 *rtpRtcpModule = _rtpRtcpModule.get();
2837 *rtp_receiver = rtp_receiver_.get(); 2843 *rtp_receiver = rtp_receiver_.get();
2838 return 0; 2844 return 0;
2839 } 2845 }
2840 2846
2841 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use 2847 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
2842 // a shared helper. 2848 // a shared helper.
2843 int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) { 2849 int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) {
2850 RTC_DCHECK_RUN_ON(encoder_queue_);
2844 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]); 2851 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]);
2845 size_t fileSamples(0); 2852 size_t fileSamples(0);
2846 2853 const int mixingFrequency = audio_input->sample_rate_hz_;
2847 { 2854 {
2848 rtc::CritScope cs(&_fileCritSect); 2855 rtc::CritScope cs(&_fileCritSect);
2849 2856
2850 if (!input_file_player_) { 2857 if (!input_file_player_) {
2851 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2858 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2852 "Channel::MixOrReplaceAudioWithFile() fileplayer" 2859 "Channel::MixOrReplaceAudioWithFile() fileplayer"
2853 " doesnt exist"); 2860 " doesnt exist");
2854 return -1; 2861 return -1;
2855 } 2862 }
2856 2863
2857 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples, 2864 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
2858 mixingFrequency) == -1) { 2865 mixingFrequency) == -1) {
2859 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2866 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2860 "Channel::MixOrReplaceAudioWithFile() file mixing " 2867 "Channel::MixOrReplaceAudioWithFile() file mixing "
2861 "failed"); 2868 "failed");
2862 return -1; 2869 return -1;
2863 } 2870 }
2864 if (fileSamples == 0) { 2871 if (fileSamples == 0) {
2865 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2872 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2866 "Channel::MixOrReplaceAudioWithFile() file is ended"); 2873 "Channel::MixOrReplaceAudioWithFile() file is ended");
2867 return 0; 2874 return 0;
2868 } 2875 }
2869 } 2876 }
2870 2877
2871 assert(_audioFrame.samples_per_channel_ == fileSamples); 2878 RTC_DCHECK_EQ(audio_input->samples_per_channel_, fileSamples);
2872 2879
2873 if (_mixFileWithMicrophone) { 2880 if (_mixFileWithMicrophone) {
2874 // Currently file stream is always mono. 2881 // Currently file stream is always mono.
2875 // TODO(xians): Change the code when FilePlayer supports real stereo. 2882 // TODO(xians): Change the code when FilePlayer supports real stereo.
2876 MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(), 2883 MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(),
2877 1, fileSamples); 2884 1, fileSamples);
2878 } else { 2885 } else {
2879 // Replace ACM audio with file. 2886 // Replace ACM audio with file.
2880 // Currently file stream is always mono. 2887 // Currently file stream is always mono.
2881 // TODO(xians): Change the code when FilePlayer supports real stereo. 2888 // TODO(xians): Change the code when FilePlayer supports real stereo.
2882 _audioFrame.UpdateFrame( 2889 audio_input->UpdateFrame(
2883 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, 2890 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency,
2884 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); 2891 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1);
2885 } 2892 }
2886 return 0; 2893 return 0;
2887 } 2894 }
2888 2895
2889 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) { 2896 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) {
2890 assert(mixingFrequency <= 48000); 2897 assert(mixingFrequency <= 48000);
2891 2898
2892 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]); 2899 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]);
(...skipping 169 matching lines...)
3062 int64_t min_rtt = 0; 3069 int64_t min_rtt = 0;
3063 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != 3070 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) !=
3064 0) { 3071 0) {
3065 return 0; 3072 return 0;
3066 } 3073 }
3067 return rtt; 3074 return rtt;
3068 } 3075 }
3069 3076
3070 } // namespace voe 3077 } // namespace voe
3071 } // namespace webrtc 3078 } // namespace webrtc