Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(828)

Side by Side Diff: webrtc/voice_engine/channel.cc

Issue 2665693002: Moves channel-dependent audio input processing to separate encoder task queue (Closed)
Patch Set: Increased prio of queue Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/voice_engine/channel.h" 11 #include "webrtc/voice_engine/channel.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 #include <utility> 14 #include <utility>
15 15
16 #include "webrtc/audio/utility/audio_frame_operations.h" 16 #include "webrtc/audio/utility/audio_frame_operations.h"
17 #include "webrtc/base/array_view.h" 17 #include "webrtc/base/array_view.h"
18 #include "webrtc/base/checks.h" 18 #include "webrtc/base/checks.h"
19 #include "webrtc/base/criticalsection.h" 19 #include "webrtc/base/criticalsection.h"
20 #include "webrtc/base/format_macros.h" 20 #include "webrtc/base/format_macros.h"
21 #include "webrtc/base/location.h" 21 #include "webrtc/base/location.h"
22 #include "webrtc/base/logging.h" 22 #include "webrtc/base/logging.h"
23 #include "webrtc/base/rate_limiter.h" 23 #include "webrtc/base/rate_limiter.h"
24 #include "webrtc/base/task_queue.h"
25 #include "webrtc/base/thread_checker.h"
24 #include "webrtc/base/timeutils.h" 26 #include "webrtc/base/timeutils.h"
25 #include "webrtc/config.h" 27 #include "webrtc/config.h"
26 #include "webrtc/logging/rtc_event_log/rtc_event_log.h" 28 #include "webrtc/logging/rtc_event_log/rtc_event_log.h"
27 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h" 29 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h"
28 #include "webrtc/modules/audio_device/include/audio_device.h" 30 #include "webrtc/modules/audio_device/include/audio_device.h"
29 #include "webrtc/modules/audio_processing/include/audio_processing.h" 31 #include "webrtc/modules/audio_processing/include/audio_processing.h"
30 #include "webrtc/modules/include/module_common_types.h" 32 #include "webrtc/modules/include/module_common_types.h"
31 #include "webrtc/modules/pacing/packet_router.h" 33 #include "webrtc/modules/pacing/packet_router.h"
32 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h" 34 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
33 #include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h" 35 #include "webrtc/modules/rtp_rtcp/include/rtp_payload_registry.h"
34 #include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h" 36 #include "webrtc/modules/rtp_rtcp/include/rtp_receiver.h"
35 #include "webrtc/modules/rtp_rtcp/source/rtp_packet_received.h" 37 #include "webrtc/modules/rtp_rtcp/source/rtp_packet_received.h"
36 #include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h" 38 #include "webrtc/modules/rtp_rtcp/source/rtp_receiver_strategy.h"
37 #include "webrtc/modules/utility/include/process_thread.h" 39 #include "webrtc/modules/utility/include/process_thread.h"
38 #include "webrtc/system_wrappers/include/trace.h" 40 #include "webrtc/system_wrappers/include/trace.h"
39 #include "webrtc/voice_engine/include/voe_rtp_rtcp.h" 41 #include "webrtc/voice_engine/include/voe_rtp_rtcp.h"
40 #include "webrtc/voice_engine/output_mixer.h" 42 #include "webrtc/voice_engine/output_mixer.h"
41 #include "webrtc/voice_engine/statistics.h" 43 #include "webrtc/voice_engine/statistics.h"
42 #include "webrtc/voice_engine/utility.h" 44 #include "webrtc/voice_engine/utility.h"
43 45
44 namespace webrtc { 46 namespace webrtc {
45 namespace voe { 47 namespace voe {
46 48
47 namespace { 49 namespace {
48 50
49 constexpr int64_t kMaxRetransmissionWindowMs = 1000; 51 constexpr int64_t kMaxRetransmissionWindowMs = 1000;
50 constexpr int64_t kMinRetransmissionWindowMs = 30; 52 constexpr int64_t kMinRetransmissionWindowMs = 30;
53 // Number of preallocated audio frames in the pool of audio frames.
54 // Local tests on Android devices have shown that we never reduce the size of
55 // the pool below 5.
56 constexpr size_t kAudioFramePoolSize = 10;
51 57
52 } // namespace 58 } // namespace
53 59
54 const int kTelephoneEventAttenuationdB = 10; 60 const int kTelephoneEventAttenuationdB = 10;
55 61
56 class RtcEventLogProxy final : public webrtc::RtcEventLog { 62 class RtcEventLogProxy final : public webrtc::RtcEventLog {
57 public: 63 public:
58 RtcEventLogProxy() : event_log_(nullptr) {} 64 RtcEventLogProxy() : event_log_(nullptr) {}
59 65
60 bool StartLogging(const std::string& file_name, 66 bool StartLogging(const std::string& file_name,
(...skipping 338 matching lines...) Expand 10 before | Expand all | Expand 10 after
399 } 405 }
400 406
401 private: 407 private:
402 Channel* owner_; 408 Channel* owner_;
403 // Maps remote side ssrc to extended highest sequence number received. 409 // Maps remote side ssrc to extended highest sequence number received.
404 std::map<uint32_t, uint32_t> extended_max_sequence_number_; 410 std::map<uint32_t, uint32_t> extended_max_sequence_number_;
405 rtc::CriticalSection crit_; 411 rtc::CriticalSection crit_;
406 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_); 412 RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_);
407 }; 413 };
408 414
415 class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
416 public:
417 ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_input,
418 Channel* channel,
419 AudioFramePool* audio_frame_pool)
420 : audio_input_(std::move(audio_input)),
421 channel_(channel),
422 audio_frame_pool_(audio_frame_pool) {}
423
424 ~ProcessAndEncodeAudioTask() override {
425 // Return the utilized audio frame to the pool so it can be used again.
426 audio_frame_pool_->Push(std::move(audio_input_));
427 }
428
429 private:
430 bool Run() override {
431 RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
432 RTC_DCHECK(channel_);
433 channel_->ProcessAndEncodeAudioOnTaskQueue(audio_input_.get());
434 return true;
435 }
436
437 std::unique_ptr<AudioFrame> audio_input_;
438 Channel* const channel_;
439 AudioFramePool* audio_frame_pool_;
440 };
aleloi 2017/03/23 13:33:21 Since the task operates on a pool and channel poin
henrika_webrtc 2017/03/23 14:02:17 Thanks! The pool is now removed. Will take your co
441
409 int32_t Channel::SendData(FrameType frameType, 442 int32_t Channel::SendData(FrameType frameType,
410 uint8_t payloadType, 443 uint8_t payloadType,
411 uint32_t timeStamp, 444 uint32_t timeStamp,
412 const uint8_t* payloadData, 445 const uint8_t* payloadData,
413 size_t payloadSize, 446 size_t payloadSize,
414 const RTPFragmentationHeader* fragmentation) { 447 const RTPFragmentationHeader* fragmentation) {
448 RTC_DCHECK_RUN_ON(encoder_queue_);
415 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 449 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
416 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u," 450 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
417 " payloadSize=%" PRIuS ", fragmentation=0x%x)", 451 " payloadSize=%" PRIuS ", fragmentation=0x%x)",
418 frameType, payloadType, timeStamp, payloadSize, fragmentation); 452 frameType, payloadType, timeStamp, payloadSize, fragmentation);
419 453
420 if (_includeAudioLevelIndication) { 454 if (_includeAudioLevelIndication) {
421 // Store current audio level in the RTP/RTCP module. 455 // Store current audio level in the RTP/RTCP module.
422 // The level will be used in combination with voice-activity state 456 // The level will be used in combination with voice-activity state
423 // (frameType) to add an RTP header extension 457 // (frameType) to add an RTP header extension
424 _rtpRtcpModule->SetAudioLevel(rms_level_.Average()); 458 _rtpRtcpModule->SetAudioLevel(rms_level_.Average());
425 } 459 }
426 460
427 // Push data from ACM to RTP/RTCP-module to deliver audio frame for 461 // Push data from ACM to RTP/RTCP-module to deliver audio frame for
428 // packetization. 462 // packetization.
429 // This call will trigger Transport::SendPacket() from the RTP/RTCP module. 463 // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
430 if (!_rtpRtcpModule->SendOutgoingData( 464 if (!_rtpRtcpModule->SendOutgoingData(
431 (FrameType&)frameType, payloadType, timeStamp, 465 (FrameType&)frameType, payloadType, timeStamp,
432 // Leaving the time when this frame was 466 // Leaving the time when this frame was
433 // received from the capture device as 467 // received from the capture device as
434 // undefined for voice for now. 468 // undefined for voice for now.
435 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) { 469 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) {
436 _engineStatisticsPtr->SetLastError( 470 _engineStatisticsPtr->SetLastError(
437 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, 471 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
438 "Channel::SendData() failed to send data to RTP/RTCP module"); 472 "Channel::SendData() failed to send data to RTP/RTCP module");
439 return -1; 473 return -1;
440 } 474 }
441 475
442 _lastLocalTimeStamp = timeStamp; 476 _lastLocalTimeStamp = timeStamp;
443 _lastPayloadType = payloadType; 477 _lastPayloadType = payloadType;
444
445 return 0; 478 return 0;
446 } 479 }
447 480
448 bool Channel::SendRtp(const uint8_t* data, 481 bool Channel::SendRtp(const uint8_t* data,
449 size_t len, 482 size_t len,
450 const PacketOptions& options) { 483 const PacketOptions& options) {
451 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 484 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
452 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len); 485 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len);
453 486
454 rtc::CritScope cs(&_callbackCritSect); 487 rtc::CritScope cs(&_callbackCritSect);
(...skipping 420 matching lines...) Expand 10 before | Expand all | Expand 10 after
875 send_sequence_number_(0), 908 send_sequence_number_(0),
876 rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()), 909 rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
877 capture_start_rtp_time_stamp_(-1), 910 capture_start_rtp_time_stamp_(-1),
878 capture_start_ntp_time_ms_(-1), 911 capture_start_ntp_time_ms_(-1),
879 _engineStatisticsPtr(NULL), 912 _engineStatisticsPtr(NULL),
880 _outputMixerPtr(NULL), 913 _outputMixerPtr(NULL),
881 _moduleProcessThreadPtr(NULL), 914 _moduleProcessThreadPtr(NULL),
882 _audioDeviceModulePtr(NULL), 915 _audioDeviceModulePtr(NULL),
883 _voiceEngineObserverPtr(NULL), 916 _voiceEngineObserverPtr(NULL),
884 _callbackCritSectPtr(NULL), 917 _callbackCritSectPtr(NULL),
918 encoder_queue_(nullptr),
885 _transportPtr(NULL), 919 _transportPtr(NULL),
886 input_mute_(false), 920 input_mute_(false),
887 previous_frame_muted_(false), 921 previous_frame_muted_(false),
888 _outputGain(1.0f), 922 _outputGain(1.0f),
889 _mixFileWithMicrophone(false), 923 _mixFileWithMicrophone(false),
890 _lastLocalTimeStamp(0), 924 _lastLocalTimeStamp(0),
891 _lastPayloadType(0), 925 _lastPayloadType(0),
892 _includeAudioLevelIndication(false), 926 _includeAudioLevelIndication(false),
893 transport_overhead_per_packet_(0), 927 transport_overhead_per_packet_(0),
894 rtp_overhead_per_packet_(0), 928 rtp_overhead_per_packet_(0),
895 _outputSpeechType(AudioFrame::kNormalSpeech), 929 _outputSpeechType(AudioFrame::kNormalSpeech),
896 restored_packet_in_use_(false), 930 restored_packet_in_use_(false),
897 rtcp_observer_(new VoERtcpObserver(this)), 931 rtcp_observer_(new VoERtcpObserver(this)),
898 associate_send_channel_(ChannelOwner(nullptr)), 932 associate_send_channel_(ChannelOwner(nullptr)),
899 pacing_enabled_(config.enable_voice_pacing), 933 pacing_enabled_(config.enable_voice_pacing),
900 feedback_observer_proxy_(new TransportFeedbackProxy()), 934 feedback_observer_proxy_(new TransportFeedbackProxy()),
901 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), 935 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()),
902 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()), 936 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()),
903 retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(), 937 retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(),
904 kMaxRetransmissionWindowMs)), 938 kMaxRetransmissionWindowMs)),
939 audio_frame_pool_(kAudioFramePoolSize),
905 decoder_factory_(config.acm_config.decoder_factory) { 940 decoder_factory_(config.acm_config.decoder_factory) {
906 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId), 941 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId),
907 "Channel::Channel() - ctor"); 942 "Channel::Channel() - ctor");
908 AudioCodingModule::Config acm_config(config.acm_config); 943 AudioCodingModule::Config acm_config(config.acm_config);
909 acm_config.id = VoEModuleId(instanceId, channelId); 944 acm_config.id = VoEModuleId(instanceId, channelId);
910 acm_config.neteq_config.enable_muted_state = true; 945 acm_config.neteq_config.enable_muted_state = true;
911 audio_coding_.reset(AudioCodingModule::Create(acm_config)); 946 audio_coding_.reset(AudioCodingModule::Create(acm_config));
912 947
913 _outputAudioLevel.Clear(); 948 _outputAudioLevel.Clear();
914 949
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after
1090 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get()); 1125 _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
1091 1126
1092 // End of modules shutdown 1127 // End of modules shutdown
1093 } 1128 }
1094 1129
1095 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, 1130 int32_t Channel::SetEngineInformation(Statistics& engineStatistics,
1096 OutputMixer& outputMixer, 1131 OutputMixer& outputMixer,
1097 ProcessThread& moduleProcessThread, 1132 ProcessThread& moduleProcessThread,
1098 AudioDeviceModule& audioDeviceModule, 1133 AudioDeviceModule& audioDeviceModule,
1099 VoiceEngineObserver* voiceEngineObserver, 1134 VoiceEngineObserver* voiceEngineObserver,
1100 rtc::CriticalSection* callbackCritSect) { 1135 rtc::CriticalSection* callbackCritSect,
1136 rtc::TaskQueue* encoder_queue) {
1101 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 1137 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1102 "Channel::SetEngineInformation()"); 1138 "Channel::SetEngineInformation()");
1139 RTC_DCHECK(encoder_queue);
1103 _engineStatisticsPtr = &engineStatistics; 1140 _engineStatisticsPtr = &engineStatistics;
1104 _outputMixerPtr = &outputMixer; 1141 _outputMixerPtr = &outputMixer;
1105 _moduleProcessThreadPtr = &moduleProcessThread; 1142 _moduleProcessThreadPtr = &moduleProcessThread;
1106 _audioDeviceModulePtr = &audioDeviceModule; 1143 _audioDeviceModulePtr = &audioDeviceModule;
1107 _voiceEngineObserverPtr = voiceEngineObserver; 1144 _voiceEngineObserverPtr = voiceEngineObserver;
1108 _callbackCritSectPtr = callbackCritSect; 1145 _callbackCritSectPtr = callbackCritSect;
1146 encoder_queue_ = encoder_queue;
1109 return 0; 1147 return 0;
1110 } 1148 }
1111 1149
1112 int32_t Channel::UpdateLocalTimeStamp() {
1113 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
1114 return 0;
1115 }
1116
1117 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) { 1150 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) {
1118 rtc::CritScope cs(&_callbackCritSect); 1151 rtc::CritScope cs(&_callbackCritSect);
1119 audio_sink_ = std::move(sink); 1152 audio_sink_ = std::move(sink);
1120 } 1153 }
1121 1154
1122 const rtc::scoped_refptr<AudioDecoderFactory>& 1155 const rtc::scoped_refptr<AudioDecoderFactory>&
1123 Channel::GetAudioDecoderFactory() const { 1156 Channel::GetAudioDecoderFactory() const {
1124 return decoder_factory_; 1157 return decoder_factory_;
1125 } 1158 }
1126 1159
(...skipping 1455 matching lines...) Expand 10 before | Expand all | Expand 10 after
2582 audio_coding_->EnableNack(maxNumberOfPackets); 2615 audio_coding_->EnableNack(maxNumberOfPackets);
2583 else 2616 else
2584 audio_coding_->DisableNack(); 2617 audio_coding_->DisableNack();
2585 } 2618 }
2586 2619
2587 // Called when we are missing one or more packets. 2620 // Called when we are missing one or more packets.
2588 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) { 2621 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
2589 return _rtpRtcpModule->SendNACK(sequence_numbers, length); 2622 return _rtpRtcpModule->SendNACK(sequence_numbers, length);
2590 } 2623 }
2591 2624
2592 uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) { 2625 void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
2593 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2626 RTC_DCHECK(encoder_queue_);
2594 "Channel::Demultiplex()"); 2627 std::unique_ptr<AudioFrame> audio_frame = audio_frame_pool_.Pop();
2595 _audioFrame.CopyFrom(audioFrame); 2628 RTC_DCHECK(audio_frame) << "Pool of audio frames is empty";
2596 _audioFrame.id_ = _channelId; 2629 if (audio_frame) {
2597 return 0; 2630 audio_frame->CopyFrom(audio_input);
2631 audio_frame->id_ = _channelId;
2632 PostTask(std::move(audio_frame));
2633 }
2598 } 2634 }
2599 2635
2600 void Channel::Demultiplex(const int16_t* audio_data, 2636 void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
2601 int sample_rate, 2637 int sample_rate,
2602 size_t number_of_frames, 2638 size_t number_of_frames,
2603 size_t number_of_channels) { 2639 size_t number_of_channels) {
2640 RTC_DCHECK(encoder_queue_);
2604 CodecInst codec; 2641 CodecInst codec;
2605 GetSendCodec(codec); 2642 GetSendCodec(codec);
2606 2643 std::unique_ptr<AudioFrame> audio_frame = audio_frame_pool_.Pop();
2607 // Never upsample or upmix the capture signal here. This should be done at the 2644 RTC_DCHECK(audio_frame) << "Pool of audio frames is empty";
2608 // end of the send chain. 2645 if (audio_frame) {
2609 _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate); 2646 audio_frame->sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
2610 _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels); 2647 audio_frame->num_channels_ = std::min(number_of_channels, codec.channels);
2611 RemixAndResample(audio_data, number_of_frames, number_of_channels, 2648 RemixAndResample(audio_data, number_of_frames, number_of_channels,
aleloi 2017/03/23 13:33:21 Perhaps resampling could also be done on the task
henrika_webrtc 2017/03/23 14:02:17 Good point. Will have to create unique method for
2612 sample_rate, &input_resampler_, &_audioFrame); 2649 sample_rate, &input_resampler_, audio_frame.get());
2650 audio_frame->id_ = _channelId;
2651 PostTask(std::move(audio_frame));
2652 }
2613 } 2653 }
2614 2654
2615 uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) { 2655 void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
2616 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2656 RTC_DCHECK_RUN_ON(encoder_queue_);
2617 "Channel::PrepareEncodeAndSend()"); 2657 PrepareEncodeAndSend(audio_input);
2658 EncodeAndSend(audio_input);
2659 }
2618 2660
2619 if (_audioFrame.samples_per_channel_ == 0) { 2661 uint32_t Channel::PrepareEncodeAndSend(AudioFrame* audio_input) {
2620 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2662 RTC_DCHECK_RUN_ON(encoder_queue_);
2621 "Channel::PrepareEncodeAndSend() invalid audio frame"); 2663 RTC_DCHECK(audio_input->samples_per_channel_);
2622 return 0xFFFFFFFF;
2623 }
2624 2664
2625 if (channel_state_.Get().input_file_playing) { 2665 if (channel_state_.Get().input_file_playing) {
2626 MixOrReplaceAudioWithFile(mixingFrequency); 2666 MixOrReplaceAudioWithFile(audio_input);
2627 } 2667 }
2628 2668
2629 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock. 2669 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock.
2630 AudioFrameOperations::Mute(&_audioFrame, previous_frame_muted_, is_muted); 2670 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);
2631 2671
2632 if (_includeAudioLevelIndication) { 2672 if (_includeAudioLevelIndication) {
2633 size_t length = 2673 size_t length =
2634 _audioFrame.samples_per_channel_ * _audioFrame.num_channels_; 2674 audio_input->samples_per_channel_ * audio_input->num_channels_;
2635 RTC_CHECK_LE(length, sizeof(_audioFrame.data_)); 2675 RTC_CHECK_LE(length, sizeof(audio_input->data_));
2636 if (is_muted && previous_frame_muted_) { 2676 if (is_muted && previous_frame_muted_) {
2637 rms_level_.AnalyzeMuted(length); 2677 rms_level_.AnalyzeMuted(length);
2638 } else { 2678 } else {
2639 rms_level_.Analyze( 2679 rms_level_.Analyze(
2640 rtc::ArrayView<const int16_t>(_audioFrame.data_, length)); 2680 rtc::ArrayView<const int16_t>(audio_input->data_, length));
2641 } 2681 }
2642 } 2682 }
2643 previous_frame_muted_ = is_muted; 2683 previous_frame_muted_ = is_muted;
2644
2645 return 0; 2684 return 0;
2646 } 2685 }
2647 2686
2648 uint32_t Channel::EncodeAndSend() { 2687 uint32_t Channel::EncodeAndSend(AudioFrame* audio_input) {
2649 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2688 RTC_DCHECK_RUN_ON(encoder_queue_);
2650 "Channel::EncodeAndSend()"); 2689 RTC_DCHECK_LE(audio_input->num_channels_, 2);
2690 RTC_DCHECK(audio_input->samples_per_channel_);
2651 2691
2652 assert(_audioFrame.num_channels_ <= 2); 2692 audio_input->id_ = _channelId;
2653 if (_audioFrame.samples_per_channel_ == 0) {
2654 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2655 "Channel::EncodeAndSend() invalid audio frame");
2656 return 0xFFFFFFFF;
2657 }
2658
2659 _audioFrame.id_ = _channelId;
2660 2693
2661 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz. 2694 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
2662 2695
2663 // The ACM resamples internally. 2696 // The ACM resamples internally.
2664 _audioFrame.timestamp_ = _timeStamp; 2697 audio_input->timestamp_ = _timeStamp;
2665 // This call will trigger AudioPacketizationCallback::SendData if encoding 2698 // This call will trigger AudioPacketizationCallback::SendData if encoding
2666 // is done and payload is ready for packetization and transmission. 2699 // is done and payload is ready for packetization and transmission.
2667 // Otherwise, it will return without invoking the callback. 2700 // Otherwise, it will return without invoking the callback.
2668 if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) { 2701 if (audio_coding_->Add10MsData(*audio_input) < 0) {
2669 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId), 2702 LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId;
2670 "Channel::EncodeAndSend() ACM encoding failed");
2671 return 0xFFFFFFFF; 2703 return 0xFFFFFFFF;
2672 } 2704 }
2673 2705
2674 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_); 2706 _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_);
2675 return 0; 2707 return 0;
2676 } 2708 }
2677 2709
2678 void Channel::set_associate_send_channel(const ChannelOwner& channel) { 2710 void Channel::set_associate_send_channel(const ChannelOwner& channel) {
2679 RTC_DCHECK(!channel.channel() || 2711 RTC_DCHECK(!channel.channel() ||
2680 channel.channel()->ChannelId() != _channelId); 2712 channel.channel()->ChannelId() != _channelId);
2681 rtc::CritScope lock(&assoc_send_channel_lock_); 2713 rtc::CritScope lock(&assoc_send_channel_lock_);
2682 associate_send_channel_ = channel; 2714 associate_send_channel_ = channel;
2683 } 2715 }
2684 2716
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after
2774 2806
2775 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, 2807 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule,
2776 RtpReceiver** rtp_receiver) const { 2808 RtpReceiver** rtp_receiver) const {
2777 *rtpRtcpModule = _rtpRtcpModule.get(); 2809 *rtpRtcpModule = _rtpRtcpModule.get();
2778 *rtp_receiver = rtp_receiver_.get(); 2810 *rtp_receiver = rtp_receiver_.get();
2779 return 0; 2811 return 0;
2780 } 2812 }
2781 2813
2782 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use 2814 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
2783 // a shared helper. 2815 // a shared helper.
2784 int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) { 2816 int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) {
2817 RTC_DCHECK_RUN_ON(encoder_queue_);
2785 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]); 2818 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]);
2786 size_t fileSamples(0); 2819 size_t fileSamples(0);
2820 const int mixingFrequency = audio_input->sample_rate_hz_;
2787 2821
2788 { 2822 if (!input_file_player_) {
2789 rtc::CritScope cs(&_fileCritSect); 2823 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2790 2824 "Channel::MixOrReplaceAudioWithFile() fileplayer"
2791 if (!input_file_player_) { 2825 " doesnt exist");
2792 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2826 return -1;
2793 "Channel::MixOrReplaceAudioWithFile() fileplayer"
2794 " doesnt exist");
2795 return -1;
2796 }
2797
2798 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
2799 mixingFrequency) == -1) {
2800 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2801 "Channel::MixOrReplaceAudioWithFile() file mixing "
2802 "failed");
2803 return -1;
2804 }
2805 if (fileSamples == 0) {
2806 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2807 "Channel::MixOrReplaceAudioWithFile() file is ended");
2808 return 0;
2809 }
2810 } 2827 }
2811 2828
2812 assert(_audioFrame.samples_per_channel_ == fileSamples); 2829 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
2830 mixingFrequency) == -1) {
2831 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2832 "Channel::MixOrReplaceAudioWithFile() file mixing "
2833 "failed");
2834 return -1;
2835 }
2836 if (fileSamples == 0) {
2837 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2838 "Channel::MixOrReplaceAudioWithFile() file is ended");
2839 return 0;
2840 }
2841
2842 assert(audio_input->samples_per_channel_ == fileSamples);
2813 2843
2814 if (_mixFileWithMicrophone) { 2844 if (_mixFileWithMicrophone) {
2815 // Currently file stream is always mono. 2845 // Currently file stream is always mono.
2816 // TODO(xians): Change the code when FilePlayer supports real stereo. 2846 // TODO(xians): Change the code when FilePlayer supports real stereo.
2817 MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(), 2847 MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(),
2818 1, fileSamples); 2848 1, fileSamples);
2819 } else { 2849 } else {
2820 // Replace ACM audio with file. 2850 // Replace ACM audio with file.
2821 // Currently file stream is always mono. 2851 // Currently file stream is always mono.
2822 // TODO(xians): Change the code when FilePlayer supports real stereo. 2852 // TODO(xians): Change the code when FilePlayer supports real stereo.
2823 _audioFrame.UpdateFrame( 2853 audio_input->UpdateFrame(
2824 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, 2854 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency,
2825 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); 2855 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1);
2826 } 2856 }
2827 return 0; 2857 return 0;
2828 } 2858 }
2829 2859
2830 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) { 2860 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) {
2831 assert(mixingFrequency <= 48000); 2861 assert(mixingFrequency <= 48000);
2832 2862
2833 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]); 2863 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]);
(...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after
3001 int64_t avg_rtt = 0; 3031 int64_t avg_rtt = 0;
3002 int64_t max_rtt = 0; 3032 int64_t max_rtt = 0;
3003 int64_t min_rtt = 0; 3033 int64_t min_rtt = 0;
3004 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != 3034 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) !=
3005 0) { 3035 0) {
3006 return 0; 3036 return 0;
3007 } 3037 }
3008 return rtt; 3038 return rtt;
3009 } 3039 }
3010 3040
3041 void Channel::PostTask(std::unique_ptr<AudioFrame> audio_frame) {
3042 encoder_queue_->PostTask(
3043 std::unique_ptr<rtc::QueuedTask>(new ProcessAndEncodeAudioTask(
3044 std::move(audio_frame), this, &audio_frame_pool_)));
3045 }
3046
3011 } // namespace voe 3047 } // namespace voe
3012 } // namespace webrtc 3048 } // namespace webrtc
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698