Chromium Code Reviews

Side by Side Diff: webrtc/voice_engine/channel.cc

Issue 2665693002: Moves channel-dependent audio input processing to separate encoder task queue (Closed)
Patch Set: BUILD changes (created 3 years, 10 months ago)
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
11 #include "webrtc/voice_engine/channel.h" 11 #include "webrtc/voice_engine/channel.h"
12 12
13 #include <algorithm> 13 #include <algorithm>
14 #include <utility> 14 #include <utility>
15 15
16 #include "webrtc/audio/utility/audio_frame_operations.h" 16 #include "webrtc/audio/utility/audio_frame_operations.h"
17 #include "webrtc/base/array_view.h" 17 #include "webrtc/base/array_view.h"
18 #include "webrtc/base/checks.h" 18 #include "webrtc/base/checks.h"
19 #include "webrtc/base/criticalsection.h" 19 #include "webrtc/base/criticalsection.h"
20 #include "webrtc/base/format_macros.h" 20 #include "webrtc/base/format_macros.h"
21 #include "webrtc/base/logging.h" 21 #include "webrtc/base/logging.h"
22 #include "webrtc/base/rate_limiter.h" 22 #include "webrtc/base/rate_limiter.h"
23 #include "webrtc/base/task_queue.h"
23 #include "webrtc/base/thread_checker.h" 24 #include "webrtc/base/thread_checker.h"
24 #include "webrtc/base/timeutils.h" 25 #include "webrtc/base/timeutils.h"
25 #include "webrtc/config.h" 26 #include "webrtc/config.h"
26 #include "webrtc/logging/rtc_event_log/rtc_event_log.h" 27 #include "webrtc/logging/rtc_event_log/rtc_event_log.h"
27 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h" 28 #include "webrtc/modules/audio_coding/codecs/audio_format_conversion.h"
28 #include "webrtc/modules/audio_device/include/audio_device.h" 29 #include "webrtc/modules/audio_device/include/audio_device.h"
29 #include "webrtc/modules/audio_processing/include/audio_processing.h" 30 #include "webrtc/modules/audio_processing/include/audio_processing.h"
30 #include "webrtc/modules/include/module_common_types.h" 31 #include "webrtc/modules/include/module_common_types.h"
31 #include "webrtc/modules/pacing/packet_router.h" 32 #include "webrtc/modules/pacing/packet_router.h"
32 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h" 33 #include "webrtc/modules/rtp_rtcp/include/receive_statistics.h"
(...skipping 346 matching lines...)
379 } 380 }
380 owner_->OnIncomingFractionLoss(weighted_fraction_lost); 381 owner_->OnIncomingFractionLoss(weighted_fraction_lost);
381 } 382 }
382 383
383 private: 384 private:
384 Channel* owner_; 385 Channel* owner_;
385 // Maps remote side ssrc to extended highest sequence number received. 386 // Maps remote side ssrc to extended highest sequence number received.
386 std::map<uint32_t, uint32_t> extended_max_sequence_number_; 387 std::map<uint32_t, uint32_t> extended_max_sequence_number_;
387 }; 388 };
388 389
390 class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
391 public:
392 ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_input,
393 Channel* channel)
394 : audio_input_(std::move(audio_input)), channel_(channel) {}
395
396 private:
397 bool Run() override {
398 RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
399 channel_->ProcessAndEncodeAudioOnTaskQueue(std::move(audio_input_));
400 return true;
401 }
402
403 std::unique_ptr<AudioFrame> audio_input_;
404 Channel* const channel_;
the sun 2017/01/31 13:34:33 We have us an ownership issue, I think: what if th
henrika_webrtc 2017/01/31 15:20:21 Will see if I can improve.
405 };
406
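The thread above points at a real lifetime hazard: ProcessAndEncodeAudioTask holds a raw Channel* and can still be pending on the queue when the Channel goes away. One common way to close that hole is to drain the queue with a barrier task before the Channel's members are destroyed. A minimal sketch, assuming rtc::Event from webrtc/base/event.h and the closure overload of rtc::TaskQueue::PostTask; the FlushEncoderQueue() helper and its call from ~Channel() are hypothetical, not part of this patch:

    #include "webrtc/base/event.h"

    // Hypothetical helper, called from ~Channel() before members are torn
    // down. Since rtc::TaskQueue runs tasks in FIFO order, once this barrier
    // task has run, every previously posted ProcessAndEncodeAudioTask that
    // captured |this| has already finished.
    void Channel::FlushEncoderQueue() {
      rtc::Event done(false /* manual_reset */, false /* initially_signaled */);
      encoder_queue_->PostTask([&done] { done.Set(); });
      done.Wait(rtc::Event::kForever);
    }

The barrier only helps if nothing posts new work after it, so the capture side must be stopped first; that ordering is the question the reviewer is raising.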
389 int32_t Channel::SendData(FrameType frameType, 407 int32_t Channel::SendData(FrameType frameType,
390 uint8_t payloadType, 408 uint8_t payloadType,
391 uint32_t timeStamp, 409 uint32_t timeStamp,
392 const uint8_t* payloadData, 410 const uint8_t* payloadData,
393 size_t payloadSize, 411 size_t payloadSize,
394 const RTPFragmentationHeader* fragmentation) { 412 const RTPFragmentationHeader* fragmentation) {
413 RTC_DCHECK_RUN_ON(encoder_queue_);
414 // LOG(INFO) << "SendData: " << payloadSize;
395 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 415 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
396 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u," 416 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
397 " payloadSize=%" PRIuS ", fragmentation=0x%x)", 417 " payloadSize=%" PRIuS ", fragmentation=0x%x)",
398 frameType, payloadType, timeStamp, payloadSize, fragmentation); 418 frameType, payloadType, timeStamp, payloadSize, fragmentation);
399 419
400 if (_includeAudioLevelIndication) { 420 if (_includeAudioLevelIndication) {
401 // Store current audio level in the RTP/RTCP module. 421 // Store current audio level in the RTP/RTCP module.
402 // The level will be used in combination with voice-activity state 422 // The level will be used in combination with voice-activity state
403 // (frameType) to add an RTP header extension 423 // (frameType) to add an RTP header extension
404 _rtpRtcpModule->SetAudioLevel(rms_level_.Average()); 424 _rtpRtcpModule->SetAudioLevel(rms_level_.Average());
405 } 425 }
406 426
407 // Push data from ACM to RTP/RTCP-module to deliver audio frame for 427 // Push data from ACM to RTP/RTCP-module to deliver audio frame for
408 // packetization. 428 // packetization.
409 // This call will trigger Transport::SendPacket() from the RTP/RTCP module. 429 // This call will trigger Transport::SendPacket() from the RTP/RTCP module.
410 if (!_rtpRtcpModule->SendOutgoingData( 430 if (!_rtpRtcpModule->SendOutgoingData(
411 (FrameType&)frameType, payloadType, timeStamp, 431 (FrameType&)frameType, payloadType, timeStamp,
412 // Leaving the time when this frame was 432 // Leaving the time when this frame was
413 // received from the capture device as 433 // received from the capture device as
414 // undefined for voice for now. 434 // undefined for voice for now.
415 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) { 435 -1, payloadData, payloadSize, fragmentation, nullptr, nullptr)) {
416 _engineStatisticsPtr->SetLastError( 436 _engineStatisticsPtr->SetLastError(
417 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning, 437 VE_RTP_RTCP_MODULE_ERROR, kTraceWarning,
418 "Channel::SendData() failed to send data to RTP/RTCP module"); 438 "Channel::SendData() failed to send data to RTP/RTCP module");
419 return -1; 439 return -1;
420 } 440 }
421 441
422 _lastLocalTimeStamp = timeStamp; 442 _lastLocalTimeStamp = timeStamp;
423 _lastPayloadType = payloadType; 443 _lastPayloadType = payloadType;
424 444 // LOG(INFO) << "__SendData";
425 return 0; 445 return 0;
426 } 446 }
427 447
428 int32_t Channel::InFrameType(FrameType frame_type) { 448 int32_t Channel::InFrameType(FrameType frame_type) {
429 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 449 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
430 "Channel::InFrameType(frame_type=%d)", frame_type); 450 "Channel::InFrameType(frame_type=%d)", frame_type);
431 451
432 rtc::CritScope cs(&_callbackCritSect); 452 rtc::CritScope cs(&_callbackCritSect);
433 _sendFrameType = (frame_type == kAudioFrameSpeech); 453 _sendFrameType = (frame_type == kAudioFrameSpeech);
434 return 0; 454 return 0;
(...skipping 468 matching lines...)
903 rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()), 923 rtp_ts_wraparound_handler_(new rtc::TimestampWrapAroundHandler()),
904 capture_start_rtp_time_stamp_(-1), 924 capture_start_rtp_time_stamp_(-1),
905 capture_start_ntp_time_ms_(-1), 925 capture_start_ntp_time_ms_(-1),
906 _engineStatisticsPtr(NULL), 926 _engineStatisticsPtr(NULL),
907 _outputMixerPtr(NULL), 927 _outputMixerPtr(NULL),
908 _transmitMixerPtr(NULL), 928 _transmitMixerPtr(NULL),
909 _moduleProcessThreadPtr(NULL), 929 _moduleProcessThreadPtr(NULL),
910 _audioDeviceModulePtr(NULL), 930 _audioDeviceModulePtr(NULL),
911 _voiceEngineObserverPtr(NULL), 931 _voiceEngineObserverPtr(NULL),
912 _callbackCritSectPtr(NULL), 932 _callbackCritSectPtr(NULL),
933 encoder_queue_(nullptr),
913 _transportPtr(NULL), 934 _transportPtr(NULL),
914 _sendFrameType(0), 935 _sendFrameType(0),
915 _externalMixing(false), 936 _externalMixing(false),
916 _mixFileWithMicrophone(false), 937 _mixFileWithMicrophone(false),
917 input_mute_(false), 938 input_mute_(false),
918 previous_frame_muted_(false), 939 previous_frame_muted_(false),
919 _panLeft(1.0f), 940 _panLeft(1.0f),
920 _panRight(1.0f), 941 _panRight(1.0f),
921 _outputGain(1.0f), 942 _outputGain(1.0f),
922 _lastLocalTimeStamp(0), 943 _lastLocalTimeStamp(0),
(...skipping 201 matching lines...)
1124 1145
1125 return 0; 1146 return 0;
1126 } 1147 }
1127 1148
1128 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, 1149 int32_t Channel::SetEngineInformation(Statistics& engineStatistics,
1129 OutputMixer& outputMixer, 1150 OutputMixer& outputMixer,
1130 voe::TransmitMixer& transmitMixer, 1151 voe::TransmitMixer& transmitMixer,
1131 ProcessThread& moduleProcessThread, 1152 ProcessThread& moduleProcessThread,
1132 AudioDeviceModule& audioDeviceModule, 1153 AudioDeviceModule& audioDeviceModule,
1133 VoiceEngineObserver* voiceEngineObserver, 1154 VoiceEngineObserver* voiceEngineObserver,
1134 rtc::CriticalSection* callbackCritSect) { 1155 rtc::CriticalSection* callbackCritSect,
1156 rtc::TaskQueue* encoder_queue) {
1135 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), 1157 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
1136 "Channel::SetEngineInformation()"); 1158 "Channel::SetEngineInformation()");
1159 RTC_DCHECK(encoder_queue);
1137 _engineStatisticsPtr = &engineStatistics; 1160 _engineStatisticsPtr = &engineStatistics;
1138 _outputMixerPtr = &outputMixer; 1161 _outputMixerPtr = &outputMixer;
1139 _transmitMixerPtr = &transmitMixer, 1162 _transmitMixerPtr = &transmitMixer,
1140 _moduleProcessThreadPtr = &moduleProcessThread; 1163 _moduleProcessThreadPtr = &moduleProcessThread;
1141 _audioDeviceModulePtr = &audioDeviceModule; 1164 _audioDeviceModulePtr = &audioDeviceModule;
1142 _voiceEngineObserverPtr = voiceEngineObserver; 1165 _voiceEngineObserverPtr = voiceEngineObserver;
1143 _callbackCritSectPtr = callbackCritSect; 1166 _callbackCritSectPtr = callbackCritSect;
1167 encoder_queue_ = encoder_queue;
1144 return 0; 1168 return 0;
1145 } 1169 }
1146 1170
1147 int32_t Channel::UpdateLocalTimeStamp() {
1148 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
1149 return 0;
1150 }
1151
1152 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) { 1171 void Channel::SetSink(std::unique_ptr<AudioSinkInterface> sink) {
1153 rtc::CritScope cs(&_callbackCritSect); 1172 rtc::CritScope cs(&_callbackCritSect);
1154 audio_sink_ = std::move(sink); 1173 audio_sink_ = std::move(sink);
1155 } 1174 }
1156 1175
1157 const rtc::scoped_refptr<AudioDecoderFactory>& 1176 const rtc::scoped_refptr<AudioDecoderFactory>&
1158 Channel::GetAudioDecoderFactory() const { 1177 Channel::GetAudioDecoderFactory() const {
1159 return decoder_factory_; 1178 return decoder_factory_;
1160 } 1179 }
1161 1180
(...skipping 1594 matching lines...)
2756 audio_coding_->EnableNack(maxNumberOfPackets); 2775 audio_coding_->EnableNack(maxNumberOfPackets);
2757 else 2776 else
2758 audio_coding_->DisableNack(); 2777 audio_coding_->DisableNack();
2759 } 2778 }
2760 2779
2761 // Called when we are missing one or more packets. 2780 // Called when we are missing one or more packets.
2762 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) { 2781 int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
2763 return _rtpRtcpModule->SendNACK(sequence_numbers, length); 2782 return _rtpRtcpModule->SendNACK(sequence_numbers, length);
2764 } 2783 }
2765 2784
2766 uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) { 2785 void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
2767 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2786 RTC_DCHECK(encoder_queue_);
2768 "Channel::Demultiplex()"); 2787 std::unique_ptr<AudioFrame> audio_source(new AudioFrame());
the sun 2017/01/31 13:34:33 We don't want to introduce more copying if we can
tommi 2017/01/31 14:51:18 That can still be done I think. After being done w
henrika_webrtc 2017/01/31 15:20:21 I might have to ask you guys off-line on how what
2769 _audioFrame.CopyFrom(audioFrame); 2788 audio_source->CopyFrom(audio_input);
2770 _audioFrame.id_ = _channelId; 2789 audio_source->id_ = _channelId;
2771 return 0; 2790 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
2791 new ProcessAndEncodeAudioTask(std::move(audio_source), this)));
tommi 2017/01/31 14:51:18 btw, in order to use move semantics, you don't hav
henrika_webrtc 2017/01/31 15:20:21 Acknowledged.
2772 } 2792 }
2773 2793
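The two threads above are separable points. On copying: the CopyFrom() here could be avoided entirely if the capture side handed the frame over by std::unique_ptr instead of const reference, so ownership moves all the way onto the queue. On move semantics: PostTask() accepts std::unique_ptr<rtc::QueuedTask>, and a unique_ptr to the derived task converts implicitly, so the explicit wrapper temporary is unnecessary. A sketch combining both, assuming a hypothetical caller-owned overload and rtc::MakeUnique from webrtc/base/ptr_util.h being available at this revision:

    #include "webrtc/base/ptr_util.h"

    // Hypothetical overload, not part of this patch: the caller allocates the
    // frame and transfers ownership, so no CopyFrom() is required.
    void Channel::ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_input) {
      RTC_DCHECK(encoder_queue_);
      audio_input->id_ = _channelId;
      // unique_ptr<ProcessAndEncodeAudioTask> converts implicitly to
      // unique_ptr<rtc::QueuedTask>; no explicit temporary is needed.
      encoder_queue_->PostTask(rtc::MakeUnique<ProcessAndEncodeAudioTask>(
          std::move(audio_input), this));
    }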
2774 void Channel::Demultiplex(const int16_t* audio_data, 2794 void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
2775 int sample_rate, 2795 int sample_rate,
2776 size_t number_of_frames, 2796 size_t number_of_frames,
2777 size_t number_of_channels) { 2797 size_t number_of_channels) {
2798 RTC_DCHECK(encoder_queue_);
2778 CodecInst codec; 2799 CodecInst codec;
2779 GetSendCodec(codec); 2800 GetSendCodec(codec);
2780 2801 std::unique_ptr<AudioFrame> audio_source(new AudioFrame());
2781 // Never upsample or upmix the capture signal here. This should be done at the 2802 audio_source->sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
2782 // end of the send chain. 2803 audio_source->num_channels_ = std::min(number_of_channels, codec.channels);
2783 _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
2784 _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels);
2785 RemixAndResample(audio_data, number_of_frames, number_of_channels, 2804 RemixAndResample(audio_data, number_of_frames, number_of_channels,
2786 sample_rate, &input_resampler_, &_audioFrame); 2805 sample_rate, &input_resampler_, audio_source.get());
2806 audio_source->id_ = _channelId;
2807 encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
2808 new ProcessAndEncodeAudioTask(std::move(audio_source), this)));
2787 } 2809 }
2788 2810
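For context, this raw-PCM overload is the one a capture-side caller feeds directly. A hypothetical call site, with example capture parameters that are not part of this diff:

    // 10 ms of mono audio at 48 kHz is 480 samples per channel.
    const int sample_rate = 48000;
    const size_t number_of_channels = 1;
    const size_t number_of_frames = sample_rate / 100;  // 480
    int16_t audio_data[480] = {0};  // Captured PCM would go here.
    channel->ProcessAndEncodeAudio(audio_data, sample_rate, number_of_frames,
                                   number_of_channels);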
2789 uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) { 2811 void Channel::ProcessAndEncodeAudioOnTaskQueue(
2790 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2812 std::unique_ptr<AudioFrame> audio_input) {
2791 "Channel::PrepareEncodeAndSend()"); 2813 RTC_DCHECK_RUN_ON(encoder_queue_);
2814 PrepareEncodeAndSend(audio_input.get());
2815 EncodeAndSend(audio_input.get());
2816 }
2792 2817
2793 if (_audioFrame.samples_per_channel_ == 0) { 2818 uint32_t Channel::PrepareEncodeAndSend(AudioFrame* audio_input) {
2794 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 2819 RTC_DCHECK_RUN_ON(encoder_queue_);
2795 "Channel::PrepareEncodeAndSend() invalid audio frame"); 2820 RTC_DCHECK(audio_input->samples_per_channel_);
2796 return 0xFFFFFFFF;
2797 }
2798 2821
2799 if (channel_state_.Get().input_file_playing) { 2822 if (channel_state_.Get().input_file_playing) {
2800 MixOrReplaceAudioWithFile(mixingFrequency); 2823 MixOrReplaceAudioWithFile(audio_input);
2801 } 2824 }
2802 2825
2803 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock. 2826 bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock.
2804 AudioFrameOperations::Mute(&_audioFrame, previous_frame_muted_, is_muted); 2827 AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);
2805 2828
2806 if (channel_state_.Get().input_external_media) { 2829 if (channel_state_.Get().input_external_media) {
2807 rtc::CritScope cs(&_callbackCritSect); 2830 rtc::CritScope cs(&_callbackCritSect);
2808 const bool isStereo = (_audioFrame.num_channels_ == 2); 2831 const bool isStereo = (audio_input->num_channels_ == 2);
2809 if (_inputExternalMediaCallbackPtr) { 2832 if (_inputExternalMediaCallbackPtr) {
2810 _inputExternalMediaCallbackPtr->Process( 2833 _inputExternalMediaCallbackPtr->Process(
2811 _channelId, kRecordingPerChannel, (int16_t*)_audioFrame.data_, 2834 _channelId, kRecordingPerChannel, (int16_t*)audio_input->data_,
2812 _audioFrame.samples_per_channel_, _audioFrame.sample_rate_hz_, 2835 audio_input->samples_per_channel_, audio_input->sample_rate_hz_,
2813 isStereo); 2836 isStereo);
2814 } 2837 }
2815 } 2838 }
2816 2839
2817 if (_includeAudioLevelIndication) { 2840 if (_includeAudioLevelIndication) {
2818 size_t length = 2841 size_t length =
2819 _audioFrame.samples_per_channel_ * _audioFrame.num_channels_; 2842 audio_input->samples_per_channel_ * audio_input->num_channels_;
2820 RTC_CHECK_LE(length, sizeof(_audioFrame.data_)); 2843 RTC_CHECK_LE(length, sizeof(audio_input->data_));
2821 if (is_muted && previous_frame_muted_) { 2844 if (is_muted && previous_frame_muted_) {
2822 rms_level_.AnalyzeMuted(length); 2845 rms_level_.AnalyzeMuted(length);
2823 } else { 2846 } else {
2824 rms_level_.Analyze( 2847 rms_level_.Analyze(
2825 rtc::ArrayView<const int16_t>(_audioFrame.data_, length)); 2848 rtc::ArrayView<const int16_t>(audio_input->data_, length));
2826 } 2849 }
2827 } 2850 }
2828 previous_frame_muted_ = is_muted; 2851 previous_frame_muted_ = is_muted;
2829
2830 return 0; 2852 return 0;
2831 } 2853 }
2832 2854
2833 uint32_t Channel::EncodeAndSend() { 2855 uint32_t Channel::EncodeAndSend(AudioFrame* audio_input) {
2834 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), 2856 RTC_DCHECK_RUN_ON(encoder_queue_);
2835 "Channel::EncodeAndSend()"); 2857 RTC_DCHECK_LE(audio_input->num_channels_, 2);
2858 RTC_DCHECK(audio_input->samples_per_channel_);
2836 2859
2837 assert(_audioFrame.num_channels_ <= 2); 2860 audio_input->id_ = _channelId;
2838 if (_audioFrame.samples_per_channel_ == 0) {
2839 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
2840 "Channel::EncodeAndSend() invalid audio frame");
2841 return 0xFFFFFFFF;
2842 }
2843
2844 _audioFrame.id_ = _channelId;
2845 2861
2846 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz. 2862 // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
2847 2863
2848 // The ACM resamples internally. 2864 // The ACM resamples internally.
2849 _audioFrame.timestamp_ = _timeStamp; 2865 audio_input->timestamp_ = _timeStamp;
2850 // This call will trigger AudioPacketizationCallback::SendData if encoding 2866 // This call will trigger AudioPacketizationCallback::SendData if encoding
2851 // is done and payload is ready for packetization and transmission. 2867 // is done and payload is ready for packetization and transmission.
2852 // Otherwise, it will return without invoking the callback. 2868 // Otherwise, it will return without invoking the callback.
2853 if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) { 2869 if (audio_coding_->Add10MsData(*audio_input) < 0) {
2854 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId), 2870 LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId;
2855 "Channel::EncodeAndSend() ACM encoding failed");
2856 return 0xFFFFFFFF; 2871 return 0xFFFFFFFF;
2857 } 2872 }
2858 2873
2859 _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_); 2874 _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_);
2875 // LOG(INFO) << "___EncodeAndSend";
2860 return 0; 2876 return 0;
2861 } 2877 }
2862 2878
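Worth noting about the bookkeeping at the end of EncodeAndSend(): _timeStamp counts samples, not milliseconds, so each 10 ms frame advances it by sample_rate / 100, e.g. 480 at 48 kHz or 160 at 16 kHz. This lines up with RTP audio timestamps, which tick in samples rather than wall-clock time.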
2863 void Channel::set_associate_send_channel(const ChannelOwner& channel) { 2879 void Channel::set_associate_send_channel(const ChannelOwner& channel) {
2864 RTC_DCHECK(!channel.channel() || 2880 RTC_DCHECK(!channel.channel() ||
2865 channel.channel()->ChannelId() != _channelId); 2881 channel.channel()->ChannelId() != _channelId);
2866 rtc::CritScope lock(&assoc_send_channel_lock_); 2882 rtc::CritScope lock(&assoc_send_channel_lock_);
2867 associate_send_channel_ = channel; 2883 associate_send_channel_ = channel;
2868 } 2884 }
2869 2885
(...skipping 202 matching lines...)
3072 3088
3073 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule, 3089 int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule,
3074 RtpReceiver** rtp_receiver) const { 3090 RtpReceiver** rtp_receiver) const {
3075 *rtpRtcpModule = _rtpRtcpModule.get(); 3091 *rtpRtcpModule = _rtpRtcpModule.get();
3076 *rtp_receiver = rtp_receiver_.get(); 3092 *rtp_receiver = rtp_receiver_.get();
3077 return 0; 3093 return 0;
3078 } 3094 }
3079 3095
3080 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use 3096 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
3081 // a shared helper. 3097 // a shared helper.
3082 int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) { 3098 int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) {
3099 RTC_DCHECK_RUN_ON(encoder_queue_);
3083 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]); 3100 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]);
3084 size_t fileSamples(0); 3101 size_t fileSamples(0);
3102 const int mixingFrequency = audio_input->sample_rate_hz_;
3085 3103
3086 { 3104 if (!input_file_player_) {
3087 rtc::CritScope cs(&_fileCritSect); 3105 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
3088 3106 "Channel::MixOrReplaceAudioWithFile() fileplayer"
3089 if (!input_file_player_) { 3107 " doesnt exist");
3090 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), 3108 return -1;
3091 "Channel::MixOrReplaceAudioWithFile() fileplayer"
3092 " doesnt exist");
3093 return -1;
3094 }
3095
3096 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
3097 mixingFrequency) == -1) {
3098 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
3099 "Channel::MixOrReplaceAudioWithFile() file mixing "
3100 "failed");
3101 return -1;
3102 }
3103 if (fileSamples == 0) {
3104 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
3105 "Channel::MixOrReplaceAudioWithFile() file is ended");
3106 return 0;
3107 }
3108 } 3109 }
3109 3110
3110 assert(_audioFrame.samples_per_channel_ == fileSamples); 3111 if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
3112 mixingFrequency) == -1) {
3113 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
3114 "Channel::MixOrReplaceAudioWithFile() file mixing "
3115 "failed");
3116 return -1;
3117 }
3118 if (fileSamples == 0) {
3119 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
3120 "Channel::MixOrReplaceAudioWithFile() file is ended");
3121 return 0;
3122 }
3123
3124 assert(audio_input->samples_per_channel_ == fileSamples);
3111 3125
3112 if (_mixFileWithMicrophone) { 3126 if (_mixFileWithMicrophone) {
3113 // Currently file stream is always mono. 3127 // Currently file stream is always mono.
3114 // TODO(xians): Change the code when FilePlayer supports real stereo. 3128 // TODO(xians): Change the code when FilePlayer supports real stereo.
3115 MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(), 3129 MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(),
3116 1, fileSamples); 3130 1, fileSamples);
3117 } else { 3131 } else {
3118 // Replace ACM audio with file. 3132 // Replace ACM audio with file.
3119 // Currently file stream is always mono. 3133 // Currently file stream is always mono.
3120 // TODO(xians): Change the code when FilePlayer supports real stereo. 3134 // TODO(xians): Change the code when FilePlayer supports real stereo.
3121 _audioFrame.UpdateFrame( 3135 audio_input->UpdateFrame(
3122 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency, 3136 _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency,
3123 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1); 3137 AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1);
3124 } 3138 }
3125 return 0; 3139 return 0;
3126 } 3140 }
3127 3141
3128 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) { 3142 int32_t Channel::MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency) {
3129 assert(mixingFrequency <= 48000); 3143 assert(mixingFrequency <= 48000);
3130 3144
3131 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]); 3145 std::unique_ptr<int16_t[]> fileBuffer(new int16_t[960]);
(...skipping 171 matching lines...)
3303 int64_t min_rtt = 0; 3317 int64_t min_rtt = 0;
3304 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != 3318 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) !=
3305 0) { 3319 0) {
3306 return 0; 3320 return 0;
3307 } 3321 }
3308 return rtt; 3322 return rtt;
3309 } 3323 }
3310 3324
3311 } // namespace voe 3325 } // namespace voe
3312 } // namespace webrtc 3326 } // namespace webrtc