| Index: webrtc/voice_engine/channel.cc
| diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
| index 0f0831c8b1f8cbd737a7b919e4a277b04cb87ff1..c1cfa6fdd9959c2b8c11f73dffe7fecb76c28bc8 100644
| --- a/webrtc/voice_engine/channel.cc
| +++ b/webrtc/voice_engine/channel.cc
| @@ -21,6 +21,8 @@
|  #include "webrtc/base/location.h"
|  #include "webrtc/base/logging.h"
|  #include "webrtc/base/rate_limiter.h"
| +#include "webrtc/base/task_queue.h"
| +#include "webrtc/base/thread_checker.h"
|  #include "webrtc/base/timeutils.h"
|  #include "webrtc/config.h"
|  #include "webrtc/logging/rtc_event_log/rtc_event_log.h"
| @@ -48,6 +50,10 @@ namespace {
| 
|  constexpr int64_t kMaxRetransmissionWindowMs = 1000;
|  constexpr int64_t kMinRetransmissionWindowMs = 30;
| +// Number of audio frames preallocated in the pool of audio frames.
| +// Local tests on Android devices have shown that the number of available
| +// frames in the pool never drops below 5.
| +constexpr size_t kAudioFramePoolSize = 10;
| 
|  }  // namespace
| 
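Note: AudioFramePool is used throughout this patch but defined elsewhere (channel.h is not part of this file's diff), so its exact shape is an assumption. A minimal sketch of the interface the new code relies on -- a fixed-size, thread-safe pool handing out std::unique_ptr<AudioFrame> -- could look like this (names, file, and locking strategy are assumptions, not the actual WebRTC implementation):

    #include <memory>
    #include <vector>
    #include "webrtc/base/criticalsection.h"
    #include "webrtc/modules/include/module_common_types.h"  // AudioFrame

    class AudioFramePool {
     public:
      explicit AudioFramePool(size_t size) {
        for (size_t i = 0; i < size; ++i)
          frames_.push_back(std::unique_ptr<AudioFrame>(new AudioFrame()));
      }
      // Called on the capture thread; returns nullptr if the pool is drained.
      std::unique_ptr<AudioFrame> Pop() {
        rtc::CritScope cs(&lock_);
        if (frames_.empty())
          return nullptr;
        std::unique_ptr<AudioFrame> frame = std::move(frames_.back());
        frames_.pop_back();
        return frame;
      }
      // Called on the encoder queue when a task is done with its frame.
      void Push(std::unique_ptr<AudioFrame> frame) {
        rtc::CritScope cs(&lock_);
        frames_.push_back(std::move(frame));
      }

     private:
      rtc::CriticalSection lock_;
      std::vector<std::unique_ptr<AudioFrame>> frames_;
    };

Pop() and Push() race between the capture thread and the encoder queue, which is why the sketch takes a lock in both. kAudioFramePoolSize = 10 bounds memory at ten 10 ms frames no matter how far encoding falls behind.
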
| @@ -406,12 +412,41 @@ class VoERtcpObserver : public RtcpBandwidthObserver {
|    RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_);
|  };
| 
| +class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
| + public:
| +  ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_input,
| +                            Channel* channel,
| +                            AudioFramePool* audio_frame_pool)
| +      : audio_input_(std::move(audio_input)),
| +        channel_(channel),
| +        audio_frame_pool_(audio_frame_pool) {}
| +
| +  ~ProcessAndEncodeAudioTask() override {
| +    RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
| +    // Return the audio frame to the pool so that it can be reused.
| +    audio_frame_pool_->Push(std::move(audio_input_));
| +  }
| +
| + private:
| +  bool Run() override {
| +    RTC_DCHECK(channel_);
| +    RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
| +    channel_->ProcessAndEncodeAudioOnTaskQueue(audio_input_.get());
| +    return true;
| +  }
| +
| +  std::unique_ptr<AudioFrame> audio_input_;
| +  Channel* const channel_;
| +  AudioFramePool* const audio_frame_pool_;
| +};
| +
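Note: the task lifecycle above leans on the rtc::QueuedTask contract: when Run() returns true, the TaskQueue deletes the task, and that deletion normally happens on the queue itself. That is why the destructor can both assert RTC_DCHECK_RUN_ON(channel_->encoder_queue_) and return the frame to the pool without extra synchronization. A bare illustration of the contract (class name hypothetical):

    class ExampleTask : public rtc::QueuedTask {
     private:
      bool Run() override {
        // Returning true hands the task back to the queue for deletion,
        // so ~ExampleTask() also runs on the queue thread. Returning false
        // would mean the task gave its ownership away (e.g. re-posted itself).
        return true;
      }
    };
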
|  int32_t Channel::SendData(FrameType frameType,
|                            uint8_t payloadType,
|                            uint32_t timeStamp,
|                            const uint8_t* payloadData,
|                            size_t payloadSize,
|                            const RTPFragmentationHeader* fragmentation) {
| +  RTC_DCHECK_RUN_ON(encoder_queue_);
|    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
|                 "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
|                 " payloadSize=%" PRIuS ", fragmentation=0x%x)",
| @@ -441,7 +476,6 @@ int32_t Channel::SendData(FrameType frameType,
| 
|    _lastLocalTimeStamp = timeStamp;
|    _lastPayloadType = payloadType;
| -
|    return 0;
|  }
| 
| @@ -882,6 +916,7 @@ Channel::Channel(int32_t channelId,
|        _audioDeviceModulePtr(NULL),
|        _voiceEngineObserverPtr(NULL),
|        _callbackCritSectPtr(NULL),
| +      encoder_queue_(nullptr),
|        _transportPtr(NULL),
|        input_mute_(false),
|        previous_frame_muted_(false),
| @@ -902,6 +937,7 @@ Channel::Channel(int32_t channelId,
|        rtp_packet_sender_proxy_(new RtpPacketSenderProxy()),
|        retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(),
|                                                     kMaxRetransmissionWindowMs)),
| +      audio_frame_pool_(kAudioFramePoolSize),
|        decoder_factory_(config.acm_config.decoder_factory) {
|    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId),
|                 "Channel::Channel() - ctor");
| @@ -1097,20 +1133,18 @@ int32_t Channel::SetEngineInformation(Statistics& engineStatistics,
|                                        ProcessThread& moduleProcessThread,
|                                        AudioDeviceModule& audioDeviceModule,
|                                        VoiceEngineObserver* voiceEngineObserver,
| -                                      rtc::CriticalSection* callbackCritSect) {
| +                                      rtc::CriticalSection* callbackCritSect,
| +                                      rtc::TaskQueue* encoder_queue) {
|    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
|                 "Channel::SetEngineInformation()");
| +  RTC_DCHECK(encoder_queue);
|    _engineStatisticsPtr = &engineStatistics;
|    _outputMixerPtr = &outputMixer;
|    _moduleProcessThreadPtr = &moduleProcessThread;
|    _audioDeviceModulePtr = &audioDeviceModule;
|    _voiceEngineObserverPtr = voiceEngineObserver;
|    _callbackCritSectPtr = callbackCritSect;
| -  return 0;
| -}
| -
| -int32_t Channel::UpdateLocalTimeStamp() {
| -  _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
| +  encoder_queue_ = encoder_queue;
|    return 0;
|  }
| 
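Note: Channel does not own the queue; SetEngineInformation() only caches the raw pointer, so whoever constructs the queue must keep it alive for the lifetime of every Channel. A hypothetical wiring on the owner's side (the owning object and all variable names here are assumptions, not part of this diff):

    // One encoder queue shared by all channels of an engine instance.
    rtc::TaskQueue encoder_queue("AudioEncoderQueue");
    ...
    channel->SetEngineInformation(engine_statistics, output_mixer,
                                  module_process_thread, audio_device_module,
                                  voice_engine_observer, callback_crit_sect,
                                  &encoder_queue);
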
| @@ -2589,89 +2623,92 @@ int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
|    return _rtpRtcpModule->SendNACK(sequence_numbers, length);
|  }
| 
| -uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) {
| -  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
| -               "Channel::Demultiplex()");
| -  _audioFrame.CopyFrom(audioFrame);
| -  _audioFrame.id_ = _channelId;
| -  return 0;
| +void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
| +  RTC_DCHECK(encoder_queue_);
| +  std::unique_ptr<AudioFrame> audio_frame = audio_frame_pool_.Pop();
| +  RTC_DCHECK(audio_frame) << "Pool of audio frames is empty";
| +  if (audio_frame) {
| +    audio_frame->CopyFrom(audio_input);
| +    audio_frame->id_ = _channelId;
| +    encoder_queue_->PostTask(
| +        std::unique_ptr<rtc::QueuedTask>(new ProcessAndEncodeAudioTask(
| +            std::move(audio_frame), this, &audio_frame_pool_)));
| +  }
|  }
| 
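Note: both ProcessAndEncodeAudio() overloads follow the same shape: Pop() a preallocated frame on the capture thread, fill it, post it to the encoder queue, and let the task destructor return the frame to the pool. The failure mode looks deliberate: if encoding falls so far behind that all kAudioFramePoolSize frames are in flight, the RTC_DCHECK fires in debug builds, while release builds silently drop the 10 ms frame. Since each frame carries 10 ms of audio, the pool of 10 tolerates roughly 10 x 10 ms = 100 ms of encoder backlog before drops begin -- an audible glitch traded for bounded memory and no heap allocation on the real-time capture path.
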
| -void Channel::Demultiplex(const int16_t* audio_data,
| -                          int sample_rate,
| -                          size_t number_of_frames,
| -                          size_t number_of_channels) {
| +void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
| +                                    int sample_rate,
| +                                    size_t number_of_frames,
| +                                    size_t number_of_channels) {
| +  RTC_DCHECK(encoder_queue_);
|    CodecInst codec;
|    GetSendCodec(codec);
| +  std::unique_ptr<AudioFrame> audio_frame = audio_frame_pool_.Pop();
| +  RTC_DCHECK(audio_frame) << "Pool of audio frames is empty";
| +  if (audio_frame) {
| +    // Never upsample or upmix the capture signal here. This should be done
| +    // at the end of the send chain.
| +    audio_frame->sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
| +    audio_frame->num_channels_ = std::min(number_of_channels, codec.channels);
| +    RemixAndResample(audio_data, number_of_frames, number_of_channels,
| +                     sample_rate, &input_resampler_, audio_frame.get());
| +    audio_frame->id_ = _channelId;
| +    encoder_queue_->PostTask(
| +        std::unique_ptr<rtc::QueuedTask>(new ProcessAndEncodeAudioTask(
| +            std::move(audio_frame), this, &audio_frame_pool_)));
| +  }
| +}
| 
| -  // Never upsample or upmix the capture signal here. This should be done at the
| -  // end of the send chain.
| -  _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
| -  _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels);
| -  RemixAndResample(audio_data, number_of_frames, number_of_channels,
| -                   sample_rate, &input_resampler_, &_audioFrame);
| +void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
| +  RTC_DCHECK_RUN_ON(encoder_queue_);
| +  PrepareEncodeAndSend(audio_input);
| +  EncodeAndSend(audio_input);
|  }
| 
| -uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) {
| -  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
| -               "Channel::PrepareEncodeAndSend()");
| -
| -  if (_audioFrame.samples_per_channel_ == 0) {
| -    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
| -                 "Channel::PrepareEncodeAndSend() invalid audio frame");
| -    return 0xFFFFFFFF;
| -  }
| +uint32_t Channel::PrepareEncodeAndSend(AudioFrame* audio_input) {
| +  RTC_DCHECK_RUN_ON(encoder_queue_);
| +  RTC_DCHECK(audio_input->samples_per_channel_);
| 
|    if (channel_state_.Get().input_file_playing) {
| -    MixOrReplaceAudioWithFile(mixingFrequency);
| +    MixOrReplaceAudioWithFile(audio_input);
|    }
| 
|    bool is_muted = InputMute();  // Cache locally as InputMute() takes a lock.
| -  AudioFrameOperations::Mute(&_audioFrame, previous_frame_muted_, is_muted);
| +  AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);
| 
|    if (_includeAudioLevelIndication) {
|      size_t length =
| -        _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
| -    RTC_CHECK_LE(length, sizeof(_audioFrame.data_));
| +        audio_input->samples_per_channel_ * audio_input->num_channels_;
| +    RTC_CHECK_LE(length, sizeof(audio_input->data_));
|      if (is_muted && previous_frame_muted_) {
|        rms_level_.AnalyzeMuted(length);
|      } else {
|        rms_level_.Analyze(
| -          rtc::ArrayView<const int16_t>(_audioFrame.data_, length));
| +          rtc::ArrayView<const int16_t>(audio_input->data_, length));
|      }
|    }
|    previous_frame_muted_ = is_muted;
| -
|    return 0;
|  }
| 
| -uint32_t Channel::EncodeAndSend() {
| -  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
| -               "Channel::EncodeAndSend()");
| +uint32_t Channel::EncodeAndSend(AudioFrame* audio_input) {
| +  RTC_DCHECK_RUN_ON(encoder_queue_);
| +  RTC_DCHECK_LE(audio_input->num_channels_, 2);
| +  RTC_DCHECK(audio_input->samples_per_channel_);
| 
| -  assert(_audioFrame.num_channels_ <= 2);
| -  if (_audioFrame.samples_per_channel_ == 0) {
| -    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
| -                 "Channel::EncodeAndSend() invalid audio frame");
| -    return 0xFFFFFFFF;
| -  }
| -
| -  _audioFrame.id_ = _channelId;
| +  audio_input->id_ = _channelId;
| 
|    // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
| 
|    // The ACM resamples internally.
| -  _audioFrame.timestamp_ = _timeStamp;
| +  audio_input->timestamp_ = _timeStamp;
|    // This call will trigger AudioPacketizationCallback::SendData if encoding
|    // is done and payload is ready for packetization and transmission.
|    // Otherwise, it will return without invoking the callback.
| -  if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) {
| -    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
| -                 "Channel::EncodeAndSend() ACM encoding failed");
| +  if (audio_coding_->Add10MsData(*audio_input) < 0) {
| +    LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId;
|      return 0xFFFFFFFF;
|    }
| 
| -  _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
| +  _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_);
|    return 0;
|  }
| 
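Note: with SendData(), PrepareEncodeAndSend() and EncodeAndSend() all DCHECKed onto the encoder queue, _timeStamp is only ever touched from one task sequence, which is what allowed UpdateLocalTimeStamp() to be deleted earlier in this patch. The counter simply advances by one frame's worth of samples per delivered frame:

    // RTP timestamp advance per 10 ms frame (illustrative arithmetic):
    //   48 kHz capture: samples_per_channel_ = 480  ->  _timeStamp += 480
    //   16 kHz capture: samples_per_channel_ = 160  ->  _timeStamp += 160
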
| @@ -2781,46 +2818,44 @@ int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule,
| 
|  // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
|  // a shared helper.
| -int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) {
| +int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) {
| +  RTC_DCHECK_RUN_ON(encoder_queue_);
|    std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]);
|    size_t fileSamples(0);
| +  const int mixingFrequency = audio_input->sample_rate_hz_;
| 
| -  {
| -    rtc::CritScope cs(&_fileCritSect);
| -
| -    if (!input_file_player_) {
| -      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
| -                   "Channel::MixOrReplaceAudioWithFile() fileplayer"
| -                   " doesnt exist");
| -      return -1;
| -    }
| +  if (!input_file_player_) {
| +    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
| +                 "Channel::MixOrReplaceAudioWithFile() file player"
| +                 " doesn't exist");
| +    return -1;
| +  }
| 
| -    if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
| -                                                 mixingFrequency) == -1) {
| -      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
| -                   "Channel::MixOrReplaceAudioWithFile() file mixing "
| -                   "failed");
| -      return -1;
| -    }
| -    if (fileSamples == 0) {
| -      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
| -                   "Channel::MixOrReplaceAudioWithFile() file is ended");
| -      return 0;
| -    }
| +  if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
| +                                               mixingFrequency) == -1) {
| +    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
| +                 "Channel::MixOrReplaceAudioWithFile() file mixing "
| +                 "failed");
| +    return -1;
| +  }
| +  if (fileSamples == 0) {
| +    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
| +                 "Channel::MixOrReplaceAudioWithFile() file is ended");
| +    return 0;
|    }
| 
| -  assert(_audioFrame.samples_per_channel_ == fileSamples);
| +  assert(audio_input->samples_per_channel_ == fileSamples);
| 
|    if (_mixFileWithMicrophone) {
|      // Currently file stream is always mono.
|      // TODO(xians): Change the code when FilePlayer supports real stereo.
| -    MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(),
| +    MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(),
|                 1, fileSamples);
|    } else {
|      // Replace ACM audio with file.
|      // Currently file stream is always mono.
|      // TODO(xians): Change the code when FilePlayer supports real stereo.
| -    _audioFrame.UpdateFrame(
| +    audio_input->UpdateFrame(
|          _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency,
|          AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1);
|    }