Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1186)

Unified Diff: webrtc/voice_engine/channel.cc

Issue 2665693002: Moves channel-dependent audio input processing to separate encoder task queue (Closed)
Patch Set: Fixed dependency Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: webrtc/voice_engine/channel.cc
diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
index 31242f6db935626a25bf0a95089c71a7d7337079..8b5166dffe9cc443b65b940b66b400b06b59040d 100644
--- a/webrtc/voice_engine/channel.cc
+++ b/webrtc/voice_engine/channel.cc
@@ -21,6 +21,8 @@
#include "webrtc/base/location.h"
#include "webrtc/base/logging.h"
#include "webrtc/base/rate_limiter.h"
+#include "webrtc/base/task_queue.h"
+#include "webrtc/base/thread_checker.h"
#include "webrtc/base/timeutils.h"
#include "webrtc/call/rtp_transport_controller_send.h"
#include "webrtc/config.h"
@@ -408,12 +410,31 @@ class VoERtcpObserver : public RtcpBandwidthObserver {
RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_);
};
+class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
+ public:
+ ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_frame,
+ Channel* channel)
+ : audio_frame_(std::move(audio_frame)), channel_(channel) {}
+
+ private:
+ bool Run() override {
the sun 2017/03/28 12:57:50 Given the simplicity, could you even use a lambda
tommi 2017/03/28 13:47:20 fyi - there's one difference between this class an
the sun 2017/03/28 23:05:40 Ah, good point.
henrika_webrtc 2017/03/29 10:35:11 Given input from Tommi I would like to skip using
henrika_webrtc 2017/03/29 10:35:11 Done.
henrika_webrtc 2017/03/29 10:35:12 Acknowledged.
+ RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
+ RTC_DCHECK(channel_);
+ channel_->ProcessAndEncodeAudioOnTaskQueue(audio_frame_.get());
+ return true;
+ }
+
+ std::unique_ptr<AudioFrame> audio_frame_;
+ Channel* const channel_;
+};
+
int32_t Channel::SendData(FrameType frameType,
uint8_t payloadType,
uint32_t timeStamp,
const uint8_t* payloadData,
size_t payloadSize,
const RTPFragmentationHeader* fragmentation) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
" payloadSize=%" PRIuS ", fragmentation=0x%x)",
@@ -443,7 +464,6 @@ int32_t Channel::SendData(FrameType frameType,
_lastLocalTimeStamp = timeStamp;
the sun 2017/03/28 13:28:29 Appears unused - remove.
henrika_webrtc 2017/03/29 10:35:12 Nice. Thanks!
_lastPayloadType = payloadType;
the sun 2017/03/28 13:28:29 Appears unused - remove.
henrika_webrtc 2017/03/29 10:35:12 Done.
-
return 0;
}
@@ -884,6 +904,7 @@ Channel::Channel(int32_t channelId,
_audioDeviceModulePtr(NULL),
_voiceEngineObserverPtr(NULL),
_callbackCritSectPtr(NULL),
+ encoder_queue_(nullptr),
_transportPtr(NULL),
input_mute_(false),
previous_frame_muted_(false),
@@ -906,7 +927,8 @@ Channel::Channel(int32_t channelId,
kMaxRetransmissionWindowMs)),
decoder_factory_(config.acm_config.decoder_factory),
// TODO(elad.alon): Subsequent CL experiments with PLR source.
- use_twcc_plr_for_ana_(false) {
+ use_twcc_plr_for_ana_(false),
+ stop_send_event_(true /* manual_reset */, false) {
WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::Channel() - ctor");
AudioCodingModule::Config acm_config(config.acm_config);
@@ -938,6 +960,12 @@ Channel::Channel(int32_t channelId,
}
Channel::~Channel() {
+ // If sending has ever been activated, ensure that StopSend() has been called
+ // to flush out any pending tasks in the encoder queue.
+ if (channel_state_.Get().sending_has_been_activated) {
tommi 2017/03/28 13:47:20 calling Get() here grabs a lock and could actually
henrika_webrtc 2017/03/29 10:35:11 Did changes. Please check again.
+ RTC_DCHECK(stop_send_event_.Wait(0))
+ << "Must call StopSend() before destruction to clean up pending tasks";
+ }
RTC_DCHECK(!channel_state_.Get().sending);
RTC_DCHECK(!channel_state_.Get().playing);
}
@@ -1124,20 +1152,18 @@ int32_t Channel::SetEngineInformation(Statistics& engineStatistics,
ProcessThread& moduleProcessThread,
AudioDeviceModule& audioDeviceModule,
VoiceEngineObserver* voiceEngineObserver,
- rtc::CriticalSection* callbackCritSect) {
+ rtc::CriticalSection* callbackCritSect,
+ rtc::TaskQueue* encoder_queue) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::SetEngineInformation()");
+ RTC_DCHECK(encoder_queue);
_engineStatisticsPtr = &engineStatistics;
_outputMixerPtr = &outputMixer;
_moduleProcessThreadPtr = &moduleProcessThread;
_audioDeviceModulePtr = &audioDeviceModule;
_voiceEngineObserverPtr = voiceEngineObserver;
_callbackCritSectPtr = callbackCritSect;
- return 0;
-}
-
-int32_t Channel::UpdateLocalTimeStamp() {
- _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
+ encoder_queue_ = encoder_queue;
return 0;
}
@@ -1229,6 +1255,18 @@ int32_t Channel::StopSend() {
}
channel_state_.SetSending(false);
+ // Post a task to the encoder thread which sets an event when the task is
+ // executed. We know that no more encoding tasks will be added to the task
+ // queue for this channel since sending is now deactivated. It means that,
+ // if we wait for the event to be set, we know that no more pending tasks
+ // exist and it is therefore guaranteed that the task queue will never try
+ // to access an invalid channel object.
+ encoder_queue_->PostTask([this] {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
tommi 2017/03/28 13:47:19 nit: I don't think this is necessary. hmm... I do
henrika_webrtc 2017/03/29 10:35:11 Please let me wait with this larger task until the
henrika_webrtc 2017/03/29 11:36:20 I actually failed to do what was suggested since I
+ stop_send_event_.Set();
+ });
+ stop_send_event_.Wait(rtc::Event::kForever);
+
// Store the sequence number to be able to pick up the same sequence for
// the next StartSend(). This is needed for restarting device, otherwise
// it might cause libSRTP to complain about packets being replayed.
@@ -2647,89 +2685,84 @@ int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
return _rtpRtcpModule->SendNACK(sequence_numbers, length);
}
-uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) {
- WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::Demultiplex()");
- _audioFrame.CopyFrom(audioFrame);
- _audioFrame.id_ = _channelId;
- return 0;
+void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
+ RTC_DCHECK(encoder_queue_);
tommi 2017/03/28 13:47:20 did you intend to dcheck that you're running on th
henrika_webrtc 2017/03/29 10:35:11 Yes, since it is not set at ctor. See comments bel
+ std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
+ audio_frame->CopyFrom(audio_input);
+ audio_frame->id_ = ChannelId();
+ encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
+ new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
}
-void Channel::Demultiplex(const int16_t* audio_data,
- int sample_rate,
- size_t number_of_frames,
- size_t number_of_channels) {
+void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
+ int sample_rate,
+ size_t number_of_frames,
+ size_t number_of_channels) {
+ RTC_DCHECK(encoder_queue_);
tommi 2017/03/28 13:47:20 same here... might want to do a search/replace for
henrika_webrtc 2017/03/29 10:35:11 Actually, looking at the TSAN issues I realize tha
CodecInst codec;
GetSendCodec(codec);
-
- // Never upsample or upmix the capture signal here. This should be done at the
- // end of the send chain.
- _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
- _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels);
+ std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
+ audio_frame->id_ = ChannelId();
+ audio_frame->sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
+ audio_frame->num_channels_ = std::min(number_of_channels, codec.channels);
RemixAndResample(audio_data, number_of_frames, number_of_channels,
- sample_rate, &input_resampler_, &_audioFrame);
+ sample_rate, &input_resampler_, audio_frame.get());
+ encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
+ new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
}
-uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) {
- WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::PrepareEncodeAndSend()");
+void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ PrepareEncodeAndSend(audio_input);
+ EncodeAndSend(audio_input);
+}
- if (_audioFrame.samples_per_channel_ == 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::PrepareEncodeAndSend() invalid audio frame");
- return 0xFFFFFFFF;
- }
+uint32_t Channel::PrepareEncodeAndSend(AudioFrame* audio_input) {
the sun 2017/03/28 12:57:50 Could you fuse PrepareEncodeAndSend with EncodeAnd
henrika_webrtc 2017/03/29 10:35:11 Done.
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ RTC_DCHECK(audio_input->samples_per_channel_);
if (channel_state_.Get().input_file_playing) {
- MixOrReplaceAudioWithFile(mixingFrequency);
+ MixOrReplaceAudioWithFile(audio_input);
}
bool is_muted = InputMute(); // Cache locally as InputMute() takes a lock.
- AudioFrameOperations::Mute(&_audioFrame, previous_frame_muted_, is_muted);
+ AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);
the sun 2017/03/28 13:28:29 Declare ACCESS_ON for previous_frame_muted_
henrika_webrtc 2017/03/29 10:35:11 Done.
if (_includeAudioLevelIndication) {
the sun 2017/03/28 13:28:29 _includeAudioLevelIndication is now potentially ra
henrika_webrtc 2017/03/29 10:35:11 Yes, discussed offline. No action here. Adding tod
size_t length =
- _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
- RTC_CHECK_LE(length, sizeof(_audioFrame.data_));
+ audio_input->samples_per_channel_ * audio_input->num_channels_;
+ RTC_CHECK_LE(length, sizeof(audio_input->data_));
if (is_muted && previous_frame_muted_) {
rms_level_.AnalyzeMuted(length);
the sun 2017/03/28 13:28:29 Declare ACCESS_ON for rms_level_
henrika_webrtc 2017/03/29 10:35:11 Done.
} else {
rms_level_.Analyze(
- rtc::ArrayView<const int16_t>(_audioFrame.data_, length));
+ rtc::ArrayView<const int16_t>(audio_input->data_, length));
}
}
previous_frame_muted_ = is_muted;
-
return 0;
}
-uint32_t Channel::EncodeAndSend() {
- WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::EncodeAndSend()");
-
- assert(_audioFrame.num_channels_ <= 2);
- if (_audioFrame.samples_per_channel_ == 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::EncodeAndSend() invalid audio frame");
- return 0xFFFFFFFF;
- }
+uint32_t Channel::EncodeAndSend(AudioFrame* audio_input) {
the sun 2017/03/28 12:57:50 Remove return value - it is unused.
henrika_webrtc 2017/03/29 10:35:11 This method is now removed.
+ RTC_DCHECK_RUN_ON(encoder_queue_);
+ RTC_DCHECK_LE(audio_input->num_channels_, 2);
+ RTC_DCHECK(audio_input->samples_per_channel_);
the sun 2017/03/28 12:57:50 _GT(..., 0);
henrika_webrtc 2017/03/29 10:35:11 Done.
- _audioFrame.id_ = _channelId;
+ audio_input->id_ = _channelId;
the sun 2017/03/28 12:57:50 Already did that in Channel::ProcessAndEncodeAudio
tommi 2017/03/28 13:47:20 change to a dcheck_eq?
henrika_webrtc 2017/03/29 10:35:11 Done.
henrika_webrtc 2017/03/29 10:35:11 Acknowledged.
// --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
// The ACM resamples internally.
- _audioFrame.timestamp_ = _timeStamp;
+ audio_input->timestamp_ = _timeStamp;
// This call will trigger AudioPacketizationCallback::SendData if encoding
// is done and payload is ready for packetization and transmission.
// Otherwise, it will return without invoking the callback.
- if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) {
- WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::EncodeAndSend() ACM encoding failed");
+ if (audio_coding_->Add10MsData(*audio_input) < 0) {
+ LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId;
return 0xFFFFFFFF;
}
- _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
+ _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_);
the sun 2017/03/28 12:57:50 Can you use ACCESS_ON() in the .h, when _timeStamp
henrika_webrtc 2017/03/29 10:35:11 Should work. Yes.
return 0;
}
@@ -2839,46 +2872,44 @@ int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule,
// TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
// a shared helper.
-int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) {
+int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) {
+ RTC_DCHECK_RUN_ON(encoder_queue_);
std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]);
size_t fileSamples(0);
+ const int mixingFrequency = audio_input->sample_rate_hz_;
- {
- rtc::CritScope cs(&_fileCritSect);
-
- if (!input_file_player_) {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::MixOrReplaceAudioWithFile() fileplayer"
- " doesnt exist");
- return -1;
- }
+ if (!input_file_player_) {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::MixOrReplaceAudioWithFile() fileplayer"
+ " doesnt exist");
+ return -1;
+ }
- if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
- mixingFrequency) == -1) {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::MixOrReplaceAudioWithFile() file mixing "
- "failed");
- return -1;
- }
- if (fileSamples == 0) {
- WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
- "Channel::MixOrReplaceAudioWithFile() file is ended");
- return 0;
- }
+ if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
the sun 2017/03/28 12:57:50 We must still hold _fileCritSect while making this
tommi 2017/03/28 13:47:19 Alternatively, we could change input_file_player_
the sun 2017/03/28 23:05:40 This code is being stripped away with the VoEFile
henrika_webrtc 2017/03/29 10:35:11 Added _fileCritSect, Lot's of changes required to
+ mixingFrequency) == -1) {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::MixOrReplaceAudioWithFile() file mixing "
+ "failed");
+ return -1;
+ }
+ if (fileSamples == 0) {
+ WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
+ "Channel::MixOrReplaceAudioWithFile() file is ended");
+ return 0;
}
- assert(_audioFrame.samples_per_channel_ == fileSamples);
+ assert(audio_input->samples_per_channel_ == fileSamples);
the sun 2017/03/28 12:57:50 RTC_DCHECK
henrika_webrtc 2017/03/29 10:35:11 Done.
if (_mixFileWithMicrophone) {
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
- MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(),
+ MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(),
1, fileSamples);
} else {
// Replace ACM audio with file.
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
- _audioFrame.UpdateFrame(
+ audio_input->UpdateFrame(
_channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency,
AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1);
}

Powered by Google App Engine
This is Rietveld 408576698