Index: webrtc/voice_engine/channel.cc
diff --git a/webrtc/voice_engine/channel.cc b/webrtc/voice_engine/channel.cc
index 3b562f91561c83927ff973b13e51622ea10a1166..7610e3a3d2bacbc1b4e8aee9c9c53743e58a103b 100644
--- a/webrtc/voice_engine/channel.cc
+++ b/webrtc/voice_engine/channel.cc
@@ -21,6 +21,8 @@
 #include "webrtc/base/location.h"
 #include "webrtc/base/logging.h"
 #include "webrtc/base/rate_limiter.h"
+#include "webrtc/base/task_queue.h"
+#include "webrtc/base/thread_checker.h"
 #include "webrtc/base/timeutils.h"
 #include "webrtc/config.h"
 #include "webrtc/logging/rtc_event_log/rtc_event_log.h"
@@ -407,12 +409,48 @@ class VoERtcpObserver : public RtcpBandwidthObserver {
   RtcpBandwidthObserver* bandwidth_observer_ GUARDED_BY(crit_);
 };
+class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
+ public:
+  ProcessAndEncodeAudioTask(const AudioFrame& audio_input, Channel* channel)
+      : channel_(channel) {
+    audio_input_.CopyFrom(audio_input);
+    audio_input_.id_ = channel->ChannelId();
+  }
+
+  ProcessAndEncodeAudioTask(const int16_t* audio_data,
+                            int sample_rate,
+                            size_t number_of_frames,
+                            size_t number_of_channels,
+                            Channel* channel)
+      : channel_(channel) {
+    audio_input_.id_ = channel->ChannelId();
+    audio_input_.sample_rate_hz_ = sample_rate;
+    audio_input_.num_channels_ = number_of_channels;
+    RemixAndResample(audio_data, number_of_frames, number_of_channels,
+                     sample_rate, channel->input_resampler(),
+                     &audio_input_);

aleloi
2017/03/24 16:59:50
Can resampling be done on the queue? E.g. copy the raw audio here and do the
RemixAndResample() call in Run() instead.

henrika_webrtc
2017/03/24 17:09:36
My bad. That was actually my intention. I will rewrite.

+  }
+
+ private:
+  bool Run() override {
+    RTC_DCHECK(channel_);
+    RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
+    LOG(INFO) << "___Run";
+    channel_->ProcessAndEncodeAudioOnTaskQueue(&audio_input_);
+    return true;
+  }
+
+  AudioFrame audio_input_;
+  Channel* const channel_;
+};
+
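The review thread above asks whether the resampling can move onto the queue. A
minimal sketch of what that variant could look like, assuming <vector> is
available; apart from channel_->input_resampler() and
ProcessAndEncodeAudioOnTaskQueue(), which appear in this CL, the names and
member layout are illustrative, not the actual follow-up patch:

class ProcessAndEncodeAudioTaskSketch : public rtc::QueuedTask {
 public:
  // Copy the raw capture data only; no processing on the capture thread.
  ProcessAndEncodeAudioTaskSketch(const int16_t* audio_data,
                                  int sample_rate,
                                  size_t number_of_frames,
                                  size_t number_of_channels,
                                  Channel* channel)
      : raw_audio_(audio_data,
                   audio_data + number_of_frames * number_of_channels),
        sample_rate_(sample_rate),
        number_of_frames_(number_of_frames),
        number_of_channels_(number_of_channels),
        channel_(channel) {}

 private:
  bool Run() override {
    RTC_DCHECK(channel_);
    RTC_DCHECK_RUN_ON(channel_->encoder_queue_);
    AudioFrame audio_input;
    audio_input.id_ = channel_->ChannelId();
    audio_input.sample_rate_hz_ = sample_rate_;
    audio_input.num_channels_ = number_of_channels_;
    // The remix/resample step now runs on the encoder queue instead of on
    // the real-time capture thread.
    RemixAndResample(raw_audio_.data(), number_of_frames_,
                     number_of_channels_, sample_rate_,
                     channel_->input_resampler(), &audio_input);
    channel_->ProcessAndEncodeAudioOnTaskQueue(&audio_input);
    return true;
  }

  const std::vector<int16_t> raw_audio_;
  const int sample_rate_;
  const size_t number_of_frames_;
  const size_t number_of_channels_;
  Channel* const channel_;
};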
 int32_t Channel::SendData(FrameType frameType,
                           uint8_t payloadType,
                           uint32_t timeStamp,
                           const uint8_t* payloadData,
                           size_t payloadSize,
                           const RTPFragmentationHeader* fragmentation) {
+  RTC_DCHECK_RUN_ON(encoder_queue_);
   WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
                "Channel::SendData(frameType=%u, payloadType=%u, timeStamp=%u,"
                " payloadSize=%" PRIuS ", fragmentation=0x%x)",
@@ -442,7 +480,6 @@ int32_t Channel::SendData(FrameType frameType,
   _lastLocalTimeStamp = timeStamp;
   _lastPayloadType = payloadType;
-
   return 0;
 }
@@ -883,6 +920,7 @@ Channel::Channel(int32_t channelId,
       _audioDeviceModulePtr(NULL),
       _voiceEngineObserverPtr(NULL),
       _callbackCritSectPtr(NULL),
+      encoder_queue_(nullptr),
       _transportPtr(NULL),
       input_mute_(false),
       previous_frame_muted_(false),
@@ -905,7 +943,9 @@ Channel::Channel(int32_t channelId,
           kMaxRetransmissionWindowMs)),
       decoder_factory_(config.acm_config.decoder_factory),
       // TODO(elad.alon): Subsequent CL experiments with PLR source.
-      use_twcc_plr_for_ana_(false) {
+      use_twcc_plr_for_ana_(false),
+      stop_send_event_(true /* manual_reset */, false) {
+  LOG(INFO) << "___ctor: " << ChannelId();
   WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId),
                "Channel::Channel() - ctor");
   AudioCodingModule::Config acm_config(config.acm_config);
@@ -937,6 +977,13 @@ Channel::Channel(int32_t channelId,
 }
 Channel::~Channel() {
+  LOG(INFO) << "___dtor: " << ChannelId();
+  // If sending has ever been activated, ensure that StopSend() has been
+  // called to flush out any pending tasks in the encoder queue. Note that
+  // rtc::Event::Wait(0) is a non-blocking probe: it returns true only if
+  // the event has already been set.
+  if (channel_state_.Get().sending_has_been_activated) {
+    RTC_DCHECK(stop_send_event_.Wait(0))
+        << "Must call StopSend() before destruction to clean up pending tasks";
+  }
   RTC_DCHECK(!channel_state_.Get().sending);
   RTC_DCHECK(!channel_state_.Get().playing);
 }
@@ -1099,20 +1146,18 @@ int32_t Channel::SetEngineInformation(Statistics& engineStatistics,
                                       ProcessThread& moduleProcessThread,
                                       AudioDeviceModule& audioDeviceModule,
                                       VoiceEngineObserver* voiceEngineObserver,
-                                      rtc::CriticalSection* callbackCritSect) {
+                                      rtc::CriticalSection* callbackCritSect,
+                                      rtc::TaskQueue* encoder_queue) {
   WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
                "Channel::SetEngineInformation()");
+  RTC_DCHECK(encoder_queue);
   _engineStatisticsPtr = &engineStatistics;
   _outputMixerPtr = &outputMixer;
   _moduleProcessThreadPtr = &moduleProcessThread;
   _audioDeviceModulePtr = &audioDeviceModule;
   _voiceEngineObserverPtr = voiceEngineObserver;
   _callbackCritSectPtr = callbackCritSect;
-  return 0;
-}
-
-int32_t Channel::UpdateLocalTimeStamp() {
-  _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
+  encoder_queue_ = encoder_queue;
   return 0;
 }

aleloi
2017/03/24 16:59:50
This is a cleanup of dead code, right?

henrika_webrtc
2017/03/24 17:09:36
Honestly don't know how it ended up in this CL. Might have slipped in from a
rebase; it is dead code in any case.
@@ -1203,6 +1248,19 @@ int32_t Channel::StopSend() {
     return 0;
   }
   channel_state_.SetSending(false);
+  LOG(INFO) << "___StopSend: " << ChannelId();
+
+  // Post a task to the encoder thread which sets an event when the task is
+  // executed. We know that no more encoding tasks will be added to the task
+  // queue for this channel since sending is now deactivated. It means that,
+  // if we wait for the event to be set, we know that no pending tasks exist
+  // and it is therefore guaranteed that the task queue will never try to
+  // access an invalid channel object.
+  encoder_queue_->PostTask([this] {
+    RTC_DCHECK_RUN_ON(encoder_queue_);
+    stop_send_event_.Set();
+  });
+  stop_send_event_.Wait(rtc::Event::kForever);
   // Store the sequence number to be able to pick up the same sequence for
   // the next StartSend(). This is needed for restarting device, otherwise
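The post-and-wait above acts as a fence: once sending is deactivated, no new
encode tasks are posted for this channel, so a task that merely sets an event
marks the point after which the queue holds no more work touching this
Channel. The same pattern in isolation, a minimal sketch using only
rtc::TaskQueue and rtc::Event as they are already used in this CL:

// Blocks the caller until every task posted to |queue| before this call has
// run. Capturing |done| by reference is safe because we wait forever, so the
// event outlives the posted lambda.
void FlushEncoderQueue(rtc::TaskQueue* queue) {
  rtc::Event done(false /* manual_reset */, false /* initially_signaled */);
  queue->PostTask([&done] { done.Set(); });
  done.Wait(rtc::Event::kForever);
}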
@@ -2619,89 +2677,78 @@ int Channel::ResendPackets(const uint16_t* sequence_numbers, int length) {
   return _rtpRtcpModule->SendNACK(sequence_numbers, length);
 }
-uint32_t Channel::Demultiplex(const AudioFrame& audioFrame) {
-  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
-               "Channel::Demultiplex()");
-  _audioFrame.CopyFrom(audioFrame);
-  _audioFrame.id_ = _channelId;
-  return 0;
+void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
+  RTC_DCHECK(encoder_queue_);
+  encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
+      new ProcessAndEncodeAudioTask(audio_input, this)));
 }
-void Channel::Demultiplex(const int16_t* audio_data,
-                          int sample_rate,
-                          size_t number_of_frames,
-                          size_t number_of_channels) {
+void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
+                                    int sample_rate,
+                                    size_t number_of_frames,
+                                    size_t number_of_channels) {
+  RTC_DCHECK(encoder_queue_);
   CodecInst codec;
   GetSendCodec(codec);
-
-  // Never upsample or upmix the capture signal here. This should be done at the
-  // end of the send chain.
-  _audioFrame.sample_rate_hz_ = std::min(codec.plfreq, sample_rate);
-  _audioFrame.num_channels_ = std::min(number_of_channels, codec.channels);
-  RemixAndResample(audio_data, number_of_frames, number_of_channels,
-                   sample_rate, &input_resampler_, &_audioFrame);
+  const int sample_rate_hz = std::min(codec.plfreq, sample_rate);
+  const size_t num_channels = std::min(number_of_channels, codec.channels);
+  encoder_queue_->PostTask(
+      std::unique_ptr<rtc::QueuedTask>(new ProcessAndEncodeAudioTask(
+          audio_data, sample_rate_hz, number_of_frames, num_channels, this)));
 }
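For context, a hypothetical capture-side callsite under the new API; the
parameter values (a 10 ms frame of 48 kHz stereo) are illustrative and not
taken from this CL:

// Only a copy and a PostTask happen on the capture thread; processing and
// encoding run later on the shared encoder queue.
channel->ProcessAndEncodeAudio(audio_data,
                               48000 /* sample_rate */,
                               480 /* number_of_frames: 10 ms at 48 kHz */,
                               2 /* number_of_channels */);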
-uint32_t Channel::PrepareEncodeAndSend(int mixingFrequency) {
-  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
-               "Channel::PrepareEncodeAndSend()");
+void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
+  RTC_DCHECK_RUN_ON(encoder_queue_);
+  PrepareEncodeAndSend(audio_input);
+  EncodeAndSend(audio_input);
+}
-  if (_audioFrame.samples_per_channel_ == 0) {
-    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
-                 "Channel::PrepareEncodeAndSend() invalid audio frame");
-    return 0xFFFFFFFF;
-  }
+uint32_t Channel::PrepareEncodeAndSend(AudioFrame* audio_input) {
+  RTC_DCHECK_RUN_ON(encoder_queue_);
+  RTC_DCHECK(audio_input->samples_per_channel_);
   if (channel_state_.Get().input_file_playing) {
-    MixOrReplaceAudioWithFile(mixingFrequency);
+    MixOrReplaceAudioWithFile(audio_input);
   }
   bool is_muted = InputMute();  // Cache locally as InputMute() takes a lock.
-  AudioFrameOperations::Mute(&_audioFrame, previous_frame_muted_, is_muted);
+  AudioFrameOperations::Mute(audio_input, previous_frame_muted_, is_muted);
   if (_includeAudioLevelIndication) {
     size_t length =
-        _audioFrame.samples_per_channel_ * _audioFrame.num_channels_;
-    RTC_CHECK_LE(length, sizeof(_audioFrame.data_));
+        audio_input->samples_per_channel_ * audio_input->num_channels_;
+    RTC_CHECK_LE(length, sizeof(audio_input->data_));
     if (is_muted && previous_frame_muted_) {
       rms_level_.AnalyzeMuted(length);
     } else {
       rms_level_.Analyze(
-          rtc::ArrayView<const int16_t>(_audioFrame.data_, length));
+          rtc::ArrayView<const int16_t>(audio_input->data_, length));
     }
   }
   previous_frame_muted_ = is_muted;
-
   return 0;
 }
-uint32_t Channel::EncodeAndSend() {
-  WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId),
-               "Channel::EncodeAndSend()");
-
-  assert(_audioFrame.num_channels_ <= 2);
-  if (_audioFrame.samples_per_channel_ == 0) {
-    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
-                 "Channel::EncodeAndSend() invalid audio frame");
-    return 0xFFFFFFFF;
-  }
+uint32_t Channel::EncodeAndSend(AudioFrame* audio_input) {
+  RTC_DCHECK_RUN_ON(encoder_queue_);
+  RTC_DCHECK_LE(audio_input->num_channels_, 2);
+  RTC_DCHECK(audio_input->samples_per_channel_);
-  _audioFrame.id_ = _channelId;
+  audio_input->id_ = _channelId;
   // --- Add 10ms of raw (PCM) audio data to the encoder @ 32kHz.
   // The ACM resamples internally.
-  _audioFrame.timestamp_ = _timeStamp;
+  audio_input->timestamp_ = _timeStamp;
   // This call will trigger AudioPacketizationCallback::SendData if encoding
   // is done and payload is ready for packetization and transmission.
   // Otherwise, it will return without invoking the callback.
-  if (audio_coding_->Add10MsData((AudioFrame&)_audioFrame) < 0) {
-    WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId),
-                 "Channel::EncodeAndSend() ACM encoding failed");
+  if (audio_coding_->Add10MsData(*audio_input) < 0) {
+    LOG(LS_ERROR) << "ACM::Add10MsData() failed for channel " << _channelId;
     return 0xFFFFFFFF;
   }
-  _timeStamp += static_cast<uint32_t>(_audioFrame.samples_per_channel_);
+  _timeStamp += static_cast<uint32_t>(audio_input->samples_per_channel_);
   return 0;
 }
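The callback chain referenced in the comment above is wired up outside this
diff: Channel itself implements AudioPacketizationCallback (its SendData()
appears at the top of this diff) and registers itself with the ACM, in the
real code from Channel::Init(), roughly as below. Note also that _timeStamp
advances by samples_per_channel_ per 10 ms frame, e.g. by 480 at 48 kHz, so
the RTP timestamp stays in units of the input sample rate.

// Channel is the AudioPacketizationCallback, so a successful encode inside
// Add10MsData() re-enters Channel::SendData() synchronously on the encoder
// queue; that is what RTC_DCHECK_RUN_ON(encoder_queue_) in SendData() relies
// on.
audio_coding_->RegisterTransportCallback(this);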
@@ -2811,46 +2858,44 @@ int Channel::GetRtpRtcp(RtpRtcp** rtpRtcpModule,
 // TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
 // a shared helper.
-int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) {
+int32_t Channel::MixOrReplaceAudioWithFile(AudioFrame* audio_input) {
+  RTC_DCHECK_RUN_ON(encoder_queue_);
   std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]);
   size_t fileSamples(0);
+  const int mixingFrequency = audio_input->sample_rate_hz_;
-  {
-    rtc::CritScope cs(&_fileCritSect);
-
-    if (!input_file_player_) {
-      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
-                   "Channel::MixOrReplaceAudioWithFile() fileplayer"
-                   " doesnt exist");
-      return -1;
-    }
+  if (!input_file_player_) {
+    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::MixOrReplaceAudioWithFile() fileplayer"
+                 " doesnt exist");
+    return -1;
+  }
-    if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
-                                                 mixingFrequency) == -1) {
-      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
-                   "Channel::MixOrReplaceAudioWithFile() file mixing "
-                   "failed");
-      return -1;
-    }
-    if (fileSamples == 0) {
-      WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
-                   "Channel::MixOrReplaceAudioWithFile() file is ended");
-      return 0;
-    }
+  if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
+                                               mixingFrequency) == -1) {
+    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::MixOrReplaceAudioWithFile() file mixing "
+                 "failed");
+    return -1;
+  }
+  if (fileSamples == 0) {
+    WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
+                 "Channel::MixOrReplaceAudioWithFile() file is ended");
+    return 0;
   }
-  assert(_audioFrame.samples_per_channel_ == fileSamples);
+  assert(audio_input->samples_per_channel_ == fileSamples);
   if (_mixFileWithMicrophone) {
     // Currently file stream is always mono.
     // TODO(xians): Change the code when FilePlayer supports real stereo.
-    MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(),
+    MixWithSat(audio_input->data_, audio_input->num_channels_, fileBuffer.get(),
                1, fileSamples);
   } else {
     // Replace ACM audio with file.
     // Currently file stream is always mono.
     // TODO(xians): Change the code when FilePlayer supports real stereo.
-    _audioFrame.UpdateFrame(
+    audio_input->UpdateFrame(
         _channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency,
         AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1);
   }
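MixWithSat() above is a voice-engine helper; its defining property is that it
adds the file samples into the capture samples with saturation rather than
wraparound. A sketch of that core operation (an illustration of the idea,
assuming <algorithm> and <cstdint>, not the actual helper):

// Saturating add of two 16-bit samples: widen to 32 bits, add, then clamp to
// the int16 range so a loud file plus a loud microphone signal cannot wrap
// around to the opposite sign.
int16_t SaturatedAdd(int16_t a, int16_t b) {
  const int32_t sum = static_cast<int32_t>(a) + static_cast<int32_t>(b);
  return static_cast<int16_t>(
      std::max<int32_t>(-32768, std::min<int32_t>(32767, sum)));
}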