Index: webrtc/voice_engine/channel.h |
diff --git a/webrtc/voice_engine/channel.h b/webrtc/voice_engine/channel.h |
index 24a7d6bfa0dc91457758d1f1aaeb0dc464ac7521..28dd8b8caa044dd41f2babf42f73f8f022c90bc1 100644 |
--- a/webrtc/voice_engine/channel.h |
+++ b/webrtc/voice_engine/channel.h |
@@ -16,6 +16,7 @@ |
#include "webrtc/api/audio/audio_mixer.h" |
#include "webrtc/api/call/audio_sink.h" |
#include "webrtc/base/criticalsection.h" |
+#include "webrtc/base/event.h" |
#include "webrtc/base/optional.h" |
#include "webrtc/base/thread_checker.h" |
#include "webrtc/common_audio/resampler/include/push_resampler.h" |
@@ -87,6 +88,7 @@ class ChannelState { |
bool input_file_playing = false; |
bool playing = false; |
bool sending = false; |
+ bool sending_has_been_activated = false; |
tommi
2017/03/28 13:47:20
what's the difference between this variable and sending?
|
}; |
ChannelState() {} |
@@ -120,6 +122,7 @@ class ChannelState { |
void SetSending(bool enable) { |
rtc::CritScope lock(&lock_); |
state_.sending = enable; |
+ state_.sending_has_been_activated = enable; |
} |
private: |
@@ -159,8 +162,8 @@ class Channel |
ProcessThread& moduleProcessThread, |
AudioDeviceModule& audioDeviceModule, |
VoiceEngineObserver* voiceEngineObserver, |
- rtc::CriticalSection* callbackCritSect); |
- int32_t UpdateLocalTimeStamp(); |
+ rtc::CriticalSection* callbackCritSect, |
+ rtc::TaskQueue* encoder_queue); |
void SetSink(std::unique_ptr<AudioSinkInterface> sink); |
@@ -354,16 +357,37 @@ class Channel |
} |
RtpRtcp* RtpRtcpModulePtr() const { return _rtpRtcpModule.get(); } |
int8_t OutputEnergyLevel() const { return _outputAudioLevel.Level(); } |
- uint32_t Demultiplex(const AudioFrame& audioFrame); |
- // Demultiplex the data to the channel's |_audioFrame|. The difference |
- // between this method and the overloaded method above is that |audio_data| |
- // does not go through transmit_mixer and APM. |
- void Demultiplex(const int16_t* audio_data, |
- int sample_rate, |
- size_t number_of_frames, |
- size_t number_of_channels); |
- uint32_t PrepareEncodeAndSend(int mixingFrequency); |
- uint32_t EncodeAndSend(); |
+ PushResampler<int16_t>* input_resampler() { return &input_resampler_; } |
the sun
2017/03/28 12:57:50
Not needed anymore?
henrika_webrtc
2017/03/29 10:35:12
Acknowledged.
|
+ |
+ // ProcessAndEncodeAudio() creates an audio frame copy and posts a task |
+  // on the shared encoder task queue, which in turn calls (on the queue) |
+ // ProcessAndEncodeAudioOnTaskQueue() where the actual processing of the |
+ // audio takes place. The processing mainly consists of encoding and preparing |
+ // the result for sending by adding it to a send queue. |
+ // The main reason for using a task queue here is to release the native, |
+ // OS-specific, audio capture thread as soon as possible to ensure that it |
+  // can go back to sleep and be prepared to deliver a new captured audio |
+ // packet. |
+ void ProcessAndEncodeAudio(const AudioFrame& audio_input); |
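For reference, a minimal sketch of how this hand-off might look in channel.cc (not part of this header diff; the task's members and the exact PostTask call are assumptions, only the declarations above are authoritative). rtc::QueuedTask, rtc::TaskQueue::PostTask() and AudioFrame::CopyFrom() are existing WebRTC APIs:

class Channel::ProcessAndEncodeAudioTask : public rtc::QueuedTask {
 public:
  ProcessAndEncodeAudioTask(std::unique_ptr<AudioFrame> audio_input,
                            Channel* channel)
      : audio_input_(std::move(audio_input)), channel_(channel) {}

 private:
  bool Run() override {
    // Runs on the shared encoder task queue.
    channel_->ProcessAndEncodeAudioOnTaskQueue(audio_input_.get());
    return true;  // Returning true lets the queue delete the task.
  }

  std::unique_ptr<AudioFrame> audio_input_;
  Channel* const channel_;
};

void Channel::ProcessAndEncodeAudio(const AudioFrame& audio_input) {
  // Copy the frame so the OS capture thread can return immediately and
  // reuse its buffer.
  std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
  audio_frame->CopyFrom(audio_input);
  encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
      new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
}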
+ |
+ // This version of ProcessAndEncodeAudio() is used by PushCaptureData() in |
+ // VoEBase and the audio in |audio_data| has not been subject to any APM |
+  // processing. Some extra steps are therefore needed when building up the |
+ // audio frame copy before using the same task as in the default call to |
+ // ProcessAndEncodeAudio(const AudioFrame& audio_input). |
+ void ProcessAndEncodeAudio(const int16_t* audio_data, |
+ int sample_rate, |
+ size_t number_of_frames, |
+ size_t number_of_channels); |
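Again purely as an illustration (a sketch in channel.cc; the real implementation may also remix or resample to the codec rate, which is omitted here), the raw-data overload could build the frame copy along these lines before posting the same task type:

void Channel::ProcessAndEncodeAudio(const int16_t* audio_data,
                                    int sample_rate,
                                    size_t number_of_frames,
                                    size_t number_of_channels) {
  // |audio_data| has bypassed transmit_mixer and APM, so the frame fields
  // are filled in manually here.
  std::unique_ptr<AudioFrame> audio_frame(new AudioFrame());
  audio_frame->sample_rate_hz_ = sample_rate;
  audio_frame->samples_per_channel_ = number_of_frames;
  audio_frame->num_channels_ = number_of_channels;
  memcpy(audio_frame->data_, audio_data,
         number_of_frames * number_of_channels * sizeof(int16_t));
  encoder_queue_->PostTask(std::unique_ptr<rtc::QueuedTask>(
      new ProcessAndEncodeAudioTask(std::move(audio_frame), this)));
}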
+ |
+ // Called on the encoder task queue when a new input audio frame is ready |
+ // for encoding. |
+ void ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input); |
tommi
2017/03/28 13:47:20
Will the frame be modified by this function? If not, can it be passed as const?
|
+ |
+ // Internal helper methods used by ProcessAndEncodeAudioOnTaskQueue(). |
+ // Both are called on the encoder task queue. |
+ uint32_t PrepareEncodeAndSend(AudioFrame* audio_input); |
the sun
2017/03/28 12:57:50
Fuse them into ProcessAndEncodeAudioOnTaskQueue()
henrika_webrtc
2017/03/29 10:35:12
Great idea ;-)
|
+ uint32_t EncodeAndSend(AudioFrame* audio_input); |
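To make the call flow concrete, ProcessAndEncodeAudioOnTaskQueue() might simply chain the two helpers on the queue (a sketch only; the review suggestion above is to fuse them entirely, and the exact processing steps are not visible in this diff):

void Channel::ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input) {
  RTC_DCHECK(encoder_queue_->IsCurrent());
  // E.g. file mixing, mute handling and level measurement.
  PrepareEncodeAndSend(audio_input);
  // Hands the processed frame to the audio coding module for encoding.
  EncodeAndSend(audio_input);
}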
// Associate to a send channel. |
// Used for obtaining RTT for a receive-only channel. |
@@ -389,8 +413,9 @@ class Channel |
void OnRecoverableUplinkPacketLossRate(float recoverable_packet_loss_rate); |
private: |
- void OnUplinkPacketLossRate(float packet_loss_rate); |
+ class ProcessAndEncodeAudioTask; |
+ void OnUplinkPacketLossRate(float packet_loss_rate); |
bool InputMute() const; |
bool OnRtpPacketWithHeader(const uint8_t* received_packet, |
size_t length, |
@@ -405,7 +430,7 @@ class Channel |
bool IsPacketInOrder(const RTPHeader& header) const; |
bool IsPacketRetransmitted(const RTPHeader& header, bool in_order) const; |
int ResendPackets(const uint16_t* sequence_numbers, int length); |
- int32_t MixOrReplaceAudioWithFile(int mixingFrequency); |
+ int32_t MixOrReplaceAudioWithFile(AudioFrame* audio_frame); |
int32_t MixAudioWithFile(AudioFrame& audioFrame, int mixingFrequency); |
void UpdatePlayoutTimestamp(bool rtcp); |
void RegisterReceiveCodecsToRTPModule(); |
@@ -443,7 +468,6 @@ class Channel |
std::unique_ptr<AudioSinkInterface> audio_sink_; |
AudioLevel _outputAudioLevel; |
bool _externalTransport; |
- AudioFrame _audioFrame; |
// Downsamples to the codec rate if necessary. |
PushResampler<int16_t> input_resampler_; |
std::unique_ptr<FilePlayer> input_file_player_; |
@@ -482,6 +506,7 @@ class Channel |
AudioDeviceModule* _audioDeviceModulePtr; |
VoiceEngineObserver* _voiceEngineObserverPtr; // owned by base |
rtc::CriticalSection* _callbackCritSectPtr; // owned by base |
+ rtc::TaskQueue* encoder_queue_; |
Transport* _transportPtr; // WebRtc socket or external transport |
RmsLevel rms_level_; |
bool input_mute_ GUARDED_BY(volume_settings_critsect_); |
@@ -519,6 +544,8 @@ class Channel |
rtc::ThreadChecker construction_thread_; |
const bool use_twcc_plr_for_ana_; |
+ |
+ rtc::Event stop_send_event_; |
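The purpose of stop_send_event_ is not visible in this header. One plausible use (an assumption, since StopSend() is not part of this diff) is as a barrier so that StopSend() blocks until all ProcessAndEncodeAudioTask instances already posted to encoder_queue_ have run:

// Hypothetical snippet from Channel::StopSend(), assuming the event is
// created auto-reset and initially not signaled.
channel_state_.SetSending(false);
// Post a barrier task; once it runs, every encode task queued before
// StopSend() has finished.
encoder_queue_->PostTask([this] { stop_send_event_.Set(); });
stop_send_event_.Wait(rtc::Event::kForever);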
}; |
} // namespace voe |