Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_coding/acm2/audio_coding_module.cc

Issue 2069723003: Move AudioCodingModuleImpl to anonymous namespace in audio_coding_module.cc (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 4 years, 6 months ago
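For readers new to the pattern this CL applies: moving the implementation class into an unnamed (anonymous) namespace gives it internal linkage, so it is visible only inside audio_coding_module.cc and can be reached by clients solely through the public factory functions. A minimal sketch of the idea, using made-up names (Widget/WidgetImpl) rather than the actual WebRTC classes:

  // widget.h -- public interface only.
  class Widget {
   public:
    virtual ~Widget() {}
    virtual int Value() const = 0;
    static Widget* Create();
  };

  // widget.cc -- the implementation has internal linkage.
  namespace {
  class WidgetImpl final : public Widget {
   public:
    int Value() const override { return 42; }
  };
  }  // namespace

  Widget* Widget::Create() { return new WidgetImpl; }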
1 /*
2  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3  *
4  * Use of this source code is governed by a BSD-style license
5  * that can be found in the LICENSE file in the root of the source
6  * tree. An additional intellectual property rights grant can be found
7  * in the file PATENTS. All contributing project authors may
8  * be found in the AUTHORS file in the root of the source tree.
9  */
10
11 #include "webrtc/modules/audio_coding/include/audio_coding_module.h"
12
13 #include "webrtc/base/checks.h"
-  #include "webrtc/common_types.h"
-  #include "webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h"
14 #include "webrtc/base/safe_conversions.h"
15 #include "webrtc/modules/audio_coding/acm2/acm_receiver.h"
16 #include "webrtc/modules/audio_coding/acm2/acm_resampler.h"
17 #include "webrtc/modules/audio_coding/acm2/codec_manager.h"
18 #include "webrtc/modules/audio_coding/acm2/rent_a_codec.h"
19 #include "webrtc/modules/audio_coding/codecs/builtin_audio_decoder_factory.h"
-  #include "webrtc/system_wrappers/include/clock.h"
20 #include "webrtc/system_wrappers/include/metrics.h"
21 #include "webrtc/system_wrappers/include/trace.h"
22
23 namespace webrtc {
24
25 namespace {
26
27 struct EncoderFactory {
28 AudioEncoder* external_speech_encoder = nullptr;
29 acm2::CodecManager codec_manager;
30 acm2::RentACodec rent_a_codec;
31 };
32
33 class AudioCodingModuleImpl final : public AudioCodingModule {
34 public:
35 explicit AudioCodingModuleImpl(const AudioCodingModule::Config& config);
36 ~AudioCodingModuleImpl() override;
37
38 /////////////////////////////////////////
39 // Sender
40 //
41
42 // Can be called multiple times for Codec, CNG, RED.
43 int RegisterSendCodec(const CodecInst& send_codec) override;
44
45 void RegisterExternalSendCodec(
46 AudioEncoder* external_speech_encoder) override;
47
48 void ModifyEncoder(
49 FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) override;
50
51 // Get current send codec.
52 rtc::Optional<CodecInst> SendCodec() const override;
53
54 // Get current send frequency.
55 int SendFrequency() const override;
56
57 // Sets the bitrate to the specified value in bits/sec. In case the codec does
58 // not support the requested value it will choose an appropriate value
59 // instead.
60 void SetBitRate(int bitrate_bps) override;
61
62 // Register a transport callback which will be
63 // called to deliver the encoded buffers.
64 int RegisterTransportCallback(AudioPacketizationCallback* transport) override;
65
66 // Add 10 ms of raw (PCM) audio data to the encoder.
67 int Add10MsData(const AudioFrame& audio_frame) override;
68
69 /////////////////////////////////////////
70 // (RED) Redundant Coding
71 //
72
73 // Configure RED status i.e. on/off.
74 int SetREDStatus(bool enable_red) override;
75
76 // Get RED status.
77 bool REDStatus() const override;
78
79 /////////////////////////////////////////
80 // (FEC) Forward Error Correction (codec internal)
81 //
82
83 // Configure FEC status i.e. on/off.
84 int SetCodecFEC(bool enabled_codec_fec) override;
85
86 // Get FEC status.
87 bool CodecFEC() const override;
88
89 // Set target packet loss rate
90 int SetPacketLossRate(int loss_rate) override;
91
92 /////////////////////////////////////////
93 // (VAD) Voice Activity Detection
94 // and
95 // (CNG) Comfort Noise Generation
96 //
97
98 int SetVAD(bool enable_dtx = true,
99 bool enable_vad = false,
100 ACMVADMode mode = VADNormal) override;
101
102 int VAD(bool* dtx_enabled,
103 bool* vad_enabled,
104 ACMVADMode* mode) const override;
105
106 int RegisterVADCallback(ACMVADCallback* vad_callback) override;
107
108 /////////////////////////////////////////
109 // Receiver
110 //
111
112 // Initialize receiver, resets codec database etc.
113 int InitializeReceiver() override;
114
115 // Get current receive frequency.
116 int ReceiveFrequency() const override;
117
118 // Get current playout frequency.
119 int PlayoutFrequency() const override;
120
121 int RegisterReceiveCodec(const CodecInst& receive_codec) override;
122 int RegisterReceiveCodec(
123 const CodecInst& receive_codec,
124 FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) override;
125
126 int RegisterExternalReceiveCodec(int rtp_payload_type,
127 AudioDecoder* external_decoder,
128 int sample_rate_hz,
129 int num_channels,
130 const std::string& name) override;
131
132 // Get current received codec.
133 int ReceiveCodec(CodecInst* current_codec) const override;
134
135 // Incoming packet from network parsed and ready for decode.
136 int IncomingPacket(const uint8_t* incoming_payload,
137 const size_t payload_length,
138 const WebRtcRTPHeader& rtp_info) override;
139
140 // Incoming payloads without RTP info; the RTP info will be created in ACM.
141 // One use of this API is when pre-encoded files are pushed into ACM.
142 int IncomingPayload(const uint8_t* incoming_payload,
143 const size_t payload_length,
144 uint8_t payload_type,
145 uint32_t timestamp) override;
146
147 // Minimum playout delay.
148 int SetMinimumPlayoutDelay(int time_ms) override;
149
150 // Maximum playout delay.
151 int SetMaximumPlayoutDelay(int time_ms) override;
152
153 // Smallest latency NetEq will maintain.
154 int LeastRequiredDelayMs() const override;
155
156 RTC_DEPRECATED int32_t PlayoutTimestamp(uint32_t* timestamp) override;
157
158 rtc::Optional<uint32_t> PlayoutTimestamp() override;
159
160 // Get 10 milliseconds of raw audio data to play out, and
161 // automatically resample to the requested frequency if > 0.
162 int PlayoutData10Ms(int desired_freq_hz,
163 AudioFrame* audio_frame,
164 bool* muted) override;
165 int PlayoutData10Ms(int desired_freq_hz, AudioFrame* audio_frame) override;
166
167 /////////////////////////////////////////
168 // Statistics
169 //
170
171 int GetNetworkStatistics(NetworkStatistics* statistics) override;
172
173 int SetOpusApplication(OpusApplicationMode application) override;
174
175 // If current send codec is Opus, informs it about the maximum playback rate
176 // the receiver will render.
177 int SetOpusMaxPlaybackRate(int frequency_hz) override;
178
179 int EnableOpusDtx() override;
180
181 int DisableOpusDtx() override;
182
183 int UnregisterReceiveCodec(uint8_t payload_type) override;
184
185 int EnableNack(size_t max_nack_list_size) override;
186
187 void DisableNack() override;
188
189 std::vector<uint16_t> GetNackList(int64_t round_trip_time_ms) const override;
190
191 void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const override;
192
193 private:
194 struct InputData {
195 uint32_t input_timestamp;
196 const int16_t* audio;
197 size_t length_per_channel;
198 size_t audio_channel;
199 // If a re-mix is required (up or down), this buffer will store a re-mixed
200 // version of the input.
201 int16_t buffer[WEBRTC_10MS_PCM_AUDIO];
202 };
203
204 // This member class writes values to the named UMA histogram, but only if
205 // the value has changed since the last time (and always for the first call).
206 class ChangeLogger {
207 public:
208 explicit ChangeLogger(const std::string& histogram_name)
209 : histogram_name_(histogram_name) {}
210 // Logs the new value if it is different from the last logged value, or if
211 // this is the first call.
212 void MaybeLog(int value);
213
214 private:
215 int last_value_ = 0;
216 bool first_time_ = true;
217 const std::string histogram_name_;
218 };
219
220 int RegisterReceiveCodecUnlocked(
221 const CodecInst& codec,
222 FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory)
223 EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
224
225 int Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data)
226 EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
227 int Encode(const InputData& input_data)
228 EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
229
230 int InitializeReceiverSafe() EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
231
232 bool HaveValidEncoder(const char* caller_name) const
233 EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
234
235 // Preprocessing of input audio, including resampling and down-mixing if
236 // required, before pushing audio into encoder's buffer.
237 //
238 // in_frame: input audio-frame
239 // ptr_out: pointer to output audio_frame. If no preprocessing is required
240 // |ptr_out| will be pointing to |in_frame|, otherwise pointing to
241 // |preprocess_frame_|.
242 //
243 // Return value:
244 // -1: if encountering an error.
245 // 0: otherwise.
246 int PreprocessToAddData(const AudioFrame& in_frame,
247 const AudioFrame** ptr_out)
248 EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
249
250 // Change required states after starting to receive the codec corresponding
251 // to |index|.
252 int UpdateUponReceivingCodec(int index);
253
254 rtc::CriticalSection acm_crit_sect_;
255 rtc::Buffer encode_buffer_ GUARDED_BY(acm_crit_sect_);
256 int id_; // TODO(henrik.lundin) Make const.
257 uint32_t expected_codec_ts_ GUARDED_BY(acm_crit_sect_);
258 uint32_t expected_in_ts_ GUARDED_BY(acm_crit_sect_);
259 acm2::ACMResampler resampler_ GUARDED_BY(acm_crit_sect_);
260 acm2::AcmReceiver receiver_; // AcmReceiver has its own internal lock.
261 ChangeLogger bitrate_logger_ GUARDED_BY(acm_crit_sect_);
262
263 std::unique_ptr<EncoderFactory> encoder_factory_ GUARDED_BY(acm_crit_sect_);
264
265 // Current encoder stack, either obtained from
266 // encoder_factory_->rent_a_codec.RentEncoderStack or provided by a call to
267 // RegisterEncoder.
268 std::unique_ptr<AudioEncoder> encoder_stack_ GUARDED_BY(acm_crit_sect_);
269
270 std::unique_ptr<AudioDecoder> isac_decoder_16k_ GUARDED_BY(acm_crit_sect_);
271 std::unique_ptr<AudioDecoder> isac_decoder_32k_ GUARDED_BY(acm_crit_sect_);
272
273 // This is to keep track of CN instances where we can send DTMFs.
274 uint8_t previous_pltype_ GUARDED_BY(acm_crit_sect_);
275
276 // Used when payloads are pushed into ACM without any RTP info.
277 // One example is when a pre-encoded bit-stream is pushed from
278 // a file.
279 // IMPORTANT: this variable is only used in IncomingPayload(); therefore,
280 // no lock is acquired when interacting with this variable. If it is going to
281 // be used in other methods, locks need to be taken.
282 std::unique_ptr<WebRtcRTPHeader> aux_rtp_header_;
283
284 bool receiver_initialized_ GUARDED_BY(acm_crit_sect_);
285
286 AudioFrame preprocess_frame_ GUARDED_BY(acm_crit_sect_);
287 bool first_10ms_data_ GUARDED_BY(acm_crit_sect_);
288
289 bool first_frame_ GUARDED_BY(acm_crit_sect_);
290 uint32_t last_timestamp_ GUARDED_BY(acm_crit_sect_);
291 uint32_t last_rtp_timestamp_ GUARDED_BY(acm_crit_sect_);
292
293 rtc::CriticalSection callback_crit_sect_;
294 AudioPacketizationCallback* packetization_callback_
295 GUARDED_BY(callback_crit_sect_);
296 ACMVADCallback* vad_callback_ GUARDED_BY(callback_crit_sect_);
297
298 int codec_histogram_bins_log_[static_cast<size_t>(
299 AudioEncoder::CodecType::kMaxLoggedAudioCodecTypes)];
300 int number_of_consecutive_empty_packets_;
301 };
302
303 // Adds a codec usage sample to the histogram.
304 void UpdateCodecTypeHistogram(size_t codec_type) {
305 RTC_HISTOGRAM_ENUMERATION(
306 "WebRTC.Audio.Encoder.CodecType", static_cast<int>(codec_type),
307 static_cast<int>(
308 webrtc::AudioEncoder::CodecType::kMaxLoggedAudioCodecTypes));
309 }
310
311 // TODO(turajs): the same functionality is used in NetEq. If both classes
312 // need them, make it a static function in ACMCodecDB.
313 bool IsCodecRED(const CodecInst& codec) {
314 return (STR_CASE_CMP(codec.plname, "RED") == 0);
315 }
316
317 bool IsCodecCN(const CodecInst& codec) {
318 return (STR_CASE_CMP(codec.plname, "CN") == 0);
319 }
320
321 // Stereo-to-mono can be done in-place.
322 int DownMix(const AudioFrame& frame,
323 size_t length_out_buff,
324 int16_t* out_buff) {
325 if (length_out_buff < frame.samples_per_channel_) {
326 return -1;
327 }
328 for (size_t n = 0; n < frame.samples_per_channel_; ++n)
329 out_buff[n] = (frame.data_[2 * n] + frame.data_[2 * n + 1]) >> 1;
330 return 0;
331 }
332
333 // Mono-to-stereo can be done in-place.
334 int UpMix(const AudioFrame& frame, size_t length_out_buff, int16_t* out_buff) {
335 if (length_out_buff < frame.samples_per_channel_) {
336 return -1;
337 }
338 for (size_t n = frame.samples_per_channel_; n != 0; --n) {
339 size_t i = n - 1;
340 int16_t sample = frame.data_[i];
341 out_buff[2 * i + 1] = sample;
342 out_buff[2 * i] = sample;
343 }
344 return 0;
345 }
346
347 void ConvertEncodedInfoToFragmentationHeader(
348 const AudioEncoder::EncodedInfo& info,
349 RTPFragmentationHeader* frag) {
350 if (info.redundant.empty()) {
351 frag->fragmentationVectorSize = 0;
352 return;
353 }
354
355 frag->VerifyAndAllocateFragmentationHeader(
356 static_cast<uint16_t>(info.redundant.size()));
357 frag->fragmentationVectorSize = static_cast<uint16_t>(info.redundant.size());
358 size_t offset = 0;
359 for (size_t i = 0; i < info.redundant.size(); ++i) {
360 frag->fragmentationOffset[i] = offset;
361 offset += info.redundant[i].encoded_bytes;
362 frag->fragmentationLength[i] = info.redundant[i].encoded_bytes;
363 frag->fragmentationTimeDiff[i] = rtc::checked_cast<uint16_t>(
364 info.encoded_timestamp - info.redundant[i].encoded_timestamp);
365 frag->fragmentationPlType[i] = info.redundant[i].payload_type;
366 }
367 }
368
369 // Wraps a raw AudioEncoder pointer. The idea is that you can put one of these
370 // in a unique_ptr, to protect the contained raw pointer from being deleted
371 // when the unique_ptr expires. (This is of course a bad idea in general, but
372 // it is needed here for backwards compatibility.)
373 class RawAudioEncoderWrapper final : public AudioEncoder {
374 public:
375 RawAudioEncoderWrapper(AudioEncoder* enc) : enc_(enc) {}
376 int SampleRateHz() const override { return enc_->SampleRateHz(); }
377 size_t NumChannels() const override { return enc_->NumChannels(); }
378 int RtpTimestampRateHz() const override { return enc_->RtpTimestampRateHz(); }
379 size_t Num10MsFramesInNextPacket() const override {
380 return enc_->Num10MsFramesInNextPacket();
381 }
382 size_t Max10MsFramesInAPacket() const override {
383 return enc_->Max10MsFramesInAPacket();
384 }
385 int GetTargetBitrate() const override { return enc_->GetTargetBitrate(); }
386 EncodedInfo EncodeImpl(uint32_t rtp_timestamp,
387 rtc::ArrayView<const int16_t> audio,
388 rtc::Buffer* encoded) override {
389 return enc_->Encode(rtp_timestamp, audio, encoded);
390 }
391 void Reset() override { return enc_->Reset(); }
392 bool SetFec(bool enable) override { return enc_->SetFec(enable); }
393 bool SetDtx(bool enable) override { return enc_->SetDtx(enable); }
394 bool SetApplication(Application application) override {
395 return enc_->SetApplication(application);
396 }
397 void SetMaxPlaybackRate(int frequency_hz) override {
398 return enc_->SetMaxPlaybackRate(frequency_hz);
399 }
400 void SetProjectedPacketLossRate(double fraction) override {
401 return enc_->SetProjectedPacketLossRate(fraction);
402 }
403 void SetTargetBitrate(int target_bps) override {
404 return enc_->SetTargetBitrate(target_bps);
405 }
406
407 private:
408 AudioEncoder* enc_;
409 };
410
411 // Return false on error.
412 bool CreateSpeechEncoderIfNecessary(EncoderFactory* ef) {
413 auto* sp = ef->codec_manager.GetStackParams();
414 if (sp->speech_encoder) {
415 // Do nothing; we already have a speech encoder.
416 } else if (ef->codec_manager.GetCodecInst()) {
417 RTC_DCHECK(!ef->external_speech_encoder);
418 // We have no speech encoder, but we have a specification for making one.
419 std::unique_ptr<AudioEncoder> enc =
420 ef->rent_a_codec.RentEncoder(*ef->codec_manager.GetCodecInst());
421 if (!enc)
422 return false; // Encoder spec was bad.
423 sp->speech_encoder = std::move(enc);
424 } else if (ef->external_speech_encoder) {
425 RTC_DCHECK(!ef->codec_manager.GetCodecInst());
426 // We have an external speech encoder.
427 sp->speech_encoder = std::unique_ptr<AudioEncoder>(
428 new RawAudioEncoderWrapper(ef->external_speech_encoder));
429 }
430 return true;
431 }
432
433 void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) {
434 if (value != last_value_ || first_time_) {
435 first_time_ = false;
436 last_value_ = value;
437 RTC_HISTOGRAM_COUNTS_SPARSE_100(histogram_name_, value);
438 }
439 }
440
441 AudioCodingModuleImpl::AudioCodingModuleImpl(
442 const AudioCodingModule::Config& config)
443 : id_(config.id),
444 expected_codec_ts_(0xD87F3F9F),
445 expected_in_ts_(0xD87F3F9F),
446 receiver_(config),
447 bitrate_logger_("WebRTC.Audio.TargetBitrateInKbps"),
448 encoder_factory_(new EncoderFactory),
449 encoder_stack_(nullptr),
450 previous_pltype_(255),
451 receiver_initialized_(false),
452 first_10ms_data_(false),
453 first_frame_(true),
454 packetization_callback_(NULL),
455 vad_callback_(NULL),
456 codec_histogram_bins_log_(),
457 number_of_consecutive_empty_packets_(0) {
458 if (InitializeReceiverSafe() < 0) {
459 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
460 "Cannot initialize receiver");
461 }
462 WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_, "Created");
463 }
464
465 AudioCodingModuleImpl::~AudioCodingModuleImpl() = default;
466
467 int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
468 AudioEncoder::EncodedInfo encoded_info;
469 uint8_t previous_pltype;
470
471 // Check that there is an encoder before proceeding.
472 if (!HaveValidEncoder("Process"))
473 return -1;
474
475 // Scale the timestamp to the codec's RTP timestamp rate.
476 uint32_t rtp_timestamp =
477 first_frame_ ? input_data.input_timestamp
478 : last_rtp_timestamp_ +
479 rtc::CheckedDivExact(
480 input_data.input_timestamp - last_timestamp_,
481 static_cast<uint32_t>(rtc::CheckedDivExact(
482 encoder_stack_->SampleRateHz(),
483 encoder_stack_->RtpTimestampRateHz())));
484 last_timestamp_ = input_data.input_timestamp;
485 last_rtp_timestamp_ = rtp_timestamp;
486 first_frame_ = false;
487
488 // Clear the buffer before reuse - encoded data will get appended.
489 encode_buffer_.Clear();
490 encoded_info = encoder_stack_->Encode(
491 rtp_timestamp, rtc::ArrayView<const int16_t>(
492 input_data.audio, input_data.audio_channel *
493 input_data.length_per_channel),
494 &encode_buffer_);
495
496 bitrate_logger_.MaybeLog(encoder_stack_->GetTargetBitrate() / 1000);
497 if (encode_buffer_.size() == 0 && !encoded_info.send_even_if_empty) {
498 // Not enough data.
499 return 0;
500 }
501 previous_pltype = previous_pltype_; // Read it while we have the critsect.
502
503 // Log codec type to histogram once every 500 packets.
504 if (encoded_info.encoded_bytes == 0) {
505 ++number_of_consecutive_empty_packets_;
506 } else {
507 size_t codec_type = static_cast<size_t>(encoded_info.encoder_type);
508 codec_histogram_bins_log_[codec_type] +=
509 number_of_consecutive_empty_packets_ + 1;
510 number_of_consecutive_empty_packets_ = 0;
511 if (codec_histogram_bins_log_[codec_type] >= 500) {
512 codec_histogram_bins_log_[codec_type] -= 500;
513 UpdateCodecTypeHistogram(codec_type);
514 }
515 }
516
517 RTPFragmentationHeader my_fragmentation;
518 ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation);
519 FrameType frame_type;
520 if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) {
521 frame_type = kEmptyFrame;
522 encoded_info.payload_type = previous_pltype;
523 } else {
524 RTC_DCHECK_GT(encode_buffer_.size(), 0u);
525 frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN;
526 }
527
528 {
529 rtc::CritScope lock(&callback_crit_sect_);
530 if (packetization_callback_) {
531 packetization_callback_->SendData(
532 frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp,
533 encode_buffer_.data(), encode_buffer_.size(),
534 my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation
535 : nullptr);
536 }
537
538 if (vad_callback_) {
539 // Callback with VAD decision.
540 vad_callback_->InFrameType(frame_type);
541 }
542 }
543 previous_pltype_ = encoded_info.payload_type;
544 return static_cast<int32_t>(encode_buffer_.size());
545 }
546
547 /////////////////////////////////////////
548 // Sender
549 //
550
551 // Can be called multiple times for Codec, CNG, RED.
552 int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
553 rtc::CritScope lock(&acm_crit_sect_);
554 if (!encoder_factory_->codec_manager.RegisterEncoder(send_codec)) {
555 return -1;
556 }
557 if (encoder_factory_->codec_manager.GetCodecInst()) {
558 encoder_factory_->external_speech_encoder = nullptr;
559 }
560 if (!CreateSpeechEncoderIfNecessary(encoder_factory_.get())) {
561 return -1;
562 }
563 auto* sp = encoder_factory_->codec_manager.GetStackParams();
564 if (sp->speech_encoder)
565 encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
566 return 0;
567 }
568
569 void AudioCodingModuleImpl::RegisterExternalSendCodec(
570 AudioEncoder* external_speech_encoder) {
571 rtc::CritScope lock(&acm_crit_sect_);
572 encoder_factory_->codec_manager.UnsetCodecInst();
573 encoder_factory_->external_speech_encoder = external_speech_encoder;
574 RTC_CHECK(CreateSpeechEncoderIfNecessary(encoder_factory_.get()));
575 auto* sp = encoder_factory_->codec_manager.GetStackParams();
576 RTC_CHECK(sp->speech_encoder);
577 encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
578 }
579
580 void AudioCodingModuleImpl::ModifyEncoder(
581 FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
582 rtc::CritScope lock(&acm_crit_sect_);
583
584 // Wipe the encoder factory, so that everything that relies on it will fail.
585 // We don't want the complexity of supporting swapping back and forth.
586 if (encoder_factory_) {
587 encoder_factory_.reset();
588 RTC_CHECK(!encoder_stack_); // Ensure we hadn't started using the factory.
589 }
590
591 modifier(&encoder_stack_);
592 }
593
594 // Get current send codec.
595 rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const {
596 rtc::CritScope lock(&acm_crit_sect_);
597 if (encoder_factory_) {
598 auto* ci = encoder_factory_->codec_manager.GetCodecInst();
599 if (ci) {
600 return rtc::Optional<CodecInst>(*ci);
601 }
602 CreateSpeechEncoderIfNecessary(encoder_factory_.get());
603 const std::unique_ptr<AudioEncoder>& enc =
604 encoder_factory_->codec_manager.GetStackParams()->speech_encoder;
605 if (enc) {
606 return rtc::Optional<CodecInst>(
607 acm2::CodecManager::ForgeCodecInst(enc.get()));
608 }
609 return rtc::Optional<CodecInst>();
610 } else {
611 return encoder_stack_
612 ? rtc::Optional<CodecInst>(
613 acm2::CodecManager::ForgeCodecInst(encoder_stack_.get()))
614 : rtc::Optional<CodecInst>();
615 }
616 }
617
618 // Get current send frequency.
619 int AudioCodingModuleImpl::SendFrequency() const {
620 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
621 "SendFrequency()");
622 rtc::CritScope lock(&acm_crit_sect_);
623
624 if (!encoder_stack_) {
625 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
626 "SendFrequency Failed, no codec is registered");
627 return -1;
628 }
629
630 return encoder_stack_->SampleRateHz();
631 }
632
633 void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) {
634 rtc::CritScope lock(&acm_crit_sect_);
635 if (encoder_stack_) {
636 encoder_stack_->SetTargetBitrate(bitrate_bps);
637 }
638 }
639
640 // Register a transport callback which will be called to deliver
641 // the encoded buffers.
642 int AudioCodingModuleImpl::RegisterTransportCallback(
643 AudioPacketizationCallback* transport) {
644 rtc::CritScope lock(&callback_crit_sect_);
645 packetization_callback_ = transport;
646 return 0;
647 }
648
649 // Add 10 ms of raw (PCM) audio data to the encoder.
650 int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) {
651 InputData input_data;
652 rtc::CritScope lock(&acm_crit_sect_);
653 int r = Add10MsDataInternal(audio_frame, &input_data);
654 return r < 0 ? r : Encode(input_data);
655 }
656
657 int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame,
658 InputData* input_data) {
659 if (audio_frame.samples_per_channel_ == 0) {
660 assert(false);
661 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
662 "Cannot Add 10 ms audio, payload length is zero");
663 return -1;
664 }
665
666 if (audio_frame.sample_rate_hz_ > 48000) {
667 assert(false);
668 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
669 "Cannot Add 10 ms audio, input frequency not valid");
670 return -1;
671 }
672
673 // Check that length and frequency match. We currently just support raw PCM.
674 if (static_cast<size_t>(audio_frame.sample_rate_hz_ / 100) !=
675 audio_frame.samples_per_channel_) {
676 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
677 "Cannot Add 10 ms audio, input frequency and length doesn't"
678 " match");
679 return -1;
680 }
681
682 if (audio_frame.num_channels_ != 1 && audio_frame.num_channels_ != 2) {
683 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
684 "Cannot Add 10 ms audio, invalid number of channels.");
685 return -1;
686 }
687
688 // Do we have a codec registered?
689 if (!HaveValidEncoder("Add10MsData")) {
690 return -1;
691 }
692
693 const AudioFrame* ptr_frame;
694 // Perform a resampling, also down-mix if it is required and can be
695 // performed before resampling (a down mix prior to resampling will take
696 // place if both primary and secondary encoders are mono and input is in
697 // stereo).
698 if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) {
699 return -1;
700 }
701
702 // Check whether we need an up-mix or a down-mix.
703 const size_t current_num_channels = encoder_stack_->NumChannels();
704 const bool same_num_channels =
705 ptr_frame->num_channels_ == current_num_channels;
706
707 if (!same_num_channels) {
708 if (ptr_frame->num_channels_ == 1) {
709 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
710 return -1;
711 } else {
712 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
713 return -1;
714 }
715 }
716
717 // When adding data to encoders, this pointer points to an audio buffer
718 // with the correct number of channels.
719 const int16_t* ptr_audio = ptr_frame->data_;
720
721 // For pushing data to primary, point the |ptr_audio| to correct buffer.
722 if (!same_num_channels)
723 ptr_audio = input_data->buffer;
724
725 input_data->input_timestamp = ptr_frame->timestamp_;
726 input_data->audio = ptr_audio;
727 input_data->length_per_channel = ptr_frame->samples_per_channel_;
728 input_data->audio_channel = current_num_channels;
729
730 return 0;
731 }
732
733 // Perform resampling and down-mixing if required. We down-mix only if the
734 // encoder is mono and the input is stereo. In case of dual-streaming, both
735 // encoders have to be mono for the down-mix to take place.
736 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
737 // is required, |*ptr_out| points to |in_frame|.
738 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
739 const AudioFrame** ptr_out) {
740 const bool resample =
741 in_frame.sample_rate_hz_ != encoder_stack_->SampleRateHz();
742
743 // This variable is true if the primary codec and the secondary codec (if it
744 // exists) are both mono and the input is stereo.
745 // TODO(henrik.lundin): This condition should probably be
746 // in_frame.num_channels_ > encoder_stack_->NumChannels()
747 const bool down_mix =
748 in_frame.num_channels_ == 2 && encoder_stack_->NumChannels() == 1;
749
750 if (!first_10ms_data_) {
751 expected_in_ts_ = in_frame.timestamp_;
752 expected_codec_ts_ = in_frame.timestamp_;
753 first_10ms_data_ = true;
754 } else if (in_frame.timestamp_ != expected_in_ts_) {
755 // TODO(turajs): Do we need a warning here?
756 expected_codec_ts_ +=
757 (in_frame.timestamp_ - expected_in_ts_) *
758 static_cast<uint32_t>(
759 static_cast<double>(encoder_stack_->SampleRateHz()) /
760 static_cast<double>(in_frame.sample_rate_hz_));
761 expected_in_ts_ = in_frame.timestamp_;
762 }
763
764
765 if (!down_mix && !resample) {
766 // No pre-processing is required.
767 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
768 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
769 *ptr_out = &in_frame;
770 return 0;
771 }
772
773 *ptr_out = &preprocess_frame_;
774 preprocess_frame_.num_channels_ = in_frame.num_channels_;
775 int16_t audio[WEBRTC_10MS_PCM_AUDIO];
776 const int16_t* src_ptr_audio = in_frame.data_;
777 int16_t* dest_ptr_audio = preprocess_frame_.data_;
778 if (down_mix) {
779 // If resampling is required, the output of the down-mix is written into a
780 // local buffer; otherwise, it is written to the output frame.
781 if (resample)
782 dest_ptr_audio = audio;
783 if (DownMix(in_frame, WEBRTC_10MS_PCM_AUDIO, dest_ptr_audio) < 0)
784 return -1;
785 preprocess_frame_.num_channels_ = 1;
786 // Set the input of the resampler to the down-mixed signal.
787 src_ptr_audio = audio;
788 }
789
790 preprocess_frame_.timestamp_ = expected_codec_ts_;
791 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_;
792 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_;
793 // If it is required, we have to do a resampling.
794 if (resample) {
795 // The result of the resampler is written to output frame.
796 dest_ptr_audio = preprocess_frame_.data_;
797
798 int samples_per_channel = resampler_.Resample10Msec(
799 src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(),
800 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
801 dest_ptr_audio);
802
803 if (samples_per_channel < 0) {
804 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
805 "Cannot add 10 ms audio, resampling failed");
806 return -1;
807 }
808 preprocess_frame_.samples_per_channel_ =
809 static_cast<size_t>(samples_per_channel);
810 preprocess_frame_.sample_rate_hz_ = encoder_stack_->SampleRateHz();
811 }
812
813 expected_codec_ts_ +=
814 static_cast<uint32_t>(preprocess_frame_.samples_per_channel_);
815 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
816
817 return 0;
818 }
819
820 /////////////////////////////////////////
821 // (RED) Redundant Coding
822 //
823
824 bool AudioCodingModuleImpl::REDStatus() const {
825 rtc::CritScope lock(&acm_crit_sect_);
826 return encoder_factory_->codec_manager.GetStackParams()->use_red;
827 }
828
829 // Configure RED status, i.e. on/off.
830 int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
831 #ifdef WEBRTC_CODEC_RED
832 rtc::CritScope lock(&acm_crit_sect_);
833 CreateSpeechEncoderIfNecessary(encoder_factory_.get());
834 if (!encoder_factory_->codec_manager.SetCopyRed(enable_red)) {
835 return -1;
836 }
837 auto* sp = encoder_factory_->codec_manager.GetStackParams();
838 if (sp->speech_encoder)
839 encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
840 return 0;
841 #else
842 WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
843 " WEBRTC_CODEC_RED is undefined");
844 return -1;
845 #endif
846 }
847
848 /////////////////////////////////////////
849 // (FEC) Forward Error Correction (codec internal)
850 //
851
852 bool AudioCodingModuleImpl::CodecFEC() const {
853 rtc::CritScope lock(&acm_crit_sect_);
854 return encoder_factory_->codec_manager.GetStackParams()->use_codec_fec;
855 }
856
857 int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
858 rtc::CritScope lock(&acm_crit_sect_);
859 CreateSpeechEncoderIfNecessary(encoder_factory_.get());
860 if (!encoder_factory_->codec_manager.SetCodecFEC(enable_codec_fec)) {
861 return -1;
862 }
863 auto* sp = encoder_factory_->codec_manager.GetStackParams();
864 if (sp->speech_encoder)
865 encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
866 if (enable_codec_fec) {
867 return sp->use_codec_fec ? 0 : -1;
868 } else {
869 RTC_DCHECK(!sp->use_codec_fec);
870 return 0;
871 }
872 }
873
874 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
875 rtc::CritScope lock(&acm_crit_sect_);
876 if (HaveValidEncoder("SetPacketLossRate")) {
877 encoder_stack_->SetProjectedPacketLossRate(loss_rate / 100.0);
878 }
879 return 0;
880 }
881
882 /////////////////////////////////////////
883 // (VAD) Voice Activity Detection
884 //
885 int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
886 bool enable_vad,
887 ACMVADMode mode) {
888 // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
889 RTC_DCHECK_EQ(enable_dtx, enable_vad);
890 rtc::CritScope lock(&acm_crit_sect_);
891 CreateSpeechEncoderIfNecessary(encoder_factory_.get());
892 if (!encoder_factory_->codec_manager.SetVAD(enable_dtx, mode)) {
893 return -1;
894 }
895 auto* sp = encoder_factory_->codec_manager.GetStackParams();
896 if (sp->speech_encoder)
897 encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
898 return 0;
899 }
900
901 // Get VAD/DTX settings.
902 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
903 ACMVADMode* mode) const {
904 rtc::CritScope lock(&acm_crit_sect_);
905 const auto* sp = encoder_factory_->codec_manager.GetStackParams();
906 *dtx_enabled = *vad_enabled = sp->use_cng;
907 *mode = sp->vad_mode;
908 return 0;
909 }
910
911 /////////////////////////////////////////
912 // Receiver
913 //
914
915 int AudioCodingModuleImpl::InitializeReceiver() {
916 rtc::CritScope lock(&acm_crit_sect_);
917 return InitializeReceiverSafe();
918 }
919
920 // Initialize receiver, resets codec database etc.
921 int AudioCodingModuleImpl::InitializeReceiverSafe() {
922 // If the receiver is already initialized then we want to destroy any
923 // existing decoders. After a call to this function, we should have a clean
924 // start-up.
925 if (receiver_initialized_) {
926 if (receiver_.RemoveAllCodecs() < 0)
927 return -1;
928 }
929 receiver_.ResetInitialDelay();
930 receiver_.SetMinimumDelay(0);
931 receiver_.SetMaximumDelay(0);
932 receiver_.FlushBuffers();
933
934 // Register RED and CN.
935 auto db = acm2::RentACodec::Database();
936 for (size_t i = 0; i < db.size(); i++) {
937 if (IsCodecRED(db[i]) || IsCodecCN(db[i])) {
938 if (receiver_.AddCodec(static_cast<int>(i),
939 static_cast<uint8_t>(db[i].pltype), 1,
940 db[i].plfreq, nullptr, db[i].plname) < 0) {
941 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
942 "Cannot register master codec.");
943 return -1;
944 }
945 }
946 }
947 receiver_initialized_ = true;
948 return 0;
949 }
950
951 // Get current receive frequency.
952 int AudioCodingModuleImpl::ReceiveFrequency() const {
953 const auto last_packet_sample_rate = receiver_.last_packet_sample_rate_hz();
954 return last_packet_sample_rate ? *last_packet_sample_rate
955 : receiver_.last_output_sample_rate_hz();
956 }
957
958 // Get current playout frequency.
959 int AudioCodingModuleImpl::PlayoutFrequency() const {
960 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
961 "PlayoutFrequency()");
962 return receiver_.last_output_sample_rate_hz();
963 }
964
965 int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
966 rtc::CritScope lock(&acm_crit_sect_);
967 auto* ef = encoder_factory_.get();
968 return RegisterReceiveCodecUnlocked(
969 codec, [&] { return ef->rent_a_codec.RentIsacDecoder(codec.plfreq); });
970 }
971
972 int AudioCodingModuleImpl::RegisterReceiveCodec(
973 const CodecInst& codec,
974 FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) {
975 rtc::CritScope lock(&acm_crit_sect_);
976 return RegisterReceiveCodecUnlocked(codec, isac_factory);
977 }
978
979 int AudioCodingModuleImpl::RegisterReceiveCodecUnlocked(
980 const CodecInst& codec,
981 FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) {
982 RTC_DCHECK(receiver_initialized_);
983 if (codec.channels > 2) {
984 LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels;
985 return -1;
986 }
987
988 auto codec_id = acm2::RentACodec::CodecIdByParams(codec.plname, codec.plfreq,
989 codec.channels);
990 if (!codec_id) {
991 LOG_F(LS_ERROR) << "Wrong codec params to be registered as receive codec";
992 return -1;
993 }
994 auto codec_index = acm2::RentACodec::CodecIndexFromId(*codec_id);
995 RTC_CHECK(codec_index) << "Invalid codec ID: " << static_cast<int>(*codec_id);
996
997 // Check if the payload-type is valid.
998 if (!acm2::RentACodec::IsPayloadTypeValid(codec.pltype)) {
999 LOG_F(LS_ERROR) << "Invalid payload type " << codec.pltype << " for "
1000 << codec.plname;
1001 return -1;
1002 }
1003
1004 AudioDecoder* isac_decoder = nullptr;
1005 if (STR_CASE_CMP(codec.plname, "isac") == 0) {
1006 std::unique_ptr<AudioDecoder>& saved_isac_decoder =
1007 codec.plfreq == 16000 ? isac_decoder_16k_ : isac_decoder_32k_;
1008 if (!saved_isac_decoder) {
1009 saved_isac_decoder = isac_factory();
1010 }
1011 isac_decoder = saved_isac_decoder.get();
1012 }
1013 return receiver_.AddCodec(*codec_index, codec.pltype, codec.channels,
1014 codec.plfreq, isac_decoder, codec.plname);
1015 }
1016
1017 int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
1018 int rtp_payload_type,
1019 AudioDecoder* external_decoder,
1020 int sample_rate_hz,
1021 int num_channels,
1022 const std::string& name) {
1023 rtc::CritScope lock(&acm_crit_sect_);
1024 RTC_DCHECK(receiver_initialized_);
1025 if (num_channels > 2 || num_channels < 0) {
1026 LOG_F(LS_ERROR) << "Unsupported number of channels: " << num_channels;
1027 return -1;
1028 }
1029
1030 // Check if the payload-type is valid.
1031 if (!acm2::RentACodec::IsPayloadTypeValid(rtp_payload_type)) {
1032 LOG_F(LS_ERROR) << "Invalid payload-type " << rtp_payload_type
1033 << " for external decoder.";
1034 return -1;
1035 }
1036
1037 return receiver_.AddCodec(-1 /* external */, rtp_payload_type, num_channels,
1038 sample_rate_hz, external_decoder, name);
1039 }
1040
1041 // Get current received codec.
1042 int AudioCodingModuleImpl::ReceiveCodec(CodecInst* current_codec) const {
1043 rtc::CritScope lock(&acm_crit_sect_);
1044 return receiver_.LastAudioCodec(current_codec);
1045 }
1046
1047 // Incoming packet from network parsed and ready for decode.
1048 int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload,
1049 const size_t payload_length,
1050 const WebRtcRTPHeader& rtp_header) {
1051 return receiver_.InsertPacket(
1052 rtp_header,
1053 rtc::ArrayView<const uint8_t>(incoming_payload, payload_length));
1054 }
1055
1056 // Minimum playout delay (Used for lip-sync).
1057 int AudioCodingModuleImpl::SetMinimumPlayoutDelay(int time_ms) {
1058 if ((time_ms < 0) || (time_ms > 10000)) {
1059 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1060 "Delay must be in the range of 0-1000 milliseconds.");
1061 return -1;
1062 }
1063 return receiver_.SetMinimumDelay(time_ms);
1064 }
1065
1066 int AudioCodingModuleImpl::SetMaximumPlayoutDelay(int time_ms) {
1067 if ((time_ms < 0) || (time_ms > 10000)) {
1068 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1069 "Delay must be in the range of 0-1000 milliseconds.");
1070 return -1;
1071 }
1072 return receiver_.SetMaximumDelay(time_ms);
1073 }
1074
1075 // Get 10 milliseconds of raw audio data to play out.
1076 // Automatically resamples to the requested frequency.
1077 int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
1078 AudioFrame* audio_frame,
1079 bool* muted) {
1080 // GetAudio always returns 10 ms, at the requested sample rate.
1081 if (receiver_.GetAudio(desired_freq_hz, audio_frame, muted) != 0) {
1082 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1083 "PlayoutData failed, RecOut Failed");
1084 return -1;
1085 }
1086 audio_frame->id_ = id_;
1087 return 0;
1088 }
1089
1090 int AudioCodingModuleImpl::PlayoutData10Ms(int desired_freq_hz,
1091 AudioFrame* audio_frame) {
1092 bool muted;
1093 int ret = PlayoutData10Ms(desired_freq_hz, audio_frame, &muted);
1094 RTC_DCHECK(!muted);
1095 return ret;
1096 }
1097
1098 /////////////////////////////////////////
1099 // Statistics
1100 //
1101
1102 // TODO(turajs) change the return value to void. Also change the corresponding
1103 // NetEq function.
1104 int AudioCodingModuleImpl::GetNetworkStatistics(NetworkStatistics* statistics) {
1105 receiver_.GetNetworkStatistics(statistics);
1106 return 0;
1107 }
1108
1109 int AudioCodingModuleImpl::RegisterVADCallback(ACMVADCallback* vad_callback) {
1110 WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_,
1111 "RegisterVADCallback()");
1112 rtc::CritScope lock(&callback_crit_sect_);
1113 vad_callback_ = vad_callback;
1114 return 0;
1115 }
1116
1117 // TODO(kwiberg): Remove this method, and have callers call IncomingPacket
1118 // instead. The translation logic and state belong with them, not with
1119 // AudioCodingModuleImpl.
1120 int AudioCodingModuleImpl::IncomingPayload(const uint8_t* incoming_payload,
1121 size_t payload_length,
1122 uint8_t payload_type,
1123 uint32_t timestamp) {
1124 // We are not acquiring any lock when interacting with |aux_rtp_header_| since
1125 // no other method uses this member variable.
1126 if (!aux_rtp_header_) {
1127 // This is the first time that we are using |aux_rtp_header_|,
1128 // so we have to create it.
1129 aux_rtp_header_.reset(new WebRtcRTPHeader);
1130 aux_rtp_header_->header.payloadType = payload_type;
1131 // Doesn't matter in this case.
1132 aux_rtp_header_->header.ssrc = 0;
1133 aux_rtp_header_->header.markerBit = false;
1134 // Start with random numbers.
1135 aux_rtp_header_->header.sequenceNumber = 0x1234; // Arbitrary.
1136 aux_rtp_header_->type.Audio.channel = 1;
1137 }
1138
1139 aux_rtp_header_->header.timestamp = timestamp;
1140 IncomingPacket(incoming_payload, payload_length, *aux_rtp_header_);
1141 // Get ready for the next payload.
1142 aux_rtp_header_->header.sequenceNumber++;
1143 return 0;
1144 }
1145
1146 int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application) {
1147 rtc::CritScope lock(&acm_crit_sect_);
1148 if (!HaveValidEncoder("SetOpusApplication")) {
1149 return -1;
1150 }
1151 AudioEncoder::Application app;
1152 switch (application) {
1153 case kVoip:
1154 app = AudioEncoder::Application::kSpeech;
1155 break;
1156 case kAudio:
1157 app = AudioEncoder::Application::kAudio;
1158 break;
1159 default:
1160 FATAL();
1161 return 0;
1162 }
1163 return encoder_stack_->SetApplication(app) ? 0 : -1;
1164 }
1165
1166 // Informs Opus encoder of the maximum playback rate the receiver will render.
1167 int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
1168 rtc::CritScope lock(&acm_crit_sect_);
1169 if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) {
1170 return -1;
1171 }
1172 encoder_stack_->SetMaxPlaybackRate(frequency_hz);
1173 return 0;
1174 }
1175
1176 int AudioCodingModuleImpl::EnableOpusDtx() {
1177 rtc::CritScope lock(&acm_crit_sect_);
1178 if (!HaveValidEncoder("EnableOpusDtx")) {
1179 return -1;
1180 }
1181 return encoder_stack_->SetDtx(true) ? 0 : -1;
1182 }
1183
1184 int AudioCodingModuleImpl::DisableOpusDtx() {
1185 rtc::CritScope lock(&acm_crit_sect_);
1186 if (!HaveValidEncoder("DisableOpusDtx")) {
1187 return -1;
1188 }
1189 return encoder_stack_->SetDtx(false) ? 0 : -1;
1190 }
1191
1192 int32_t AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
1193 rtc::Optional<uint32_t> ts = PlayoutTimestamp();
1194 if (!ts)
1195 return -1;
1196 *timestamp = *ts;
1197 return 0;
1198 }
1199
1200 rtc::Optional<uint32_t> AudioCodingModuleImpl::PlayoutTimestamp() {
1201 return receiver_.GetPlayoutTimestamp();
1202 }
1203
1204 bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const {
1205 if (!encoder_stack_) {
1206 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
1207 "%s failed: No send codec is registered.", caller_name);
1208 return false;
1209 }
1210 return true;
1211 }
1212
1213 int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) {
1214 return receiver_.RemoveCodec(payload_type);
1215 }
1216
1217 int AudioCodingModuleImpl::EnableNack(size_t max_nack_list_size) {
1218 return receiver_.EnableNack(max_nack_list_size);
1219 }
1220
1221 void AudioCodingModuleImpl::DisableNack() {
1222 receiver_.DisableNack();
1223 }
1224
1225 std::vector<uint16_t> AudioCodingModuleImpl::GetNackList(
1226 int64_t round_trip_time_ms) const {
1227 return receiver_.GetNackList(round_trip_time_ms);
1228 }
1229
1230 int AudioCodingModuleImpl::LeastRequiredDelayMs() const {
1231 return receiver_.LeastRequiredDelayMs();
1232 }
1233
1234 void AudioCodingModuleImpl::GetDecodingCallStatistics(
1235 AudioDecodingCallStats* call_stats) const {
1236 receiver_.GetDecodingCallStatistics(call_stats);
1237 }
1238
1239 } // namespace
1240
1241 // Create module
1242 AudioCodingModule* AudioCodingModule::Create(int id) {
1243 Config config;
1244 config.id = id;
1245 config.clock = Clock::GetRealTimeClock();
1246 config.decoder_factory = CreateBuiltinAudioDecoderFactory();
1247 return Create(config);
1248 }
1249
1250 AudioCodingModule* AudioCodingModule::Create(int id, Clock* clock) {
1251 Config config;
1252 config.id = id;
1253 config.clock = clock;
1254 config.decoder_factory = CreateBuiltinAudioDecoderFactory();
1255 return Create(config);
1256 }
1257
1258 AudioCodingModule* AudioCodingModule::Create(const Config& config) {
1259 if (!config.decoder_factory) {
1260 // TODO(ossu): Backwards compatibility. Will be removed after a deprecation
1261 // cycle.
1262 Config config_copy = config;
1263 config_copy.decoder_factory = CreateBuiltinAudioDecoderFactory();
-    return new acm2::AudioCodingModuleImpl(config_copy);
1264 return new AudioCodingModuleImpl(config_copy);
1265 }
-    return new acm2::AudioCodingModuleImpl(config);
1266 return new AudioCodingModuleImpl(config);
1267 }
1268
1269 int AudioCodingModule::NumberOfCodecs() {
1270 return static_cast<int>(acm2::RentACodec::NumberOfCodecs());
1271 }
1272
1273 int AudioCodingModule::Codec(int list_id, CodecInst* codec) {
1274 auto codec_id = acm2::RentACodec::CodecIdFromIndex(list_id);
1275 if (!codec_id)
1276 return -1;
(...skipping 40 matching lines...)
1317 // Checks the validity of the parameters of the given codec
1318 bool AudioCodingModule::IsCodecValid(const CodecInst& codec) {
1319 bool valid = acm2::RentACodec::IsCodecValid(codec);
1320 if (!valid)
1321 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, -1,
1322 "Invalid codec setting");
1323 return valid;
1324 }
1325
1326 } // namespace webrtc
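As context for the API that stays public after this move: callers never name the implementation class; they construct the module through AudioCodingModule::Create(). A rough usage sketch (illustrative only; codec registration, error handling, and the audio pipeline a real caller needs are omitted, and ownership of the returned pointer is assumed to lie with the caller):

  #include <memory>

  #include "webrtc/modules/audio_coding/include/audio_coding_module.h"

  void ExampleUse() {
    // The factory hides the concrete class, which now lives in an unnamed
    // namespace inside audio_coding_module.cc.
    std::unique_ptr<webrtc::AudioCodingModule> acm(
        webrtc::AudioCodingModule::Create(0 /* id */));
    // ... register send/receive codecs, then feed 10 ms frames via
    // acm->Add10MsData() and pull decoded audio via acm->PlayoutData10Ms() ...
  }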