Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_coding/acm2/audio_coding_module_impl.cc

Issue 1520283006: Move Rent-A-Codec out of CodecManager (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@rac0
Patch Set: review comments Created 5 years ago
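This patch moves encoder ownership out of CodecManager: as the new right-hand side below shows, RegisterSendCodec and the RED/FEC/VAD setters now update the stack parameters through CodecManager and then ask RentACodec to (re)build the encoder stack via RentEncoderStack(), and callers fetch the active encoder with GetEncoderStack() instead of CodecManager::CurrentEncoder(). The following is a minimal, self-contained sketch of that rent/re-rent pattern; AudioEncoderStub, StackParams and RentACodecStub are hypothetical stand-ins, not the real WebRTC types (the real RentEncoder(), for instance, takes a CodecInst).

#include <iostream>

struct AudioEncoderStub {};  // Stand-in for webrtc::AudioEncoder.

struct StackParams {
  AudioEncoderStub* speech_encoder = nullptr;  // Borrowed, not owned.
  bool use_red = false;
  bool use_codec_fec = false;
};

// Stand-in for RentACodec: owns the codecs it hands out and (re)assembles
// the encoder stack from the current stack parameters.
class RentACodecStub {
 public:
  AudioEncoderStub* RentEncoder() { return &speech_encoder_; }
  AudioEncoderStub* RentEncoderStack(StackParams* sp) {
    // The real code wraps sp->speech_encoder in CNG/RED layers as requested
    // by sp; here we just record which encoder forms the stack.
    stack_ = sp->speech_encoder;
    return stack_;
  }
  AudioEncoderStub* GetEncoderStack() const { return stack_; }

 private:
  AudioEncoderStub speech_encoder_;
  AudioEncoderStub* stack_ = nullptr;
};

int main() {
  RentACodecStub rent_a_codec;
  StackParams sp;
  // RegisterSendCodec path: rent a speech encoder, then rent the full stack.
  sp.speech_encoder = rent_a_codec.RentEncoder();
  rent_a_codec.RentEncoderStack(&sp);
  // Reconfiguration (SetREDStatus/SetCodecFEC/SetVAD): flip a flag and
  // re-rent the stack so the change takes effect.
  sp.use_red = true;
  rent_a_codec.RentEncoderStack(&sp);
  std::cout << (rent_a_codec.GetEncoderStack() != nullptr) << "\n";  // 1
}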
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 115 matching lines...)
126 AudioCodingModuleImpl::~AudioCodingModuleImpl() = default; 126 AudioCodingModuleImpl::~AudioCodingModuleImpl() = default;
127 127
128 int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) { 128 int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) {
129 AudioEncoder::EncodedInfo encoded_info; 129 AudioEncoder::EncodedInfo encoded_info;
130 uint8_t previous_pltype; 130 uint8_t previous_pltype;
131 131
132 // Check if there is an encoder before. 132 // Check if there is an encoder before.
133 if (!HaveValidEncoder("Process")) 133 if (!HaveValidEncoder("Process"))
134 return -1; 134 return -1;
135 135
136 AudioEncoder* audio_encoder = codec_manager_.CurrentEncoder(); 136 AudioEncoder* audio_encoder = rent_a_codec_.GetEncoderStack();
137 // Scale the timestamp to the codec's RTP timestamp rate. 137 // Scale the timestamp to the codec's RTP timestamp rate.
138 uint32_t rtp_timestamp = 138 uint32_t rtp_timestamp =
139 first_frame_ ? input_data.input_timestamp 139 first_frame_ ? input_data.input_timestamp
140 : last_rtp_timestamp_ + 140 : last_rtp_timestamp_ +
141 rtc::CheckedDivExact( 141 rtc::CheckedDivExact(
142 input_data.input_timestamp - last_timestamp_, 142 input_data.input_timestamp - last_timestamp_,
143 static_cast<uint32_t>(rtc::CheckedDivExact( 143 static_cast<uint32_t>(rtc::CheckedDivExact(
144 audio_encoder->SampleRateHz(), 144 audio_encoder->SampleRateHz(),
145 audio_encoder->RtpTimestampRateHz()))); 145 audio_encoder->RtpTimestampRateHz())));
146 last_timestamp_ = input_data.input_timestamp; 146 last_timestamp_ = input_data.input_timestamp;
(...skipping 44 matching lines...)
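The rescaling in Encode() above only changes the timestamp delta when an encoder's sample rate differs from its RTP timestamp rate, as with G.722 (16 kHz samples, 8 kHz RTP clock). A standalone sketch of the arithmetic, with CheckedDivExactStub standing in for rtc::CheckedDivExact and illustrative values:

#include <cassert>
#include <cstdint>
#include <iostream>

// Stand-in for rtc::CheckedDivExact: divide, asserting there is no remainder.
uint32_t CheckedDivExactStub(uint32_t a, uint32_t b) {
  assert(a % b == 0);
  return a / b;
}

int main() {
  // Hypothetical G.722-style encoder: 16 kHz samples, 8 kHz RTP clock.
  const uint32_t sample_rate_hz = 16000;        // audio_encoder->SampleRateHz()
  const uint32_t rtp_timestamp_rate_hz = 8000;  // audio_encoder->RtpTimestampRateHz()

  const uint32_t last_timestamp = 0;      // input timestamp of the previous frame
  const uint32_t last_rtp_timestamp = 0;  // RTP timestamp of the previous frame
  const uint32_t input_timestamp = 160;   // next 10 ms frame = 160 samples at 16 kHz

  const uint32_t rtp_timestamp =
      last_rtp_timestamp +
      CheckedDivExactStub(input_timestamp - last_timestamp,
                          CheckedDivExactStub(sample_rate_hz, rtp_timestamp_rate_hz));

  std::cout << rtp_timestamp << "\n";  // Prints 80: 160 input samples map to 80 RTP ticks.
}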
191 return static_cast<int32_t>(encode_buffer_.size()); 191 return static_cast<int32_t>(encode_buffer_.size());
192 } 192 }
193 193
194 ///////////////////////////////////////// 194 /////////////////////////////////////////
195 // Sender 195 // Sender
196 // 196 //
197 197
198 // Can be called multiple times for Codec, CNG, RED. 198 // Can be called multiple times for Codec, CNG, RED.
199 int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) { 199 int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
200 CriticalSectionScoped lock(acm_crit_sect_.get()); 200 CriticalSectionScoped lock(acm_crit_sect_.get());
201 return codec_manager_.RegisterEncoder(send_codec); 201 if (!codec_manager_.RegisterEncoder(send_codec)) {
202 return -1;
203 }
204 auto* sp = codec_manager_.GetStackParams();
205 if (!sp->speech_encoder && codec_manager_.GetCodecInst()) {
206 // We have no speech encoder, but we have a specification for making one.
207 AudioEncoder* enc =
208 rent_a_codec_.RentEncoder(*codec_manager_.GetCodecInst());
209 if (!enc)
210 return -1;
211 sp->speech_encoder = enc;
212 }
213 if (sp->speech_encoder)
214 rent_a_codec_.RentEncoderStack(sp);
215 return 0;
202 } 216 }
203 217
204 void AudioCodingModuleImpl::RegisterExternalSendCodec( 218 void AudioCodingModuleImpl::RegisterExternalSendCodec(
205 AudioEncoder* external_speech_encoder) { 219 AudioEncoder* external_speech_encoder) {
206 CriticalSectionScoped lock(acm_crit_sect_.get()); 220 CriticalSectionScoped lock(acm_crit_sect_.get());
207 codec_manager_.RegisterEncoder(external_speech_encoder); 221 auto* sp = codec_manager_.GetStackParams();
222 sp->speech_encoder = external_speech_encoder;
223 rent_a_codec_.RentEncoderStack(sp);
208 } 224 }
209 225
210 // Get current send codec. 226 // Get current send codec.
211 rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const { 227 rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const {
212 CriticalSectionScoped lock(acm_crit_sect_.get()); 228 CriticalSectionScoped lock(acm_crit_sect_.get());
213 return codec_manager_.GetCodecInst(); 229 auto* ci = codec_manager_.GetCodecInst();
230 if (ci) {
231 return rtc::Optional<CodecInst>(*ci);
232 }
233 auto* enc = codec_manager_.GetStackParams()->speech_encoder;
234 if (enc) {
235 return rtc::Optional<CodecInst>(CodecManager::ForgeCodecInst(enc));
236 }
237 return rtc::Optional<CodecInst>();
214 } 238 }
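SendCodec() now has a three-way fallback: return the registered CodecInst if there is one, otherwise forge a CodecInst from the current speech encoder (the external-encoder case), otherwise return an empty Optional. A minimal sketch of that logic, using std::optional and hypothetical stand-in types rather than rtc::Optional and the real CodecManager API:

#include <iostream>
#include <optional>
#include <string>

struct CodecInstStub { std::string plname; };  // Stand-in for CodecInst.
struct EncoderStub { std::string name; };      // Stand-in for AudioEncoder.

// Stand-in for CodecManager::ForgeCodecInst().
CodecInstStub ForgeCodecInstStub(const EncoderStub& enc) { return {enc.name}; }

std::optional<CodecInstStub> SendCodecSketch(const CodecInstStub* ci,
                                             const EncoderStub* enc) {
  if (ci) return *ci;                        // A CodecInst was registered.
  if (enc) return ForgeCodecInstStub(*enc);  // Only an external encoder: forge one.
  return std::nullopt;                       // Nothing registered yet.
}

int main() {
  const EncoderStub opus{"opus"};
  std::cout << SendCodecSketch(nullptr, &opus)->plname << "\n";        // "opus"
  std::cout << SendCodecSketch(nullptr, nullptr).has_value() << "\n";  // 0
}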
215 239
216 // Get current send frequency. 240 // Get current send frequency.
217 int AudioCodingModuleImpl::SendFrequency() const { 241 int AudioCodingModuleImpl::SendFrequency() const {
218 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, 242 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
219 "SendFrequency()"); 243 "SendFrequency()");
220 CriticalSectionScoped lock(acm_crit_sect_.get()); 244 CriticalSectionScoped lock(acm_crit_sect_.get());
221 245
222 if (!codec_manager_.CurrentEncoder()) { 246 const auto* enc = rent_a_codec_.GetEncoderStack();
247 if (!enc) {
223 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, 248 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_,
224 "SendFrequency Failed, no codec is registered"); 249 "SendFrequency Failed, no codec is registered");
225 return -1; 250 return -1;
226 } 251 }
227 252
228 return codec_manager_.CurrentEncoder()->SampleRateHz(); 253 return enc->SampleRateHz();
229 } 254 }
230 255
231 void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) { 256 void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) {
232 CriticalSectionScoped lock(acm_crit_sect_.get()); 257 CriticalSectionScoped lock(acm_crit_sect_.get());
233 if (codec_manager_.CurrentEncoder()) { 258 auto* enc = rent_a_codec_.GetEncoderStack();
234 codec_manager_.CurrentEncoder()->SetTargetBitrate(bitrate_bps); 259 if (enc) {
260 enc->SetTargetBitrate(bitrate_bps);
235 } 261 }
236 } 262 }
237 263
238 // Register a transport callback which will be called to deliver 264 // Register a transport callback which will be called to deliver
239 // the encoded buffers. 265 // the encoded buffers.
240 int AudioCodingModuleImpl::RegisterTransportCallback( 266 int AudioCodingModuleImpl::RegisterTransportCallback(
241 AudioPacketizationCallback* transport) { 267 AudioPacketizationCallback* transport) {
242 CriticalSectionScoped lock(callback_crit_sect_.get()); 268 CriticalSectionScoped lock(callback_crit_sect_.get());
243 packetization_callback_ = transport; 269 packetization_callback_ = transport;
244 return 0; 270 return 0;
(...skipping 46 matching lines...)
291 const AudioFrame* ptr_frame; 317 const AudioFrame* ptr_frame;
292 // Perform a resampling, also down-mix if it is required and can be 318 // Perform a resampling, also down-mix if it is required and can be
293 // performed before resampling (a down mix prior to resampling will take 319 // performed before resampling (a down mix prior to resampling will take
294 // place if both primary and secondary encoders are mono and input is in 320 // place if both primary and secondary encoders are mono and input is in
295 // stereo). 321 // stereo).
296 if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) { 322 if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) {
297 return -1; 323 return -1;
298 } 324 }
299 325
300 // Check whether we need an up-mix or down-mix? 326 // Check whether we need an up-mix or down-mix?
301 bool remix = ptr_frame->num_channels_ != 327 const int current_num_channels =
302 codec_manager_.CurrentEncoder()->NumChannels(); 328 rent_a_codec_.GetEncoderStack()->NumChannels();
329 const bool same_num_channels =
330 ptr_frame->num_channels_ == current_num_channels;
303 331
304 if (remix) { 332 if (!same_num_channels) {
305 if (ptr_frame->num_channels_ == 1) { 333 if (ptr_frame->num_channels_ == 1) {
306 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) 334 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
307 return -1; 335 return -1;
308 } else { 336 } else {
309 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) 337 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0)
310 return -1; 338 return -1;
311 } 339 }
312 } 340 }
313 341
314 // When adding data to encoders this pointer is pointing to an audio buffer 342 // When adding data to encoders this pointer is pointing to an audio buffer
315 // with correct number of channels. 343 // with correct number of channels.
316 const int16_t* ptr_audio = ptr_frame->data_; 344 const int16_t* ptr_audio = ptr_frame->data_;
317 345
318 // For pushing data to primary, point the |ptr_audio| to correct buffer. 346 // For pushing data to primary, point the |ptr_audio| to correct buffer.
319 if (codec_manager_.CurrentEncoder()->NumChannels() != 347 if (!same_num_channels)
320 ptr_frame->num_channels_)
321 ptr_audio = input_data->buffer; 348 ptr_audio = input_data->buffer;
322 349
323 input_data->input_timestamp = ptr_frame->timestamp_; 350 input_data->input_timestamp = ptr_frame->timestamp_;
324 input_data->audio = ptr_audio; 351 input_data->audio = ptr_audio;
325 input_data->length_per_channel = ptr_frame->samples_per_channel_; 352 input_data->length_per_channel = ptr_frame->samples_per_channel_;
326 input_data->audio_channel = codec_manager_.CurrentEncoder()->NumChannels(); 353 input_data->audio_channel = current_num_channels;
327 354
328 return 0; 355 return 0;
329 } 356 }
330 357
331 // Perform a resampling and down-mix if required. We down-mix only if 358 // Perform a resampling and down-mix if required. We down-mix only if
332 // encoder is mono and input is stereo. In case of dual-streaming, both 359 // encoder is mono and input is stereo. In case of dual-streaming, both
333 // encoders has to be mono for down-mix to take place. 360 // encoders has to be mono for down-mix to take place.
334 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing 361 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing
335 // is required, |*ptr_out| points to |in_frame|. 362 // is required, |*ptr_out| points to |in_frame|.
336 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, 363 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame,
337 const AudioFrame** ptr_out) { 364 const AudioFrame** ptr_out) {
338 bool resample = (in_frame.sample_rate_hz_ != 365 const auto* enc = rent_a_codec_.GetEncoderStack();
339 codec_manager_.CurrentEncoder()->SampleRateHz()); 366 const bool resample = in_frame.sample_rate_hz_ != enc->SampleRateHz();
340 367
341 // This variable is true if primary codec and secondary codec (if exists) 368 // This variable is true if primary codec and secondary codec (if exists)
342 // are both mono and input is stereo. 369 // are both mono and input is stereo.
343 bool down_mix = (in_frame.num_channels_ == 2) && 370 // TODO(henrik.lundin): This condition should probably be
344 (codec_manager_.CurrentEncoder()->NumChannels() == 1); 371 // in_frame.num_channels_ > enc->NumChannels()
372 const bool down_mix = in_frame.num_channels_ == 2 && enc->NumChannels() == 1;
345 373
346 if (!first_10ms_data_) { 374 if (!first_10ms_data_) {
347 expected_in_ts_ = in_frame.timestamp_; 375 expected_in_ts_ = in_frame.timestamp_;
348 expected_codec_ts_ = in_frame.timestamp_; 376 expected_codec_ts_ = in_frame.timestamp_;
349 first_10ms_data_ = true; 377 first_10ms_data_ = true;
350 } else if (in_frame.timestamp_ != expected_in_ts_) { 378 } else if (in_frame.timestamp_ != expected_in_ts_) {
351 // TODO(turajs): Do we need a warning here. 379 // TODO(turajs): Do we need a warning here.
352 expected_codec_ts_ += 380 expected_codec_ts_ +=
353 (in_frame.timestamp_ - expected_in_ts_) * 381 (in_frame.timestamp_ - expected_in_ts_) *
354 static_cast<uint32_t>( 382 static_cast<uint32_t>(static_cast<double>(enc->SampleRateHz()) /
355 (static_cast<double>( 383 static_cast<double>(in_frame.sample_rate_hz_));
356 codec_manager_.CurrentEncoder()->SampleRateHz()) /
357 static_cast<double>(in_frame.sample_rate_hz_)));
358 expected_in_ts_ = in_frame.timestamp_; 384 expected_in_ts_ = in_frame.timestamp_;
359 } 385 }
360 386
361 387
362 if (!down_mix && !resample) { 388 if (!down_mix && !resample) {
363 // No pre-processing is required. 389 // No pre-processing is required.
364 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); 390 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
365 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); 391 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
366 *ptr_out = &in_frame; 392 *ptr_out = &in_frame;
367 return 0; 393 return 0;
(...skipping 18 matching lines...)
386 412
387 preprocess_frame_.timestamp_ = expected_codec_ts_; 413 preprocess_frame_.timestamp_ = expected_codec_ts_;
388 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_; 414 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_;
389 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_; 415 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_;
390 // If it is required, we have to do a resampling. 416 // If it is required, we have to do a resampling.
391 if (resample) { 417 if (resample) {
392 // The result of the resampler is written to output frame. 418 // The result of the resampler is written to output frame.
393 dest_ptr_audio = preprocess_frame_.data_; 419 dest_ptr_audio = preprocess_frame_.data_;
394 420
395 int samples_per_channel = resampler_.Resample10Msec( 421 int samples_per_channel = resampler_.Resample10Msec(
396 src_ptr_audio, in_frame.sample_rate_hz_, 422 src_ptr_audio, in_frame.sample_rate_hz_, enc->SampleRateHz(),
397 codec_manager_.CurrentEncoder()->SampleRateHz(),
398 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples, 423 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples,
399 dest_ptr_audio); 424 dest_ptr_audio);
400 425
401 if (samples_per_channel < 0) { 426 if (samples_per_channel < 0) {
402 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 427 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
403 "Cannot add 10 ms audio, resampling failed"); 428 "Cannot add 10 ms audio, resampling failed");
404 return -1; 429 return -1;
405 } 430 }
406 preprocess_frame_.samples_per_channel_ = 431 preprocess_frame_.samples_per_channel_ =
407 static_cast<size_t>(samples_per_channel); 432 static_cast<size_t>(samples_per_channel);
408 preprocess_frame_.sample_rate_hz_ = 433 preprocess_frame_.sample_rate_hz_ = enc->SampleRateHz();
409 codec_manager_.CurrentEncoder()->SampleRateHz();
410 } 434 }
411 435
412 expected_codec_ts_ += 436 expected_codec_ts_ +=
413 static_cast<uint32_t>(preprocess_frame_.samples_per_channel_); 437 static_cast<uint32_t>(preprocess_frame_.samples_per_channel_);
414 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); 438 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_);
415 439
416 return 0; 440 return 0;
417 } 441 }
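One detail of PreprocessToAddData() above: when the input timestamp jumps (for example after dropped frames), expected_codec_ts_ is advanced by the gap converted from the input rate to the encoder rate. A standalone illustration with made-up values, assuming a 16 kHz input feeding a 48 kHz encoder stack:

#include <cstdint>
#include <iostream>

int main() {
  // Hypothetical setup: 16 kHz input, 48 kHz encoder stack (e.g. Opus).
  const int in_sample_rate_hz = 16000;
  const int enc_sample_rate_hz = 48000;

  uint32_t expected_in_ts = 1000;      // input timestamp we expected to see next
  uint32_t expected_codec_ts = 3000;   // the corresponding codec-rate timestamp
  const uint32_t in_timestamp = 1480;  // actual input: 480 samples (30 ms) ahead

  if (in_timestamp != expected_in_ts) {
    // Same catch-up as in PreprocessToAddData: scale the input-domain gap by
    // the ratio of the two rates before adding it to the codec-rate counter.
    expected_codec_ts +=
        (in_timestamp - expected_in_ts) *
        static_cast<uint32_t>(static_cast<double>(enc_sample_rate_hz) /
                              static_cast<double>(in_sample_rate_hz));
    expected_in_ts = in_timestamp;
  }

  std::cout << expected_codec_ts << "\n";  // 3000 + 480 * 3 = 4440
}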
418 442
419 ///////////////////////////////////////// 443 /////////////////////////////////////////
420 // (RED) Redundant Coding 444 // (RED) Redundant Coding
421 // 445 //
422 446
423 bool AudioCodingModuleImpl::REDStatus() const { 447 bool AudioCodingModuleImpl::REDStatus() const {
424 CriticalSectionScoped lock(acm_crit_sect_.get()); 448 CriticalSectionScoped lock(acm_crit_sect_.get());
425 return codec_manager_.red_enabled(); 449 return codec_manager_.GetStackParams()->use_red;
426 } 450 }
427 451
428 // Configure RED status i.e on/off. 452 // Configure RED status i.e on/off.
429 int AudioCodingModuleImpl::SetREDStatus( 453 int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
430 #ifdef WEBRTC_CODEC_RED 454 #ifdef WEBRTC_CODEC_RED
431 bool enable_red) {
432 CriticalSectionScoped lock(acm_crit_sect_.get()); 455 CriticalSectionScoped lock(acm_crit_sect_.get());
433 return codec_manager_.SetCopyRed(enable_red) ? 0 : -1; 456 if (!codec_manager_.SetCopyRed(enable_red)) {
457 return -1;
458 }
459 auto* sp = codec_manager_.GetStackParams();
460 if (sp->speech_encoder)
461 rent_a_codec_.RentEncoderStack(sp);
462 return 0;
434 #else 463 #else
435 bool /* enable_red */) {
436 WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_, 464 WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
437 " WEBRTC_CODEC_RED is undefined"); 465 " WEBRTC_CODEC_RED is undefined");
438 return -1; 466 return -1;
439 #endif 467 #endif
440 } 468 }
441 469
442 ///////////////////////////////////////// 470 /////////////////////////////////////////
443 // (FEC) Forward Error Correction (codec internal) 471 // (FEC) Forward Error Correction (codec internal)
444 // 472 //
445 473
446 bool AudioCodingModuleImpl::CodecFEC() const { 474 bool AudioCodingModuleImpl::CodecFEC() const {
447 CriticalSectionScoped lock(acm_crit_sect_.get()); 475 CriticalSectionScoped lock(acm_crit_sect_.get());
448 return codec_manager_.codec_fec_enabled(); 476 return codec_manager_.GetStackParams()->use_codec_fec;
449 } 477 }
450 478
451 int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) { 479 int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
452 CriticalSectionScoped lock(acm_crit_sect_.get()); 480 CriticalSectionScoped lock(acm_crit_sect_.get());
453 return codec_manager_.SetCodecFEC(enable_codec_fec); 481 if (!codec_manager_.SetCodecFEC(enable_codec_fec)) {
482 return -1;
483 }
484 auto* sp = codec_manager_.GetStackParams();
485 if (sp->speech_encoder)
486 rent_a_codec_.RentEncoderStack(sp);
487 if (enable_codec_fec) {
488 return sp->use_codec_fec ? 0 : -1;
489 } else {
490 RTC_DCHECK(!sp->use_codec_fec);
491 return 0;
492 }
454 } 493 }
455 494
456 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) { 495 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
457 CriticalSectionScoped lock(acm_crit_sect_.get()); 496 CriticalSectionScoped lock(acm_crit_sect_.get());
458 if (HaveValidEncoder("SetPacketLossRate")) { 497 if (HaveValidEncoder("SetPacketLossRate")) {
459 codec_manager_.CurrentEncoder()->SetProjectedPacketLossRate(loss_rate / 498 rent_a_codec_.GetEncoderStack()->SetProjectedPacketLossRate(loss_rate /
460 100.0); 499 100.0);
461 } 500 }
462 return 0; 501 return 0;
463 } 502 }
464 503
465 ///////////////////////////////////////// 504 /////////////////////////////////////////
466 // (VAD) Voice Activity Detection 505 // (VAD) Voice Activity Detection
467 // 506 //
468 int AudioCodingModuleImpl::SetVAD(bool enable_dtx, 507 int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
469 bool enable_vad, 508 bool enable_vad,
470 ACMVADMode mode) { 509 ACMVADMode mode) {
471 // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting. 510 // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
472 RTC_DCHECK_EQ(enable_dtx, enable_vad); 511 RTC_DCHECK_EQ(enable_dtx, enable_vad);
473 CriticalSectionScoped lock(acm_crit_sect_.get()); 512 CriticalSectionScoped lock(acm_crit_sect_.get());
474 return codec_manager_.SetVAD(enable_dtx, mode); 513 if (!codec_manager_.SetVAD(enable_dtx, mode)) {
514 return -1;
515 }
516 auto* sp = codec_manager_.GetStackParams();
517 if (sp->speech_encoder)
518 rent_a_codec_.RentEncoderStack(sp);
519 return 0;
475 } 520 }
476 521
477 // Get VAD/DTX settings. 522 // Get VAD/DTX settings.
478 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled, 523 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled,
479 ACMVADMode* mode) const { 524 ACMVADMode* mode) const {
480 CriticalSectionScoped lock(acm_crit_sect_.get()); 525 CriticalSectionScoped lock(acm_crit_sect_.get());
481 codec_manager_.VAD(dtx_enabled, vad_enabled, mode); 526 const auto* sp = codec_manager_.GetStackParams();
527 *dtx_enabled = *vad_enabled = sp->use_cng;
528 *mode = sp->vad_mode;
482 return 0; 529 return 0;
483 } 530 }
484 531
485 ///////////////////////////////////////// 532 /////////////////////////////////////////
486 // Receiver 533 // Receiver
487 // 534 //
488 535
489 int AudioCodingModuleImpl::InitializeReceiver() { 536 int AudioCodingModuleImpl::InitializeReceiver() {
490 CriticalSectionScoped lock(acm_crit_sect_.get()); 537 CriticalSectionScoped lock(acm_crit_sect_.get());
491 return InitializeReceiverSafe(); 538 return InitializeReceiverSafe();
(...skipping 66 matching lines...)
558 605
559 // Check if the payload-type is valid. 606 // Check if the payload-type is valid.
560 if (!RentACodec::IsPayloadTypeValid(codec.pltype)) { 607 if (!RentACodec::IsPayloadTypeValid(codec.pltype)) {
561 LOG_F(LS_ERROR) << "Invalid payload type " << codec.pltype << " for " 608 LOG_F(LS_ERROR) << "Invalid payload type " << codec.pltype << " for "
562 << codec.plname; 609 << codec.plname;
563 return -1; 610 return -1;
564 } 611 }
565 612
566 // Get |decoder| associated with |codec|. |decoder| is NULL if |codec| does 613 // Get |decoder| associated with |codec|. |decoder| is NULL if |codec| does
567 // not own its decoder. 614 // not own its decoder.
568 return receiver_.AddCodec(*codec_index, codec.pltype, codec.channels, 615 return receiver_.AddCodec(
569 codec.plfreq, codec_manager_.GetAudioDecoder(codec), 616 *codec_index, codec.pltype, codec.channels, codec.plfreq,
570 codec.plname); 617 STR_CASE_CMP(codec.plname, "isac") == 0 ? rent_a_codec_.RentIsacDecoder()
618 : nullptr,
619 codec.plname);
571 } 620 }
572 621
573 int AudioCodingModuleImpl::RegisterExternalReceiveCodec( 622 int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
574 int rtp_payload_type, 623 int rtp_payload_type,
575 AudioDecoder* external_decoder, 624 AudioDecoder* external_decoder,
576 int sample_rate_hz, 625 int sample_rate_hz,
577 int num_channels, 626 int num_channels,
578 const std::string& name) { 627 const std::string& name) {
579 CriticalSectionScoped lock(acm_crit_sect_.get()); 628 CriticalSectionScoped lock(acm_crit_sect_.get());
580 RTC_DCHECK(receiver_initialized_); 629 RTC_DCHECK(receiver_initialized_);
(...skipping 121 matching lines...)
702 case kVoip: 751 case kVoip:
703 app = AudioEncoder::Application::kSpeech; 752 app = AudioEncoder::Application::kSpeech;
704 break; 753 break;
705 case kAudio: 754 case kAudio:
706 app = AudioEncoder::Application::kAudio; 755 app = AudioEncoder::Application::kAudio;
707 break; 756 break;
708 default: 757 default:
709 FATAL(); 758 FATAL();
710 return 0; 759 return 0;
711 } 760 }
712 return codec_manager_.CurrentEncoder()->SetApplication(app) ? 0 : -1; 761 return rent_a_codec_.GetEncoderStack()->SetApplication(app) ? 0 : -1;
713 } 762 }
714 763
715 // Informs Opus encoder of the maximum playback rate the receiver will render. 764 // Informs Opus encoder of the maximum playback rate the receiver will render.
716 int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) { 765 int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) {
717 CriticalSectionScoped lock(acm_crit_sect_.get()); 766 CriticalSectionScoped lock(acm_crit_sect_.get());
718 if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) { 767 if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) {
719 return -1; 768 return -1;
720 } 769 }
721 if (!codec_manager_.CurrentEncoderIsOpus()) 770 if (!codec_manager_.CurrentEncoderIsOpus())
722 return -1; 771 return -1;
723 codec_manager_.CurrentEncoder()->SetMaxPlaybackRate(frequency_hz); 772 rent_a_codec_.GetEncoderStack()->SetMaxPlaybackRate(frequency_hz);
724 return 0; 773 return 0;
725 } 774 }
726 775
727 int AudioCodingModuleImpl::EnableOpusDtx() { 776 int AudioCodingModuleImpl::EnableOpusDtx() {
728 CriticalSectionScoped lock(acm_crit_sect_.get()); 777 CriticalSectionScoped lock(acm_crit_sect_.get());
729 if (!HaveValidEncoder("EnableOpusDtx")) { 778 if (!HaveValidEncoder("EnableOpusDtx")) {
730 return -1; 779 return -1;
731 } 780 }
732 if (!codec_manager_.CurrentEncoderIsOpus()) 781 if (!codec_manager_.CurrentEncoderIsOpus())
733 return -1; 782 return -1;
734 return codec_manager_.CurrentEncoder()->SetDtx(true) ? 0 : -1; 783 return rent_a_codec_.GetEncoderStack()->SetDtx(true) ? 0 : -1;
735 } 784 }
736 785
737 int AudioCodingModuleImpl::DisableOpusDtx() { 786 int AudioCodingModuleImpl::DisableOpusDtx() {
738 CriticalSectionScoped lock(acm_crit_sect_.get()); 787 CriticalSectionScoped lock(acm_crit_sect_.get());
739 if (!HaveValidEncoder("DisableOpusDtx")) { 788 if (!HaveValidEncoder("DisableOpusDtx")) {
740 return -1; 789 return -1;
741 } 790 }
742 if (!codec_manager_.CurrentEncoderIsOpus()) 791 if (!codec_manager_.CurrentEncoderIsOpus())
743 return -1; 792 return -1;
744 return codec_manager_.CurrentEncoder()->SetDtx(false) ? 0 : -1; 793 return rent_a_codec_.GetEncoderStack()->SetDtx(false) ? 0 : -1;
745 } 794 }
746 795
747 int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) { 796 int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) {
748 return receiver_.GetPlayoutTimestamp(timestamp) ? 0 : -1; 797 return receiver_.GetPlayoutTimestamp(timestamp) ? 0 : -1;
749 } 798 }
750 799
751 bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const { 800 bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const {
752 if (!codec_manager_.CurrentEncoder()) { 801 if (!rent_a_codec_.GetEncoderStack()) {
753 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, 802 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_,
754 "%s failed: No send codec is registered.", caller_name); 803 "%s failed: No send codec is registered.", caller_name);
755 return false; 804 return false;
756 } 805 }
757 return true; 806 return true;
758 } 807 }
759 808
760 int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) { 809 int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) {
761 return receiver_.RemoveCodec(payload_type); 810 return receiver_.RemoveCodec(payload_type);
762 } 811 }
(...skipping 15 matching lines...)
778 return receiver_.LeastRequiredDelayMs(); 827 return receiver_.LeastRequiredDelayMs();
779 } 828 }
780 829
781 void AudioCodingModuleImpl::GetDecodingCallStatistics( 830 void AudioCodingModuleImpl::GetDecodingCallStatistics(
782 AudioDecodingCallStats* call_stats) const { 831 AudioDecodingCallStats* call_stats) const {
783 receiver_.GetDecodingCallStatistics(call_stats); 832 receiver_.GetDecodingCallStatistics(call_stats);
784 } 833 }
785 834
786 } // namespace acm2 835 } // namespace acm2
787 } // namespace webrtc 836 } // namespace webrtc