| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 112 matching lines...) |
| 123 AudioCodingModuleImpl::~AudioCodingModuleImpl() = default; | 123 AudioCodingModuleImpl::~AudioCodingModuleImpl() = default; |
| 124 | 124 |
| 125 int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) { | 125 int32_t AudioCodingModuleImpl::Encode(const InputData& input_data) { |
| 126 AudioEncoder::EncodedInfo encoded_info; | 126 AudioEncoder::EncodedInfo encoded_info; |
| 127 uint8_t previous_pltype; | 127 uint8_t previous_pltype; |
| 128 | 128 |
| 129 // Check that a valid encoder is registered before encoding. | 129 // Check that a valid encoder is registered before encoding. |
| 130 if (!HaveValidEncoder("Process")) | 130 if (!HaveValidEncoder("Process")) |
| 131 return -1; | 131 return -1; |
| 132 | 132 |
| 133 AudioEncoder* audio_encoder = rent_a_codec_.GetEncoderStack(); | |
| 134 // Scale the timestamp to the codec's RTP timestamp rate. | 133 // Scale the timestamp to the codec's RTP timestamp rate. |
| 135 uint32_t rtp_timestamp = | 134 uint32_t rtp_timestamp = |
| 136 first_frame_ ? input_data.input_timestamp | 135 first_frame_ ? input_data.input_timestamp |
| 137 : last_rtp_timestamp_ + | 136 : last_rtp_timestamp_ + |
| 138 rtc::CheckedDivExact( | 137 rtc::CheckedDivExact( |
| 139 input_data.input_timestamp - last_timestamp_, | 138 input_data.input_timestamp - last_timestamp_, |
| 140 static_cast<uint32_t>(rtc::CheckedDivExact( | 139 static_cast<uint32_t>(rtc::CheckedDivExact( |
| 141 audio_encoder->SampleRateHz(), | 140 encoder_stack_->SampleRateHz(), |
| 142 audio_encoder->RtpTimestampRateHz()))); | 141 encoder_stack_->RtpTimestampRateHz()))); |
| 143 last_timestamp_ = input_data.input_timestamp; | 142 last_timestamp_ = input_data.input_timestamp; |
| 144 last_rtp_timestamp_ = rtp_timestamp; | 143 last_rtp_timestamp_ = rtp_timestamp; |
| 145 first_frame_ = false; | 144 first_frame_ = false; |
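Note: the arithmetic above maps the input-timestamp delta (counted in samples at the encoder's rate) onto the codec's RTP clock by dividing by sample_rate / rtp_rate; G.722 is the classic case where the two clocks differ (16 kHz sampling, 8 kHz RTP clock per RFC 3551). A minimal standalone sketch of the same computation, with illustrative names rather than the WebRTC API:

    #include <cassert>
    #include <cstdint>

    // Advance the RTP timestamp by an input delta measured at the codec's
    // sample rate. The asserts stand in for rtc::CheckedDivExact, which
    // requires both divisions to be exact.
    uint32_t NextRtpTimestamp(uint32_t last_rtp, uint32_t last_input_ts,
                              uint32_t input_ts, int sample_rate_hz,
                              int rtp_rate_hz) {
      assert(sample_rate_hz % rtp_rate_hz == 0);
      const uint32_t ratio =
          static_cast<uint32_t>(sample_rate_hz / rtp_rate_hz);
      const uint32_t delta = input_ts - last_input_ts;  // Wraps correctly.
      assert(delta % ratio == 0);
      return last_rtp + delta / ratio;
    }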
| 146 | 145 |
| 147 encode_buffer_.SetSize(audio_encoder->MaxEncodedBytes()); | 146 encode_buffer_.SetSize(encoder_stack_->MaxEncodedBytes()); |
| 148 encoded_info = audio_encoder->Encode( | 147 encoded_info = encoder_stack_->Encode( |
| 149 rtp_timestamp, rtc::ArrayView<const int16_t>( | 148 rtp_timestamp, rtc::ArrayView<const int16_t>( |
| 150 input_data.audio, input_data.audio_channel * | 149 input_data.audio, input_data.audio_channel * |
| 151 input_data.length_per_channel), | 150 input_data.length_per_channel), |
| 152 encode_buffer_.size(), encode_buffer_.data()); | 151 encode_buffer_.size(), encode_buffer_.data()); |
| 153 encode_buffer_.SetSize(encoded_info.encoded_bytes); | 152 encode_buffer_.SetSize(encoded_info.encoded_bytes); |
| 154 bitrate_logger_.MaybeLog(audio_encoder->GetTargetBitrate() / 1000); | 153 bitrate_logger_.MaybeLog(encoder_stack_->GetTargetBitrate() / 1000); |
| 155 if (encode_buffer_.size() == 0 && !encoded_info.send_even_if_empty) { | 154 if (encode_buffer_.size() == 0 && !encoded_info.send_even_if_empty) { |
| 156 // Not enough data. | 155 // Not enough data. |
| 157 return 0; | 156 return 0; |
| 158 } | 157 } |
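Note: the surrounding buffer handling follows the usual pattern for encoders whose output size is only known after the call: grow the buffer to MaxEncodedBytes(), encode, then shrink to encoded_bytes. A rough self-contained sketch with a stand-in encoder type (not the real AudioEncoder interface):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct StubEncoder {  // Stand-in for AudioEncoder.
      size_t MaxEncodedBytes() const { return 1500; }
      size_t Encode(uint8_t* out, size_t capacity) {
        assert(capacity >= 1);
        out[0] = 0xde;  // Pretend to produce one byte of payload.
        return 1;
      }
    };

    std::vector<uint8_t> EncodeOnce(StubEncoder& enc) {
      std::vector<uint8_t> buf(enc.MaxEncodedBytes());  // Worst case first.
      buf.resize(enc.Encode(buf.data(), buf.size()));   // Shrink to actual.
      return buf;  // May be empty while the encoder is still buffering input.
    }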
| 159 previous_pltype = previous_pltype_; // Read it while we have the critsect. | 158 previous_pltype = previous_pltype_; // Read it while we have the critsect. |
| 160 | 159 |
| 161 RTPFragmentationHeader my_fragmentation; | 160 RTPFragmentationHeader my_fragmentation; |
| 162 ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation); | 161 ConvertEncodedInfoToFragmentationHeader(encoded_info, &my_fragmentation); |
| 163 FrameType frame_type; | 162 FrameType frame_type; |
| 164 if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) { | 163 if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) { |
| (...skipping 36 matching lines...) |
| 201 auto* sp = codec_manager_.GetStackParams(); | 200 auto* sp = codec_manager_.GetStackParams(); |
| 202 if (!sp->speech_encoder && codec_manager_.GetCodecInst()) { | 201 if (!sp->speech_encoder && codec_manager_.GetCodecInst()) { |
| 203 // We have no speech encoder, but we have a specification for making one. | 202 // We have no speech encoder, but we have a specification for making one. |
| 204 AudioEncoder* enc = | 203 AudioEncoder* enc = |
| 205 rent_a_codec_.RentEncoder(*codec_manager_.GetCodecInst()); | 204 rent_a_codec_.RentEncoder(*codec_manager_.GetCodecInst()); |
| 206 if (!enc) | 205 if (!enc) |
| 207 return -1; | 206 return -1; |
| 208 sp->speech_encoder = enc; | 207 sp->speech_encoder = enc; |
| 209 } | 208 } |
| 210 if (sp->speech_encoder) | 209 if (sp->speech_encoder) |
| 211 rent_a_codec_.RentEncoderStack(sp); | 210 encoder_stack_ = rent_a_codec_.RentEncoderStack(sp); |
| 212 return 0; | 211 return 0; |
| 213 } | 212 } |
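Note: this is the recurring edit of the change: RentEncoderStack() now returns the assembled stack, and the module caches it in encoder_stack_ instead of re-querying rent_a_codec_ at every call site. A schematic of that caching pattern with simplified, hypothetical types (not the real rent_a_codec API):

    struct Encoder {
      virtual ~Encoder() = default;
      virtual int SampleRateHz() const = 0;
    };
    struct StackParams {};

    struct Rental {  // Owns the encoders it rents out.
      Encoder* RentEncoderStack(StackParams*) { return stack_; }
      Encoder* stack_ = nullptr;
    };

    class Module {
     public:
      void Reconfigure(StackParams* sp) {
        // Re-rent on every configuration change so the cached pointer is
        // refreshed before it can dangle.
        encoder_stack_ = rental_.RentEncoderStack(sp);
      }
      int SampleRateHz() const {
        return encoder_stack_ ? encoder_stack_->SampleRateHz() : -1;
      }
     private:
      Rental rental_;
      Encoder* encoder_stack_ = nullptr;  // Borrowed from rental_.
    };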
| 214 | 213 |
| 215 void AudioCodingModuleImpl::RegisterExternalSendCodec( | 214 void AudioCodingModuleImpl::RegisterExternalSendCodec( |
| 216 AudioEncoder* external_speech_encoder) { | 215 AudioEncoder* external_speech_encoder) { |
| 217 rtc::CritScope lock(&acm_crit_sect_); | 216 rtc::CritScope lock(&acm_crit_sect_); |
| 218 auto* sp = codec_manager_.GetStackParams(); | 217 auto* sp = codec_manager_.GetStackParams(); |
| 219 sp->speech_encoder = external_speech_encoder; | 218 sp->speech_encoder = external_speech_encoder; |
| 220 rent_a_codec_.RentEncoderStack(sp); | 219 encoder_stack_ = rent_a_codec_.RentEncoderStack(sp); |
| 221 } | 220 } |
| 222 | 221 |
| 223 // Get current send codec. | 222 // Get current send codec. |
| 224 rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const { | 223 rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const { |
| 225 rtc::CritScope lock(&acm_crit_sect_); | 224 rtc::CritScope lock(&acm_crit_sect_); |
| 226 auto* ci = codec_manager_.GetCodecInst(); | 225 auto* ci = codec_manager_.GetCodecInst(); |
| 227 if (ci) { | 226 if (ci) { |
| 228 return rtc::Optional<CodecInst>(*ci); | 227 return rtc::Optional<CodecInst>(*ci); |
| 229 } | 228 } |
| 230 auto* enc = codec_manager_.GetStackParams()->speech_encoder; | 229 auto* enc = codec_manager_.GetStackParams()->speech_encoder; |
| 231 if (enc) { | 230 if (enc) { |
| 232 return rtc::Optional<CodecInst>(CodecManager::ForgeCodecInst(enc)); | 231 return rtc::Optional<CodecInst>(CodecManager::ForgeCodecInst(enc)); |
| 233 } | 232 } |
| 234 return rtc::Optional<CodecInst>(); | 233 return rtc::Optional<CodecInst>(); |
| 235 } | 234 } |
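Note: callers receive an empty optional when neither a codec instance nor a speech encoder is set; typical use (a sketch assuming the pre-absl rtc::Optional of this era, which converts to bool and dereferences like a pointer):

    #include <cstdio>

    rtc::Optional<CodecInst> ci = acm->SendCodec();
    if (ci) {
      // CodecInst carries the payload name and clock rate, among others.
      std::printf("send codec: %s @ %d Hz\n", ci->plname, ci->plfreq);
    } else {
      std::printf("no send codec registered\n");
    }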
| 236 | 235 |
| 237 // Get current send frequency. | 236 // Get current send frequency. |
| 238 int AudioCodingModuleImpl::SendFrequency() const { | 237 int AudioCodingModuleImpl::SendFrequency() const { |
| 239 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, | 238 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, |
| 240 "SendFrequency()"); | 239 "SendFrequency()"); |
| 241 rtc::CritScope lock(&acm_crit_sect_); | 240 rtc::CritScope lock(&acm_crit_sect_); |
| 242 | 241 |
| 243 const auto* enc = rent_a_codec_.GetEncoderStack(); | 242 if (!encoder_stack_) { |
| 244 if (!enc) { | |
| 245 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, | 243 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, |
| 246 "SendFrequency Failed, no codec is registered"); | 244 "SendFrequency Failed, no codec is registered"); |
| 247 return -1; | 245 return -1; |
| 248 } | 246 } |
| 249 | 247 |
| 250 return enc->SampleRateHz(); | 248 return encoder_stack_->SampleRateHz(); |
| 251 } | 249 } |
| 252 | 250 |
| 253 void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) { | 251 void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) { |
| 254 rtc::CritScope lock(&acm_crit_sect_); | 252 rtc::CritScope lock(&acm_crit_sect_); |
| 255 auto* enc = rent_a_codec_.GetEncoderStack(); | 253 if (encoder_stack_) { |
| 256 if (enc) { | 254 encoder_stack_->SetTargetBitrate(bitrate_bps); |
| 257 enc->SetTargetBitrate(bitrate_bps); | |
| 258 } | 255 } |
| 259 } | 256 } |
| 260 | 257 |
| 261 // Register a transport callback which will be called to deliver | 258 // Register a transport callback which will be called to deliver |
| 262 // the encoded buffers. | 259 // the encoded buffers. |
| 263 int AudioCodingModuleImpl::RegisterTransportCallback( | 260 int AudioCodingModuleImpl::RegisterTransportCallback( |
| 264 AudioPacketizationCallback* transport) { | 261 AudioPacketizationCallback* transport) { |
| 265 rtc::CritScope lock(&callback_crit_sect_); | 262 rtc::CritScope lock(&callback_crit_sect_); |
| 266 packetization_callback_ = transport; | 263 packetization_callback_ = transport; |
| 267 return 0; | 264 return 0; |
| (...skipping 46 matching lines...) |
| 314 const AudioFrame* ptr_frame; | 311 const AudioFrame* ptr_frame; |
| 315 // Perform resampling, and also down-mix if it is required and can be | 312 // Perform resampling, and also down-mix if it is required and can be |
| 316 // done before resampling (a down-mix prior to resampling takes place | 313 // done before resampling (a down-mix prior to resampling takes place |
| 317 // if both primary and secondary encoders are mono and the input is | 314 // if both primary and secondary encoders are mono and the input is |
| 318 // stereo). | 315 // stereo). |
| 319 if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) { | 316 if (PreprocessToAddData(audio_frame, &ptr_frame) < 0) { |
| 320 return -1; | 317 return -1; |
| 321 } | 318 } |
| 322 | 319 |
| 323 // Check whether we need an up-mix or a down-mix. | 320 // Check whether we need an up-mix or a down-mix. |
| 324 const size_t current_num_channels = | 321 const size_t current_num_channels = encoder_stack_->NumChannels(); |
| 325 rent_a_codec_.GetEncoderStack()->NumChannels(); | |
| 326 const bool same_num_channels = | 322 const bool same_num_channels = |
| 327 ptr_frame->num_channels_ == current_num_channels; | 323 ptr_frame->num_channels_ == current_num_channels; |
| 328 | 324 |
| 329 if (!same_num_channels) { | 325 if (!same_num_channels) { |
| 330 if (ptr_frame->num_channels_ == 1) { | 326 if (ptr_frame->num_channels_ == 1) { |
| 331 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) | 327 if (UpMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) |
| 332 return -1; | 328 return -1; |
| 333 } else { | 329 } else { |
| 334 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) | 330 if (DownMix(*ptr_frame, WEBRTC_10MS_PCM_AUDIO, input_data->buffer) < 0) |
| 335 return -1; | 331 return -1; |
| (...skipping 16 matching lines...) |
| 352 return 0; | 348 return 0; |
| 353 } | 349 } |
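Note: UpMix() and DownMix() above reconcile the input channel count with the encoder's. Minimal sketches of the two directions, assuming interleaved 16-bit PCM (not the actual WebRTC implementations):

    #include <cstddef>
    #include <cstdint>

    // Mono -> stereo: duplicate each sample into both channels.
    void UpMixMonoToStereo(const int16_t* in, size_t samples, int16_t* out) {
      for (size_t i = 0; i < samples; ++i) {
        out[2 * i] = in[i];
        out[2 * i + 1] = in[i];
      }
    }

    // Stereo -> mono: average L and R; widen first to avoid overflow.
    void DownMixStereoToMono(const int16_t* in, size_t samples, int16_t* out) {
      for (size_t i = 0; i < samples; ++i) {
        out[i] = static_cast<int16_t>(
            (static_cast<int32_t>(in[2 * i]) + in[2 * i + 1]) / 2);
      }
    }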
| 354 | 350 |
| 355 // Perform resampling and down-mix if required. We down-mix only if the | 351 // Perform resampling and down-mix if required. We down-mix only if the |
| 356 // encoder is mono and the input is stereo. In case of dual-streaming, | 352 // encoder is mono and the input is stereo. In case of dual-streaming, |
| 357 // both encoders have to be mono for the down-mix to take place. | 353 // both encoders have to be mono for the down-mix to take place. |
| 358 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing | 354 // |*ptr_out| will point to the pre-processed audio-frame. If no pre-processing |
| 359 // is required, |*ptr_out| points to |in_frame|. | 355 // is required, |*ptr_out| points to |in_frame|. |
| 360 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, | 356 int AudioCodingModuleImpl::PreprocessToAddData(const AudioFrame& in_frame, |
| 361 const AudioFrame** ptr_out) { | 357 const AudioFrame** ptr_out) { |
| 362 const auto* enc = rent_a_codec_.GetEncoderStack(); | 358 const bool resample = |
| 363 const bool resample = in_frame.sample_rate_hz_ != enc->SampleRateHz(); | 359 in_frame.sample_rate_hz_ != encoder_stack_->SampleRateHz(); |
| 364 | 360 |
| 365 // This variable is true if primary codec and secondary codec (if exists) | 361 // This variable is true if primary codec and secondary codec (if exists) |
| 366 // are both mono and input is stereo. | 362 // are both mono and input is stereo. |
| 367 // TODO(henrik.lundin): This condition should probably be | 363 // TODO(henrik.lundin): This condition should probably be |
| 368 // in_frame.num_channels_ > enc->NumChannels() | 364 // in_frame.num_channels_ > encoder_stack_->NumChannels() |
| 369 const bool down_mix = in_frame.num_channels_ == 2 && enc->NumChannels() == 1; | 365 const bool down_mix = |
| 366 in_frame.num_channels_ == 2 && encoder_stack_->NumChannels() == 1; |
| 370 | 367 |
| 371 if (!first_10ms_data_) { | 368 if (!first_10ms_data_) { |
| 372 expected_in_ts_ = in_frame.timestamp_; | 369 expected_in_ts_ = in_frame.timestamp_; |
| 373 expected_codec_ts_ = in_frame.timestamp_; | 370 expected_codec_ts_ = in_frame.timestamp_; |
| 374 first_10ms_data_ = true; | 371 first_10ms_data_ = true; |
| 375 } else if (in_frame.timestamp_ != expected_in_ts_) { | 372 } else if (in_frame.timestamp_ != expected_in_ts_) { |
| 376 // TODO(turajs): Do we need a warning here? | 373 // TODO(turajs): Do we need a warning here? |
| 377 expected_codec_ts_ += | 374 expected_codec_ts_ += |
| 378 (in_frame.timestamp_ - expected_in_ts_) * | 375 (in_frame.timestamp_ - expected_in_ts_) * |
| 379 static_cast<uint32_t>(static_cast<double>(enc->SampleRateHz()) / | 376 static_cast<uint32_t>( |
| 380 static_cast<double>(in_frame.sample_rate_hz_)); | 377 static_cast<double>(encoder_stack_->SampleRateHz()) / |
| 378 static_cast<double>(in_frame.sample_rate_hz_)); |
| 381 expected_in_ts_ = in_frame.timestamp_; | 379 expected_in_ts_ = in_frame.timestamp_; |
| 382 } | 380 } |
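Note: when the input timestamp jumps, the codec-side timestamp is extrapolated by scaling the gap from the input rate to the encoder rate, as above. Standalone sketch of that conversion (illustrative names):

    #include <cstdint>

    // E.g. a gap of 480 input samples at 48 kHz advances a 16 kHz codec
    // clock by 160 ticks.
    uint32_t ExtrapolateCodecTs(uint32_t codec_ts, uint32_t input_gap,
                                int codec_rate_hz, int input_rate_hz) {
      return codec_ts + static_cast<uint32_t>(
          input_gap * (static_cast<double>(codec_rate_hz) /
                       static_cast<double>(input_rate_hz)));
    }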
| 383 | 381 |
| 384 | 382 |
| 385 if (!down_mix && !resample) { | 383 if (!down_mix && !resample) { |
| 386 // No pre-processing is required. | 384 // No pre-processing is required. |
| 387 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); | 385 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); |
| 388 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); | 386 expected_codec_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); |
| 389 *ptr_out = &in_frame; | 387 *ptr_out = &in_frame; |
| 390 return 0; | 388 return 0; |
| (...skipping 18 matching lines...) |
| 409 | 407 |
| 410 preprocess_frame_.timestamp_ = expected_codec_ts_; | 408 preprocess_frame_.timestamp_ = expected_codec_ts_; |
| 411 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_; | 409 preprocess_frame_.samples_per_channel_ = in_frame.samples_per_channel_; |
| 412 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_; | 410 preprocess_frame_.sample_rate_hz_ = in_frame.sample_rate_hz_; |
| 413 // Resample if required. | 411 // Resample if required. |
| 414 if (resample) { | 412 if (resample) { |
| 415 // The result of the resampler is written to the output frame. | 413 // The result of the resampler is written to the output frame. |
| 416 dest_ptr_audio = preprocess_frame_.data_; | 414 dest_ptr_audio = preprocess_frame_.data_; |
| 417 | 415 |
| 418 int samples_per_channel = resampler_.Resample10Msec( | 416 int samples_per_channel = resampler_.Resample10Msec( |
| 419 src_ptr_audio, in_frame.sample_rate_hz_, enc->SampleRateHz(), | 417 src_ptr_audio, in_frame.sample_rate_hz_, encoder_stack_->SampleRateHz(), |
| 420 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples, | 418 preprocess_frame_.num_channels_, AudioFrame::kMaxDataSizeSamples, |
| 421 dest_ptr_audio); | 419 dest_ptr_audio); |
| 422 | 420 |
| 423 if (samples_per_channel < 0) { | 421 if (samples_per_channel < 0) { |
| 424 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, | 422 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, |
| 425 "Cannot add 10 ms audio, resampling failed"); | 423 "Cannot add 10 ms audio, resampling failed"); |
| 426 return -1; | 424 return -1; |
| 427 } | 425 } |
| 428 preprocess_frame_.samples_per_channel_ = | 426 preprocess_frame_.samples_per_channel_ = |
| 429 static_cast<size_t>(samples_per_channel); | 427 static_cast<size_t>(samples_per_channel); |
| 430 preprocess_frame_.sample_rate_hz_ = enc->SampleRateHz(); | 428 preprocess_frame_.sample_rate_hz_ = encoder_stack_->SampleRateHz(); |
| 431 } | 429 } |
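Note: Resample10Msec() converts one 10 ms block between rates, so the per-channel sample count simply scales with the rate, which is what the samples_per_channel bookkeeping above tracks. For intuition (hypothetical helper, not part of the resampler API):

    #include <cstddef>

    // Samples per channel in one 10 ms block at a given rate.
    constexpr size_t SamplesPer10Ms(int rate_hz) {
      return static_cast<size_t>(rate_hz / 100);
    }
    static_assert(SamplesPer10Ms(44100) == 441, "10 ms at 44.1 kHz");
    static_assert(SamplesPer10Ms(48000) == 480, "10 ms at 48 kHz");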
| 432 | 430 |
| 433 expected_codec_ts_ += | 431 expected_codec_ts_ += |
| 434 static_cast<uint32_t>(preprocess_frame_.samples_per_channel_); | 432 static_cast<uint32_t>(preprocess_frame_.samples_per_channel_); |
| 435 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); | 433 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); |
| 436 | 434 |
| 437 return 0; | 435 return 0; |
| 438 } | 436 } |
| 439 | 437 |
| 440 ///////////////////////////////////////// | 438 ///////////////////////////////////////// |
| 441 // (RED) Redundant Coding | 439 // (RED) Redundant Coding |
| 442 // | 440 // |
| 443 | 441 |
| 444 bool AudioCodingModuleImpl::REDStatus() const { | 442 bool AudioCodingModuleImpl::REDStatus() const { |
| 445 rtc::CritScope lock(&acm_crit_sect_); | 443 rtc::CritScope lock(&acm_crit_sect_); |
| 446 return codec_manager_.GetStackParams()->use_red; | 444 return codec_manager_.GetStackParams()->use_red; |
| 447 } | 445 } |
| 448 | 446 |
| 449 // Configure RED status, i.e. on/off. | 447 // Configure RED status, i.e. on/off. |
| 450 int AudioCodingModuleImpl::SetREDStatus(bool enable_red) { | 448 int AudioCodingModuleImpl::SetREDStatus(bool enable_red) { |
| 451 #ifdef WEBRTC_CODEC_RED | 449 #ifdef WEBRTC_CODEC_RED |
| 452 rtc::CritScope lock(&acm_crit_sect_); | 450 rtc::CritScope lock(&acm_crit_sect_); |
| 453 if (!codec_manager_.SetCopyRed(enable_red)) { | 451 if (!codec_manager_.SetCopyRed(enable_red)) { |
| 454 return -1; | 452 return -1; |
| 455 } | 453 } |
| 456 auto* sp = codec_manager_.GetStackParams(); | 454 auto* sp = codec_manager_.GetStackParams(); |
| 457 if (sp->speech_encoder) | 455 if (sp->speech_encoder) |
| 458 rent_a_codec_.RentEncoderStack(sp); | 456 encoder_stack_ = rent_a_codec_.RentEncoderStack(sp); |
| 459 return 0; | 457 return 0; |
| 460 #else | 458 #else |
| 461 WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_, | 459 WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_, |
| 462 " WEBRTC_CODEC_RED is undefined"); | 460 " WEBRTC_CODEC_RED is undefined"); |
| 463 return -1; | 461 return -1; |
| 464 #endif | 462 #endif |
| 465 } | 463 } |
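Note: SetREDStatus() fails outright when WEBRTC_CODEC_RED is not compiled in, so -1 can mean either "unsupported build" or "rejected configuration". Usage sketch:

    // Probe RED support at runtime rather than assuming the compile flag;
    // on failure, fall back to codec-internal FEC (e.g. Opus in-band FEC).
    if (acm->SetREDStatus(true) != 0) {
      acm->SetCodecFEC(true);
    }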
| 466 | 464 |
| 467 ///////////////////////////////////////// | 465 ///////////////////////////////////////// |
| 468 // (FEC) Forward Error Correction (codec internal) | 466 // (FEC) Forward Error Correction (codec internal) |
| 469 // | 467 // |
| 470 | 468 |
| 471 bool AudioCodingModuleImpl::CodecFEC() const { | 469 bool AudioCodingModuleImpl::CodecFEC() const { |
| 472 rtc::CritScope lock(&acm_crit_sect_); | 470 rtc::CritScope lock(&acm_crit_sect_); |
| 473 return codec_manager_.GetStackParams()->use_codec_fec; | 471 return codec_manager_.GetStackParams()->use_codec_fec; |
| 474 } | 472 } |
| 475 | 473 |
| 476 int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) { | 474 int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) { |
| 477 rtc::CritScope lock(&acm_crit_sect_); | 475 rtc::CritScope lock(&acm_crit_sect_); |
| 478 if (!codec_manager_.SetCodecFEC(enable_codec_fec)) { | 476 if (!codec_manager_.SetCodecFEC(enable_codec_fec)) { |
| 479 return -1; | 477 return -1; |
| 480 } | 478 } |
| 481 auto* sp = codec_manager_.GetStackParams(); | 479 auto* sp = codec_manager_.GetStackParams(); |
| 482 if (sp->speech_encoder) | 480 if (sp->speech_encoder) |
| 483 rent_a_codec_.RentEncoderStack(sp); | 481 encoder_stack_ = rent_a_codec_.RentEncoderStack(sp); |
| 484 if (enable_codec_fec) { | 482 if (enable_codec_fec) { |
| 485 return sp->use_codec_fec ? 0 : -1; | 483 return sp->use_codec_fec ? 0 : -1; |
| 486 } else { | 484 } else { |
| 487 RTC_DCHECK(!sp->use_codec_fec); | 485 RTC_DCHECK(!sp->use_codec_fec); |
| 488 return 0; | 486 return 0; |
| 489 } | 487 } |
| 490 } | 488 } |
| 491 | 489 |
| 492 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) { | 490 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) { |
| 493 rtc::CritScope lock(&acm_crit_sect_); | 491 rtc::CritScope lock(&acm_crit_sect_); |
| 494 if (HaveValidEncoder("SetPacketLossRate")) { | 492 if (HaveValidEncoder("SetPacketLossRate")) { |
| 495 rent_a_codec_.GetEncoderStack()->SetProjectedPacketLossRate(loss_rate / | 493 encoder_stack_->SetProjectedPacketLossRate(loss_rate / 100.0); |
| 496 100.0); | |
| 497 } | 494 } |
| 498 return 0; | 495 return 0; |
| 499 } | 496 } |
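Note the unit change: the public API takes a loss rate in percent (0-100) while the encoder expects a fraction, hence the division by 100.0. A sketch of the conversion plus the range clamp a caller might add (the ACM itself does not clamp here):

    double LossFraction(int loss_rate_percent) {
      if (loss_rate_percent < 0) loss_rate_percent = 0;
      if (loss_rate_percent > 100) loss_rate_percent = 100;
      return loss_rate_percent / 100.0;  // e.g. 5 -> 0.05
    }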
| 500 | 497 |
| 501 ///////////////////////////////////////// | 498 ///////////////////////////////////////// |
| 502 // (VAD) Voice Activity Detection | 499 // (VAD) Voice Activity Detection |
| 503 // | 500 // |
| 504 int AudioCodingModuleImpl::SetVAD(bool enable_dtx, | 501 int AudioCodingModuleImpl::SetVAD(bool enable_dtx, |
| 505 bool enable_vad, | 502 bool enable_vad, |
| 506 ACMVADMode mode) { | 503 ACMVADMode mode) { |
| 507 // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting. | 504 // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting. |
| 508 RTC_DCHECK_EQ(enable_dtx, enable_vad); | 505 RTC_DCHECK_EQ(enable_dtx, enable_vad); |
| 509 rtc::CritScope lock(&acm_crit_sect_); | 506 rtc::CritScope lock(&acm_crit_sect_); |
| 510 if (!codec_manager_.SetVAD(enable_dtx, mode)) { | 507 if (!codec_manager_.SetVAD(enable_dtx, mode)) { |
| 511 return -1; | 508 return -1; |
| 512 } | 509 } |
| 513 auto* sp = codec_manager_.GetStackParams(); | 510 auto* sp = codec_manager_.GetStackParams(); |
| 514 if (sp->speech_encoder) | 511 if (sp->speech_encoder) |
| 515 rent_a_codec_.RentEncoderStack(sp); | 512 encoder_stack_ = rent_a_codec_.RentEncoderStack(sp); |
| 516 return 0; | 513 return 0; |
| 517 } | 514 } |
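Note: because of the DCHECK, DTX and VAD can only be toggled as a pair through this API. Typical call (sketch; VADNormal is one of the ACMVADMode values):

    // Mismatched flags would trip the RTC_DCHECK_EQ above.
    acm->SetVAD(/*enable_dtx=*/true, /*enable_vad=*/true, VADNormal);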
| 518 | 515 |
| 519 // Get VAD/DTX settings. | 516 // Get VAD/DTX settings. |
| 520 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled, | 517 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled, |
| 521 ACMVADMode* mode) const { | 518 ACMVADMode* mode) const { |
| 522 rtc::CritScope lock(&acm_crit_sect_); | 519 rtc::CritScope lock(&acm_crit_sect_); |
| 523 const auto* sp = codec_manager_.GetStackParams(); | 520 const auto* sp = codec_manager_.GetStackParams(); |
| 524 *dtx_enabled = *vad_enabled = sp->use_cng; | 521 *dtx_enabled = *vad_enabled = sp->use_cng; |
| 525 *mode = sp->vad_mode; | 522 *mode = sp->vad_mode; |
| (...skipping 220 matching lines...) |
| 746 case kVoip: | 743 case kVoip: |
| 747 app = AudioEncoder::Application::kSpeech; | 744 app = AudioEncoder::Application::kSpeech; |
| 748 break; | 745 break; |
| 749 case kAudio: | 746 case kAudio: |
| 750 app = AudioEncoder::Application::kAudio; | 747 app = AudioEncoder::Application::kAudio; |
| 751 break; | 748 break; |
| 752 default: | 749 default: |
| 753 FATAL(); | 750 FATAL(); |
| 754 return 0; | 751 return 0; |
| 755 } | 752 } |
| 756 return rent_a_codec_.GetEncoderStack()->SetApplication(app) ? 0 : -1; | 753 return encoder_stack_->SetApplication(app) ? 0 : -1; |
| 757 } | 754 } |
| 758 | 755 |
| 759 // Informs the Opus encoder of the maximum playback rate the receiver will render. | 756 // Informs the Opus encoder of the maximum playback rate the receiver will render. |
| 760 int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) { | 757 int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) { |
| 761 rtc::CritScope lock(&acm_crit_sect_); | 758 rtc::CritScope lock(&acm_crit_sect_); |
| 762 if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) { | 759 if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) { |
| 763 return -1; | 760 return -1; |
| 764 } | 761 } |
| 765 rent_a_codec_.GetEncoderStack()->SetMaxPlaybackRate(frequency_hz); | 762 encoder_stack_->SetMaxPlaybackRate(frequency_hz); |
| 766 return 0; | 763 return 0; |
| 767 } | 764 } |
| 768 | 765 |
| 769 int AudioCodingModuleImpl::EnableOpusDtx() { | 766 int AudioCodingModuleImpl::EnableOpusDtx() { |
| 770 rtc::CritScope lock(&acm_crit_sect_); | 767 rtc::CritScope lock(&acm_crit_sect_); |
| 771 if (!HaveValidEncoder("EnableOpusDtx")) { | 768 if (!HaveValidEncoder("EnableOpusDtx")) { |
| 772 return -1; | 769 return -1; |
| 773 } | 770 } |
| 774 return rent_a_codec_.GetEncoderStack()->SetDtx(true) ? 0 : -1; | 771 return encoder_stack_->SetDtx(true) ? 0 : -1; |
| 775 } | 772 } |
| 776 | 773 |
| 777 int AudioCodingModuleImpl::DisableOpusDtx() { | 774 int AudioCodingModuleImpl::DisableOpusDtx() { |
| 778 rtc::CritScope lock(&acm_crit_sect_); | 775 rtc::CritScope lock(&acm_crit_sect_); |
| 779 if (!HaveValidEncoder("DisableOpusDtx")) { | 776 if (!HaveValidEncoder("DisableOpusDtx")) { |
| 780 return -1; | 777 return -1; |
| 781 } | 778 } |
| 782 return rent_a_codec_.GetEncoderStack()->SetDtx(false) ? 0 : -1; | 779 return encoder_stack_->SetDtx(false) ? 0 : -1; |
| 783 } | 780 } |
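Note: both DTX toggles funnel into AudioEncoder::SetDtx(), which returns false for encoders without DTX support, so the result is worth checking. Usage sketch:

    // Returns -1 unless the current encoder accepts SetDtx(true); in this
    // codebase that is effectively Opus.
    if (acm->EnableOpusDtx() != 0) {
      // No DTX support in the current send codec; continue without it.
    }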
| 784 | 781 |
| 785 int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) { | 782 int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) { |
| 786 return receiver_.GetPlayoutTimestamp(timestamp) ? 0 : -1; | 783 return receiver_.GetPlayoutTimestamp(timestamp) ? 0 : -1; |
| 787 } | 784 } |
| 788 | 785 |
| 789 bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const { | 786 bool AudioCodingModuleImpl::HaveValidEncoder(const char* caller_name) const { |
| 790 if (!rent_a_codec_.GetEncoderStack()) { | 787 if (!encoder_stack_) { |
| 791 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, | 788 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, |
| 792 "%s failed: No send codec is registered.", caller_name); | 789 "%s failed: No send codec is registered.", caller_name); |
| 793 return false; | 790 return false; |
| 794 } | 791 } |
| 795 return true; | 792 return true; |
| 796 } | 793 } |
| 797 | 794 |
| 798 int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) { | 795 int AudioCodingModuleImpl::UnregisterReceiveCodec(uint8_t payload_type) { |
| 799 return receiver_.RemoveCodec(payload_type); | 796 return receiver_.RemoveCodec(payload_type); |
| 800 } | 797 } |
| (...skipping 15 matching lines...) |
| 816 return receiver_.LeastRequiredDelayMs(); | 813 return receiver_.LeastRequiredDelayMs(); |
| 817 } | 814 } |
| 818 | 815 |
| 819 void AudioCodingModuleImpl::GetDecodingCallStatistics( | 816 void AudioCodingModuleImpl::GetDecodingCallStatistics( |
| 820 AudioDecodingCallStats* call_stats) const { | 817 AudioDecodingCallStats* call_stats) const { |
| 821 receiver_.GetDecodingCallStatistics(call_stats); | 818 receiver_.GetDecodingCallStatistics(call_stats); |
| 822 } | 819 } |
| 823 | 820 |
| 824 } // namespace acm2 | 821 } // namespace acm2 |
| 825 } // namespace webrtc | 822 } // namespace webrtc |