| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h" | 11 #include "webrtc/modules/audio_coding/acm2/audio_coding_module_impl.h" |
| 12 | 12 |
| 13 #include <assert.h> | 13 #include <assert.h> |
| 14 #include <stdlib.h> | 14 #include <stdlib.h> |
| 15 #include <vector> | 15 #include <vector> |
| 16 | 16 |
| 17 #include "webrtc/base/checks.h" | 17 #include "webrtc/base/checks.h" |
| 18 #include "webrtc/base/safe_conversions.h" | 18 #include "webrtc/base/safe_conversions.h" |
| 19 #include "webrtc/engine_configurations.h" | 19 #include "webrtc/engine_configurations.h" |
| 20 #include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h" | 20 #include "webrtc/modules/audio_coding/include/audio_coding_module_typedefs.h" |
| 21 #include "webrtc/modules/audio_coding/acm2/acm_common_defs.h" | 21 #include "webrtc/modules/audio_coding/acm2/acm_common_defs.h" |
| 22 #include "webrtc/modules/audio_coding/acm2/acm_resampler.h" | 22 #include "webrtc/modules/audio_coding/acm2/acm_resampler.h" |
| 23 #include "webrtc/modules/audio_coding/acm2/call_statistics.h" | 23 #include "webrtc/modules/audio_coding/acm2/call_statistics.h" |
| 24 #include "webrtc/system_wrappers/include/critical_section_wrapper.h" | |
| 25 #include "webrtc/system_wrappers/include/logging.h" | 24 #include "webrtc/system_wrappers/include/logging.h" |
| 26 #include "webrtc/system_wrappers/include/metrics.h" | 25 #include "webrtc/system_wrappers/include/metrics.h" |
| 27 #include "webrtc/system_wrappers/include/rw_lock_wrapper.h" | 26 #include "webrtc/system_wrappers/include/rw_lock_wrapper.h" |
| 28 #include "webrtc/system_wrappers/include/trace.h" | 27 #include "webrtc/system_wrappers/include/trace.h" |
| 29 #include "webrtc/typedefs.h" | 28 #include "webrtc/typedefs.h" |
| 30 | 29 |
| 31 namespace webrtc { | 30 namespace webrtc { |
| 32 | 31 |
| 33 namespace acm2 { | 32 namespace acm2 { |
| 34 | 33 |
| (...skipping 61 matching lines...) | |
| 96 void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) { | 95 void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) { |
| 97 if (value != last_value_ || first_time_) { | 96 if (value != last_value_ || first_time_) { |
| 98 first_time_ = false; | 97 first_time_ = false; |
| 99 last_value_ = value; | 98 last_value_ = value; |
| 100 RTC_HISTOGRAM_COUNTS_SPARSE_100(histogram_name_, value); | 99 RTC_HISTOGRAM_COUNTS_SPARSE_100(histogram_name_, value); |
| 101 } | 100 } |
| 102 } | 101 } |
| 103 | 102 |
| 104 AudioCodingModuleImpl::AudioCodingModuleImpl( | 103 AudioCodingModuleImpl::AudioCodingModuleImpl( |
| 105 const AudioCodingModule::Config& config) | 104 const AudioCodingModule::Config& config) |
| 106 : acm_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()), | 105 : id_(config.id), |
| 107 id_(config.id), | |
| 108 expected_codec_ts_(0xD87F3F9F), | 106 expected_codec_ts_(0xD87F3F9F), |
| 109 expected_in_ts_(0xD87F3F9F), | 107 expected_in_ts_(0xD87F3F9F), |
| 110 receiver_(config), | 108 receiver_(config), |
| 111 bitrate_logger_("WebRTC.Audio.TargetBitrateInKbps"), | 109 bitrate_logger_("WebRTC.Audio.TargetBitrateInKbps"), |
| 112 previous_pltype_(255), | 110 previous_pltype_(255), |
| 113 receiver_initialized_(false), | 111 receiver_initialized_(false), |
| 114 first_10ms_data_(false), | 112 first_10ms_data_(false), |
| 115 first_frame_(true), | 113 first_frame_(true), |
| 116 callback_crit_sect_(CriticalSectionWrapper::CreateCriticalSection()), | |
| 117 packetization_callback_(NULL), | 114 packetization_callback_(NULL), |
| 118 vad_callback_(NULL) { | 115 vad_callback_(NULL) { |
| 119 if (InitializeReceiverSafe() < 0) { | 116 if (InitializeReceiverSafe() < 0) { |
| 120 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, | 117 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, |
| 121 "Cannot initialize receiver"); | 118 "Cannot initialize receiver"); |
| 122 } | 119 } |
| 123 WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_, "Created"); | 120 WEBRTC_TRACE(webrtc::kTraceMemory, webrtc::kTraceAudioCoding, id_, "Created"); |
| 124 } | 121 } |
| 125 | 122 |
| 126 AudioCodingModuleImpl::~AudioCodingModuleImpl() = default; | 123 AudioCodingModuleImpl::~AudioCodingModuleImpl() = default; |
| (...skipping 39 matching lines...) | |
| 166 FrameType frame_type; | 163 FrameType frame_type; |
| 167 if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) { | 164 if (encode_buffer_.size() == 0 && encoded_info.send_even_if_empty) { |
| 168 frame_type = kEmptyFrame; | 165 frame_type = kEmptyFrame; |
| 169 encoded_info.payload_type = previous_pltype; | 166 encoded_info.payload_type = previous_pltype; |
| 170 } else { | 167 } else { |
| 171 RTC_DCHECK_GT(encode_buffer_.size(), 0u); | 168 RTC_DCHECK_GT(encode_buffer_.size(), 0u); |
| 172 frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN; | 169 frame_type = encoded_info.speech ? kAudioFrameSpeech : kAudioFrameCN; |
| 173 } | 170 } |
| 174 | 171 |
| 175 { | 172 { |
| 176 CriticalSectionScoped lock(callback_crit_sect_.get()); | 173 rtc::CritScope lock(&callback_crit_sect_); |
| 177 if (packetization_callback_) { | 174 if (packetization_callback_) { |
| 178 packetization_callback_->SendData( | 175 packetization_callback_->SendData( |
| 179 frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp, | 176 frame_type, encoded_info.payload_type, encoded_info.encoded_timestamp, |
| 180 encode_buffer_.data(), encode_buffer_.size(), | 177 encode_buffer_.data(), encode_buffer_.size(), |
| 181 my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation | 178 my_fragmentation.fragmentationVectorSize > 0 ? &my_fragmentation |
| 182 : nullptr); | 179 : nullptr); |
| 183 } | 180 } |
| 184 | 181 |
| 185 if (vad_callback_) { | 182 if (vad_callback_) { |
| 186 // Callback with VAD decision. | 183 // Callback with VAD decision. |
| 187 vad_callback_->InFrameType(frame_type); | 184 vad_callback_->InFrameType(frame_type); |
| 188 } | 185 } |
| 189 } | 186 } |
| 190 previous_pltype_ = encoded_info.payload_type; | 187 previous_pltype_ = encoded_info.payload_type; |
| 191 return static_cast<int32_t>(encode_buffer_.size()); | 188 return static_cast<int32_t>(encode_buffer_.size()); |
| 192 } | 189 } |
| 193 | 190 |
| 194 ///////////////////////////////////////// | 191 ///////////////////////////////////////// |
| 195 // Sender | 192 // Sender |
| 196 // | 193 // |
| 197 | 194 |
| 198 // Can be called multiple times for Codec, CNG, RED. | 195 // Can be called multiple times for Codec, CNG, RED. |
| 199 int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) { | 196 int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) { |
| 200 CriticalSectionScoped lock(acm_crit_sect_.get()); | 197 rtc::CritScope lock(&acm_crit_sect_); |
| 201 if (!codec_manager_.RegisterEncoder(send_codec)) { | 198 if (!codec_manager_.RegisterEncoder(send_codec)) { |
| 202 return -1; | 199 return -1; |
| 203 } | 200 } |
| 204 auto* sp = codec_manager_.GetStackParams(); | 201 auto* sp = codec_manager_.GetStackParams(); |
| 205 if (!sp->speech_encoder && codec_manager_.GetCodecInst()) { | 202 if (!sp->speech_encoder && codec_manager_.GetCodecInst()) { |
| 206 // We have no speech encoder, but we have a specification for making one. | 203 // We have no speech encoder, but we have a specification for making one. |
| 207 AudioEncoder* enc = | 204 AudioEncoder* enc = |
| 208 rent_a_codec_.RentEncoder(*codec_manager_.GetCodecInst()); | 205 rent_a_codec_.RentEncoder(*codec_manager_.GetCodecInst()); |
| 209 if (!enc) | 206 if (!enc) |
| 210 return -1; | 207 return -1; |
| 211 sp->speech_encoder = enc; | 208 sp->speech_encoder = enc; |
| 212 } | 209 } |
| 213 if (sp->speech_encoder) | 210 if (sp->speech_encoder) |
| 214 rent_a_codec_.RentEncoderStack(sp); | 211 rent_a_codec_.RentEncoderStack(sp); |
| 215 return 0; | 212 return 0; |
| 216 } | 213 } |
| 217 | 214 |
| 218 void AudioCodingModuleImpl::RegisterExternalSendCodec( | 215 void AudioCodingModuleImpl::RegisterExternalSendCodec( |
| 219 AudioEncoder* external_speech_encoder) { | 216 AudioEncoder* external_speech_encoder) { |
| 220 CriticalSectionScoped lock(acm_crit_sect_.get()); | 217 rtc::CritScope lock(&acm_crit_sect_); |
| 221 auto* sp = codec_manager_.GetStackParams(); | 218 auto* sp = codec_manager_.GetStackParams(); |
| 222 sp->speech_encoder = external_speech_encoder; | 219 sp->speech_encoder = external_speech_encoder; |
| 223 rent_a_codec_.RentEncoderStack(sp); | 220 rent_a_codec_.RentEncoderStack(sp); |
| 224 } | 221 } |
| 225 | 222 |
| 226 // Get current send codec. | 223 // Get current send codec. |
| 227 rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const { | 224 rtc::Optional<CodecInst> AudioCodingModuleImpl::SendCodec() const { |
| 228 CriticalSectionScoped lock(acm_crit_sect_.get()); | 225 rtc::CritScope lock(&acm_crit_sect_); |
| 229 auto* ci = codec_manager_.GetCodecInst(); | 226 auto* ci = codec_manager_.GetCodecInst(); |
| 230 if (ci) { | 227 if (ci) { |
| 231 return rtc::Optional<CodecInst>(*ci); | 228 return rtc::Optional<CodecInst>(*ci); |
| 232 } | 229 } |
| 233 auto* enc = codec_manager_.GetStackParams()->speech_encoder; | 230 auto* enc = codec_manager_.GetStackParams()->speech_encoder; |
| 234 if (enc) { | 231 if (enc) { |
| 235 return rtc::Optional<CodecInst>(CodecManager::ForgeCodecInst(enc)); | 232 return rtc::Optional<CodecInst>(CodecManager::ForgeCodecInst(enc)); |
| 236 } | 233 } |
| 237 return rtc::Optional<CodecInst>(); | 234 return rtc::Optional<CodecInst>(); |
| 238 } | 235 } |
| 239 | 236 |
| 240 // Get current send frequency. | 237 // Get current send frequency. |
| 241 int AudioCodingModuleImpl::SendFrequency() const { | 238 int AudioCodingModuleImpl::SendFrequency() const { |
| 242 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, | 239 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, |
| 243 "SendFrequency()"); | 240 "SendFrequency()"); |
| 244 CriticalSectionScoped lock(acm_crit_sect_.get()); | 241 rtc::CritScope lock(&acm_crit_sect_); |
| 245 | 242 |
| 246 const auto* enc = rent_a_codec_.GetEncoderStack(); | 243 const auto* enc = rent_a_codec_.GetEncoderStack(); |
| 247 if (!enc) { | 244 if (!enc) { |
| 248 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, | 245 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, |
| 249 "SendFrequency Failed, no codec is registered"); | 246 "SendFrequency Failed, no codec is registered"); |
| 250 return -1; | 247 return -1; |
| 251 } | 248 } |
| 252 | 249 |
| 253 return enc->SampleRateHz(); | 250 return enc->SampleRateHz(); |
| 254 } | 251 } |
| 255 | 252 |
| 256 void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) { | 253 void AudioCodingModuleImpl::SetBitRate(int bitrate_bps) { |
| 257 CriticalSectionScoped lock(acm_crit_sect_.get()); | 254 rtc::CritScope lock(&acm_crit_sect_); |
| 258 auto* enc = rent_a_codec_.GetEncoderStack(); | 255 auto* enc = rent_a_codec_.GetEncoderStack(); |
| 259 if (enc) { | 256 if (enc) { |
| 260 enc->SetTargetBitrate(bitrate_bps); | 257 enc->SetTargetBitrate(bitrate_bps); |
| 261 } | 258 } |
| 262 } | 259 } |
| 263 | 260 |
| 264 // Register a transport callback which will be called to deliver | 261 // Register a transport callback which will be called to deliver |
| 265 // the encoded buffers. | 262 // the encoded buffers. |
| 266 int AudioCodingModuleImpl::RegisterTransportCallback( | 263 int AudioCodingModuleImpl::RegisterTransportCallback( |
| 267 AudioPacketizationCallback* transport) { | 264 AudioPacketizationCallback* transport) { |
| 268 CriticalSectionScoped lock(callback_crit_sect_.get()); | 265 rtc::CritScope lock(&callback_crit_sect_); |
| 269 packetization_callback_ = transport; | 266 packetization_callback_ = transport; |
| 270 return 0; | 267 return 0; |
| 271 } | 268 } |
| 272 | 269 |
| 273 // Add 10MS of raw (PCM) audio data to the encoder. | 270 // Add 10MS of raw (PCM) audio data to the encoder. |
| 274 int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) { | 271 int AudioCodingModuleImpl::Add10MsData(const AudioFrame& audio_frame) { |
| 275 InputData input_data; | 272 InputData input_data; |
| 276 CriticalSectionScoped lock(acm_crit_sect_.get()); | 273 rtc::CritScope lock(&acm_crit_sect_); |
| 277 int r = Add10MsDataInternal(audio_frame, &input_data); | 274 int r = Add10MsDataInternal(audio_frame, &input_data); |
| 278 return r < 0 ? r : Encode(input_data); | 275 return r < 0 ? r : Encode(input_data); |
| 279 } | 276 } |
| 280 | 277 |
| 281 int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame, | 278 int AudioCodingModuleImpl::Add10MsDataInternal(const AudioFrame& audio_frame, |
| 282 InputData* input_data) { | 279 InputData* input_data) { |
| 283 if (audio_frame.samples_per_channel_ == 0) { | 280 if (audio_frame.samples_per_channel_ == 0) { |
| 284 assert(false); | 281 assert(false); |
| 285 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, | 282 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, id_, |
| 286 "Cannot Add 10 ms audio, payload length is zero"); | 283 "Cannot Add 10 ms audio, payload length is zero"); |
| (...skipping 151 matching lines...) | |
| 438 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); | 435 expected_in_ts_ += static_cast<uint32_t>(in_frame.samples_per_channel_); |
| 439 | 436 |
| 440 return 0; | 437 return 0; |
| 441 } | 438 } |
| 442 | 439 |
| 443 ///////////////////////////////////////// | 440 ///////////////////////////////////////// |
| 444 // (RED) Redundant Coding | 441 // (RED) Redundant Coding |
| 445 // | 442 // |
| 446 | 443 |
| 447 bool AudioCodingModuleImpl::REDStatus() const { | 444 bool AudioCodingModuleImpl::REDStatus() const { |
| 448 CriticalSectionScoped lock(acm_crit_sect_.get()); | 445 rtc::CritScope lock(&acm_crit_sect_); |
| 449 return codec_manager_.GetStackParams()->use_red; | 446 return codec_manager_.GetStackParams()->use_red; |
| 450 } | 447 } |
| 451 | 448 |
| 452 // Configure RED status i.e on/off. | 449 // Configure RED status i.e on/off. |
| 453 int AudioCodingModuleImpl::SetREDStatus(bool enable_red) { | 450 int AudioCodingModuleImpl::SetREDStatus(bool enable_red) { |
| 454 #ifdef WEBRTC_CODEC_RED | 451 #ifdef WEBRTC_CODEC_RED |
| 455 CriticalSectionScoped lock(acm_crit_sect_.get()); | 452 rtc::CritScope lock(&acm_crit_sect_); |
| 456 if (!codec_manager_.SetCopyRed(enable_red)) { | 453 if (!codec_manager_.SetCopyRed(enable_red)) { |
| 457 return -1; | 454 return -1; |
| 458 } | 455 } |
| 459 auto* sp = codec_manager_.GetStackParams(); | 456 auto* sp = codec_manager_.GetStackParams(); |
| 460 if (sp->speech_encoder) | 457 if (sp->speech_encoder) |
| 461 rent_a_codec_.RentEncoderStack(sp); | 458 rent_a_codec_.RentEncoderStack(sp); |
| 462 return 0; | 459 return 0; |
| 463 #else | 460 #else |
| 464 WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_, | 461 WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_, |
| 465 " WEBRTC_CODEC_RED is undefined"); | 462 " WEBRTC_CODEC_RED is undefined"); |
| 466 return -1; | 463 return -1; |
| 467 #endif | 464 #endif |
| 468 } | 465 } |
| 469 | 466 |
| 470 ///////////////////////////////////////// | 467 ///////////////////////////////////////// |
| 471 // (FEC) Forward Error Correction (codec internal) | 468 // (FEC) Forward Error Correction (codec internal) |
| 472 // | 469 // |
| 473 | 470 |
| 474 bool AudioCodingModuleImpl::CodecFEC() const { | 471 bool AudioCodingModuleImpl::CodecFEC() const { |
| 475 CriticalSectionScoped lock(acm_crit_sect_.get()); | 472 rtc::CritScope lock(&acm_crit_sect_); |
| 476 return codec_manager_.GetStackParams()->use_codec_fec; | 473 return codec_manager_.GetStackParams()->use_codec_fec; |
| 477 } | 474 } |
| 478 | 475 |
| 479 int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) { | 476 int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) { |
| 480 CriticalSectionScoped lock(acm_crit_sect_.get()); | 477 rtc::CritScope lock(&acm_crit_sect_); |
| 481 if (!codec_manager_.SetCodecFEC(enable_codec_fec)) { | 478 if (!codec_manager_.SetCodecFEC(enable_codec_fec)) { |
| 482 return -1; | 479 return -1; |
| 483 } | 480 } |
| 484 auto* sp = codec_manager_.GetStackParams(); | 481 auto* sp = codec_manager_.GetStackParams(); |
| 485 if (sp->speech_encoder) | 482 if (sp->speech_encoder) |
| 486 rent_a_codec_.RentEncoderStack(sp); | 483 rent_a_codec_.RentEncoderStack(sp); |
| 487 if (enable_codec_fec) { | 484 if (enable_codec_fec) { |
| 488 return sp->use_codec_fec ? 0 : -1; | 485 return sp->use_codec_fec ? 0 : -1; |
| 489 } else { | 486 } else { |
| 490 RTC_DCHECK(!sp->use_codec_fec); | 487 RTC_DCHECK(!sp->use_codec_fec); |
| 491 return 0; | 488 return 0; |
| 492 } | 489 } |
| 493 } | 490 } |
| 494 | 491 |
| 495 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) { | 492 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) { |
| 496 CriticalSectionScoped lock(acm_crit_sect_.get()); | 493 rtc::CritScope lock(&acm_crit_sect_); |
| 497 if (HaveValidEncoder("SetPacketLossRate")) { | 494 if (HaveValidEncoder("SetPacketLossRate")) { |
| 498 rent_a_codec_.GetEncoderStack()->SetProjectedPacketLossRate(loss_rate / | 495 rent_a_codec_.GetEncoderStack()->SetProjectedPacketLossRate(loss_rate / |
| 499 100.0); | 496 100.0); |
| 500 } | 497 } |
| 501 return 0; | 498 return 0; |
| 502 } | 499 } |
| 503 | 500 |
| 504 ///////////////////////////////////////// | 501 ///////////////////////////////////////// |
| 505 // (VAD) Voice Activity Detection | 502 // (VAD) Voice Activity Detection |
| 506 // | 503 // |
| 507 int AudioCodingModuleImpl::SetVAD(bool enable_dtx, | 504 int AudioCodingModuleImpl::SetVAD(bool enable_dtx, |
| 508 bool enable_vad, | 505 bool enable_vad, |
| 509 ACMVADMode mode) { | 506 ACMVADMode mode) { |
| 510 // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting. | 507 // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting. |
| 511 RTC_DCHECK_EQ(enable_dtx, enable_vad); | 508 RTC_DCHECK_EQ(enable_dtx, enable_vad); |
| 512 CriticalSectionScoped lock(acm_crit_sect_.get()); | 509 rtc::CritScope lock(&acm_crit_sect_); |
| 513 if (!codec_manager_.SetVAD(enable_dtx, mode)) { | 510 if (!codec_manager_.SetVAD(enable_dtx, mode)) { |
| 514 return -1; | 511 return -1; |
| 515 } | 512 } |
| 516 auto* sp = codec_manager_.GetStackParams(); | 513 auto* sp = codec_manager_.GetStackParams(); |
| 517 if (sp->speech_encoder) | 514 if (sp->speech_encoder) |
| 518 rent_a_codec_.RentEncoderStack(sp); | 515 rent_a_codec_.RentEncoderStack(sp); |
| 519 return 0; | 516 return 0; |
| 520 } | 517 } |
| 521 | 518 |
| 522 // Get VAD/DTX settings. | 519 // Get VAD/DTX settings. |
| 523 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled, | 520 int AudioCodingModuleImpl::VAD(bool* dtx_enabled, bool* vad_enabled, |
| 524 ACMVADMode* mode) const { | 521 ACMVADMode* mode) const { |
| 525 CriticalSectionScoped lock(acm_crit_sect_.get()); | 522 rtc::CritScope lock(&acm_crit_sect_); |
| 526 const auto* sp = codec_manager_.GetStackParams(); | 523 const auto* sp = codec_manager_.GetStackParams(); |
| 527 *dtx_enabled = *vad_enabled = sp->use_cng; | 524 *dtx_enabled = *vad_enabled = sp->use_cng; |
| 528 *mode = sp->vad_mode; | 525 *mode = sp->vad_mode; |
| 529 return 0; | 526 return 0; |
| 530 } | 527 } |
| 531 | 528 |
| 532 ///////////////////////////////////////// | 529 ///////////////////////////////////////// |
| 533 // Receiver | 530 // Receiver |
| 534 // | 531 // |
| 535 | 532 |
| 536 int AudioCodingModuleImpl::InitializeReceiver() { | 533 int AudioCodingModuleImpl::InitializeReceiver() { |
| 537 CriticalSectionScoped lock(acm_crit_sect_.get()); | 534 rtc::CritScope lock(&acm_crit_sect_); |
| 538 return InitializeReceiverSafe(); | 535 return InitializeReceiverSafe(); |
| 539 } | 536 } |
| 540 | 537 |
| 541 // Initialize receiver, resets codec database etc. | 538 // Initialize receiver, resets codec database etc. |
| 542 int AudioCodingModuleImpl::InitializeReceiverSafe() { | 539 int AudioCodingModuleImpl::InitializeReceiverSafe() { |
| 543 // If the receiver is already initialized then we want to destroy any | 540 // If the receiver is already initialized then we want to destroy any |
| 544 // existing decoders. After a call to this function, we should have a clean | 541 // existing decoders. After a call to this function, we should have a clean |
| 545 // start-up. | 542 // start-up. |
| 546 if (receiver_initialized_) { | 543 if (receiver_initialized_) { |
| 547 if (receiver_.RemoveAllCodecs() < 0) | 544 if (receiver_.RemoveAllCodecs() < 0) |
| (...skipping 32 matching lines...) | |
| 580 // Get current playout frequency. | 577 // Get current playout frequency. |
| 581 int AudioCodingModuleImpl::PlayoutFrequency() const { | 578 int AudioCodingModuleImpl::PlayoutFrequency() const { |
| 582 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, | 579 WEBRTC_TRACE(webrtc::kTraceStream, webrtc::kTraceAudioCoding, id_, |
| 583 "PlayoutFrequency()"); | 580 "PlayoutFrequency()"); |
| 584 return receiver_.last_output_sample_rate_hz(); | 581 return receiver_.last_output_sample_rate_hz(); |
| 585 } | 582 } |
| 586 | 583 |
| 587 // Register possible receive codecs, can be called multiple times, | 584 // Register possible receive codecs, can be called multiple times, |
| 588 // for codecs, CNG (NB, WB and SWB), DTMF, RED. | 585 // for codecs, CNG (NB, WB and SWB), DTMF, RED. |
| 589 int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) { | 586 int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) { |
| 590 CriticalSectionScoped lock(acm_crit_sect_.get()); | 587 rtc::CritScope lock(&acm_crit_sect_); |
| 591 RTC_DCHECK(receiver_initialized_); | 588 RTC_DCHECK(receiver_initialized_); |
| 592 if (codec.channels > 2) { | 589 if (codec.channels > 2) { |
| 593 LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels; | 590 LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels; |
| 594 return -1; | 591 return -1; |
| 595 } | 592 } |
| 596 | 593 |
| 597 auto codec_id = | 594 auto codec_id = |
| 598 RentACodec::CodecIdByParams(codec.plname, codec.plfreq, codec.channels); | 595 RentACodec::CodecIdByParams(codec.plname, codec.plfreq, codec.channels); |
| 599 if (!codec_id) { | 596 if (!codec_id) { |
| 600 LOG_F(LS_ERROR) << "Wrong codec params to be registered as receive codec"; | 597 LOG_F(LS_ERROR) << "Wrong codec params to be registered as receive codec"; |
| (...skipping 17 matching lines...) | |
| 618 : nullptr, | 615 : nullptr, |
| 619 codec.plname); | 616 codec.plname); |
| 620 } | 617 } |
| 621 | 618 |
| 622 int AudioCodingModuleImpl::RegisterExternalReceiveCodec( | 619 int AudioCodingModuleImpl::RegisterExternalReceiveCodec( |
| 623 int rtp_payload_type, | 620 int rtp_payload_type, |
| 624 AudioDecoder* external_decoder, | 621 AudioDecoder* external_decoder, |
| 625 int sample_rate_hz, | 622 int sample_rate_hz, |
| 626 int num_channels, | 623 int num_channels, |
| 627 const std::string& name) { | 624 const std::string& name) { |
| 628 CriticalSectionScoped lock(acm_crit_sect_.get()); | 625 rtc::CritScope lock(&acm_crit_sect_); |
| 629 RTC_DCHECK(receiver_initialized_); | 626 RTC_DCHECK(receiver_initialized_); |
| 630 if (num_channels > 2 || num_channels < 0) { | 627 if (num_channels > 2 || num_channels < 0) { |
| 631 LOG_F(LS_ERROR) << "Unsupported number of channels: " << num_channels; | 628 LOG_F(LS_ERROR) << "Unsupported number of channels: " << num_channels; |
| 632 return -1; | 629 return -1; |
| 633 } | 630 } |
| 634 | 631 |
| 635 // Check if the payload-type is valid. | 632 // Check if the payload-type is valid. |
| 636 if (!RentACodec::IsPayloadTypeValid(rtp_payload_type)) { | 633 if (!RentACodec::IsPayloadTypeValid(rtp_payload_type)) { |
| 637 LOG_F(LS_ERROR) << "Invalid payload-type " << rtp_payload_type | 634 LOG_F(LS_ERROR) << "Invalid payload-type " << rtp_payload_type |
| 638 << " for external decoder."; | 635 << " for external decoder."; |
| 639 return -1; | 636 return -1; |
| 640 } | 637 } |
| 641 | 638 |
| 642 return receiver_.AddCodec(-1 /* external */, rtp_payload_type, num_channels, | 639 return receiver_.AddCodec(-1 /* external */, rtp_payload_type, num_channels, |
| 643 sample_rate_hz, external_decoder, name); | 640 sample_rate_hz, external_decoder, name); |
| 644 } | 641 } |
| 645 | 642 |
| 646 // Get current received codec. | 643 // Get current received codec. |
| 647 int AudioCodingModuleImpl::ReceiveCodec(CodecInst* current_codec) const { | 644 int AudioCodingModuleImpl::ReceiveCodec(CodecInst* current_codec) const { |
| 648 CriticalSectionScoped lock(acm_crit_sect_.get()); | 645 rtc::CritScope lock(&acm_crit_sect_); |
| 649 return receiver_.LastAudioCodec(current_codec); | 646 return receiver_.LastAudioCodec(current_codec); |
| 650 } | 647 } |
| 651 | 648 |
| 652 // Incoming packet from network parsed and ready for decode. | 649 // Incoming packet from network parsed and ready for decode. |
| 653 int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload, | 650 int AudioCodingModuleImpl::IncomingPacket(const uint8_t* incoming_payload, |
| 654 const size_t payload_length, | 651 const size_t payload_length, |
| 655 const WebRtcRTPHeader& rtp_header) { | 652 const WebRtcRTPHeader& rtp_header) { |
| 656 return receiver_.InsertPacket( | 653 return receiver_.InsertPacket( |
| 657 rtp_header, | 654 rtp_header, |
| 658 rtc::ArrayView<const uint8_t>(incoming_payload, payload_length)); | 655 rtc::ArrayView<const uint8_t>(incoming_payload, payload_length)); |
| (...skipping 39 matching lines...) | |
| 698 // TODO(turajs) change the return value to void. Also change the corresponding | 695 // TODO(turajs) change the return value to void. Also change the corresponding |
| 699 // NetEq function. | 696 // NetEq function. |
| 700 int AudioCodingModuleImpl::GetNetworkStatistics(NetworkStatistics* statistics) { | 697 int AudioCodingModuleImpl::GetNetworkStatistics(NetworkStatistics* statistics) { |
| 701 receiver_.GetNetworkStatistics(statistics); | 698 receiver_.GetNetworkStatistics(statistics); |
| 702 return 0; | 699 return 0; |
| 703 } | 700 } |
| 704 | 701 |
| 705 int AudioCodingModuleImpl::RegisterVADCallback(ACMVADCallback* vad_callback) { | 702 int AudioCodingModuleImpl::RegisterVADCallback(ACMVADCallback* vad_callback) { |
| 706 WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_, | 703 WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceAudioCoding, id_, |
| 707 "RegisterVADCallback()"); | 704 "RegisterVADCallback()"); |
| 708 CriticalSectionScoped lock(callback_crit_sect_.get()); | 705 rtc::CritScope lock(&callback_crit_sect_); |
| 709 vad_callback_ = vad_callback; | 706 vad_callback_ = vad_callback; |
| 710 return 0; | 707 return 0; |
| 711 } | 708 } |
| 712 | 709 |
| 713 // TODO(kwiberg): Remove this method, and have callers call IncomingPacket | 710 // TODO(kwiberg): Remove this method, and have callers call IncomingPacket |
| 714 // instead. The translation logic and state belong with them, not with | 711 // instead. The translation logic and state belong with them, not with |
| 715 // AudioCodingModuleImpl. | 712 // AudioCodingModuleImpl. |
| 716 int AudioCodingModuleImpl::IncomingPayload(const uint8_t* incoming_payload, | 713 int AudioCodingModuleImpl::IncomingPayload(const uint8_t* incoming_payload, |
| 717 size_t payload_length, | 714 size_t payload_length, |
| 718 uint8_t payload_type, | 715 uint8_t payload_type, |
| (...skipping 14 matching lines...) | |
| 733 } | 730 } |
| 734 | 731 |
| 735 aux_rtp_header_->header.timestamp = timestamp; | 732 aux_rtp_header_->header.timestamp = timestamp; |
| 736 IncomingPacket(incoming_payload, payload_length, *aux_rtp_header_); | 733 IncomingPacket(incoming_payload, payload_length, *aux_rtp_header_); |
| 737 // Get ready for the next payload. | 734 // Get ready for the next payload. |
| 738 aux_rtp_header_->header.sequenceNumber++; | 735 aux_rtp_header_->header.sequenceNumber++; |
| 739 return 0; | 736 return 0; |
| 740 } | 737 } |
| 741 | 738 |
| 742 int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application) { | 739 int AudioCodingModuleImpl::SetOpusApplication(OpusApplicationMode application) { |
| 743 CriticalSectionScoped lock(acm_crit_sect_.get()); | 740 rtc::CritScope lock(&acm_crit_sect_); |
| 744 if (!HaveValidEncoder("SetOpusApplication")) { | 741 if (!HaveValidEncoder("SetOpusApplication")) { |
| 745 return -1; | 742 return -1; |
| 746 } | 743 } |
| 747 AudioEncoder::Application app; | 744 AudioEncoder::Application app; |
| 748 switch (application) { | 745 switch (application) { |
| 749 case kVoip: | 746 case kVoip: |
| 750 app = AudioEncoder::Application::kSpeech; | 747 app = AudioEncoder::Application::kSpeech; |
| 751 break; | 748 break; |
| 752 case kAudio: | 749 case kAudio: |
| 753 app = AudioEncoder::Application::kAudio; | 750 app = AudioEncoder::Application::kAudio; |
| 754 break; | 751 break; |
| 755 default: | 752 default: |
| 756 FATAL(); | 753 FATAL(); |
| 757 return 0; | 754 return 0; |
| 758 } | 755 } |
| 759 return rent_a_codec_.GetEncoderStack()->SetApplication(app) ? 0 : -1; | 756 return rent_a_codec_.GetEncoderStack()->SetApplication(app) ? 0 : -1; |
| 760 } | 757 } |
| 761 | 758 |
| 762 // Informs Opus encoder of the maximum playback rate the receiver will render. | 759 // Informs Opus encoder of the maximum playback rate the receiver will render. |
| 763 int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) { | 760 int AudioCodingModuleImpl::SetOpusMaxPlaybackRate(int frequency_hz) { |
| 764 CriticalSectionScoped lock(acm_crit_sect_.get()); | 761 rtc::CritScope lock(&acm_crit_sect_); |
| 765 if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) { | 762 if (!HaveValidEncoder("SetOpusMaxPlaybackRate")) { |
| 766 return -1; | 763 return -1; |
| 767 } | 764 } |
| 768 rent_a_codec_.GetEncoderStack()->SetMaxPlaybackRate(frequency_hz); | 765 rent_a_codec_.GetEncoderStack()->SetMaxPlaybackRate(frequency_hz); |
| 769 return 0; | 766 return 0; |
| 770 } | 767 } |
| 771 | 768 |
| 772 int AudioCodingModuleImpl::EnableOpusDtx() { | 769 int AudioCodingModuleImpl::EnableOpusDtx() { |
| 773 CriticalSectionScoped lock(acm_crit_sect_.get()); | 770 rtc::CritScope lock(&acm_crit_sect_); |
| 774 if (!HaveValidEncoder("EnableOpusDtx")) { | 771 if (!HaveValidEncoder("EnableOpusDtx")) { |
| 775 return -1; | 772 return -1; |
| 776 } | 773 } |
| 777 return rent_a_codec_.GetEncoderStack()->SetDtx(true) ? 0 : -1; | 774 return rent_a_codec_.GetEncoderStack()->SetDtx(true) ? 0 : -1; |
| 778 } | 775 } |
| 779 | 776 |
| 780 int AudioCodingModuleImpl::DisableOpusDtx() { | 777 int AudioCodingModuleImpl::DisableOpusDtx() { |
| 781 CriticalSectionScoped lock(acm_crit_sect_.get()); | 778 rtc::CritScope lock(&acm_crit_sect_); |
| 782 if (!HaveValidEncoder("DisableOpusDtx")) { | 779 if (!HaveValidEncoder("DisableOpusDtx")) { |
| 783 return -1; | 780 return -1; |
| 784 } | 781 } |
| 785 return rent_a_codec_.GetEncoderStack()->SetDtx(false) ? 0 : -1; | 782 return rent_a_codec_.GetEncoderStack()->SetDtx(false) ? 0 : -1; |
| 786 } | 783 } |
| 787 | 784 |
| 788 int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) { | 785 int AudioCodingModuleImpl::PlayoutTimestamp(uint32_t* timestamp) { |
| 789 return receiver_.GetPlayoutTimestamp(timestamp) ? 0 : -1; | 786 return receiver_.GetPlayoutTimestamp(timestamp) ? 0 : -1; |
| 790 } | 787 } |
| 791 | 788 |
| (...skipping 27 matching lines...) | |
| 819 return receiver_.LeastRequiredDelayMs(); | 816 return receiver_.LeastRequiredDelayMs(); |
| 820 } | 817 } |
| 821 | 818 |
| 822 void AudioCodingModuleImpl::GetDecodingCallStatistics( | 819 void AudioCodingModuleImpl::GetDecodingCallStatistics( |
| 823 AudioDecodingCallStats* call_stats) const { | 820 AudioDecodingCallStats* call_stats) const { |
| 824 receiver_.GetDecodingCallStatistics(call_stats); | 821 receiver_.GetDecodingCallStatistics(call_stats); |
| 825 } | 822 } |
| 826 | 823 |
| 827 } // namespace acm2 | 824 } // namespace acm2 |
| 828 } // namespace webrtc | 825 } // namespace webrtc |
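The recurring change in this CL is a locking-idiom migration: the heap-allocated `CriticalSectionWrapper` members guarded with `CriticalSectionScoped` are replaced by `rtc::CriticalSection` members guarded with `rtc::CritScope`, which is why the `critical_section_wrapper.h` include and the two `CreateCriticalSection()` initializers disappear from the constructor. Below is a minimal sketch of the two idioms side by side, for reference while reading the per-function hunks above. The class and member names, the smart-pointer type, and the `criticalsection.h` include path are assumptions for illustration; the matching header change (audio_coding_module_impl.h) is not part of this file's diff.

```cpp
#include <memory>

#include "webrtc/base/criticalsection.h"  // rtc::CriticalSection, rtc::CritScope (assumed path).
#include "webrtc/system_wrappers/include/critical_section_wrapper.h"

namespace {

// Old idiom: the lock is a separately allocated wrapper object, and callers
// lock it through CriticalSectionScoped, passing the raw pointer.
class OldStyleCounter {
 public:
  OldStyleCounter()
      : crit_(webrtc::CriticalSectionWrapper::CreateCriticalSection()) {}
  void Increment() {
    webrtc::CriticalSectionScoped lock(crit_.get());  // Held until end of scope.
    ++count_;
  }

 private:
  const std::unique_ptr<webrtc::CriticalSectionWrapper> crit_;
  int count_ = 0;
};

// New idiom: the lock is a plain member, and callers lock it through
// rtc::CritScope, passing its address. No heap allocation, no .get().
class NewStyleCounter {
 public:
  void Increment() {
    rtc::CritScope lock(&crit_);  // RAII lock, released at end of scope.
    ++count_;
  }

 private:
  rtc::CriticalSection crit_;
  int count_ = 0;
};

}  // namespace
```

In `AudioCodingModuleImpl` this substitution is applied mechanically at every locking site (`acm_crit_sect_` and `callback_crit_sect_`), so behavior is unchanged; only the lock type and the construction syntax differ.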