| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 17 matching lines...) | |
| 28 #include "webrtc/system_wrappers/include/clock.h" | 28 #include "webrtc/system_wrappers/include/clock.h" |
| 29 #include "webrtc/system_wrappers/include/tick_util.h" | 29 #include "webrtc/system_wrappers/include/tick_util.h" |
| 30 #include "webrtc/system_wrappers/include/trace.h" | 30 #include "webrtc/system_wrappers/include/trace.h" |
| 31 | 31 |
| 32 namespace webrtc { | 32 namespace webrtc { |
| 33 | 33 |
| 34 namespace acm2 { | 34 namespace acm2 { |
| 35 | 35 |
| 36 namespace { | 36 namespace { |
| 37 | 37 |
| 38 // |vad_activity_| field of |audio_frame| is set to |previous_audio_activity_| | |
| 39 // before the call to this function. | |
| 40 void SetAudioFrameActivityAndType(bool vad_enabled, | |
| 41 NetEqOutputType type, | |
| 42 AudioFrame* audio_frame) { | |
| 43 if (vad_enabled) { | |
| 44 switch (type) { | |
| 45 case kOutputNormal: { | |
| 46 audio_frame->vad_activity_ = AudioFrame::kVadActive; | |
| 47 audio_frame->speech_type_ = AudioFrame::kNormalSpeech; | |
| 48 break; | |
| 49 } | |
| 50 case kOutputVADPassive: { | |
| 51 audio_frame->vad_activity_ = AudioFrame::kVadPassive; | |
| 52 audio_frame->speech_type_ = AudioFrame::kNormalSpeech; | |
| 53 break; | |
| 54 } | |
| 55 case kOutputCNG: { | |
| 56 audio_frame->vad_activity_ = AudioFrame::kVadPassive; | |
| 57 audio_frame->speech_type_ = AudioFrame::kCNG; | |
| 58 break; | |
| 59 } | |
| 60 case kOutputPLC: { | |
| 61 // Don't change |audio_frame->vad_activity_|, it should be the same as | |
| 62 // |previous_audio_activity_|. | |
| 63 audio_frame->speech_type_ = AudioFrame::kPLC; | |
| 64 break; | |
| 65 } | |
| 66 case kOutputPLCtoCNG: { | |
| 67 audio_frame->vad_activity_ = AudioFrame::kVadPassive; | |
| 68 audio_frame->speech_type_ = AudioFrame::kPLCCNG; | |
| 69 break; | |
| 70 } | |
| 71 default: | |
| 72 assert(false); | |
| 73 } | |
| 74 } else { | |
| 75 // Always return kVadUnknown when receive VAD is inactive | |
| 76 audio_frame->vad_activity_ = AudioFrame::kVadUnknown; | |
| 77 switch (type) { | |
| 78 case kOutputNormal: { | |
| 79 audio_frame->speech_type_ = AudioFrame::kNormalSpeech; | |
| 80 break; | |
| 81 } | |
| 82 case kOutputCNG: { | |
| 83 audio_frame->speech_type_ = AudioFrame::kCNG; | |
| 84 break; | |
| 85 } | |
| 86 case kOutputPLC: { | |
| 87 audio_frame->speech_type_ = AudioFrame::kPLC; | |
| 88 break; | |
| 89 } | |
| 90 case kOutputPLCtoCNG: { | |
| 91 audio_frame->speech_type_ = AudioFrame::kPLCCNG; | |
| 92 break; | |
| 93 } | |
| 94 case kOutputVADPassive: { | |
| 95 // Normally, we should not get any VAD decision if post-decoding VAD is | |
| 96 // not active. However, if post-decoding VAD has been active and then | |
| 97 // disabled, we might be here for a couple of frames. | |
| 98 audio_frame->speech_type_ = AudioFrame::kNormalSpeech; | |
| 99 LOG(WARNING) << "Post-decoding VAD is disabled but output is " | |
| 100 << "labeled VAD-passive"; | |
| 101 break; | |
| 102 } | |
| 103 default: | |
| 104 assert(false); | |
| 105 } | |
| 106 } | |
| 107 } | |
| 108 | |
| 109 // Is the given codec a CNG codec? | 38 // Is the given codec a CNG codec? |
| 110 // TODO(kwiberg): Move to RentACodec. | 39 // TODO(kwiberg): Move to RentACodec. |
| 111 bool IsCng(int codec_id) { | 40 bool IsCng(int codec_id) { |
| 112 auto i = RentACodec::CodecIdFromIndex(codec_id); | 41 auto i = RentACodec::CodecIdFromIndex(codec_id); |
| 113 return (i && (*i == RentACodec::CodecId::kCNNB || | 42 return (i && (*i == RentACodec::CodecId::kCNNB || |
| 114 *i == RentACodec::CodecId::kCNWB || | 43 *i == RentACodec::CodecId::kCNWB || |
| 115 *i == RentACodec::CodecId::kCNSWB || | 44 *i == RentACodec::CodecId::kCNSWB || |
| 116 *i == RentACodec::CodecId::kCNFB)); | 45 *i == RentACodec::CodecId::kCNFB)); |
| 117 } | 46 } |
| 118 | 47 |
| 119 } // namespace | 48 } // namespace |
| 120 | 49 |
| 121 AcmReceiver::AcmReceiver(const AudioCodingModule::Config& config) | 50 AcmReceiver::AcmReceiver(const AudioCodingModule::Config& config) |
| 122 : last_audio_decoder_(nullptr), | 51 : last_audio_decoder_(nullptr), |
| 123 previous_audio_activity_(AudioFrame::kVadPassive), | |
| 124 last_audio_buffer_(new int16_t[AudioFrame::kMaxDataSizeSamples]), | 52 last_audio_buffer_(new int16_t[AudioFrame::kMaxDataSizeSamples]), |
| 125 neteq_(NetEq::Create(config.neteq_config)), | 53 neteq_(NetEq::Create(config.neteq_config)), |
| 126 vad_enabled_(config.neteq_config.enable_post_decode_vad), | |
| 127 clock_(config.clock), | 54 clock_(config.clock), |
| 128 resampled_last_output_frame_(true) { | 55 resampled_last_output_frame_(true) { |
| 129 assert(clock_); | 56 assert(clock_); |
| 130 memset(last_audio_buffer_.get(), 0, AudioFrame::kMaxDataSizeSamples); | 57 memset(last_audio_buffer_.get(), 0, AudioFrame::kMaxDataSizeSamples); |
| 131 } | 58 } |
| 132 | 59 |
| 133 AcmReceiver::~AcmReceiver() { | 60 AcmReceiver::~AcmReceiver() { |
| 134 delete neteq_; | 61 delete neteq_; |
| 135 } | 62 } |
| 136 | 63 |
| (...skipping 120 matching lines...) | |
| 257 } else { | 184 } else { |
| 258 resampled_last_output_frame_ = false; | 185 resampled_last_output_frame_ = false; |
| 259 // We might end up here ONLY if codec is changed. | 186 // We might end up here ONLY if codec is changed. |
| 260 } | 187 } |
| 261 | 188 |
| 262 // Store current audio in |last_audio_buffer_| for next time. | 189 // Store current audio in |last_audio_buffer_| for next time. |
| 263 memcpy(last_audio_buffer_.get(), audio_frame->data_, | 190 memcpy(last_audio_buffer_.get(), audio_frame->data_, |
| 264 sizeof(int16_t) * audio_frame->samples_per_channel_ * | 191 sizeof(int16_t) * audio_frame->samples_per_channel_ * |
| 265 audio_frame->num_channels_); | 192 audio_frame->num_channels_); |
| 266 | 193 |
| 267 // Should set |vad_activity| before calling SetAudioFrameActivityAndType(). | |
| 268 audio_frame->vad_activity_ = previous_audio_activity_; | |
| 269 SetAudioFrameActivityAndType(vad_enabled_, type, audio_frame); | |
| 270 previous_audio_activity_ = audio_frame->vad_activity_; | |
| 271 call_stats_.DecodedByNetEq(audio_frame->speech_type_); | 194 call_stats_.DecodedByNetEq(audio_frame->speech_type_); |
| 272 | 195 |
| 273 // Computes the RTP timestamp of the first sample in |audio_frame| from | 196 // Computes the RTP timestamp of the first sample in |audio_frame| from |
| 274 // |GetPlayoutTimestamp|, which is the timestamp of the last sample of | 197 // |GetPlayoutTimestamp|, which is the timestamp of the last sample of |
| 275 // |audio_frame|. | 198 // |audio_frame|. |
| 276 // TODO(henrik.lundin) Move setting of audio_frame->timestamp_ inside NetEq. | 199 // TODO(henrik.lundin) Move setting of audio_frame->timestamp_ inside NetEq. |
| 277 uint32_t playout_timestamp = 0; | 200 uint32_t playout_timestamp = 0; |
| 278 if (GetPlayoutTimestamp(&playout_timestamp)) { | 201 if (GetPlayoutTimestamp(&playout_timestamp)) { |
| 279 audio_frame->timestamp_ = playout_timestamp - | 202 audio_frame->timestamp_ = playout_timestamp - |
| 280 static_cast<uint32_t>(audio_frame->samples_per_channel_); | 203 static_cast<uint32_t>(audio_frame->samples_per_channel_); |
| (...skipping 63 matching lines...) | |
| 344 | 267 |
| 345 Decoder decoder; | 268 Decoder decoder; |
| 346 decoder.acm_codec_id = acm_codec_id; | 269 decoder.acm_codec_id = acm_codec_id; |
| 347 decoder.payload_type = payload_type; | 270 decoder.payload_type = payload_type; |
| 348 decoder.channels = channels; | 271 decoder.channels = channels; |
| 349 decoder.sample_rate_hz = sample_rate_hz; | 272 decoder.sample_rate_hz = sample_rate_hz; |
| 350 decoders_[payload_type] = decoder; | 273 decoders_[payload_type] = decoder; |
| 351 return 0; | 274 return 0; |
| 352 } | 275 } |
| 353 | 276 |
| 354 void AcmReceiver::EnableVad() { | |
| 355 neteq_->EnableVad(); | |
| 356 rtc::CritScope lock(&crit_sect_); | |
| 357 vad_enabled_ = true; | |
| 358 } | |
| 359 | |
| 360 void AcmReceiver::DisableVad() { | |
| 361 neteq_->DisableVad(); | |
| 362 rtc::CritScope lock(&crit_sect_); | |
| 363 vad_enabled_ = false; | |
| 364 } | |
| 365 | |
| 366 void AcmReceiver::FlushBuffers() { | 277 void AcmReceiver::FlushBuffers() { |
| 367 neteq_->FlushBuffers(); | 278 neteq_->FlushBuffers(); |
| 368 } | 279 } |
| 369 | 280 |
| 370 // If removing one of the codecs fails, this method continues to remove as | 281 // If removing one of the codecs fails, this method continues to remove as |
| 371 // many as it can. | 282 // many as it can. |
| 372 int AcmReceiver::RemoveAllCodecs() { | 283 int AcmReceiver::RemoveAllCodecs() { |
| 373 int ret_val = 0; | 284 int ret_val = 0; |
| 374 rtc::CritScope lock(&crit_sect_); | 285 rtc::CritScope lock(&crit_sect_); |
| 375 for (auto it = decoders_.begin(); it != decoders_.end(); ) { | 286 for (auto it = decoders_.begin(); it != decoders_.end(); ) { |
| (...skipping 138 matching lines...) | |
| 514 | 425 |
| 515 void AcmReceiver::GetDecodingCallStatistics( | 426 void AcmReceiver::GetDecodingCallStatistics( |
| 516 AudioDecodingCallStats* stats) const { | 427 AudioDecodingCallStats* stats) const { |
| 517 rtc::CritScope lock(&crit_sect_); | 428 rtc::CritScope lock(&crit_sect_); |
| 518 *stats = call_stats_.GetDecodingStatistics(); | 429 *stats = call_stats_.GetDecodingStatistics(); |
| 519 } | 430 } |
| 520 | 431 |
| 521 } // namespace acm2 | 432 } // namespace acm2 |
| 522 | 433 |
| 523 } // namespace webrtc | 434 } // namespace webrtc |
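A minimal configuration sketch, not part of this CL: with EnableVad()/DisableVad() removed from AcmReceiver, post-decode VAD is assumed to be controlled solely through the NetEq config passed at construction (config.neteq_config.enable_post_decode_vad, as used in the constructor above). The helper name and include paths below are assumptions for illustration; only AudioCodingModule::Config, its clock and neteq_config fields, and the AcmReceiver constructor are taken from the diff itself.

#include "webrtc/modules/audio_coding/acm2/acm_receiver.h"
#include "webrtc/modules/audio_coding/include/audio_coding_module.h"
#include "webrtc/system_wrappers/include/clock.h"

// Hypothetical helper: construct an AcmReceiver with post-decode VAD enabled
// through the NetEq config instead of the removed EnableVad()/DisableVad().
void ConfigureReceiverWithPostDecodeVad(webrtc::Clock* clock) {
  webrtc::AudioCodingModule::Config config;
  config.clock = clock;  // AcmReceiver asserts that a clock is supplied.
  config.neteq_config.enable_post_decode_vad = true;  // VAD decision now comes from NetEq.
  webrtc::acm2::AcmReceiver receiver(config);
  // Register decoders and insert packets on |receiver| as before; no separate
  // VAD enable call is needed.
}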