OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 368 matching lines...) |
379 | 379 |
380 int32_t Channel::InFrameType(FrameType frame_type) { | 380 int32_t Channel::InFrameType(FrameType frame_type) { |
381 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | 381 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), |
382 "Channel::InFrameType(frame_type=%d)", frame_type); | 382 "Channel::InFrameType(frame_type=%d)", frame_type); |
383 | 383 |
384 rtc::CritScope cs(&_callbackCritSect); | 384 rtc::CritScope cs(&_callbackCritSect); |
385 _sendFrameType = (frame_type == kAudioFrameSpeech); | 385 _sendFrameType = (frame_type == kAudioFrameSpeech); |
386 return 0; | 386 return 0; |
387 } | 387 } |
388 | 388 |
389 int32_t Channel::OnRxVadDetected(int vadDecision) { | |
390 rtc::CritScope cs(&_callbackCritSect); | |
391 if (_rxVadObserverPtr) { | |
392 _rxVadObserverPtr->OnRxVad(_channelId, vadDecision); | |
393 } | |
394 | |
395 return 0; | |
396 } | |
397 | |
398 bool Channel::SendRtp(const uint8_t* data, | 389 bool Channel::SendRtp(const uint8_t* data, |
399 size_t len, | 390 size_t len, |
400 const PacketOptions& options) { | 391 const PacketOptions& options) { |
401 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | 392 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), |
402 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len); | 393 "Channel::SendPacket(channel=%d, len=%" PRIuS ")", len); |
403 | 394 |
404 rtc::CritScope cs(&_callbackCritSect); | 395 rtc::CritScope cs(&_callbackCritSect); |
405 | 396 |
406 if (_transportPtr == NULL) { | 397 if (_transportPtr == NULL) { |
407 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId), | 398 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, _channelId), |
(...skipping 169 matching lines...) |
577 return MixerParticipant::AudioFrameInfo::kError; | 568 return MixerParticipant::AudioFrameInfo::kError; |
578 } | 569 } |
579 | 570 |
580 if (muted) { | 571 if (muted) { |
581 // TODO(henrik.lundin): We should be able to do better than this. But we | 572 // TODO(henrik.lundin): We should be able to do better than this. But we |
582 // will have to go through all the cases below where the audio samples may | 573 // will have to go through all the cases below where the audio samples may |
583 // be used, and handle the muted case in some way. | 574 // be used, and handle the muted case in some way. |
584 audioFrame->Mute(); | 575 audioFrame->Mute(); |
585 } | 576 } |
586 | 577 |
587 if (_RxVadDetection) { | |
588 UpdateRxVadDetection(*audioFrame); | |
589 } | |
590 | |
591 // Convert module ID to internal VoE channel ID | 578 // Convert module ID to internal VoE channel ID |
592 audioFrame->id_ = VoEChannelId(audioFrame->id_); | 579 audioFrame->id_ = VoEChannelId(audioFrame->id_); |
593 // Store speech type for dead-or-alive detection | 580 // Store speech type for dead-or-alive detection |
594 _outputSpeechType = audioFrame->speech_type_; | 581 _outputSpeechType = audioFrame->speech_type_; |
595 | 582 |
596 ChannelState::State state = channel_state_.Get(); | 583 ChannelState::State state = channel_state_.Get(); |
597 | 584 |
598 if (state.rx_apm_is_enabled) { | |
599 int err = rx_audioproc_->ProcessStream(audioFrame); | |
600 if (err) { | |
601 LOG(LS_ERROR) << "ProcessStream() error: " << err; | |
602 assert(false); | |
603 } | |
604 } | |
605 | |
606 { | 585 { |
607 // Pass the audio buffers to an optional sink callback, before applying | 586 // Pass the audio buffers to an optional sink callback, before applying |
608 // scaling/panning, as that applies to the mix operation. | 587 // scaling/panning, as that applies to the mix operation. |
609 // External recipients of the audio (e.g. via AudioTrack), will do their | 588 // External recipients of the audio (e.g. via AudioTrack), will do their |
610 // own mixing/dynamic processing. | 589 // own mixing/dynamic processing. |
611 rtc::CritScope cs(&_callbackCritSect); | 590 rtc::CritScope cs(&_callbackCritSect); |
612 if (audio_sink_) { | 591 if (audio_sink_) { |
613 AudioSinkInterface::Data data( | 592 AudioSinkInterface::Data data( |
614 &audioFrame->data_[0], audioFrame->samples_per_channel_, | 593 &audioFrame->data_[0], audioFrame->samples_per_channel_, |
615 audioFrame->sample_rate_hz_, audioFrame->num_channels_, | 594 audioFrame->sample_rate_hz_, audioFrame->num_channels_, |
(...skipping 239 matching lines...) |
855 capture_start_rtp_time_stamp_(-1), | 834 capture_start_rtp_time_stamp_(-1), |
856 capture_start_ntp_time_ms_(-1), | 835 capture_start_ntp_time_ms_(-1), |
857 _engineStatisticsPtr(NULL), | 836 _engineStatisticsPtr(NULL), |
858 _outputMixerPtr(NULL), | 837 _outputMixerPtr(NULL), |
859 _transmitMixerPtr(NULL), | 838 _transmitMixerPtr(NULL), |
860 _moduleProcessThreadPtr(NULL), | 839 _moduleProcessThreadPtr(NULL), |
861 _audioDeviceModulePtr(NULL), | 840 _audioDeviceModulePtr(NULL), |
862 _voiceEngineObserverPtr(NULL), | 841 _voiceEngineObserverPtr(NULL), |
863 _callbackCritSectPtr(NULL), | 842 _callbackCritSectPtr(NULL), |
864 _transportPtr(NULL), | 843 _transportPtr(NULL), |
865 _rxVadObserverPtr(NULL), | |
866 _oldVadDecision(-1), | |
867 _sendFrameType(0), | 844 _sendFrameType(0), |
868 _externalMixing(false), | 845 _externalMixing(false), |
869 _mixFileWithMicrophone(false), | 846 _mixFileWithMicrophone(false), |
870 input_mute_(false), | 847 input_mute_(false), |
871 previous_frame_muted_(false), | 848 previous_frame_muted_(false), |
872 _panLeft(1.0f), | 849 _panLeft(1.0f), |
873 _panRight(1.0f), | 850 _panRight(1.0f), |
874 _outputGain(1.0f), | 851 _outputGain(1.0f), |
875 _lastLocalTimeStamp(0), | 852 _lastLocalTimeStamp(0), |
876 _lastPayloadType(0), | 853 _lastPayloadType(0), |
877 _includeAudioLevelIndication(false), | 854 _includeAudioLevelIndication(false), |
878 _outputSpeechType(AudioFrame::kNormalSpeech), | 855 _outputSpeechType(AudioFrame::kNormalSpeech), |
879 _RxVadDetection(false), | |
880 _rxAgcIsEnabled(false), | |
881 _rxNsIsEnabled(false), | |
882 restored_packet_in_use_(false), | 856 restored_packet_in_use_(false), |
883 rtcp_observer_(new VoERtcpObserver(this)), | 857 rtcp_observer_(new VoERtcpObserver(this)), |
884 network_predictor_(new NetworkPredictor(Clock::GetRealTimeClock())), | 858 network_predictor_(new NetworkPredictor(Clock::GetRealTimeClock())), |
885 associate_send_channel_(ChannelOwner(nullptr)), | 859 associate_send_channel_(ChannelOwner(nullptr)), |
886 pacing_enabled_(config.Get<VoicePacing>().enabled), | 860 pacing_enabled_(config.Get<VoicePacing>().enabled), |
887 feedback_observer_proxy_(new TransportFeedbackProxy()), | 861 feedback_observer_proxy_(new TransportFeedbackProxy()), |
888 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), | 862 seq_num_allocator_proxy_(new TransportSequenceNumberProxy()), |
889 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()), | 863 rtp_packet_sender_proxy_(new RtpPacketSenderProxy()), |
890 retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(), | 864 retransmission_rate_limiter_(new RateLimiter(Clock::GetRealTimeClock(), |
891 kMaxRetransmissionWindowMs)), | 865 kMaxRetransmissionWindowMs)), |
(...skipping 30 matching lines...) |
922 configuration.event_log = &(*event_log_proxy_); | 896 configuration.event_log = &(*event_log_proxy_); |
923 configuration.retransmission_rate_limiter = | 897 configuration.retransmission_rate_limiter = |
924 retransmission_rate_limiter_.get(); | 898 retransmission_rate_limiter_.get(); |
925 | 899 |
926 _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration)); | 900 _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration)); |
927 _rtpRtcpModule->SetSendingMediaStatus(false); | 901 _rtpRtcpModule->SetSendingMediaStatus(false); |
928 | 902 |
929 statistics_proxy_.reset(new StatisticsProxy(_rtpRtcpModule->SSRC())); | 903 statistics_proxy_.reset(new StatisticsProxy(_rtpRtcpModule->SSRC())); |
930 rtp_receive_statistics_->RegisterRtcpStatisticsCallback( | 904 rtp_receive_statistics_->RegisterRtcpStatisticsCallback( |
931 statistics_proxy_.get()); | 905 statistics_proxy_.get()); |
932 | |
933 Config audioproc_config; | |
934 audioproc_config.Set<ExperimentalAgc>(new ExperimentalAgc(false)); | |
935 rx_audioproc_.reset(AudioProcessing::Create(audioproc_config)); | |
936 } | 906 } |
937 | 907 |
938 Channel::~Channel() { | 908 Channel::~Channel() { |
939 rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL); | 909 rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL); |
940 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId), | 910 WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, _channelId), |
941 "Channel::~Channel() - dtor"); | 911 "Channel::~Channel() - dtor"); |
942 | 912 |
943 if (_outputExternalMedia) { | 913 if (_outputExternalMedia) { |
944 DeRegisterExternalMediaProcessing(kPlaybackPerChannel); | 914 DeRegisterExternalMediaProcessing(kPlaybackPerChannel); |
945 } | 915 } |
(...skipping 135 matching lines...) |
1081 !RegisterReceiveCodec(&audio_coding_, &rent_a_codec_, codec) || | 1051 !RegisterReceiveCodec(&audio_coding_, &rent_a_codec_, codec) || |
1082 _rtpRtcpModule->RegisterSendPayload(codec) == -1) { | 1052 _rtpRtcpModule->RegisterSendPayload(codec) == -1) { |
1083 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), | 1053 WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId), |
1084 "Channel::Init() failed to register CN (%d/%d) " | 1054 "Channel::Init() failed to register CN (%d/%d) " |
1085 "correctly - 1", | 1055 "correctly - 1", |
1086 codec.pltype, codec.plfreq); | 1056 codec.pltype, codec.plfreq); |
1087 } | 1057 } |
1088 } | 1058 } |
1089 } | 1059 } |
1090 | 1060 |
1091 if (rx_audioproc_->noise_suppression()->set_level(kDefaultNsMode) != 0) { | |
1092 LOG(LS_ERROR) << "noise_suppression()->set_level(kDefaultNsMode) failed."; | |
1093 return -1; | |
1094 } | |
1095 if (rx_audioproc_->gain_control()->set_mode(kDefaultRxAgcMode) != 0) { | |
1096 LOG(LS_ERROR) << "gain_control()->set_mode(kDefaultRxAgcMode) failed."; | |
1097 return -1; | |
1098 } | |
1099 | |
1100 return 0; | 1061 return 0; |
1101 } | 1062 } |
1102 | 1063 |
1103 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, | 1064 int32_t Channel::SetEngineInformation(Statistics& engineStatistics, |
1104 OutputMixer& outputMixer, | 1065 OutputMixer& outputMixer, |
1105 voe::TransmitMixer& transmitMixer, | 1066 voe::TransmitMixer& transmitMixer, |
1106 ProcessThread& moduleProcessThread, | 1067 ProcessThread& moduleProcessThread, |
1107 AudioDeviceModule& audioDeviceModule, | 1068 AudioDeviceModule& audioDeviceModule, |
1108 VoiceEngineObserver* voiceEngineObserver, | 1069 VoiceEngineObserver* voiceEngineObserver, |
1109 rtc::CriticalSection* callbackCritSect) { | 1070 rtc::CriticalSection* callbackCritSect) { |
(...skipping 1202 matching lines...) |
2312 _engineStatisticsPtr->SetLastError( | 2273 _engineStatisticsPtr->SetLastError( |
2313 VE_RTP_RTCP_MODULE_ERROR, kTraceError, | 2274 VE_RTP_RTCP_MODULE_ERROR, kTraceError, |
2314 "SetSendTelephoneEventPayloadType() failed to register send" | 2275 "SetSendTelephoneEventPayloadType() failed to register send" |
2315 "payload type"); | 2276 "payload type"); |
2316 return -1; | 2277 return -1; |
2317 } | 2278 } |
2318 } | 2279 } |
2319 return 0; | 2280 return 0; |
2320 } | 2281 } |
2321 | 2282 |
2322 int Channel::UpdateRxVadDetection(AudioFrame& audioFrame) { | |
2323 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | |
2324 "Channel::UpdateRxVadDetection()"); | |
2325 | |
2326 int vadDecision = 1; | |
2327 | |
2328 vadDecision = (audioFrame.vad_activity_ == AudioFrame::kVadActive) ? 1 : 0; | |
2329 | |
2330 if ((vadDecision != _oldVadDecision) && _rxVadObserverPtr) { | |
2331 OnRxVadDetected(vadDecision); | |
2332 _oldVadDecision = vadDecision; | |
2333 } | |
2334 | |
2335 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, _channelId), | |
2336 "Channel::UpdateRxVadDetection() => vadDecision=%d", | |
2337 vadDecision); | |
2338 return 0; | |
2339 } | |
2340 | |
2341 int Channel::RegisterRxVadObserver(VoERxVadCallback& observer) { | |
2342 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
2343 "Channel::RegisterRxVadObserver()"); | |
2344 rtc::CritScope cs(&_callbackCritSect); | |
2345 | |
2346 if (_rxVadObserverPtr) { | |
2347 _engineStatisticsPtr->SetLastError( | |
2348 VE_INVALID_OPERATION, kTraceError, | |
2349 "RegisterRxVadObserver() observer already enabled"); | |
2350 return -1; | |
2351 } | |
2352 _rxVadObserverPtr = &observer; | |
2353 _RxVadDetection = true; | |
2354 return 0; | |
2355 } | |
2356 | |
2357 int Channel::DeRegisterRxVadObserver() { | |
2358 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
2359 "Channel::DeRegisterRxVadObserver()"); | |
2360 rtc::CritScope cs(&_callbackCritSect); | |
2361 | |
2362 if (!_rxVadObserverPtr) { | |
2363 _engineStatisticsPtr->SetLastError( | |
2364 VE_INVALID_OPERATION, kTraceWarning, | |
2365 "DeRegisterRxVadObserver() observer already disabled"); | |
2366 return 0; | |
2367 } | |
2368 _rxVadObserverPtr = NULL; | |
2369 _RxVadDetection = false; | |
2370 return 0; | |
2371 } | |
2372 | |
2373 int Channel::VoiceActivityIndicator(int& activity) { | 2283 int Channel::VoiceActivityIndicator(int& activity) { |
2374 activity = _sendFrameType; | 2284 activity = _sendFrameType; |
2375 return 0; | 2285 return 0; |
2376 } | 2286 } |
2377 | 2287 |
2378 #ifdef WEBRTC_VOICE_ENGINE_AGC | |
2379 | |
2380 int Channel::SetRxAgcStatus(bool enable, AgcModes mode) { | |
2381 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
2382 "Channel::SetRxAgcStatus(enable=%d, mode=%d)", (int)enable, | |
2383 (int)mode); | |
2384 | |
2385 GainControl::Mode agcMode = kDefaultRxAgcMode; | |
2386 switch (mode) { | |
2387 case kAgcDefault: | |
2388 break; | |
2389 case kAgcUnchanged: | |
2390 agcMode = rx_audioproc_->gain_control()->mode(); | |
2391 break; | |
2392 case kAgcFixedDigital: | |
2393 agcMode = GainControl::kFixedDigital; | |
2394 break; | |
2395 case kAgcAdaptiveDigital: | |
2396 agcMode = GainControl::kAdaptiveDigital; | |
2397 break; | |
2398 default: | |
2399 _engineStatisticsPtr->SetLastError(VE_INVALID_ARGUMENT, kTraceError, | |
2400 "SetRxAgcStatus() invalid Agc mode"); | |
2401 return -1; | |
2402 } | |
2403 | |
2404 if (rx_audioproc_->gain_control()->set_mode(agcMode) != 0) { | |
2405 _engineStatisticsPtr->SetLastError( | |
2406 VE_APM_ERROR, kTraceError, "SetRxAgcStatus() failed to set Agc mode"); | |
2407 return -1; | |
2408 } | |
2409 if (rx_audioproc_->gain_control()->Enable(enable) != 0) { | |
2410 _engineStatisticsPtr->SetLastError( | |
2411 VE_APM_ERROR, kTraceError, "SetRxAgcStatus() failed to set Agc state"); | |
2412 return -1; | |
2413 } | |
2414 | |
2415 _rxAgcIsEnabled = enable; | |
2416 channel_state_.SetRxApmIsEnabled(_rxAgcIsEnabled || _rxNsIsEnabled); | |
2417 | |
2418 return 0; | |
2419 } | |
2420 | |
2421 int Channel::GetRxAgcStatus(bool& enabled, AgcModes& mode) { | |
2422 bool enable = rx_audioproc_->gain_control()->is_enabled(); | |
2423 GainControl::Mode agcMode = rx_audioproc_->gain_control()->mode(); | |
2424 | |
2425 enabled = enable; | |
2426 | |
2427 switch (agcMode) { | |
2428 case GainControl::kFixedDigital: | |
2429 mode = kAgcFixedDigital; | |
2430 break; | |
2431 case GainControl::kAdaptiveDigital: | |
2432 mode = kAgcAdaptiveDigital; | |
2433 break; | |
2434 default: | |
2435 _engineStatisticsPtr->SetLastError(VE_APM_ERROR, kTraceError, | |
2436 "GetRxAgcStatus() invalid Agc mode"); | |
2437 return -1; | |
2438 } | |
2439 | |
2440 return 0; | |
2441 } | |
2442 | |
2443 int Channel::SetRxAgcConfig(AgcConfig config) { | |
2444 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
2445 "Channel::SetRxAgcConfig()"); | |
2446 | |
2447 if (rx_audioproc_->gain_control()->set_target_level_dbfs( | |
2448 config.targetLeveldBOv) != 0) { | |
2449 _engineStatisticsPtr->SetLastError( | |
2450 VE_APM_ERROR, kTraceError, | |
2451 "SetRxAgcConfig() failed to set target peak |level|" | |
2452 "(or envelope) of the Agc"); | |
2453 return -1; | |
2454 } | |
2455 if (rx_audioproc_->gain_control()->set_compression_gain_db( | |
2456 config.digitalCompressionGaindB) != 0) { | |
2457 _engineStatisticsPtr->SetLastError( | |
2458 VE_APM_ERROR, kTraceError, | |
2459 "SetRxAgcConfig() failed to set the range in |gain| the" | |
2460 " digital compression stage may apply"); | |
2461 return -1; | |
2462 } | |
2463 if (rx_audioproc_->gain_control()->enable_limiter(config.limiterEnable) != | |
2464 0) { | |
2465 _engineStatisticsPtr->SetLastError( | |
2466 VE_APM_ERROR, kTraceError, | |
2467 "SetRxAgcConfig() failed to set hard limiter to the signal"); | |
2468 return -1; | |
2469 } | |
2470 | |
2471 return 0; | |
2472 } | |
2473 | |
2474 int Channel::GetRxAgcConfig(AgcConfig& config) { | |
2475 config.targetLeveldBOv = rx_audioproc_->gain_control()->target_level_dbfs(); | |
2476 config.digitalCompressionGaindB = | |
2477 rx_audioproc_->gain_control()->compression_gain_db(); | |
2478 config.limiterEnable = rx_audioproc_->gain_control()->is_limiter_enabled(); | |
2479 | |
2480 return 0; | |
2481 } | |
2482 | |
2483 #endif // #ifdef WEBRTC_VOICE_ENGINE_AGC | |
2484 | |
2485 #ifdef WEBRTC_VOICE_ENGINE_NR | |
2486 | |
2487 int Channel::SetRxNsStatus(bool enable, NsModes mode) { | |
2488 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | |
2489 "Channel::SetRxNsStatus(enable=%d, mode=%d)", (int)enable, | |
2490 (int)mode); | |
2491 | |
2492 NoiseSuppression::Level nsLevel = kDefaultNsMode; | |
2493 switch (mode) { | |
2494 case kNsDefault: | |
2495 break; | |
2496 case kNsUnchanged: | |
2497 nsLevel = rx_audioproc_->noise_suppression()->level(); | |
2498 break; | |
2499 case kNsConference: | |
2500 nsLevel = NoiseSuppression::kHigh; | |
2501 break; | |
2502 case kNsLowSuppression: | |
2503 nsLevel = NoiseSuppression::kLow; | |
2504 break; | |
2505 case kNsModerateSuppression: | |
2506 nsLevel = NoiseSuppression::kModerate; | |
2507 break; | |
2508 case kNsHighSuppression: | |
2509 nsLevel = NoiseSuppression::kHigh; | |
2510 break; | |
2511 case kNsVeryHighSuppression: | |
2512 nsLevel = NoiseSuppression::kVeryHigh; | |
2513 break; | |
2514 } | |
2515 | |
2516 if (rx_audioproc_->noise_suppression()->set_level(nsLevel) != 0) { | |
2517 _engineStatisticsPtr->SetLastError( | |
2518 VE_APM_ERROR, kTraceError, "SetRxNsStatus() failed to set NS level"); | |
2519 return -1; | |
2520 } | |
2521 if (rx_audioproc_->noise_suppression()->Enable(enable) != 0) { | |
2522 _engineStatisticsPtr->SetLastError( | |
2523 VE_APM_ERROR, kTraceError, "SetRxNsStatus() failed to set NS state"); | |
2524 return -1; | |
2525 } | |
2526 | |
2527 _rxNsIsEnabled = enable; | |
2528 channel_state_.SetRxApmIsEnabled(_rxAgcIsEnabled || _rxNsIsEnabled); | |
2529 | |
2530 return 0; | |
2531 } | |
2532 | |
2533 int Channel::GetRxNsStatus(bool& enabled, NsModes& mode) { | |
2534 bool enable = rx_audioproc_->noise_suppression()->is_enabled(); | |
2535 NoiseSuppression::Level ncLevel = rx_audioproc_->noise_suppression()->level(); | |
2536 | |
2537 enabled = enable; | |
2538 | |
2539 switch (ncLevel) { | |
2540 case NoiseSuppression::kLow: | |
2541 mode = kNsLowSuppression; | |
2542 break; | |
2543 case NoiseSuppression::kModerate: | |
2544 mode = kNsModerateSuppression; | |
2545 break; | |
2546 case NoiseSuppression::kHigh: | |
2547 mode = kNsHighSuppression; | |
2548 break; | |
2549 case NoiseSuppression::kVeryHigh: | |
2550 mode = kNsVeryHighSuppression; | |
2551 break; | |
2552 } | |
2553 | |
2554 return 0; | |
2555 } | |
2556 | |
2557 #endif // #ifdef WEBRTC_VOICE_ENGINE_NR | |
2558 | |
2559 int Channel::SetLocalSSRC(unsigned int ssrc) { | 2288 int Channel::SetLocalSSRC(unsigned int ssrc) { |
2560 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), | 2289 WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId), |
2561 "Channel::SetLocalSSRC()"); | 2290 "Channel::SetLocalSSRC()"); |
2562 if (channel_state_.Get().sending) { | 2291 if (channel_state_.Get().sending) { |
2563 _engineStatisticsPtr->SetLastError(VE_ALREADY_SENDING, kTraceError, | 2292 _engineStatisticsPtr->SetLastError(VE_ALREADY_SENDING, kTraceError, |
2564 "SetLocalSSRC() already sending"); | 2293 "SetLocalSSRC() already sending"); |
2565 return -1; | 2294 return -1; |
2566 } | 2295 } |
2567 _rtpRtcpModule->SetSSRC(ssrc); | 2296 _rtpRtcpModule->SetSSRC(ssrc); |
2568 return 0; | 2297 return 0; |
(...skipping 917 matching lines...) |
3486 int64_t min_rtt = 0; | 3215 int64_t min_rtt = 0; |
3487 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != | 3216 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) != |
3488 0) { | 3217 0) { |
3489 return 0; | 3218 return 0; |
3490 } | 3219 } |
3491 return rtt; | 3220 return rtt; |
3492 } | 3221 } |
3493 | 3222 |
3494 } // namespace voe | 3223 } // namespace voe |
3495 } // namespace webrtc | 3224 } // namespace webrtc |