OLD | NEW |
| (Empty) |
1 /* | |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license | |
5 * that can be found in the LICENSE file in the root of the source | |
6 * tree. An additional intellectual property rights grant can be found | |
7 * in the file PATENTS. All contributing project authors may | |
8 * be found in the AUTHORS file in the root of the source tree. | |
9 */ | |
10 | |
11 #include "webrtc/voice_engine/voe_audio_processing_impl.h" | |
12 | |
13 #include "webrtc/base/logging.h" | |
14 #include "webrtc/modules/audio_processing/include/audio_processing.h" | |
15 #include "webrtc/system_wrappers/include/trace.h" | |
16 #include "webrtc/voice_engine/channel.h" | |
17 #include "webrtc/voice_engine/include/voe_errors.h" | |
18 #include "webrtc/voice_engine/transmit_mixer.h" | |
19 #include "webrtc/voice_engine/voice_engine_impl.h" | |
20 | |
// TODO(andrew): move to a common place.
// Bails out of the enclosing API call with -1 if the voice engine has not
// been initialized, recording VE_NOT_INITED as the last error.
#define WEBRTC_VOICE_INIT_CHECK()                        \
  do {                                                   \
    if (!_shared->statistics().Initialized()) {          \
      _shared->SetLastError(VE_NOT_INITED, kTraceError); \
      return -1;                                         \
    }                                                    \
  } while (0)

// Same init guard for bool-returning API calls: bails out with false.
#define WEBRTC_VOICE_INIT_CHECK_BOOL()                   \
  do {                                                   \
    if (!_shared->statistics().Initialized()) {          \
      _shared->SetLastError(VE_NOT_INITED, kTraceError); \
      return false;                                      \
    }                                                    \
  } while (0)
37 | |
38 namespace webrtc { | |
39 | |
// Default echo-control algorithm: the low-complexity AECM on mobile
// platforms, the full AEC everywhere else (see SetEcStatus()).
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
static const EcModes kDefaultEcMode = kEcAecm;
#else
static const EcModes kDefaultEcMode = kEcAec;
#endif
45 | |
46 VoEAudioProcessing* VoEAudioProcessing::GetInterface(VoiceEngine* voiceEngine) { | |
47 if (NULL == voiceEngine) { | |
48 return NULL; | |
49 } | |
50 VoiceEngineImpl* s = static_cast<VoiceEngineImpl*>(voiceEngine); | |
51 s->AddRef(); | |
52 return s; | |
53 } | |
54 | |
// Ctor: caches the shared engine state and starts in AEC mode exactly when
// the platform default (kDefaultEcMode) is the full AEC.
VoEAudioProcessingImpl::VoEAudioProcessingImpl(voe::SharedData* shared)
    : _isAecMode(kDefaultEcMode == kEcAec), _shared(shared) {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "VoEAudioProcessingImpl::VoEAudioProcessingImpl() - ctor");
}
60 | |
// Dtor: only traces; _shared is not owned by this object.
VoEAudioProcessingImpl::~VoEAudioProcessingImpl() {
  WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "VoEAudioProcessingImpl::~VoEAudioProcessingImpl() - dtor");
}
65 | |
66 int VoEAudioProcessingImpl::SetNsStatus(bool enable, NsModes mode) { | |
67 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
68 "SetNsStatus(enable=%d, mode=%d)", enable, mode); | |
69 if (!_shared->statistics().Initialized()) { | |
70 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
71 return -1; | |
72 } | |
73 | |
74 NoiseSuppression::Level nsLevel = kDefaultNsMode; | |
75 switch (mode) { | |
76 case kNsDefault: | |
77 nsLevel = kDefaultNsMode; | |
78 break; | |
79 case kNsUnchanged: | |
80 nsLevel = _shared->audio_processing()->noise_suppression()->level(); | |
81 break; | |
82 case kNsConference: | |
83 nsLevel = NoiseSuppression::kHigh; | |
84 break; | |
85 case kNsLowSuppression: | |
86 nsLevel = NoiseSuppression::kLow; | |
87 break; | |
88 case kNsModerateSuppression: | |
89 nsLevel = NoiseSuppression::kModerate; | |
90 break; | |
91 case kNsHighSuppression: | |
92 nsLevel = NoiseSuppression::kHigh; | |
93 break; | |
94 case kNsVeryHighSuppression: | |
95 nsLevel = NoiseSuppression::kVeryHigh; | |
96 break; | |
97 } | |
98 | |
99 if (_shared->audio_processing()->noise_suppression()->set_level(nsLevel) != | |
100 0) { | |
101 _shared->SetLastError(VE_APM_ERROR, kTraceError, | |
102 "SetNsStatus() failed to set Ns mode"); | |
103 return -1; | |
104 } | |
105 if (_shared->audio_processing()->noise_suppression()->Enable(enable) != 0) { | |
106 _shared->SetLastError(VE_APM_ERROR, kTraceError, | |
107 "SetNsStatus() failed to set Ns state"); | |
108 return -1; | |
109 } | |
110 | |
111 return 0; | |
112 } | |
113 | |
114 int VoEAudioProcessingImpl::GetNsStatus(bool& enabled, NsModes& mode) { | |
115 if (!_shared->statistics().Initialized()) { | |
116 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
117 return -1; | |
118 } | |
119 | |
120 enabled = _shared->audio_processing()->noise_suppression()->is_enabled(); | |
121 NoiseSuppression::Level nsLevel = | |
122 _shared->audio_processing()->noise_suppression()->level(); | |
123 | |
124 switch (nsLevel) { | |
125 case NoiseSuppression::kLow: | |
126 mode = kNsLowSuppression; | |
127 break; | |
128 case NoiseSuppression::kModerate: | |
129 mode = kNsModerateSuppression; | |
130 break; | |
131 case NoiseSuppression::kHigh: | |
132 mode = kNsHighSuppression; | |
133 break; | |
134 case NoiseSuppression::kVeryHigh: | |
135 mode = kNsVeryHighSuppression; | |
136 break; | |
137 } | |
138 return 0; | |
139 } | |
140 | |
141 int VoEAudioProcessingImpl::SetAgcStatus(bool enable, AgcModes mode) { | |
142 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
143 "SetAgcStatus(enable=%d, mode=%d)", enable, mode); | |
144 if (!_shared->statistics().Initialized()) { | |
145 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
146 return -1; | |
147 } | |
148 | |
149 #if defined(WEBRTC_IOS) || defined(ATA) || defined(WEBRTC_ANDROID) | |
150 if (mode == kAgcAdaptiveAnalog) { | |
151 _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError, | |
152 "SetAgcStatus() invalid Agc mode for mobile device"); | |
153 return -1; | |
154 } | |
155 #endif | |
156 | |
157 GainControl::Mode agcMode = kDefaultAgcMode; | |
158 switch (mode) { | |
159 case kAgcDefault: | |
160 agcMode = kDefaultAgcMode; | |
161 break; | |
162 case kAgcUnchanged: | |
163 agcMode = _shared->audio_processing()->gain_control()->mode(); | |
164 break; | |
165 case kAgcFixedDigital: | |
166 agcMode = GainControl::kFixedDigital; | |
167 break; | |
168 case kAgcAdaptiveAnalog: | |
169 agcMode = GainControl::kAdaptiveAnalog; | |
170 break; | |
171 case kAgcAdaptiveDigital: | |
172 agcMode = GainControl::kAdaptiveDigital; | |
173 break; | |
174 } | |
175 | |
176 if (_shared->audio_processing()->gain_control()->set_mode(agcMode) != 0) { | |
177 _shared->SetLastError(VE_APM_ERROR, kTraceError, | |
178 "SetAgcStatus() failed to set Agc mode"); | |
179 return -1; | |
180 } | |
181 if (_shared->audio_processing()->gain_control()->Enable(enable) != 0) { | |
182 _shared->SetLastError(VE_APM_ERROR, kTraceError, | |
183 "SetAgcStatus() failed to set Agc state"); | |
184 return -1; | |
185 } | |
186 | |
187 if (agcMode != GainControl::kFixedDigital) { | |
188 // Set Agc state in the ADM when adaptive Agc mode has been selected. | |
189 // Note that we also enable the ADM Agc when Adaptive Digital mode is | |
190 // used since we want to be able to provide the APM with updated mic | |
191 // levels when the user modifies the mic level manually. | |
192 if (_shared->audio_device()->SetAGC(enable) != 0) { | |
193 _shared->SetLastError(VE_AUDIO_DEVICE_MODULE_ERROR, kTraceWarning, | |
194 "SetAgcStatus() failed to set Agc mode"); | |
195 } | |
196 } | |
197 | |
198 return 0; | |
199 } | |
200 | |
201 int VoEAudioProcessingImpl::GetAgcStatus(bool& enabled, AgcModes& mode) { | |
202 if (!_shared->statistics().Initialized()) { | |
203 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
204 return -1; | |
205 } | |
206 | |
207 enabled = _shared->audio_processing()->gain_control()->is_enabled(); | |
208 GainControl::Mode agcMode = | |
209 _shared->audio_processing()->gain_control()->mode(); | |
210 | |
211 switch (agcMode) { | |
212 case GainControl::kFixedDigital: | |
213 mode = kAgcFixedDigital; | |
214 break; | |
215 case GainControl::kAdaptiveAnalog: | |
216 mode = kAgcAdaptiveAnalog; | |
217 break; | |
218 case GainControl::kAdaptiveDigital: | |
219 mode = kAgcAdaptiveDigital; | |
220 break; | |
221 } | |
222 | |
223 return 0; | |
224 } | |
225 | |
226 int VoEAudioProcessingImpl::SetAgcConfig(AgcConfig config) { | |
227 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
228 "SetAgcConfig()"); | |
229 if (!_shared->statistics().Initialized()) { | |
230 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
231 return -1; | |
232 } | |
233 | |
234 if (_shared->audio_processing()->gain_control()->set_target_level_dbfs( | |
235 config.targetLeveldBOv) != 0) { | |
236 _shared->SetLastError(VE_APM_ERROR, kTraceError, | |
237 "SetAgcConfig() failed to set target peak |level|" | |
238 " (or envelope) of the Agc"); | |
239 return -1; | |
240 } | |
241 if (_shared->audio_processing()->gain_control()->set_compression_gain_db( | |
242 config.digitalCompressionGaindB) != 0) { | |
243 _shared->SetLastError(VE_APM_ERROR, kTraceError, | |
244 "SetAgcConfig() failed to set the range in |gain| " | |
245 "the digital compression stage may apply"); | |
246 return -1; | |
247 } | |
248 if (_shared->audio_processing()->gain_control()->enable_limiter( | |
249 config.limiterEnable) != 0) { | |
250 _shared->SetLastError( | |
251 VE_APM_ERROR, kTraceError, | |
252 "SetAgcConfig() failed to set hard limiter to the signal"); | |
253 return -1; | |
254 } | |
255 | |
256 return 0; | |
257 } | |
258 | |
259 int VoEAudioProcessingImpl::GetAgcConfig(AgcConfig& config) { | |
260 if (!_shared->statistics().Initialized()) { | |
261 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
262 return -1; | |
263 } | |
264 | |
265 config.targetLeveldBOv = | |
266 _shared->audio_processing()->gain_control()->target_level_dbfs(); | |
267 config.digitalCompressionGaindB = | |
268 _shared->audio_processing()->gain_control()->compression_gain_db(); | |
269 config.limiterEnable = | |
270 _shared->audio_processing()->gain_control()->is_limiter_enabled(); | |
271 | |
272 return 0; | |
273 } | |
274 | |
275 bool VoEAudioProcessing::DriftCompensationSupported() { | |
276 #if defined(WEBRTC_DRIFT_COMPENSATION_SUPPORTED) | |
277 return true; | |
278 #else | |
279 return false; | |
280 #endif | |
281 } | |
282 | |
283 int VoEAudioProcessingImpl::EnableDriftCompensation(bool enable) { | |
284 WEBRTC_VOICE_INIT_CHECK(); | |
285 | |
286 if (!DriftCompensationSupported()) { | |
287 _shared->SetLastError( | |
288 VE_APM_ERROR, kTraceWarning, | |
289 "Drift compensation is not supported on this platform."); | |
290 return -1; | |
291 } | |
292 | |
293 EchoCancellation* aec = _shared->audio_processing()->echo_cancellation(); | |
294 if (aec->enable_drift_compensation(enable) != 0) { | |
295 _shared->SetLastError(VE_APM_ERROR, kTraceError, | |
296 "aec->enable_drift_compensation() failed"); | |
297 return -1; | |
298 } | |
299 return 0; | |
300 } | |
301 | |
302 bool VoEAudioProcessingImpl::DriftCompensationEnabled() { | |
303 WEBRTC_VOICE_INIT_CHECK_BOOL(); | |
304 | |
305 EchoCancellation* aec = _shared->audio_processing()->echo_cancellation(); | |
306 return aec->is_drift_compensation_enabled(); | |
307 } | |
308 | |
// Selects and enables/disables the echo-control algorithm: full AEC or the
// low-complexity AECM. Only one can be active at a time, so enabling one
// first disables the other. _isAecMode records which algorithm
// kEcUnchanged refers to on subsequent calls.
int VoEAudioProcessingImpl::SetEcStatus(bool enable, EcModes mode) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "SetEcStatus(enable=%d, mode=%d)", enable, mode);
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  // AEC mode
  if ((mode == kEcDefault) || (mode == kEcConference) || (mode == kEcAec) ||
      ((mode == kEcUnchanged) && (_isAecMode == true))) {
    if (enable) {
      // Disable the AECM before enable the AEC
      if (_shared->audio_processing()->echo_control_mobile()->is_enabled()) {
        _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
                              "SetEcStatus() disable AECM before enabling AEC");
        if (_shared->audio_processing()->echo_control_mobile()->Enable(false) !=
            0) {
          _shared->SetLastError(VE_APM_ERROR, kTraceError,
                                "SetEcStatus() failed to disable AECM");
          return -1;
        }
      }
    }
    if (_shared->audio_processing()->echo_cancellation()->Enable(enable) != 0) {
      _shared->SetLastError(VE_APM_ERROR, kTraceError,
                            "SetEcStatus() failed to set AEC state");
      return -1;
    }
    // kEcConference raises the AEC suppression aggressiveness; all other AEC
    // modes use the moderate level.
    if (mode == kEcConference) {
      if (_shared->audio_processing()
              ->echo_cancellation()
              ->set_suppression_level(EchoCancellation::kHighSuppression) !=
          0) {
        _shared->SetLastError(
            VE_APM_ERROR, kTraceError,
            "SetEcStatus() failed to set aggressiveness to high");
        return -1;
      }
    } else {
      if (_shared->audio_processing()
              ->echo_cancellation()
              ->set_suppression_level(EchoCancellation::kModerateSuppression) !=
          0) {
        _shared->SetLastError(
            VE_APM_ERROR, kTraceError,
            "SetEcStatus() failed to set aggressiveness to moderate");
        return -1;
      }
    }

    _isAecMode = true;
  } else if ((mode == kEcAecm) ||
             ((mode == kEcUnchanged) && (_isAecMode == false))) {
    if (enable) {
      // Disable the AEC before enable the AECM
      if (_shared->audio_processing()->echo_cancellation()->is_enabled()) {
        _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
                              "SetEcStatus() disable AEC before enabling AECM");
        if (_shared->audio_processing()->echo_cancellation()->Enable(false) !=
            0) {
          _shared->SetLastError(VE_APM_ERROR, kTraceError,
                                "SetEcStatus() failed to disable AEC");
          return -1;
        }
      }
    }
    if (_shared->audio_processing()->echo_control_mobile()->Enable(enable) !=
        0) {
      _shared->SetLastError(VE_APM_ERROR, kTraceError,
                            "SetEcStatus() failed to set AECM state");
      return -1;
    }
    _isAecMode = false;
  } else {
    _shared->SetLastError(VE_INVALID_ARGUMENT, kTraceError,
                          "SetEcStatus() invalid EC mode");
    return -1;
  }

  return 0;
}
391 | |
392 int VoEAudioProcessingImpl::GetEcStatus(bool& enabled, EcModes& mode) { | |
393 if (!_shared->statistics().Initialized()) { | |
394 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
395 return -1; | |
396 } | |
397 | |
398 if (_isAecMode == true) { | |
399 mode = kEcAec; | |
400 enabled = _shared->audio_processing()->echo_cancellation()->is_enabled(); | |
401 } else { | |
402 mode = kEcAecm; | |
403 enabled = _shared->audio_processing()->echo_control_mobile()->is_enabled(); | |
404 } | |
405 | |
406 return 0; | |
407 } | |
408 | |
// Forwards a fixed delay offset (ms) to the APM.
// NOTE(review): unlike most setters in this file there is no Initialized()
// check here — confirm that pre-init calls are intended before relying on it.
void VoEAudioProcessingImpl::SetDelayOffsetMs(int offset) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "SetDelayOffsetMs(offset = %d)", offset);
  _shared->audio_processing()->set_delay_offset_ms(offset);
}
414 | |
// Returns the delay offset (ms) currently configured in the APM.
int VoEAudioProcessingImpl::DelayOffsetMs() {
  return _shared->audio_processing()->delay_offset_ms();
}
418 | |
419 int VoEAudioProcessingImpl::SetAecmMode(AecmModes mode, bool enableCNG) { | |
420 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
421 "SetAECMMode(mode = %d)", mode); | |
422 if (!_shared->statistics().Initialized()) { | |
423 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
424 return -1; | |
425 } | |
426 | |
427 EchoControlMobile::RoutingMode aecmMode( | |
428 EchoControlMobile::kQuietEarpieceOrHeadset); | |
429 | |
430 switch (mode) { | |
431 case kAecmQuietEarpieceOrHeadset: | |
432 aecmMode = EchoControlMobile::kQuietEarpieceOrHeadset; | |
433 break; | |
434 case kAecmEarpiece: | |
435 aecmMode = EchoControlMobile::kEarpiece; | |
436 break; | |
437 case kAecmLoudEarpiece: | |
438 aecmMode = EchoControlMobile::kLoudEarpiece; | |
439 break; | |
440 case kAecmSpeakerphone: | |
441 aecmMode = EchoControlMobile::kSpeakerphone; | |
442 break; | |
443 case kAecmLoudSpeakerphone: | |
444 aecmMode = EchoControlMobile::kLoudSpeakerphone; | |
445 break; | |
446 } | |
447 | |
448 if (_shared->audio_processing()->echo_control_mobile()->set_routing_mode( | |
449 aecmMode) != 0) { | |
450 _shared->SetLastError(VE_APM_ERROR, kTraceError, | |
451 "SetAECMMode() failed to set AECM routing mode"); | |
452 return -1; | |
453 } | |
454 if (_shared->audio_processing()->echo_control_mobile()->enable_comfort_noise( | |
455 enableCNG) != 0) { | |
456 _shared->SetLastError( | |
457 VE_APM_ERROR, kTraceError, | |
458 "SetAECMMode() failed to set comfort noise state for AECM"); | |
459 return -1; | |
460 } | |
461 | |
462 return 0; | |
463 } | |
464 | |
465 int VoEAudioProcessingImpl::GetAecmMode(AecmModes& mode, bool& enabledCNG) { | |
466 if (!_shared->statistics().Initialized()) { | |
467 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
468 return -1; | |
469 } | |
470 | |
471 enabledCNG = false; | |
472 | |
473 EchoControlMobile::RoutingMode aecmMode = | |
474 _shared->audio_processing()->echo_control_mobile()->routing_mode(); | |
475 enabledCNG = _shared->audio_processing() | |
476 ->echo_control_mobile() | |
477 ->is_comfort_noise_enabled(); | |
478 | |
479 switch (aecmMode) { | |
480 case EchoControlMobile::kQuietEarpieceOrHeadset: | |
481 mode = kAecmQuietEarpieceOrHeadset; | |
482 break; | |
483 case EchoControlMobile::kEarpiece: | |
484 mode = kAecmEarpiece; | |
485 break; | |
486 case EchoControlMobile::kLoudEarpiece: | |
487 mode = kAecmLoudEarpiece; | |
488 break; | |
489 case EchoControlMobile::kSpeakerphone: | |
490 mode = kAecmSpeakerphone; | |
491 break; | |
492 case EchoControlMobile::kLoudSpeakerphone: | |
493 mode = kAecmLoudSpeakerphone; | |
494 break; | |
495 } | |
496 | |
497 return 0; | |
498 } | |
499 | |
500 int VoEAudioProcessingImpl::EnableHighPassFilter(bool enable) { | |
501 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
502 "EnableHighPassFilter(%d)", enable); | |
503 if (_shared->audio_processing()->high_pass_filter()->Enable(enable) != | |
504 AudioProcessing::kNoError) { | |
505 _shared->SetLastError(VE_APM_ERROR, kTraceError, | |
506 "HighPassFilter::Enable() failed."); | |
507 return -1; | |
508 } | |
509 | |
510 return 0; | |
511 } | |
512 | |
// Returns whether the APM high-pass filter is currently enabled.
bool VoEAudioProcessingImpl::IsHighPassFilterEnabled() {
  return _shared->audio_processing()->high_pass_filter()->is_enabled();
}
516 | |
// Returns the voice-activity indicator for |channel|, or -1 on error
// (engine not initialized or channel not found).
int VoEAudioProcessingImpl::VoiceActivityIndicator(int channel) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "VoiceActivityIndicator(channel=%d)", channel);
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
  voe::Channel* channelPtr = ch.channel();
  if (channelPtr == NULL) {
    _shared->SetLastError(VE_CHANNEL_NOT_VALID, kTraceError,
                          "VoiceActivityIndicator() failed to locate channel");
    return -1;
  }
  // |activity| stays -1 if the channel call does not update it; the return
  // value of Channel::VoiceActivityIndicator() itself is ignored here.
  int activity(-1);
  channelPtr->VoiceActivityIndicator(activity);

  return activity;
}
537 | |
538 int VoEAudioProcessingImpl::SetEcMetricsStatus(bool enable) { | |
539 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
540 "SetEcMetricsStatus(enable=%d)", enable); | |
541 if (!_shared->statistics().Initialized()) { | |
542 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
543 return -1; | |
544 } | |
545 | |
546 if ((_shared->audio_processing()->echo_cancellation()->enable_metrics( | |
547 enable) != 0) || | |
548 (_shared->audio_processing()->echo_cancellation()->enable_delay_logging( | |
549 enable) != 0)) { | |
550 _shared->SetLastError(VE_APM_ERROR, kTraceError, | |
551 "SetEcMetricsStatus() unable to set EC metrics mode"); | |
552 return -1; | |
553 } | |
554 return 0; | |
555 } | |
556 | |
557 int VoEAudioProcessingImpl::GetEcMetricsStatus(bool& enabled) { | |
558 if (!_shared->statistics().Initialized()) { | |
559 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
560 return -1; | |
561 } | |
562 | |
563 bool echo_mode = | |
564 _shared->audio_processing()->echo_cancellation()->are_metrics_enabled(); | |
565 bool delay_mode = _shared->audio_processing() | |
566 ->echo_cancellation() | |
567 ->is_delay_logging_enabled(); | |
568 | |
569 if (echo_mode != delay_mode) { | |
570 _shared->SetLastError( | |
571 VE_APM_ERROR, kTraceError, | |
572 "GetEcMetricsStatus() delay logging and echo mode are not the same"); | |
573 return -1; | |
574 } | |
575 | |
576 enabled = echo_mode; | |
577 | |
578 return 0; | |
579 } | |
580 | |
581 int VoEAudioProcessingImpl::GetEchoMetrics(int& ERL, | |
582 int& ERLE, | |
583 int& RERL, | |
584 int& A_NLP) { | |
585 if (!_shared->statistics().Initialized()) { | |
586 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
587 return -1; | |
588 } | |
589 if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) { | |
590 _shared->SetLastError( | |
591 VE_APM_ERROR, kTraceWarning, | |
592 "GetEchoMetrics() AudioProcessingModule AEC is not enabled"); | |
593 return -1; | |
594 } | |
595 | |
596 // Get Echo Metrics from Audio Processing Module. | |
597 EchoCancellation::Metrics echoMetrics; | |
598 if (_shared->audio_processing()->echo_cancellation()->GetMetrics( | |
599 &echoMetrics)) { | |
600 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
601 "GetEchoMetrics(), AudioProcessingModule metrics error"); | |
602 return -1; | |
603 } | |
604 | |
605 // Echo quality metrics. | |
606 ERL = echoMetrics.echo_return_loss.instant; | |
607 ERLE = echoMetrics.echo_return_loss_enhancement.instant; | |
608 RERL = echoMetrics.residual_echo_return_loss.instant; | |
609 A_NLP = echoMetrics.a_nlp.instant; | |
610 | |
611 return 0; | |
612 } | |
613 | |
614 int VoEAudioProcessingImpl::GetEcDelayMetrics(int& delay_median, | |
615 int& delay_std, | |
616 float& fraction_poor_delays) { | |
617 if (!_shared->statistics().Initialized()) { | |
618 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
619 return -1; | |
620 } | |
621 if (!_shared->audio_processing()->echo_cancellation()->is_enabled()) { | |
622 _shared->SetLastError( | |
623 VE_APM_ERROR, kTraceWarning, | |
624 "GetEcDelayMetrics() AudioProcessingModule AEC is not enabled"); | |
625 return -1; | |
626 } | |
627 | |
628 int median = 0; | |
629 int std = 0; | |
630 float poor_fraction = 0; | |
631 // Get delay-logging values from Audio Processing Module. | |
632 if (_shared->audio_processing()->echo_cancellation()->GetDelayMetrics( | |
633 &median, &std, &poor_fraction)) { | |
634 WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
635 "GetEcDelayMetrics(), AudioProcessingModule delay-logging " | |
636 "error"); | |
637 return -1; | |
638 } | |
639 | |
640 // EC delay-logging metrics | |
641 delay_median = median; | |
642 delay_std = std; | |
643 fraction_poor_delays = poor_fraction; | |
644 | |
645 return 0; | |
646 } | |
647 | |
648 int VoEAudioProcessingImpl::StartDebugRecording(const char* fileNameUTF8) { | |
649 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
650 "StartDebugRecording()"); | |
651 if (!_shared->statistics().Initialized()) { | |
652 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
653 return -1; | |
654 } | |
655 | |
656 return _shared->audio_processing()->StartDebugRecording(fileNameUTF8, -1); | |
657 } | |
658 | |
659 int VoEAudioProcessingImpl::StartDebugRecording(FILE* file_handle) { | |
660 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
661 "StartDebugRecording()"); | |
662 if (!_shared->statistics().Initialized()) { | |
663 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
664 return -1; | |
665 } | |
666 | |
667 return _shared->audio_processing()->StartDebugRecording(file_handle, -1); | |
668 } | |
669 | |
670 int VoEAudioProcessingImpl::StopDebugRecording() { | |
671 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
672 "StopDebugRecording()"); | |
673 if (!_shared->statistics().Initialized()) { | |
674 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
675 return -1; | |
676 } | |
677 | |
678 return _shared->audio_processing()->StopDebugRecording(); | |
679 } | |
680 | |
// Enables/disables typing detection. Typing detection piggybacks on the
// APM voice-activity detector, so this toggles the VAD component.
int VoEAudioProcessingImpl::SetTypingDetectionStatus(bool enable) {
  WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
               "SetTypingDetectionStatus()");
#if !WEBRTC_VOICE_ENGINE_TYPING_DETECTION
  NOT_SUPPORTED(_shared->statistics());
#else
  if (!_shared->statistics().Initialized()) {
    _shared->SetLastError(VE_NOT_INITED, kTraceError);
    return -1;
  }

  // Just use the VAD state to determine if we should enable typing detection
  // or not

  if (_shared->audio_processing()->voice_detection()->Enable(enable)) {
    _shared->SetLastError(VE_APM_ERROR, kTraceWarning,
                          "SetTypingDetectionStatus() failed to set VAD state");
    return -1;
  }
  // Typing detection only needs a coarse voice decision, hence the lowest
  // likelihood threshold.
  if (_shared->audio_processing()->voice_detection()->set_likelihood(
          VoiceDetection::kVeryLowLikelihood)) {
    _shared->SetLastError(
        VE_APM_ERROR, kTraceWarning,
        "SetTypingDetectionStatus() failed to set VAD likelihood to low");
    return -1;
  }

  return 0;
#endif
}
711 | |
712 int VoEAudioProcessingImpl::GetTypingDetectionStatus(bool& enabled) { | |
713 if (!_shared->statistics().Initialized()) { | |
714 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
715 return -1; | |
716 } | |
717 // Just use the VAD state to determine if we should enable typing | |
718 // detection or not | |
719 | |
720 enabled = _shared->audio_processing()->voice_detection()->is_enabled(); | |
721 | |
722 return 0; | |
723 } | |
724 | |
725 int VoEAudioProcessingImpl::TimeSinceLastTyping(int& seconds) { | |
726 #if !WEBRTC_VOICE_ENGINE_TYPING_DETECTION | |
727 NOT_SUPPORTED(_shared->statistics()); | |
728 #else | |
729 if (!_shared->statistics().Initialized()) { | |
730 _shared->SetLastError(VE_NOT_INITED, kTraceError); | |
731 return -1; | |
732 } | |
733 // Check if typing detection is enabled | |
734 bool enabled = _shared->audio_processing()->voice_detection()->is_enabled(); | |
735 if (enabled) { | |
736 _shared->transmit_mixer()->TimeSinceLastTyping(seconds); | |
737 return 0; | |
738 } else { | |
739 _shared->SetLastError(VE_FUNC_NOT_SUPPORTED, kTraceError, | |
740 "SetTypingDetectionStatus is not enabled"); | |
741 return -1; | |
742 } | |
743 #endif | |
744 } | |
745 | |
746 int VoEAudioProcessingImpl::SetTypingDetectionParameters(int timeWindow, | |
747 int costPerTyping, | |
748 int reportingThreshold, | |
749 int penaltyDecay, | |
750 int typeEventDelay) { | |
751 WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1), | |
752 "SetTypingDetectionParameters()"); | |
753 #if !WEBRTC_VOICE_ENGINE_TYPING_DETECTION | |
754 NOT_SUPPORTED(_shared->statistics()); | |
755 #else | |
756 if (!_shared->statistics().Initialized()) { | |
757 _shared->statistics().SetLastError(VE_NOT_INITED, kTraceError); | |
758 return -1; | |
759 } | |
760 return (_shared->transmit_mixer()->SetTypingDetectionParameters( | |
761 timeWindow, costPerTyping, reportingThreshold, penaltyDecay, | |
762 typeEventDelay)); | |
763 #endif | |
764 } | |
765 | |
// Forwards the stereo channel-swapping setting to the transmit mixer.
// Note: no Initialized() check, matching the getter below.
void VoEAudioProcessingImpl::EnableStereoChannelSwapping(bool enable) {
  _shared->transmit_mixer()->EnableStereoChannelSwapping(enable);
}
769 | |
// Reports whether stereo channel swapping is active in the transmit mixer.
bool VoEAudioProcessingImpl::IsStereoChannelSwappingEnabled() {
  return _shared->transmit_mixer()->IsStereoChannelSwappingEnabled();
}
773 | |
774 } // namespace webrtc | |
OLD | NEW |