OLD | NEW |
| (Empty) |
1 /* | |
2 * libjingle | |
3 * Copyright 2004 Google Inc. | |
4 * | |
5 * Redistribution and use in source and binary forms, with or without | |
6 * modification, are permitted provided that the following conditions are met: | |
7 * | |
8 * 1. Redistributions of source code must retain the above copyright notice, | |
9 * this list of conditions and the following disclaimer. | |
10 * 2. Redistributions in binary form must reproduce the above copyright notice, | |
11 * this list of conditions and the following disclaimer in the documentation | |
12 * and/or other materials provided with the distribution. | |
13 * 3. The name of the author may not be used to endorse or promote products | |
14 * derived from this software without specific prior written permission. | |
15 * | |
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED | |
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | |
18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO | |
19 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | |
22 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | |
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | |
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | |
25 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
26 */ | |
27 | |
28 #ifdef HAVE_CONFIG_H | |
29 #include <config.h> | |
30 #endif | |
31 | |
32 #ifdef HAVE_WEBRTC_VOICE | |
33 | |
34 #include "talk/media/webrtc/webrtcvoiceengine.h" | |
35 | |
36 #include <algorithm> | |
37 #include <cstdio> | |
38 #include <string> | |
39 #include <vector> | |
40 | |
41 #include "talk/media/base/audioframe.h" | |
42 #include "talk/media/base/audiorenderer.h" | |
43 #include "talk/media/base/constants.h" | |
44 #include "talk/media/base/streamparams.h" | |
45 #include "talk/media/webrtc/webrtcmediaengine.h" | |
46 #include "talk/media/webrtc/webrtcvoe.h" | |
47 #include "webrtc/audio/audio_sink.h" | |
48 #include "webrtc/base/arraysize.h" | |
49 #include "webrtc/base/base64.h" | |
50 #include "webrtc/base/byteorder.h" | |
51 #include "webrtc/base/common.h" | |
52 #include "webrtc/base/helpers.h" | |
53 #include "webrtc/base/logging.h" | |
54 #include "webrtc/base/stringencode.h" | |
55 #include "webrtc/base/stringutils.h" | |
56 #include "webrtc/call/rtc_event_log.h" | |
57 #include "webrtc/common.h" | |
58 #include "webrtc/modules/audio_coding/acm2/rent_a_codec.h" | |
59 #include "webrtc/modules/audio_processing/include/audio_processing.h" | |
60 #include "webrtc/system_wrappers/include/field_trial.h" | |
61 #include "webrtc/system_wrappers/include/trace.h" | |
62 | |
63 namespace cricket { | |
64 namespace { | |
65 | |
66 const int kDefaultTraceFilter = webrtc::kTraceNone | webrtc::kTraceTerseInfo | | |
67 webrtc::kTraceWarning | webrtc::kTraceError | | |
68 webrtc::kTraceCritical; | |
69 const int kElevatedTraceFilter = kDefaultTraceFilter | webrtc::kTraceStateInfo | | |
70 webrtc::kTraceInfo; | |
71 | |
72 // On Windows Vista and newer, Microsoft introduced the concept of "Default | |
73 // Communications Device". This means that there are two types of default | |
74 // devices (old Wave Audio style default and Default Communications Device). | |
75 // | |
76 // On Windows systems which only support the Wave Audio style default, either | |
77 // -1 or 0 selects the default device. | |
78 #ifdef WIN32 | |
79 const int kDefaultAudioDeviceId = -1; | |
80 #else | |
81 const int kDefaultAudioDeviceId = 0; | |
82 #endif | |
83 | |
84 // Parameter used for NACK. | |
85 // This value is equivalent to 5 seconds of audio data at 20 ms per packet. | |
86 const int kNackMaxPackets = 250; | |
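// Worked check: 250 packets * 20 ms/packet = 5000 ms = 5 seconds. | |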
87 | |
88 // Codec parameters for Opus. | |
89 // draft-spittka-payload-rtp-opus-03 | |
90 | |
91 // Recommended bitrates: | |
92 // 8-12 kb/s for NB speech, | |
93 // 16-20 kb/s for WB speech, | |
94 // 28-40 kb/s for FB speech, | |
95 // 48-64 kb/s for FB mono music, and | |
96 // 64-128 kb/s for FB stereo music. | |
97 // The current implementation applies the following values to mono signals, | |
98 // and multiplies them by 2 for stereo. | |
99 const int kOpusBitrateNb = 12000; | |
100 const int kOpusBitrateWb = 20000; | |
101 const int kOpusBitrateFb = 32000; | |
102 | |
103 // Opus bitrate should be in the range between 6000 and 510000. | |
104 const int kOpusMinBitrate = 6000; | |
105 const int kOpusMaxBitrate = 510000; | |
106 | |
107 // Default audio dscp value. | |
108 // See http://tools.ietf.org/html/rfc2474 for details. | |
109 // See also http://tools.ietf.org/html/draft-jennings-rtcweb-qos-00 | |
110 const rtc::DiffServCodePoint kAudioDscpValue = rtc::DSCP_EF; | |
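// For reference: DSCP_EF is the Expedited Forwarding code point (value 46, | |
// 0b101110), commonly used for low-latency media such as voice. | |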
111 | |
112 // Ensure we open the file in a writeable path on ChromeOS and Android. This | |
113 // workaround can be removed when it's possible to specify a filename for audio | |
114 // option based AEC dumps. | |
115 // | |
116 // TODO(grunell): Use a string in the options instead of hardcoding it here | |
117 // and let the embedder choose the filename (crbug.com/264223). | |
118 // | |
119 // NOTE(ajm): Don't use hardcoded paths on platforms not explicitly specified | |
120 // below. | |
121 #if defined(CHROMEOS) | |
122 const char kAecDumpByAudioOptionFilename[] = "/tmp/audio.aecdump"; | |
123 #elif defined(ANDROID) | |
124 const char kAecDumpByAudioOptionFilename[] = "/sdcard/audio.aecdump"; | |
125 #else | |
126 const char kAecDumpByAudioOptionFilename[] = "audio.aecdump"; | |
127 #endif | |
128 | |
129 // Constants from voice_engine_defines.h. | |
130 const int kMinTelephoneEventCode = 0; // RFC4733 (Section 2.3.1) | |
131 const int kMaxTelephoneEventCode = 255; | |
132 const int kMinTelephoneEventDuration = 100; | |
133 const int kMaxTelephoneEventDuration = 60000; // Actual limit is 2^16 | |
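// For reference, per RFC 4733: event codes 0-9 are the DTMF digits 0-9, | |
// 10 is '*', 11 is '#', and 12-15 are A-D; the full 0-255 range is allowed | |
// for other defined tones and events. | |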
134 | |
135 class ProxySink : public webrtc::AudioSinkInterface { | |
136 public: | |
137 ProxySink(AudioSinkInterface* sink) : sink_(sink) { RTC_DCHECK(sink); } | |
138 | |
139 void OnData(const Data& audio) override { sink_->OnData(audio); } | |
140 | |
141 private: | |
142 webrtc::AudioSinkInterface* sink_; | |
143 }; | |
144 | |
145 bool ValidateStreamParams(const StreamParams& sp) { | |
146 if (sp.ssrcs.empty()) { | |
147 LOG(LS_ERROR) << "No SSRCs in stream parameters: " << sp.ToString(); | |
148 return false; | |
149 } | |
150 if (sp.ssrcs.size() > 1) { | |
151 LOG(LS_ERROR) << "Multiple SSRCs in stream parameters: " << sp.ToString(); | |
152 return false; | |
153 } | |
154 return true; | |
155 } | |
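// Illustrative example: a StreamParams with exactly one SSRC, e.g. | |
// {ssrcs: [1234]}, passes this check; an empty list or {ssrcs: [1234, 5678]} | |
// is rejected, since an audio stream here uses a single SSRC. | |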
156 | |
157 // Dumps an AudioCodec in RFC 2327-ish format. | |
158 std::string ToString(const AudioCodec& codec) { | |
159 std::stringstream ss; | |
160 ss << codec.name << "/" << codec.clockrate << "/" << codec.channels | |
161 << " (" << codec.id << ")"; | |
162 return ss.str(); | |
163 } | |
164 | |
165 std::string ToString(const webrtc::CodecInst& codec) { | |
166 std::stringstream ss; | |
167 ss << codec.plname << "/" << codec.plfreq << "/" << codec.channels | |
168 << " (" << codec.pltype << ")"; | |
169 return ss.str(); | |
170 } | |
171 | |
172 bool IsCodec(const AudioCodec& codec, const char* ref_name) { | |
173 return (_stricmp(codec.name.c_str(), ref_name) == 0); | |
174 } | |
175 | |
176 bool IsCodec(const webrtc::CodecInst& codec, const char* ref_name) { | |
177 return (_stricmp(codec.plname, ref_name) == 0); | |
178 } | |
179 | |
180 bool FindCodec(const std::vector<AudioCodec>& codecs, | |
181 const AudioCodec& codec, | |
182 AudioCodec* found_codec) { | |
183 for (const AudioCodec& c : codecs) { | |
184 if (c.Matches(codec)) { | |
185 if (found_codec != NULL) { | |
186 *found_codec = c; | |
187 } | |
188 return true; | |
189 } | |
190 } | |
191 return false; | |
192 } | |
193 | |
194 bool VerifyUniquePayloadTypes(const std::vector<AudioCodec>& codecs) { | |
195 if (codecs.empty()) { | |
196 return true; | |
197 } | |
198 std::vector<int> payload_types; | |
199 for (const AudioCodec& codec : codecs) { | |
200 payload_types.push_back(codec.id); | |
201 } | |
202 std::sort(payload_types.begin(), payload_types.end()); | |
203 auto it = std::unique(payload_types.begin(), payload_types.end()); | |
204 return it == payload_types.end(); | |
205 } | |
206 | |
207 bool IsNackEnabled(const AudioCodec& codec) { | |
208 return codec.HasFeedbackParam(FeedbackParam(kRtcpFbParamNack, | |
209 kParamValueEmpty)); | |
210 } | |
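// Illustrative example: a codec negotiated with the SDP attribute | |
// "a=rtcp-fb:<pt> nack" carries FeedbackParam(kRtcpFbParamNack, | |
// kParamValueEmpty), so this returns true for it. | |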
211 | |
212 // Return true if codec.params[feature] == "1", false otherwise. | |
213 bool IsCodecFeatureEnabled(const AudioCodec& codec, const char* feature) { | |
214 int value; | |
215 return codec.GetParam(feature, &value) && value == 1; | |
216 } | |
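// Illustrative example: for an Opus codec negotiated with | |
// "a=fmtp:111 stereo=1;useinbandfec=1", both | |
// IsCodecFeatureEnabled(codec, kCodecParamStereo) and | |
// IsCodecFeatureEnabled(codec, kCodecParamUseInbandFec) return true. | |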
217 | |
218 // Use params[kCodecParamMaxAverageBitrate] if it is defined; otherwise use | |
219 // codec.bitrate. If the value (either from params or codec.bitrate) is <= 0, | |
220 // use the default configuration. If the value is outside the feasible Opus | |
221 // bitrate range, clamp it. Returns the Opus bitrate to use. | |
222 int GetOpusBitrate(const AudioCodec& codec, int max_playback_rate) { | |
223 int bitrate = 0; | |
224 bool use_param = true; | |
225 if (!codec.GetParam(kCodecParamMaxAverageBitrate, &bitrate)) { | |
226 bitrate = codec.bitrate; | |
227 use_param = false; | |
228 } | |
229 if (bitrate <= 0) { | |
230 if (max_playback_rate <= 8000) { | |
231 bitrate = kOpusBitrateNb; | |
232 } else if (max_playback_rate <= 16000) { | |
233 bitrate = kOpusBitrateWb; | |
234 } else { | |
235 bitrate = kOpusBitrateFb; | |
236 } | |
237 | |
238 if (IsCodecFeatureEnabled(codec, kCodecParamStereo)) { | |
239 bitrate *= 2; | |
240 } | |
241 } else if (bitrate < kOpusMinBitrate || bitrate > kOpusMaxBitrate) { | |
242 bitrate = (bitrate < kOpusMinBitrate) ? kOpusMinBitrate : kOpusMaxBitrate; | |
243 std::string rate_source = | |
244 use_param ? "Codec parameter \"maxaveragebitrate\"" : | |
245 "Supplied Opus bitrate"; | |
246 LOG(LS_WARNING) << rate_source | |
247 << " is invalid and is replaced by: " | |
248 << bitrate; | |
249 } | |
250 return bitrate; | |
251 } | |
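// Worked example: with no "maxaveragebitrate" param and codec.bitrate == 0, | |
// a max playback rate of 16000 Hz selects kOpusBitrateWb (20000 bps), | |
// doubled to 40000 bps if "stereo=1". An explicit "maxaveragebitrate=4000" | |
// is clamped up to kOpusMinBitrate (6000), and 600000 is clamped down to | |
// kOpusMaxBitrate (510000). | |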
252 | |
253 // Returns kOpusDefaultPlaybackRate if params[kCodecParamMaxPlaybackRate] is not | |
254 // defined. Returns the value of params[kCodecParamMaxPlaybackRate] otherwise. | |
255 int GetOpusMaxPlaybackRate(const AudioCodec& codec) { | |
256 int value; | |
257 if (codec.GetParam(kCodecParamMaxPlaybackRate, &value)) { | |
258 return value; | |
259 } | |
260 return kOpusDefaultMaxPlaybackRate; | |
261 } | |
262 | |
263 void GetOpusConfig(const AudioCodec& codec, webrtc::CodecInst* voe_codec, | |
264 bool* enable_codec_fec, int* max_playback_rate, | |
265 bool* enable_codec_dtx) { | |
266 *enable_codec_fec = IsCodecFeatureEnabled(codec, kCodecParamUseInbandFec); | |
267 *enable_codec_dtx = IsCodecFeatureEnabled(codec, kCodecParamUseDtx); | |
268 *max_playback_rate = GetOpusMaxPlaybackRate(codec); | |
269 | |
270 // If OPUS, change what we send according to the "stereo" codec | |
271 // parameter, and not the "channels" parameter. We set | |
272 // voe_codec.channels to 2 if "stereo=1" and 1 otherwise. If | |
273 // the bitrate is not specified, i.e. is <= zero, we set it to the | |
274 // appropriate default value for mono or stereo Opus. | |
275 | |
276 voe_codec->channels = IsCodecFeatureEnabled(codec, kCodecParamStereo) ? 2 : 1; | |
277 voe_codec->rate = GetOpusBitrate(codec, *max_playback_rate); | |
278 } | |
279 | |
280 webrtc::AudioState::Config MakeAudioStateConfig(VoEWrapper* voe_wrapper) { | |
281 webrtc::AudioState::Config config; | |
282 config.voice_engine = voe_wrapper->engine(); | |
283 return config; | |
284 } | |
285 | |
286 class WebRtcVoiceCodecs final { | |
287 public: | |
288 // TODO(solenberg): Do this filtering once off-line, add a simple AudioCodec | |
289 // list and add a test which verifies VoE supports the listed codecs. | |
290 static std::vector<AudioCodec> SupportedCodecs() { | |
291 LOG(LS_INFO) << "WebRtc VoiceEngine codecs:"; | |
292 std::vector<AudioCodec> result; | |
293 for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) { | |
294 // Change the sample rate of G722 to 8000 to match SDP. | |
295 MaybeFixupG722(&voe_codec, 8000); | |
296 // Skip uncompressed formats. | |
297 if (IsCodec(voe_codec, kL16CodecName)) { | |
298 continue; | |
299 } | |
300 | |
301 const CodecPref* pref = NULL; | |
302 for (size_t j = 0; j < arraysize(kCodecPrefs); ++j) { | |
303 if (IsCodec(voe_codec, kCodecPrefs[j].name) && | |
304 kCodecPrefs[j].clockrate == voe_codec.plfreq && | |
305 kCodecPrefs[j].channels == voe_codec.channels) { | |
306 pref = &kCodecPrefs[j]; | |
307 break; | |
308 } | |
309 } | |
310 | |
311 if (pref) { | |
312 // Use the payload type that we've configured in our pref table; | |
313 // use the offset in our pref table to determine the sort order. | |
314 AudioCodec codec( | |
315 pref->payload_type, voe_codec.plname, voe_codec.plfreq, | |
316 voe_codec.rate, voe_codec.channels, | |
317 static_cast<int>(arraysize(kCodecPrefs)) - (pref - kCodecPrefs)); | |
318 LOG(LS_INFO) << ToString(codec); | |
319 if (IsCodec(codec, kIsacCodecName)) { | |
320 // Indicate auto-bitrate in signaling. | |
321 codec.bitrate = 0; | |
322 } | |
323 if (IsCodec(codec, kOpusCodecName)) { | |
324 // Only add fmtp parameters that differ from the spec. | |
325 if (kPreferredMinPTime != kOpusDefaultMinPTime) { | |
326 codec.params[kCodecParamMinPTime] = | |
327 rtc::ToString(kPreferredMinPTime); | |
328 } | |
329 if (kPreferredMaxPTime != kOpusDefaultMaxPTime) { | |
330 codec.params[kCodecParamMaxPTime] = | |
331 rtc::ToString(kPreferredMaxPTime); | |
332 } | |
333 codec.SetParam(kCodecParamUseInbandFec, 1); | |
334 | |
335 // TODO(hellner): Add ptime, sprop-stereo, and stereo | |
336 // when they can be set to values other than the default. | |
337 } | |
338 result.push_back(codec); | |
339 } else { | |
340 LOG(LS_WARNING) << "Unexpected codec: " << ToString(voe_codec); | |
341 } | |
342 } | |
343 // Make sure they are in local preference order. | |
344 std::sort(result.begin(), result.end(), &AudioCodec::Preferable); | |
345 return result; | |
346 } | |
347 | |
348 static bool ToCodecInst(const AudioCodec& in, | |
349 webrtc::CodecInst* out) { | |
350 for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) { | |
351 // Change the sample rate of G722 to 8000 to match SDP. | |
352 MaybeFixupG722(&voe_codec, 8000); | |
353 AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq, | |
354 voe_codec.rate, voe_codec.channels, 0); | |
355 bool multi_rate = IsCodecMultiRate(voe_codec); | |
356 // Allow arbitrary rates for ISAC to be specified. | |
357 if (multi_rate) { | |
358 // Set codec.bitrate to 0 so the check for codec.Matches() passes. | |
359 codec.bitrate = 0; | |
360 } | |
361 if (codec.Matches(in)) { | |
362 if (out) { | |
363 // Fixup the payload type. | |
364 voe_codec.pltype = in.id; | |
365 | |
366 // Set bitrate if specified. | |
367 if (multi_rate && in.bitrate != 0) { | |
368 voe_codec.rate = in.bitrate; | |
369 } | |
370 | |
371 // Reset G722 sample rate to 16000 to match WebRTC. | |
372 MaybeFixupG722(&voe_codec, 16000); | |
373 | |
374 // Apply codec-specific settings. | |
375 if (IsCodec(codec, kIsacCodecName)) { | |
376 // If ISAC and an explicit bitrate is not specified, | |
377 // enable auto bitrate adjustment. | |
378 voe_codec.rate = (in.bitrate > 0) ? in.bitrate : -1; | |
379 } | |
380 *out = voe_codec; | |
381 } | |
382 return true; | |
383 } | |
384 } | |
385 return false; | |
386 } | |
387 | |
388 static bool IsCodecMultiRate(const webrtc::CodecInst& codec) { | |
389 for (size_t i = 0; i < arraysize(kCodecPrefs); ++i) { | |
390 if (IsCodec(codec, kCodecPrefs[i].name) && | |
391 kCodecPrefs[i].clockrate == codec.plfreq) { | |
392 return kCodecPrefs[i].is_multi_rate; | |
393 } | |
394 } | |
395 return false; | |
396 } | |
397 | |
398 // If the AudioCodec param kCodecParamPTime is set, use it as the codec | |
399 // pacsize if it is a supported packet size; otherwise pick the next smallest | |
400 // value we support. | |
401 // TODO(Brave): Query supported packet sizes from ACM when the API is ready. | |
402 static bool SetPTimeAsPacketSize(webrtc::CodecInst* codec, int ptime_ms) { | |
403 for (const CodecPref& codec_pref : kCodecPrefs) { | |
404 if ((IsCodec(*codec, codec_pref.name) && | |
405 codec_pref.clockrate == codec->plfreq) || | |
406 IsCodec(*codec, kG722CodecName)) { | |
407 int packet_size_ms = SelectPacketSize(codec_pref, ptime_ms); | |
408 if (packet_size_ms) { | |
409 // Convert unit from milli-seconds to samples. | |
410 codec->pacsize = (codec->plfreq / 1000) * packet_size_ms; | |
411 return true; | |
412 } | |
413 } | |
414 } | |
415 return false; | |
416 } | |
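// Worked example: for iLBC (8000 Hz, supported sizes {20, 30, 40, 60} ms), a | |
// requested ptime of 35 ms selects 30 ms, giving | |
// pacsize = (8000 / 1000) * 30 = 240 samples; for Opus at 48000 Hz, a 20 ms | |
// ptime gives pacsize = 48 * 20 = 960 samples. | |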
417 | |
418 private: | |
419 static const int kMaxNumPacketSize = 6; | |
420 struct CodecPref { | |
421 const char* name; | |
422 int clockrate; | |
423 size_t channels; | |
424 int payload_type; | |
425 bool is_multi_rate; | |
426 int packet_sizes_ms[kMaxNumPacketSize]; | |
427 }; | |
428 // Note: keep the supported packet sizes in ascending order. | |
429 static const CodecPref kCodecPrefs[12]; | |
430 | |
431 static int SelectPacketSize(const CodecPref& codec_pref, int ptime_ms) { | |
432 int selected_packet_size_ms = codec_pref.packet_sizes_ms[0]; | |
433 for (int packet_size_ms : codec_pref.packet_sizes_ms) { | |
434 if (packet_size_ms && packet_size_ms <= ptime_ms) { | |
435 selected_packet_size_ms = packet_size_ms; | |
436 } | |
437 } | |
438 return selected_packet_size_ms; | |
439 } | |
440 | |
441 // Changes RTP timestamp rate of G722. This is due to the "bug" in the RFC | |
442 // which says that G722 should be advertised as 8 kHz although it is a 16 kHz | |
443 // codec. | |
444 static void MaybeFixupG722(webrtc::CodecInst* voe_codec, int new_plfreq) { | |
445 if (IsCodec(*voe_codec, kG722CodecName)) { | |
446 // If this DCHECK triggers, the codec definition in WebRTC VoiceEngine | |
447 // has changed, and this special case is no longer needed. | |
448 RTC_DCHECK(voe_codec->plfreq != new_plfreq); | |
449 voe_codec->plfreq = new_plfreq; | |
450 } | |
451 } | |
452 }; | |
453 | |
454 const WebRtcVoiceCodecs::CodecPref WebRtcVoiceCodecs::kCodecPrefs[12] = { | |
455 { kOpusCodecName, 48000, 2, 111, true, { 10, 20, 40, 60 } }, | |
456 { kIsacCodecName, 16000, 1, 103, true, { 30, 60 } }, | |
457 { kIsacCodecName, 32000, 1, 104, true, { 30 } }, | |
458 // G722 should be advertised as 8000 Hz because of the RFC "bug". | |
459 { kG722CodecName, 8000, 1, 9, false, { 10, 20, 30, 40, 50, 60 } }, | |
460 { kIlbcCodecName, 8000, 1, 102, false, { 20, 30, 40, 60 } }, | |
461 { kPcmuCodecName, 8000, 1, 0, false, { 10, 20, 30, 40, 50, 60 } }, | |
462 { kPcmaCodecName, 8000, 1, 8, false, { 10, 20, 30, 40, 50, 60 } }, | |
463 { kCnCodecName, 32000, 1, 106, false, { } }, | |
464 { kCnCodecName, 16000, 1, 105, false, { } }, | |
465 { kCnCodecName, 8000, 1, 13, false, { } }, | |
466 { kRedCodecName, 8000, 1, 127, false, { } }, | |
467 { kDtmfCodecName, 8000, 1, 126, false, { } }, | |
468 }; | |
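// Illustrative SDP view: the Opus entry above would typically be signaled as | |
// "a=rtpmap:111 opus/48000/2" with an fmtp line such as | |
// "a=fmtp:111 minptime=10; useinbandfec=1" (the exact fmtp contents depend on | |
// the defaults compared in SupportedCodecs() above). | |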
469 }  // namespace | |
470 | |
471 bool WebRtcVoiceEngine::ToCodecInst(const AudioCodec& in, | |
472 webrtc::CodecInst* out) { | |
473 return WebRtcVoiceCodecs::ToCodecInst(in, out); | |
474 } | |
475 | |
476 WebRtcVoiceEngine::WebRtcVoiceEngine() | |
477 : voe_wrapper_(new VoEWrapper()), | |
478 audio_state_(webrtc::AudioState::Create(MakeAudioStateConfig(voe()))) { | |
479 Construct(); | |
480 } | |
481 | |
482 WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper) | |
483 : voe_wrapper_(voe_wrapper) { | |
484 Construct(); | |
485 } | |
486 | |
487 void WebRtcVoiceEngine::Construct() { | |
488 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
489 LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine"; | |
490 | |
491 signal_thread_checker_.DetachFromThread(); | |
492 memset(&default_agc_config_, 0, sizeof(default_agc_config_)); | |
493 voe_config_.Set<webrtc::VoicePacing>(new webrtc::VoicePacing(true)); | |
494 | |
495 webrtc::Trace::set_level_filter(kDefaultTraceFilter); | |
496 webrtc::Trace::SetTraceCallback(this); | |
497 | |
498 // Load our audio codec list. | |
499 codecs_ = WebRtcVoiceCodecs::SupportedCodecs(); | |
500 } | |
501 | |
502 WebRtcVoiceEngine::~WebRtcVoiceEngine() { | |
503 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
504 LOG(LS_VERBOSE) << "WebRtcVoiceEngine::~WebRtcVoiceEngine"; | |
505 if (adm_) { | |
506 voe_wrapper_.reset(); | |
507 adm_->Release(); | |
508 adm_ = NULL; | |
509 } | |
510 webrtc::Trace::SetTraceCallback(nullptr); | |
511 } | |
512 | |
513 bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) { | |
514 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
515 RTC_DCHECK(worker_thread == rtc::Thread::Current()); | |
516 LOG(LS_INFO) << "WebRtcVoiceEngine::Init"; | |
517 bool res = InitInternal(); | |
518 if (res) { | |
519 LOG(LS_INFO) << "WebRtcVoiceEngine::Init Done!"; | |
520 } else { | |
521 LOG(LS_ERROR) << "WebRtcVoiceEngine::Init failed"; | |
522 Terminate(); | |
523 } | |
524 return res; | |
525 } | |
526 | |
527 bool WebRtcVoiceEngine::InitInternal() { | |
528 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
529 // Temporarily turn logging level up for the Init call | |
530 webrtc::Trace::set_level_filter(kElevatedTraceFilter); | |
531 LOG(LS_INFO) << webrtc::VoiceEngine::GetVersionString(); | |
532 if (voe_wrapper_->base()->Init(adm_) == -1) { | |
533 LOG_RTCERR0_EX(Init, voe_wrapper_->error()); | |
534 return false; | |
535 } | |
536 webrtc::Trace::set_level_filter(kDefaultTraceFilter); | |
537 | |
538 // Save the default AGC configuration settings. This must happen before | |
539 // calling ApplyOptions or the default will be overwritten. | |
540 if (voe_wrapper_->processing()->GetAgcConfig(default_agc_config_) == -1) { | |
541 LOG_RTCERR0(GetAgcConfig); | |
542 return false; | |
543 } | |
544 | |
545 // Set default engine options. | |
546 { | |
547 AudioOptions options; | |
548 options.echo_cancellation = rtc::Optional<bool>(true); | |
549 options.auto_gain_control = rtc::Optional<bool>(true); | |
550 options.noise_suppression = rtc::Optional<bool>(true); | |
551 options.highpass_filter = rtc::Optional<bool>(true); | |
552 options.stereo_swapping = rtc::Optional<bool>(false); | |
553 options.audio_jitter_buffer_max_packets = rtc::Optional<int>(50); | |
554 options.audio_jitter_buffer_fast_accelerate = rtc::Optional<bool>(false); | |
555 options.typing_detection = rtc::Optional<bool>(true); | |
556 options.adjust_agc_delta = rtc::Optional<int>(0); | |
557 options.experimental_agc = rtc::Optional<bool>(false); | |
558 options.extended_filter_aec = rtc::Optional<bool>(false); | |
559 options.delay_agnostic_aec = rtc::Optional<bool>(false); | |
560 options.experimental_ns = rtc::Optional<bool>(false); | |
561 options.aec_dump = rtc::Optional<bool>(false); | |
562 if (!ApplyOptions(options)) { | |
563 return false; | |
564 } | |
565 } | |
566 | |
567 // Print our codec list again for the call diagnostic log | |
568 LOG(LS_INFO) << "WebRtc VoiceEngine codecs:"; | |
569 for (const AudioCodec& codec : codecs_) { | |
570 LOG(LS_INFO) << ToString(codec); | |
571 } | |
572 | |
573 SetDefaultDevices(); | |
574 | |
575 initialized_ = true; | |
576 return true; | |
577 } | |
578 | |
579 void WebRtcVoiceEngine::Terminate() { | |
580 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
581 LOG(LS_INFO) << "WebRtcVoiceEngine::Terminate"; | |
582 initialized_ = false; | |
583 | |
584 StopAecDump(); | |
585 | |
586 voe_wrapper_->base()->Terminate(); | |
587 } | |
588 | |
589 rtc::scoped_refptr<webrtc::AudioState> | |
590 WebRtcVoiceEngine::GetAudioState() const { | |
591 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
592 return audio_state_; | |
593 } | |
594 | |
595 VoiceMediaChannel* WebRtcVoiceEngine::CreateChannel(webrtc::Call* call, | |
596 const AudioOptions& options) { | |
597 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
598 return new WebRtcVoiceMediaChannel(this, options, call); | |
599 } | |
600 | |
601 bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) { | |
602 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
603 LOG(LS_INFO) << "ApplyOptions: " << options_in.ToString(); | |
604 AudioOptions options = options_in; // The options are modified below. | |
605 | |
606 // kEcConference is AEC with high suppression. | |
607 webrtc::EcModes ec_mode = webrtc::kEcConference; | |
608 webrtc::AecmModes aecm_mode = webrtc::kAecmSpeakerphone; | |
609 webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog; | |
610 webrtc::NsModes ns_mode = webrtc::kNsHighSuppression; | |
611 if (options.aecm_generate_comfort_noise) { | |
612 LOG(LS_VERBOSE) << "Comfort noise explicitly set to " | |
613 << *options.aecm_generate_comfort_noise | |
614 << " (default is false)."; | |
615 } | |
616 | |
617 #if defined(WEBRTC_IOS) | |
618 // On iOS, VPIO provides built-in EC and AGC. | |
619 options.echo_cancellation = rtc::Optional<bool>(false); | |
620 options.auto_gain_control = rtc::Optional<bool>(false); | |
621 LOG(LS_INFO) << "Always disable AEC and AGC on iOS. Use built-in instead."; | |
622 #elif defined(ANDROID) | |
623 ec_mode = webrtc::kEcAecm; | |
624 #endif | |
625 | |
626 #if defined(WEBRTC_IOS) || defined(ANDROID) | |
627 // Set the AGC mode for iOS as well despite disabling it above, to avoid | |
628 // unsupported configuration errors from webrtc. | |
629 agc_mode = webrtc::kAgcFixedDigital; | |
630 options.typing_detection = rtc::Optional<bool>(false); | |
631 options.experimental_agc = rtc::Optional<bool>(false); | |
632 options.extended_filter_aec = rtc::Optional<bool>(false); | |
633 options.experimental_ns = rtc::Optional<bool>(false); | |
634 #endif | |
635 | |
636 // Delay Agnostic AEC automatically turns on EC if not set, except on iOS, | |
637 // where the feature is not supported. | |
638 bool use_delay_agnostic_aec = false; | |
639 #if !defined(WEBRTC_IOS) | |
640 if (options.delay_agnostic_aec) { | |
641 use_delay_agnostic_aec = *options.delay_agnostic_aec; | |
642 if (use_delay_agnostic_aec) { | |
643 options.echo_cancellation = rtc::Optional<bool>(true); | |
644 options.extended_filter_aec = rtc::Optional<bool>(true); | |
645 ec_mode = webrtc::kEcConference; | |
646 } | |
647 } | |
648 #endif | |
649 | |
650 webrtc::VoEAudioProcessing* voep = voe_wrapper_->processing(); | |
651 | |
652 if (options.echo_cancellation) { | |
653 // Check if platform supports built-in EC. Currently only supported on | |
654 // Android and in combination with Java based audio layer. | |
655 // TODO(henrika): investigate possibility to support built-in EC also | |
656 // in combination with Open SL ES audio. | |
657 const bool built_in_aec = voe_wrapper_->hw()->BuiltInAECIsAvailable(); | |
658 if (built_in_aec) { | |
659 // Built-in EC exists on this device and use_delay_agnostic_aec is not | |
660 // overriding it. Enable/Disable it according to the echo_cancellation | |
661 // audio option. | |
662 const bool enable_built_in_aec = | |
663 *options.echo_cancellation && !use_delay_agnostic_aec; | |
664 if (voe_wrapper_->hw()->EnableBuiltInAEC(enable_built_in_aec) == 0 && | |
665 enable_built_in_aec) { | |
666 // Disable internal software EC if built-in EC is enabled, | |
667 // i.e., replace the software EC with the built-in EC. | |
668 options.echo_cancellation = rtc::Optional<bool>(false); | |
669 LOG(LS_INFO) << "Disabling EC since built-in EC will be used instead"; | |
670 } | |
671 } | |
672 if (voep->SetEcStatus(*options.echo_cancellation, ec_mode) == -1) { | |
673 LOG_RTCERR2(SetEcStatus, *options.echo_cancellation, ec_mode); | |
674 return false; | |
675 } else { | |
676 LOG(LS_INFO) << "Echo control set to " << *options.echo_cancellation | |
677 << " with mode " << ec_mode; | |
678 } | |
679 #if !defined(ANDROID) | |
680 // TODO(ajm): Remove the error return on Android from webrtc. | |
681 if (voep->SetEcMetricsStatus(*options.echo_cancellation) == -1) { | |
682 LOG_RTCERR1(SetEcMetricsStatus, *options.echo_cancellation); | |
683 return false; | |
684 } | |
685 #endif | |
686 if (ec_mode == webrtc::kEcAecm) { | |
687 bool cn = options.aecm_generate_comfort_noise.value_or(false); | |
688 if (voep->SetAecmMode(aecm_mode, cn) != 0) { | |
689 LOG_RTCERR2(SetAecmMode, aecm_mode, cn); | |
690 return false; | |
691 } | |
692 } | |
693 } | |
694 | |
695 if (options.auto_gain_control) { | |
696 const bool built_in_agc = voe_wrapper_->hw()->BuiltInAGCIsAvailable(); | |
697 if (built_in_agc) { | |
698 if (voe_wrapper_->hw()->EnableBuiltInAGC(*options.auto_gain_control) == | |
699 0 && | |
700 *options.auto_gain_control) { | |
701 // Disable internal software AGC if built-in AGC is enabled, | |
702 // i.e., replace the software AGC with the built-in AGC. | |
703 options.auto_gain_control = rtc::Optional<bool>(false); | |
704 LOG(LS_INFO) << "Disabling AGC since built-in AGC will be used instead"; | |
705 } | |
706 } | |
707 if (voep->SetAgcStatus(*options.auto_gain_control, agc_mode) == -1) { | |
708 LOG_RTCERR2(SetAgcStatus, *options.auto_gain_control, agc_mode); | |
709 return false; | |
710 } else { | |
711 LOG(LS_INFO) << "Auto gain set to " << *options.auto_gain_control | |
712 << " with mode " << agc_mode; | |
713 } | |
714 } | |
715 | |
716 if (options.tx_agc_target_dbov || options.tx_agc_digital_compression_gain || | |
717 options.tx_agc_limiter) { | |
718 // Override default_agc_config_. Generally, an unset option means "leave | |
719 // the VoE bits alone" in this function, so we want whatever is set to be | |
720 // stored as the new "default". If we didn't, then setting e.g. | |
721 // tx_agc_target_dbov would reset digital compression gain and limiter | |
722 // settings. | |
723 // Also, if we don't update default_agc_config_, then adjust_agc_delta | |
724 // would be an offset from the original values, and not whatever was set | |
725 // explicitly. | |
726 default_agc_config_.targetLeveldBOv = options.tx_agc_target_dbov.value_or( | |
727 default_agc_config_.targetLeveldBOv); | |
728 default_agc_config_.digitalCompressionGaindB = | |
729 options.tx_agc_digital_compression_gain.value_or( | |
730 default_agc_config_.digitalCompressionGaindB); | |
731 default_agc_config_.limiterEnable = | |
732 options.tx_agc_limiter.value_or(default_agc_config_.limiterEnable); | |
733 if (voe_wrapper_->processing()->SetAgcConfig(default_agc_config_) == -1) { | |
734 LOG_RTCERR3(SetAgcConfig, | |
735 default_agc_config_.targetLeveldBOv, | |
736 default_agc_config_.digitalCompressionGaindB, | |
737 default_agc_config_.limiterEnable); | |
738 return false; | |
739 } | |
740 } | |
741 | |
742 if (options.noise_suppression) { | |
743 const bool built_in_ns = voe_wrapper_->hw()->BuiltInNSIsAvailable(); | |
744 if (built_in_ns) { | |
745 if (voe_wrapper_->hw()->EnableBuiltInNS(*options.noise_suppression) == | |
746 0 && | |
747 *options.noise_suppression) { | |
748 // Disable internal software NS if built-in NS is enabled, | |
749 // i.e., replace the software NS with the built-in NS. | |
750 options.noise_suppression = rtc::Optional<bool>(false); | |
751 LOG(LS_INFO) << "Disabling NS since built-in NS will be used instead"; | |
752 } | |
753 } | |
754 if (voep->SetNsStatus(*options.noise_suppression, ns_mode) == -1) { | |
755 LOG_RTCERR2(SetNsStatus, *options.noise_suppression, ns_mode); | |
756 return false; | |
757 } else { | |
758 LOG(LS_INFO) << "Noise suppression set to " << *options.noise_suppression | |
759 << " with mode " << ns_mode; | |
760 } | |
761 } | |
762 | |
763 if (options.highpass_filter) { | |
764 LOG(LS_INFO) << "High pass filter enabled? " << *options.highpass_filter; | |
765 if (voep->EnableHighPassFilter(*options.highpass_filter) == -1) { | |
766 LOG_RTCERR1(SetHighpassFilterStatus, *options.highpass_filter); | |
767 return false; | |
768 } | |
769 } | |
770 | |
771 if (options.stereo_swapping) { | |
772 LOG(LS_INFO) << "Stereo swapping enabled? " << *options.stereo_swapping; | |
773 voep->EnableStereoChannelSwapping(*options.stereo_swapping); | |
774 if (voep->IsStereoChannelSwappingEnabled() != *options.stereo_swapping) { | |
775 LOG_RTCERR1(EnableStereoChannelSwapping, *options.stereo_swapping); | |
776 return false; | |
777 } | |
778 } | |
779 | |
780 if (options.audio_jitter_buffer_max_packets) { | |
781 LOG(LS_INFO) << "NetEq capacity is " | |
782 << *options.audio_jitter_buffer_max_packets; | |
783 voe_config_.Set<webrtc::NetEqCapacityConfig>( | |
784 new webrtc::NetEqCapacityConfig( | |
785 *options.audio_jitter_buffer_max_packets)); | |
786 } | |
787 | |
788 if (options.audio_jitter_buffer_fast_accelerate) { | |
789 LOG(LS_INFO) << "NetEq fast mode? " | |
790 << *options.audio_jitter_buffer_fast_accelerate; | |
791 voe_config_.Set<webrtc::NetEqFastAccelerate>( | |
792 new webrtc::NetEqFastAccelerate( | |
793 *options.audio_jitter_buffer_fast_accelerate)); | |
794 } | |
795 | |
796 if (options.typing_detection) { | |
797 LOG(LS_INFO) << "Typing detection is enabled? " | |
798 << *options.typing_detection; | |
799 if (voep->SetTypingDetectionStatus(*options.typing_detection) == -1) { | |
800 // In case of error, log the info and continue | |
801 LOG_RTCERR1(SetTypingDetectionStatus, *options.typing_detection); | |
802 } | |
803 } | |
804 | |
805 if (options.adjust_agc_delta) { | |
806 LOG(LS_INFO) << "Adjust agc delta is " << *options.adjust_agc_delta; | |
807 if (!AdjustAgcLevel(*options.adjust_agc_delta)) { | |
808 return false; | |
809 } | |
810 } | |
811 | |
812 if (options.aec_dump) { | |
813 LOG(LS_INFO) << "Aec dump is enabled? " << *options.aec_dump; | |
814 if (*options.aec_dump) | |
815 StartAecDump(kAecDumpByAudioOptionFilename); | |
816 else | |
817 StopAecDump(); | |
818 } | |
819 | |
820 webrtc::Config config; | |
821 | |
822 if (options.delay_agnostic_aec) | |
823 delay_agnostic_aec_ = options.delay_agnostic_aec; | |
824 if (delay_agnostic_aec_) { | |
825 LOG(LS_INFO) << "Delay agnostic aec is enabled? " << *delay_agnostic_aec_; | |
826 config.Set<webrtc::DelayAgnostic>( | |
827 new webrtc::DelayAgnostic(*delay_agnostic_aec_)); | |
828 } | |
829 | |
830 if (options.extended_filter_aec) { | |
831 extended_filter_aec_ = options.extended_filter_aec; | |
832 } | |
833 if (extended_filter_aec_) { | |
834 LOG(LS_INFO) << "Extended filter aec is enabled? " << *extended_filter_aec_; | |
835 config.Set<webrtc::ExtendedFilter>( | |
836 new webrtc::ExtendedFilter(*extended_filter_aec_)); | |
837 } | |
838 | |
839 if (options.experimental_ns) { | |
840 experimental_ns_ = options.experimental_ns; | |
841 } | |
842 if (experimental_ns_) { | |
843 LOG(LS_INFO) << "Experimental ns is enabled? " << *experimental_ns_; | |
844 config.Set<webrtc::ExperimentalNs>( | |
845 new webrtc::ExperimentalNs(*experimental_ns_)); | |
846 } | |
847 | |
848 // We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine | |
849 // returns NULL on audio_processing(). | |
850 webrtc::AudioProcessing* audioproc = voe_wrapper_->base()->audio_processing(); | |
851 if (audioproc) { | |
852 audioproc->SetExtraOptions(config); | |
853 } | |
854 | |
855 if (options.recording_sample_rate) { | |
856 LOG(LS_INFO) << "Recording sample rate is " | |
857 << *options.recording_sample_rate; | |
858 if (voe_wrapper_->hw()->SetRecordingSampleRate( | |
859 *options.recording_sample_rate)) { | |
860 LOG_RTCERR1(SetRecordingSampleRate, *options.recording_sample_rate); | |
861 } | |
862 } | |
863 | |
864 if (options.playout_sample_rate) { | |
865 LOG(LS_INFO) << "Playout sample rate is " << *options.playout_sample_rate; | |
866 if (voe_wrapper_->hw()->SetPlayoutSampleRate( | |
867 *options.playout_sample_rate)) { | |
868 LOG_RTCERR1(SetPlayoutSampleRate, *options.playout_sample_rate); | |
869 } | |
870 } | |
871 | |
872 return true; | |
873 } | |
874 | |
875 void WebRtcVoiceEngine::SetDefaultDevices() { | |
876 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
877 #if !defined(WEBRTC_IOS) | |
878 int in_id = kDefaultAudioDeviceId; | |
879 int out_id = kDefaultAudioDeviceId; | |
880 LOG(LS_INFO) << "Setting microphone to (id=" << in_id | |
881 << ") and speaker to (id=" << out_id << ")"; | |
882 | |
883 bool ret = true; | |
884 if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) { | |
885 LOG_RTCERR1(SetRecordingDevice, in_id); | |
886 ret = false; | |
887 } | |
888 webrtc::AudioProcessing* ap = voe()->base()->audio_processing(); | |
889 if (ap) { | |
890 ap->Initialize(); | |
891 } | |
892 | |
893 if (voe_wrapper_->hw()->SetPlayoutDevice(out_id) == -1) { | |
894 LOG_RTCERR1(SetPlayoutDevice, out_id); | |
895 ret = false; | |
896 } | |
897 | |
898 if (ret) { | |
899 LOG(LS_INFO) << "Set microphone to (id=" << in_id | |
900 << ") and speaker to (id=" << out_id << ")"; | |
901 } | |
902 #endif // !WEBRTC_IOS | |
903 } | |
904 | |
905 bool WebRtcVoiceEngine::GetOutputVolume(int* level) { | |
906 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
907 unsigned int ulevel; | |
908 if (voe_wrapper_->volume()->GetSpeakerVolume(ulevel) == -1) { | |
909 LOG_RTCERR1(GetSpeakerVolume, level); | |
910 return false; | |
911 } | |
912 *level = ulevel; | |
913 return true; | |
914 } | |
915 | |
916 bool WebRtcVoiceEngine::SetOutputVolume(int level) { | |
917 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
918 RTC_DCHECK(level >= 0 && level <= 255); | |
919 if (voe_wrapper_->volume()->SetSpeakerVolume(level) == -1) { | |
920 LOG_RTCERR1(SetSpeakerVolume, level); | |
921 return false; | |
922 } | |
923 return true; | |
924 } | |
925 | |
926 int WebRtcVoiceEngine::GetInputLevel() { | |
927 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
928 unsigned int ulevel; | |
929 return (voe_wrapper_->volume()->GetSpeechInputLevel(ulevel) != -1) ? | |
930 static_cast<int>(ulevel) : -1; | |
931 } | |
932 | |
933 const std::vector<AudioCodec>& WebRtcVoiceEngine::codecs() { | |
934 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | |
935 return codecs_; | |
936 } | |
937 | |
938 RtpCapabilities WebRtcVoiceEngine::GetCapabilities() const { | |
939 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | |
940 RtpCapabilities capabilities; | |
941 capabilities.header_extensions.push_back(RtpHeaderExtension( | |
942 kRtpAudioLevelHeaderExtension, kRtpAudioLevelHeaderExtensionDefaultId)); | |
943 capabilities.header_extensions.push_back( | |
944 RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension, | |
945 kRtpAbsoluteSenderTimeHeaderExtensionDefaultId)); | |
946 return capabilities; | |
947 } | |
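// Illustrative SDP view (assuming the default extension ids defined | |
// elsewhere): these capabilities correspond to lines such as | |
// "a=extmap:<id> urn:ietf:params:rtp-hdrext:ssrc-audio-level" and | |
// "a=extmap:<id> http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time". | |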
948 | |
949 int WebRtcVoiceEngine::GetLastEngineError() { | |
950 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
951 return voe_wrapper_->error(); | |
952 } | |
953 | |
954 void WebRtcVoiceEngine::Print(webrtc::TraceLevel level, const char* trace, | |
955 int length) { | |
956 // Note: This callback can happen on any thread! | |
957 rtc::LoggingSeverity sev = rtc::LS_VERBOSE; | |
958 if (level == webrtc::kTraceError || level == webrtc::kTraceCritical) | |
959 sev = rtc::LS_ERROR; | |
960 else if (level == webrtc::kTraceWarning) | |
961 sev = rtc::LS_WARNING; | |
962 else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo) | |
963 sev = rtc::LS_INFO; | |
964 else if (level == webrtc::kTraceTerseInfo) | |
965 sev = rtc::LS_INFO; | |
966 | |
967 // Skip past boilerplate prefix text | |
968 if (length < 72) { | |
969 std::string msg(trace, length); | |
970 LOG(LS_ERROR) << "Malformed webrtc log message: "; | |
971 LOG_V(sev) << msg; | |
972 } else { | |
973 std::string msg(trace + 71, length - 72); | |
974 LOG_V(sev) << "webrtc: " << msg; | |
975 } | |
976 } | |
977 | |
978 void WebRtcVoiceEngine::RegisterChannel(WebRtcVoiceMediaChannel* channel) { | |
979 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
980 RTC_DCHECK(channel); | |
981 channels_.push_back(channel); | |
982 } | |
983 | |
984 void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel* channel) { | |
985 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
986 auto it = std::find(channels_.begin(), channels_.end(), channel); | |
987 RTC_DCHECK(it != channels_.end()); | |
988 channels_.erase(it); | |
989 } | |
990 | |
991 // Adjusts the default AGC target level by the specified delta. | |
992 // NB: If we start messing with other config fields, we'll want | |
993 // to save the current webrtc::AgcConfig as well. | |
994 bool WebRtcVoiceEngine::AdjustAgcLevel(int delta) { | |
995 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
996 webrtc::AgcConfig config = default_agc_config_; | |
997 config.targetLeveldBOv -= delta; | |
998 | |
999 LOG(LS_INFO) << "Adjusting AGC level from default -" | |
1000 << default_agc_config_.targetLeveldBOv << "dB to -" | |
1001 << config.targetLeveldBOv << "dB"; | |
1002 | |
1003 if (voe_wrapper_->processing()->SetAgcConfig(config) == -1) { | |
1004 LOG_RTCERR1(SetAgcConfig, config.targetLeveldBOv); | |
1005 return false; | |
1006 } | |
1007 return true; | |
1008 } | |
1009 | |
1010 bool WebRtcVoiceEngine::SetAudioDeviceModule(webrtc::AudioDeviceModule* adm) { | |
1011 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1012 if (initialized_) { | |
1013 LOG(LS_WARNING) << "SetAudioDeviceModule can not be called after Init."; | |
1014 return false; | |
1015 } | |
1016 if (adm_) { | |
1017 adm_->Release(); | |
1018 adm_ = NULL; | |
1019 } | |
1020 if (adm) { | |
1021 adm_ = adm; | |
1022 adm_->AddRef(); | |
1023 } | |
1024 return true; | |
1025 } | |
1026 | |
1027 bool WebRtcVoiceEngine::StartAecDump(rtc::PlatformFile file, | |
1028 int64_t max_size_bytes) { | |
1029 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1030 FILE* aec_dump_file_stream = rtc::FdopenPlatformFileForWriting(file); | |
1031 if (!aec_dump_file_stream) { | |
1032 LOG(LS_ERROR) << "Could not open AEC dump file stream."; | |
1033 if (!rtc::ClosePlatformFile(file)) | |
1034 LOG(LS_WARNING) << "Could not close file."; | |
1035 return false; | |
1036 } | |
1037 StopAecDump(); | |
1038 if (voe_wrapper_->base()->audio_processing()->StartDebugRecording( | |
1039 aec_dump_file_stream, max_size_bytes) != | |
1040 webrtc::AudioProcessing::kNoError) { | |
1041 LOG_RTCERR0(StartDebugRecording); | |
1042 fclose(aec_dump_file_stream); | |
1043 return false; | |
1044 } | |
1045 is_dumping_aec_ = true; | |
1046 return true; | |
1047 } | |
1048 | |
1049 void WebRtcVoiceEngine::StartAecDump(const std::string& filename) { | |
1050 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1051 if (!is_dumping_aec_) { | |
1052 // Start dumping AEC when we are not dumping. | |
1053 if (voe_wrapper_->base()->audio_processing()->StartDebugRecording( | |
1054 filename.c_str(), -1) != webrtc::AudioProcessing::kNoError) { | |
1055 LOG_RTCERR1(StartDebugRecording, filename.c_str()); | |
1056 } else { | |
1057 is_dumping_aec_ = true; | |
1058 } | |
1059 } | |
1060 } | |
1061 | |
1062 void WebRtcVoiceEngine::StopAecDump() { | |
1063 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1064 if (is_dumping_aec_) { | |
1065 // Stop dumping AEC when we are dumping. | |
1066 if (voe_wrapper_->base()->audio_processing()->StopDebugRecording() != | |
1067 webrtc::AudioProcessing::kNoError) { | |
1068 LOG_RTCERR0(StopDebugRecording); | |
1069 } | |
1070 is_dumping_aec_ = false; | |
1071 } | |
1072 } | |
1073 | |
1074 bool WebRtcVoiceEngine::StartRtcEventLog(rtc::PlatformFile file) { | |
1075 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1076 return voe_wrapper_->codec()->GetEventLog()->StartLogging(file); | |
1077 } | |
1078 | |
1079 void WebRtcVoiceEngine::StopRtcEventLog() { | |
1080 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1081 voe_wrapper_->codec()->GetEventLog()->StopLogging(); | |
1082 } | |
1083 | |
1084 int WebRtcVoiceEngine::CreateVoEChannel() { | |
1085 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1086 return voe_wrapper_->base()->CreateChannel(voe_config_); | |
1087 } | |
1088 | |
1089 class WebRtcVoiceMediaChannel::WebRtcAudioSendStream | |
1090 : public AudioRenderer::Sink { | |
1091 public: | |
1092 WebRtcAudioSendStream(int ch, webrtc::AudioTransport* voe_audio_transport, | |
1093 uint32_t ssrc, const std::string& c_name, | |
1094 const std::vector<webrtc::RtpExtension>& extensions, | |
1095 webrtc::Call* call) | |
1096 : voe_audio_transport_(voe_audio_transport), | |
1097 call_(call), | |
1098 config_(nullptr) { | |
1099 RTC_DCHECK_GE(ch, 0); | |
1100 // TODO(solenberg): Once we're not using FakeWebRtcVoiceEngine anymore: | |
1101 // RTC_DCHECK(voe_audio_transport); | |
1102 RTC_DCHECK(call); | |
1103 audio_capture_thread_checker_.DetachFromThread(); | |
1104 config_.rtp.ssrc = ssrc; | |
1105 config_.rtp.c_name = c_name; | |
1106 config_.voe_channel_id = ch; | |
1107 RecreateAudioSendStream(extensions); | |
1108 } | |
1109 | |
1110 ~WebRtcAudioSendStream() override { | |
1111 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1112 Stop(); | |
1113 call_->DestroyAudioSendStream(stream_); | |
1114 } | |
1115 | |
1116 void RecreateAudioSendStream( | |
1117 const std::vector<webrtc::RtpExtension>& extensions) { | |
1118 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1119 if (stream_) { | |
1120 call_->DestroyAudioSendStream(stream_); | |
1121 stream_ = nullptr; | |
1122 } | |
1123 config_.rtp.extensions = extensions; | |
1124 RTC_DCHECK(!stream_); | |
1125 stream_ = call_->CreateAudioSendStream(config_); | |
1126 RTC_CHECK(stream_); | |
1127 } | |
1128 | |
1129 bool SendTelephoneEvent(int payload_type, uint8_t event, | |
1130 uint32_t duration_ms) { | |
1131 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1132 RTC_DCHECK(stream_); | |
1133 return stream_->SendTelephoneEvent(payload_type, event, duration_ms); | |
1134 } | |
1135 | |
1136 webrtc::AudioSendStream::Stats GetStats() const { | |
1137 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1138 RTC_DCHECK(stream_); | |
1139 return stream_->GetStats(); | |
1140 } | |
1141 | |
1142 // Starts the rendering by setting a sink to the renderer to get data | |
1143 // callback. | |
1144 // This method is called on the libjingle worker thread. | |
1145 // TODO(xians): Make sure Start() is called only once. | |
1146 void Start(AudioRenderer* renderer) { | |
1147 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1148 RTC_DCHECK(renderer); | |
1149 if (renderer_) { | |
1150 RTC_DCHECK(renderer_ == renderer); | |
1151 return; | |
1152 } | |
1153 renderer->SetSink(this); | |
1154 renderer_ = renderer; | |
1155 } | |
1156 | |
1157 // Stops rendering by setting the sink of the renderer to nullptr. No data | |
1158 // callback will be received after this method. | |
1159 // This method is called on the libjingle worker thread. | |
1160 void Stop() { | |
1161 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1162 if (renderer_) { | |
1163 renderer_->SetSink(nullptr); | |
1164 renderer_ = nullptr; | |
1165 } | |
1166 } | |
1167 | |
1168 // AudioRenderer::Sink implementation. | |
1169 // This method is called on the audio thread. | |
1170 void OnData(const void* audio_data, | |
1171 int bits_per_sample, | |
1172 int sample_rate, | |
1173 size_t number_of_channels, | |
1174 size_t number_of_frames) override { | |
1175 RTC_DCHECK(!worker_thread_checker_.CalledOnValidThread()); | |
1176 RTC_DCHECK(audio_capture_thread_checker_.CalledOnValidThread()); | |
1177 RTC_DCHECK(voe_audio_transport_); | |
1178 voe_audio_transport_->OnData(config_.voe_channel_id, | |
1179 audio_data, | |
1180 bits_per_sample, | |
1181 sample_rate, | |
1182 number_of_channels, | |
1183 number_of_frames); | |
1184 } | |
1185 | |
1186 // Callback from the |renderer_| when it is going away. In case Start() has | |
1187 // never been called, this callback won't be triggered. | |
1188 void OnClose() override { | |
1189 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1190 // Set |renderer_| to nullptr to make sure no more callback will get into | |
1191 // the renderer. | |
1192 renderer_ = nullptr; | |
1193 } | |
1194 | |
1195 // Accessor to the VoE channel ID. | |
1196 int channel() const { | |
1197 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1198 return config_.voe_channel_id; | |
1199 } | |
1200 | |
1201 private: | |
1202 rtc::ThreadChecker worker_thread_checker_; | |
1203 rtc::ThreadChecker audio_capture_thread_checker_; | |
1204 webrtc::AudioTransport* const voe_audio_transport_ = nullptr; | |
1205 webrtc::Call* call_ = nullptr; | |
1206 webrtc::AudioSendStream::Config config_; | |
1207 // The stream is owned by WebRtcAudioSendStream and may be reallocated if | |
1208 // configuration changes. | |
1209 webrtc::AudioSendStream* stream_ = nullptr; | |
1210 | |
1211 // Raw pointer to AudioRenderer owned by LocalAudioTrackHandler. | |
1212 // PeerConnection will make sure to invalidate the pointer before the object | |
1213 // goes away. | |
1214 AudioRenderer* renderer_ = nullptr; | |
1215 | |
1216 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WebRtcAudioSendStream); | |
1217 }; | |
1218 | |
1219 class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream { | |
1220 public: | |
1221 WebRtcAudioReceiveStream(int ch, uint32_t remote_ssrc, uint32_t local_ssrc, | |
1222 bool use_combined_bwe, const std::string& sync_group, | |
1223 const std::vector<webrtc::RtpExtension>& extensions, | |
1224 webrtc::Call* call) | |
1225 : call_(call), | |
1226 config_() { | |
1227 RTC_DCHECK_GE(ch, 0); | |
1228 RTC_DCHECK(call); | |
1229 config_.rtp.remote_ssrc = remote_ssrc; | |
1230 config_.rtp.local_ssrc = local_ssrc; | |
1231 config_.voe_channel_id = ch; | |
1232 config_.sync_group = sync_group; | |
1233 RecreateAudioReceiveStream(use_combined_bwe, extensions); | |
1234 } | |
1235 | |
1236 ~WebRtcAudioReceiveStream() { | |
1237 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1238 call_->DestroyAudioReceiveStream(stream_); | |
1239 } | |
1240 | |
1241 void RecreateAudioReceiveStream( | |
1242 const std::vector<webrtc::RtpExtension>& extensions) { | |
1243 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1244 RecreateAudioReceiveStream(config_.combined_audio_video_bwe, extensions); | |
1245 } | |
1246 void RecreateAudioReceiveStream(bool use_combined_bwe) { | |
1247 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1248 RecreateAudioReceiveStream(use_combined_bwe, config_.rtp.extensions); | |
1249 } | |
1250 | |
1251 webrtc::AudioReceiveStream::Stats GetStats() const { | |
1252 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1253 RTC_DCHECK(stream_); | |
1254 return stream_->GetStats(); | |
1255 } | |
1256 | |
1257 int channel() const { | |
1258 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1259 return config_.voe_channel_id; | |
1260 } | |
1261 | |
1262 void SetRawAudioSink(rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) { | |
1263 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1264 stream_->SetSink(std::move(sink)); | |
1265 } | |
1266 | |
1267 private: | |
1268 void RecreateAudioReceiveStream(bool use_combined_bwe, | |
1269 const std::vector<webrtc::RtpExtension>& extensions) { | |
1270 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1271 if (stream_) { | |
1272 call_->DestroyAudioReceiveStream(stream_); | |
1273 stream_ = nullptr; | |
1274 } | |
1275 config_.rtp.extensions = extensions; | |
1276 config_.combined_audio_video_bwe = use_combined_bwe; | |
1277 RTC_DCHECK(!stream_); | |
1278 stream_ = call_->CreateAudioReceiveStream(config_); | |
1279 RTC_CHECK(stream_); | |
1280 } | |
1281 | |
1282 rtc::ThreadChecker worker_thread_checker_; | |
1283 webrtc::Call* call_ = nullptr; | |
1284 webrtc::AudioReceiveStream::Config config_; | |
1285 // The stream is owned by WebRtcAudioReceiveStream and may be reallocated if | |
1286 // configuration changes. | |
1287 webrtc::AudioReceiveStream* stream_ = nullptr; | |
1288 | |
1289 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WebRtcAudioReceiveStream); | |
1290 }; | |
1291 | |
1292 WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine* engine, | |
1293 const AudioOptions& options, | |
1294 webrtc::Call* call) | |
1295 : engine_(engine), call_(call) { | |
1296 LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel"; | |
1297 RTC_DCHECK(call); | |
1298 engine->RegisterChannel(this); | |
1299 SetOptions(options); | |
1300 } | |
1301 | |
1302 WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() { | |
1303 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1304 LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel"; | |
1305 // TODO(solenberg): Should be able to delete the streams directly, without | |
1306 // going through Remove{Send,Recv}Stream(), once stream objects handle | |
1307 // all (de)configuration. | |
1308 while (!send_streams_.empty()) { | |
1309 RemoveSendStream(send_streams_.begin()->first); | |
1310 } | |
1311 while (!recv_streams_.empty()) { | |
1312 RemoveRecvStream(recv_streams_.begin()->first); | |
1313 } | |
1314 engine()->UnregisterChannel(this); | |
1315 } | |
1316 | |
1317 bool WebRtcVoiceMediaChannel::SetSendParameters( | |
1318 const AudioSendParameters& params) { | |
1319 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1320 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSendParameters: " | |
1321 << params.ToString(); | |
1322 // TODO(pthatcher): Refactor this to be more clean now that we have | |
1323 // all the information at once. | |
1324 | |
1325 if (!SetSendCodecs(params.codecs)) { | |
1326 return false; | |
1327 } | |
1328 | |
1329 if (!ValidateRtpExtensions(params.extensions)) { | |
1330 return false; | |
1331 } | |
1332 std::vector<webrtc::RtpExtension> filtered_extensions = | |
1333 FilterRtpExtensions(params.extensions, | |
1334 webrtc::RtpExtension::IsSupportedForAudio, true); | |
1335 if (send_rtp_extensions_ != filtered_extensions) { | |
1336 send_rtp_extensions_.swap(filtered_extensions); | |
1337 for (auto& it : send_streams_) { | |
1338 it.second->RecreateAudioSendStream(send_rtp_extensions_); | |
1339 } | |
1340 } | |
1341 | |
1342 if (!SetMaxSendBandwidth(params.max_bandwidth_bps)) { | |
1343 return false; | |
1344 } | |
1345 return SetOptions(params.options); | |
1346 } | |
1347 | |
1348 bool WebRtcVoiceMediaChannel::SetRecvParameters( | |
1349 const AudioRecvParameters& params) { | |
1350 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1351 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetRecvParameters: " | |
1352 << params.ToString(); | |
1353 // TODO(pthatcher): Refactor this to be more clean now that we have | |
1354 // all the information at once. | |
1355 | |
1356 if (!SetRecvCodecs(params.codecs)) { | |
1357 return false; | |
1358 } | |
1359 | |
1360 if (!ValidateRtpExtensions(params.extensions)) { | |
1361 return false; | |
1362 } | |
1363 std::vector<webrtc::RtpExtension> filtered_extensions = | |
1364 FilterRtpExtensions(params.extensions, | |
1365 webrtc::RtpExtension::IsSupportedForAudio, false); | |
1366 if (recv_rtp_extensions_ != filtered_extensions) { | |
1367 recv_rtp_extensions_.swap(filtered_extensions); | |
1368 for (auto& it : recv_streams_) { | |
1369 it.second->RecreateAudioReceiveStream(recv_rtp_extensions_); | |
1370 } | |
1371 } | |
1372 | |
1373 return true; | |
1374 } | |
1375 | |
1376 bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) { | |
1377 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1378 LOG(LS_INFO) << "Setting voice channel options: " | |
1379 << options.ToString(); | |
1380 | |
1381 // Check if DSCP value is changed from previous. | |
1382 bool dscp_option_changed = (options_.dscp != options.dscp); | |
1383 | |
1384 // We retain all of the existing options, and apply the given ones | |
1385 // on top. This means there is no way to "clear" options such that | |
1386 // they go back to the engine default. | |
1387 options_.SetAll(options); | |
1388 if (!engine()->ApplyOptions(options_)) { | |
1389 LOG(LS_WARNING) << | |
1390 "Failed to apply engine options during channel SetOptions."; | |
1391 return false; | |
1392 } | |
1393 | |
1394 if (dscp_option_changed) { | |
1395 rtc::DiffServCodePoint dscp = rtc::DSCP_DEFAULT; | |
1396 if (options_.dscp.value_or(false)) { | |
1397 dscp = kAudioDscpValue; | |
1398 } | |
1399 if (MediaChannel::SetDscp(dscp) != 0) { | |
1400 LOG(LS_WARNING) << "Failed to set DSCP settings for audio channel"; | |
1401 } | |
1402 } | |
1403 | |
1404 // TODO(solenberg): Don't recreate unless options changed. | |
1405 for (auto& it : recv_streams_) { | |
1406 it.second->RecreateAudioReceiveStream( | |
1407 options_.combined_audio_video_bwe.value_or(false)); | |
1408 } | |
1409 | |
1410 LOG(LS_INFO) << "Set voice channel options. Current options: " | |
1411 << options_.ToString(); | |
1412 return true; | |
1413 } | |
1414 | |
1415 bool WebRtcVoiceMediaChannel::SetRecvCodecs( | |
1416 const std::vector<AudioCodec>& codecs) { | |
1417 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1418 | |
1419 // Set the payload types to be used for incoming media. | |
1420 LOG(LS_INFO) << "Setting receive voice codecs."; | |
1421 | |
1422 if (!VerifyUniquePayloadTypes(codecs)) { | |
1423 LOG(LS_ERROR) << "Codec payload types overlap."; | |
1424 return false; | |
1425 } | |
1426 | |
1427 std::vector<AudioCodec> new_codecs; | |
1428 // Find all new codecs. We allow adding new codecs but don't allow changing | |
1429 // the payload type of codecs that are already configured, since we might | |
1430 // already be receiving packets with that payload type. | |
1431 for (const AudioCodec& codec : codecs) { | |
1432 AudioCodec old_codec; | |
1433 if (FindCodec(recv_codecs_, codec, &old_codec)) { | |
1434 if (old_codec.id != codec.id) { | |
1435 LOG(LS_ERROR) << codec.name << " payload type changed."; | |
1436 return false; | |
1437 } | |
1438 } else { | |
1439 new_codecs.push_back(codec); | |
1440 } | |
1441 } | |
1442 if (new_codecs.empty()) { | |
1443 // There are no new codecs to configure. Already configured codecs are | |
1444 // never removed. | |
1445 return true; | |
1446 } | |
1447 | |
1448 if (playout_) { | |
1449 // Receive codecs cannot be changed while playing, so we temporarily | |
1450 // pause playout. | |
1451 PausePlayout(); | |
1452 } | |
1453 | |
1454 bool result = true; | |
1455 for (const AudioCodec& codec : new_codecs) { | |
1456 webrtc::CodecInst voe_codec; | |
1457 if (WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) { | |
1458 LOG(LS_INFO) << ToString(codec); | |
1459 voe_codec.pltype = codec.id; | |
1460 for (const auto& ch : recv_streams_) { | |
1461 if (engine()->voe()->codec()->SetRecPayloadType( | |
1462 ch.second->channel(), voe_codec) == -1) { | |
1463 LOG_RTCERR2(SetRecPayloadType, ch.second->channel(), | |
1464 ToString(voe_codec)); | |
1465 result = false; | |
1466 } | |
1467 } | |
1468 } else { | |
1469 LOG(LS_WARNING) << "Unknown codec " << ToString(codec); | |
1470 result = false; | |
1471 break; | |
1472 } | |
1473 } | |
1474 if (result) { | |
1475 recv_codecs_ = codecs; | |
1476 } | |
1477 | |
1478 if (desired_playout_ && !playout_) { | |
1479 ResumePlayout(); | |
1480 } | |
1481 return result; | |
1482 } | |
1483 | |
1484 bool WebRtcVoiceMediaChannel::SetSendCodecs( | |
1485 int channel, const std::vector<AudioCodec>& codecs) { | |
1486 // Disable VAD, NACK, RED, and FEC unless we know the other side wants them. | |
1487 engine()->voe()->codec()->SetVADStatus(channel, false); | |
1488 engine()->voe()->rtp()->SetNACKStatus(channel, false, 0); | |
1489 engine()->voe()->rtp()->SetREDStatus(channel, false); | |
1490 engine()->voe()->codec()->SetFECStatus(channel, false); | |
1491 | |
1492 // Scan through the list to figure out the codec to use for sending, along | |
1493 // with the proper configuration for VAD. | |
1494 bool found_send_codec = false; | |
1495 webrtc::CodecInst send_codec; | |
1496 memset(&send_codec, 0, sizeof(send_codec)); | |
1497 | |
1498 bool nack_enabled = nack_enabled_; | |
1499 bool enable_codec_fec = false; | |
1500 bool enable_opus_dtx = false; | |
1501 int opus_max_playback_rate = 0; | |
1502 | |
1503 // Set send codec (the first non-telephone-event/CN codec) | |
1504 for (const AudioCodec& codec : codecs) { | |
1505 // Ignore codecs we don't know about. The negotiation step should prevent | |
1506 // this, but double-check to be sure. | |
1507 webrtc::CodecInst voe_codec; | |
1508 if (!WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) { | |
1509 LOG(LS_WARNING) << "Unknown codec " << ToString(codec); | |
1510 continue; | |
1511 } | |
1512 | |
1513 if (IsCodec(codec, kDtmfCodecName) || IsCodec(codec, kCnCodecName)) { | |
1514 // Skip telephone-event/CN codec, which will be handled later. | |
1515 continue; | |
1516 } | |
1517 | |
1518 // We'll use the first codec in the list to actually send audio data. | |
1519 // Be sure to use the payload type requested by the remote side. | |
1520 // "red", for RED audio, is a special case where the actual codec to be | |
1521 // used is specified in params. | |
1522 if (IsCodec(codec, kRedCodecName)) { | |
1523 // Parse out the RED parameters. If we fail, just ignore RED; | |
1524 // we don't support all possible params/usage scenarios. | |
1525 if (!GetRedSendCodec(codec, codecs, &send_codec)) { | |
1526 continue; | |
1527 } | |
1528 | |
1529 // Enable redundant encoding of the specified codec. Treat any | |
1530 // failure as a fatal internal error. | |
1531 LOG(LS_INFO) << "Enabling RED on channel " << channel; | |
1532 if (engine()->voe()->rtp()->SetREDStatus(channel, true, codec.id) == -1) { | |
1533 LOG_RTCERR3(SetREDStatus, channel, true, codec.id); | |
1534 return false; | |
1535 } | |
1536 } else { | |
1537 send_codec = voe_codec; | |
1538 nack_enabled = IsNackEnabled(codec); | |
1539 // When Opus is the send codec, determine inband FEC, the maximum | |
1540 // playback rate, and Opus internal DTX from the codec parameters. | |
1541 if (IsCodec(codec, kOpusCodecName)) { | |
1542 GetOpusConfig(codec, &send_codec, &enable_codec_fec, | |
1543 &opus_max_playback_rate, &enable_opus_dtx); | |
1544 } | |
1545 | |
1546 // Set packet size if the AudioCodec param kCodecParamPTime is set. | |
1547 int ptime_ms = 0; | |
1548 if (codec.GetParam(kCodecParamPTime, &ptime_ms)) { | |
1549 if (!WebRtcVoiceCodecs::SetPTimeAsPacketSize(&send_codec, ptime_ms)) { | |
1550 LOG(LS_WARNING) << "Failed to set packet size for codec " | |
1551 << send_codec.plname; | |
1552 return false; | |
1553 } | |
1554 } | |
1555 } | |
1556 found_send_codec = true; | |
1557 break; | |
1558 } | |
1559 | |
1560 if (nack_enabled_ != nack_enabled) { | |
1561 SetNack(channel, nack_enabled); | |
1562 nack_enabled_ = nack_enabled; | |
1563 } | |
1564 | |
1565 if (!found_send_codec) { | |
1566 LOG(LS_WARNING) << "Received empty list of codecs."; | |
1567 return false; | |
1568 } | |
1569 | |
1570 // Set the codec immediately, since SetVADStatus() depends on whether | |
1571 // the current codec is mono or stereo. | |
1572 if (!SetSendCodec(channel, send_codec)) | |
1573 return false; | |
1574 | |
1575 // FEC should be enabled after SetSendCodec. | |
1576 if (enable_codec_fec) { | |
1577 LOG(LS_INFO) << "Attempt to enable codec internal FEC on channel " | |
1578 << channel; | |
1579 if (engine()->voe()->codec()->SetFECStatus(channel, true) == -1) { | |
1580 // Enable codec internal FEC. Treat any failure as a fatal internal error. | |
1581 LOG_RTCERR2(SetFECStatus, channel, true); | |
1582 return false; | |
1583 } | |
1584 } | |
1585 | |
1586 if (IsCodec(send_codec, kOpusCodecName)) { | |
1587 // DTX and maxplaybackrate should be set after SetSendCodec, because the | |
1588 // current send codec has to be Opus. | |
1589 | |
1590 // Set Opus internal DTX. | |
1591 LOG(LS_INFO) << "Attempt to " | |
1592 << (enable_opus_dtx ? "enable" : "disable") | |
1593 << " Opus DTX on channel " | |
1594 << channel; | |
1595 if (engine()->voe()->codec()->SetOpusDtx(channel, enable_opus_dtx)) { | |
1596 LOG_RTCERR2(SetOpusDtx, channel, enable_opus_dtx); | |
1597 return false; | |
1598 } | |
1599 | |
1600 // If opus_max_playback_rate <= 0, the default maximum playback rate | |
1601 // (48 kHz) will be used. | |
1602 if (opus_max_playback_rate > 0) { | |
1603 LOG(LS_INFO) << "Attempt to set maximum playback rate to " | |
1604 << opus_max_playback_rate | |
1605 << " Hz on channel " | |
1606 << channel; | |
1607 if (engine()->voe()->codec()->SetOpusMaxPlaybackRate( | |
1608 channel, opus_max_playback_rate) == -1) { | |
1609 LOG_RTCERR2(SetOpusMaxPlaybackRate, channel, opus_max_playback_rate); | |
1610 return false; | |
1611 } | |
1612 } | |
1613 } | |
1614 | |
1615 // Always update the |send_codec_| to the currently set send codec. | |
1616 send_codec_.reset(new webrtc::CodecInst(send_codec)); | |
1617 | |
1618 if (send_bitrate_setting_) { | |
1619 SetSendBitrateInternal(send_bitrate_bps_); | |
1620 } | |
1621 | |
1622 // Loop through the codecs list again to configure the CN codec. | |
1623 for (const AudioCodec& codec : codecs) { | |
1624 // Ignore codecs we don't know about. The negotiation step should prevent | |
1625 // this, but double-check to be sure. | |
1626 webrtc::CodecInst voe_codec; | |
1627 if (!WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) { | |
1628 LOG(LS_WARNING) << "Unknown codec " << ToString(codec); | |
1629 continue; | |
1630 } | |
1631 | |
1632 if (IsCodec(codec, kCnCodecName)) { | |
1633 // Turn voice activity detection/comfort noise on if supported. | |
1634 // Set the wideband CN payload type appropriately | |
1635 // (narrowband always uses the static payload type 13). | |
1636 webrtc::PayloadFrequencies cn_freq; | |
1637 switch (codec.clockrate) { | |
1638 case 8000: | |
1639 cn_freq = webrtc::kFreq8000Hz; | |
1640 break; | |
1641 case 16000: | |
1642 cn_freq = webrtc::kFreq16000Hz; | |
1643 break; | |
1644 case 32000: | |
1645 cn_freq = webrtc::kFreq32000Hz; | |
1646 break; | |
1647 default: | |
1648 LOG(LS_WARNING) << "CN frequency " << codec.clockrate | |
1649 << " not supported."; | |
1650 continue; | |
1651 } | |
1652 // Set the CN payload type and the VAD status. | |
1653 // The CN payload type for 8000 Hz clockrate is fixed at 13. | |
1654 if (cn_freq != webrtc::kFreq8000Hz) { | |
1655 if (engine()->voe()->codec()->SetSendCNPayloadType( | |
1656 channel, codec.id, cn_freq) == -1) { | |
1657 LOG_RTCERR3(SetSendCNPayloadType, channel, codec.id, cn_freq); | |
1658 // TODO(ajm): This failure condition will be removed from VoE. | |
1659 // Restore the return here when we update to a new enough webrtc. | |
1660 // | |
1661 // Not returning false because the SetSendCNPayloadType will fail if | |
1662 // the channel is already sending. | |
1663 // This can happen if the remote description is applied twice, for | |
1664 // example in the case of ROAP on top of JSEP, where both sides will | |
1665 // send the offer. | |
1666 } | |
1667 } | |
1668 // Only turn on VAD if we have a CN payload type that matches the | |
1669 // clockrate for the codec we are going to use. | |
1670 if (codec.clockrate == send_codec.plfreq && send_codec.channels != 2) { | |
1671 // TODO(minyue): If CN frequency == 48000 Hz is allowed, consider the | |
1672 // interaction between VAD and Opus FEC. | |
1673 LOG(LS_INFO) << "Enabling VAD"; | |
1674 if (engine()->voe()->codec()->SetVADStatus(channel, true) == -1) { | |
1675 LOG_RTCERR2(SetVADStatus, channel, true); | |
1676 return false; | |
1677 } | |
1678 } | |
1679 } | |
1680 } | |
1681 return true; | |
1682 } | |
1683 | |
1684 bool WebRtcVoiceMediaChannel::SetSendCodecs( | |
1685 const std::vector<AudioCodec>& codecs) { | |
1686 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1687 // TODO(solenberg): Validate input - that payload types don't overlap and are | |
1688 // within range, and filter out codecs we don't support, redundant codecs, | |
1689 // etc. | |
1690 | |
1691 // Find the DTMF telephone event "codec" payload type. | |
1692 dtmf_payload_type_ = rtc::Optional<int>(); | |
1693 for (const AudioCodec& codec : codecs) { | |
1694 if (IsCodec(codec, kDtmfCodecName)) { | |
1695 dtmf_payload_type_ = rtc::Optional<int>(codec.id); | |
1696 break; | |
1697 } | |
1698 } | |
1699 | |
1700 // Cache the codecs in order to configure the channel created later. | |
1701 send_codecs_ = codecs; | |
1702 for (const auto& ch : send_streams_) { | |
1703 if (!SetSendCodecs(ch.second->channel(), codecs)) { | |
1704 return false; | |
1705 } | |
1706 } | |
1707 | |
1708 // Set NACK status on the receive channels to match |nack_enabled_|. | |
1709 for (const auto& ch : recv_streams_) { | |
1710 SetNack(ch.second->channel(), nack_enabled_); | |
1711 } | |
1712 | |
1713 return true; | |
1714 } | |
1715 | |
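// Enables or disables NACK on a single VoE channel. When enabled, the | |
// retransmission history is sized by kNackMaxPackets. | |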
1716 void WebRtcVoiceMediaChannel::SetNack(int channel, bool nack_enabled) { | |
1717 if (nack_enabled) { | |
1718 LOG(LS_INFO) << "Enabling NACK for channel " << channel; | |
1719 engine()->voe()->rtp()->SetNACKStatus(channel, true, kNackMaxPackets); | |
1720 } else { | |
1721 LOG(LS_INFO) << "Disabling NACK for channel " << channel; | |
1722 engine()->voe()->rtp()->SetNACKStatus(channel, false, 0); | |
1723 } | |
1724 } | |
1725 | |
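// Applies |send_codec| to one VoE channel, skipping the call when the channel | |
// is already configured with an identical codec. | |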
1726 bool WebRtcVoiceMediaChannel::SetSendCodec( | |
1727 int channel, const webrtc::CodecInst& send_codec) { | |
1728 LOG(LS_INFO) << "Send channel " << channel << " selected voice codec " | |
1729 << ToString(send_codec) << ", bitrate=" << send_codec.rate; | |
1730 | |
1731 webrtc::CodecInst current_codec; | |
1732 if (engine()->voe()->codec()->GetSendCodec(channel, current_codec) == 0 && | |
1733 (send_codec == current_codec)) { | |
1734 // Codec is already configured, we can return without setting it again. | |
1735 return true; | |
1736 } | |
1737 | |
1738 if (engine()->voe()->codec()->SetSendCodec(channel, send_codec) == -1) { | |
1739 LOG_RTCERR2(SetSendCodec, channel, ToString(send_codec)); | |
1740 return false; | |
1741 } | |
1742 return true; | |
1743 } | |
1744 | |
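// Playout control: |desired_playout_| remembers what the caller asked for, | |
// while ChangePlayout() applies a (possibly temporary) state to every receive | |
// channel. PausePlayout() and ResumePlayout() toggle the actual state without | |
// touching the desired one. | |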
1745 bool WebRtcVoiceMediaChannel::SetPlayout(bool playout) { | |
1746 desired_playout_ = playout; | |
1747 return ChangePlayout(desired_playout_); | |
1748 } | |
1749 | |
1750 bool WebRtcVoiceMediaChannel::PausePlayout() { | |
1751 return ChangePlayout(false); | |
1752 } | |
1753 | |
1754 bool WebRtcVoiceMediaChannel::ResumePlayout() { | |
1755 return ChangePlayout(desired_playout_); | |
1756 } | |
1757 | |
1758 bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) { | |
1759 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1760 if (playout_ == playout) { | |
1761 return true; | |
1762 } | |
1763 | |
1764 for (const auto& ch : recv_streams_) { | |
1765 if (!SetPlayout(ch.second->channel(), playout)) { | |
1766 LOG(LS_ERROR) << "SetPlayout " << playout << " on channel " | |
1767 << ch.second->channel() << " failed"; | |
1768 return false; | |
1769 } | |
1770 } | |
1771 playout_ = playout; | |
1772 return true; | |
1773 } | |
1774 | |
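// Send control mirrors playout control: |desired_send_| tracks the requested | |
// state and ChangeSend() applies it to every send channel, re-applying the | |
// engine options whenever microphone sending is enabled. | |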
1775 bool WebRtcVoiceMediaChannel::SetSend(SendFlags send) { | |
1776 desired_send_ = send; | |
1777 if (!send_streams_.empty()) { | |
1778 return ChangeSend(desired_send_); | |
1779 } | |
1780 return true; | |
1781 } | |
1782 | |
1783 bool WebRtcVoiceMediaChannel::PauseSend() { | |
1784 return ChangeSend(SEND_NOTHING); | |
1785 } | |
1786 | |
1787 bool WebRtcVoiceMediaChannel::ResumeSend() { | |
1788 return ChangeSend(desired_send_); | |
1789 } | |
1790 | |
1791 bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) { | |
1792 if (send_ == send) { | |
1793 return true; | |
1794 } | |
1795 | |
1796 // Apply channel specific options when channel is enabled for sending. | |
1797 if (send == SEND_MICROPHONE) { | |
1798 engine()->ApplyOptions(options_); | |
1799 } | |
1800 | |
1801 // Change the settings on each send channel. | |
1802 for (const auto& ch : send_streams_) { | |
1803 if (!ChangeSend(ch.second->channel(), send)) { | |
1804 return false; | |
1805 } | |
1806 } | |
1807 | |
1808 send_ = send; | |
1809 return true; | |
1810 } | |
1811 | |
1812 bool WebRtcVoiceMediaChannel::ChangeSend(int channel, SendFlags send) { | |
1813 if (send == SEND_MICROPHONE) { | |
1814 if (engine()->voe()->base()->StartSend(channel) == -1) { | |
1815 LOG_RTCERR1(StartSend, channel); | |
1816 return false; | |
1817 } | |
1818 } else { // SEND_NOTHING | |
1819 RTC_DCHECK(send == SEND_NOTHING); | |
1820 if (engine()->voe()->base()->StopSend(channel) == -1) { | |
1821 LOG_RTCERR1(StopSend, channel); | |
1822 return false; | |
1823 } | |
1824 } | |
1825 | |
1826 return true; | |
1827 } | |
1828 | |
1829 bool WebRtcVoiceMediaChannel::SetAudioSend(uint32_t ssrc, | |
1830 bool enable, | |
1831 const AudioOptions* options, | |
1832 AudioRenderer* renderer) { | |
1833 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1834 // TODO(solenberg): The state change should be fully rolled back if any one of | |
1835 // these calls fail. | |
1836 if (!SetLocalRenderer(ssrc, renderer)) { | |
1837 return false; | |
1838 } | |
1839 if (!MuteStream(ssrc, !enable)) { | |
1840 return false; | |
1841 } | |
1842 if (enable && options) { | |
1843 return SetOptions(*options); | |
1844 } | |
1845 return true; | |
1846 } | |
1847 | |
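// Creates a VoE channel and registers this object as its external transport, | |
// so outgoing RTP/RTCP is routed back through this media channel. Returns the | |
// channel id, or -1 on failure (the channel is deleted if registration fails). | |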
1848 int WebRtcVoiceMediaChannel::CreateVoEChannel() { | |
1849 int id = engine()->CreateVoEChannel(); | |
1850 if (id == -1) { | |
1851 LOG_RTCERR0(CreateVoEChannel); | |
1852 return -1; | |
1853 } | |
1854 if (engine()->voe()->network()->RegisterExternalTransport(id, *this) == -1) { | |
1855 LOG_RTCERR2(RegisterExternalTransport, id, this); | |
1856 engine()->voe()->base()->DeleteChannel(id); | |
1857 return -1; | |
1858 } | |
1859 return id; | |
1860 } | |
1861 | |
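// Deregisters the external transport and deletes the given VoE channel. | |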
1862 bool WebRtcVoiceMediaChannel::DeleteVoEChannel(int channel) { | |
1863 if (engine()->voe()->network()->DeRegisterExternalTransport(channel) == -1) { | |
1864 LOG_RTCERR1(DeRegisterExternalTransport, channel); | |
1865 } | |
1866 if (engine()->voe()->base()->DeleteChannel(channel) == -1) { | |
1867 LOG_RTCERR1(DeleteChannel, channel); | |
1868 return false; | |
1869 } | |
1870 return true; | |
1871 } | |
1872 | |
1873 bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) { | |
1874 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1875 LOG(LS_INFO) << "AddSendStream: " << sp.ToString(); | |
1876 | |
1877 uint32_t ssrc = sp.first_ssrc(); | |
1878 RTC_DCHECK(0 != ssrc); | |
1879 | |
1880 if (GetSendChannelId(ssrc) != -1) { | |
1881 LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc; | |
1882 return false; | |
1883 } | |
1884 | |
1885 // Create a new channel for sending audio data. | |
1886 int channel = CreateVoEChannel(); | |
1887 if (channel == -1) { | |
1888 return false; | |
1889 } | |
1890 | |
1891 // Save the channel to send_streams_, so that RemoveSendStream() can still | |
1892 // delete the channel in case a failure happens below. | |
1893 webrtc::AudioTransport* audio_transport = | |
1894 engine()->voe()->base()->audio_transport(); | |
1895 send_streams_.insert(std::make_pair(ssrc, new WebRtcAudioSendStream( | |
1896 channel, audio_transport, ssrc, sp.cname, send_rtp_extensions_, call_))); | |
1897 | |
1898 // Set the current codecs to be used for the new channel. We need to do this | |
1899 // after adding the channel to send_streams_, because of how max bitrate is | |
1900 // currently being configured by SetSendCodec(). | |
1901 if (!send_codecs_.empty() && !SetSendCodecs(channel, send_codecs_)) { | |
1902 RemoveSendStream(ssrc); | |
1903 return false; | |
1904 } | |
1905 | |
1906 // At this point the channel's local SSRC has been updated. If the channel is | |
1907 // the first send channel, make sure that all the receive channels are updated | |
1908 // with the same SSRC in order to send receiver reports. | |
1909 if (send_streams_.size() == 1) { | |
1910 receiver_reports_ssrc_ = ssrc; | |
1911 for (const auto& stream : recv_streams_) { | |
1912 int recv_channel = stream.second->channel(); | |
1913 if (engine()->voe()->rtp()->SetLocalSSRC(recv_channel, ssrc) != 0) { | |
1914 LOG_RTCERR2(SetLocalSSRC, recv_channel, ssrc); | |
1915 return false; | |
1916 } | |
1917 engine()->voe()->base()->AssociateSendChannel(recv_channel, channel); | |
1918 LOG(LS_INFO) << "VoiceEngine channel #" << recv_channel | |
1919 << " is associated with channel #" << channel << "."; | |
1920 } | |
1921 } | |
1922 | |
1923 return ChangeSend(channel, desired_send_); | |
1924 } | |
1925 | |
1926 bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32_t ssrc) { | |
1927 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1928 LOG(LS_INFO) << "RemoveSendStream: " << ssrc; | |
1929 | |
1930 auto it = send_streams_.find(ssrc); | |
1931 if (it == send_streams_.end()) { | |
1932 LOG(LS_WARNING) << "Attempting to remove stream with ssrc " << ssrc | |
1933 << " which doesn't exist."; | |
1934 return false; | |
1935 } | |
1936 | |
1937 int channel = it->second->channel(); | |
1938 ChangeSend(channel, SEND_NOTHING); | |
1939 | |
1940 // Clean up and delete the send stream+channel. | |
1941 LOG(LS_INFO) << "Removing audio send stream " << ssrc | |
1942 << " with VoiceEngine channel #" << channel << "."; | |
1943 delete it->second; | |
1944 send_streams_.erase(it); | |
1945 if (!DeleteVoEChannel(channel)) { | |
1946 return false; | |
1947 } | |
1948 if (send_streams_.empty()) { | |
1949 ChangeSend(SEND_NOTHING); | |
1950 } | |
1951 return true; | |
1952 } | |
1953 | |
1954 bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) { | |
1955 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1956 LOG(LS_INFO) << "AddRecvStream: " << sp.ToString(); | |
1957 | |
1958 if (!ValidateStreamParams(sp)) { | |
1959 return false; | |
1960 } | |
1961 | |
1962 const uint32_t ssrc = sp.first_ssrc(); | |
1963 if (ssrc == 0) { | |
1964 LOG(LS_WARNING) << "AddRecvStream with ssrc==0 is not supported."; | |
1965 return false; | |
1966 } | |
1967 | |
1968 // Remove the default receive stream if one had been created with this ssrc; | |
1969 // we'll recreate it below. | |
1970 if (IsDefaultRecvStream(ssrc)) { | |
1971 RemoveRecvStream(ssrc); | |
1972 } | |
1973 | |
1974 if (GetReceiveChannelId(ssrc) != -1) { | |
1975 LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc; | |
1976 return false; | |
1977 } | |
1978 | |
1979 // Create a new channel for receiving audio data. | |
1980 const int channel = CreateVoEChannel(); | |
1981 if (channel == -1) { | |
1982 return false; | |
1983 } | |
1984 | |
1985 // Turn off all supported codecs. | |
1986 // TODO(solenberg): Remove once "no codecs" is the default state of a stream. | |
1987 for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) { | |
1988 voe_codec.pltype = -1; | |
1989 if (engine()->voe()->codec()->SetRecPayloadType(channel, voe_codec) == -1) { | |
1990 LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec)); | |
1991 DeleteVoEChannel(channel); | |
1992 return false; | |
1993 } | |
1994 } | |
1995 | |
1996 // Only enable those configured for this channel. | |
1997 for (const auto& codec : recv_codecs_) { | |
1998 webrtc::CodecInst voe_codec; | |
1999 if (WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) { | |
2000 voe_codec.pltype = codec.id; | |
2001 if (engine()->voe()->codec()->SetRecPayloadType( | |
2002 channel, voe_codec) == -1) { | |
2003 LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec)); | |
2004 DeleteVoEChannel(channel); | |
2005 return false; | |
2006 } | |
2007 } | |
2008 } | |
2009 | |
2010 const int send_channel = GetSendChannelId(receiver_reports_ssrc_); | |
2011 if (send_channel != -1) { | |
2012 // Associate the receive channel with the first send channel (so the receive | |
2013 // channel can obtain RTT from the send channel). | |
2014 engine()->voe()->base()->AssociateSendChannel(channel, send_channel); | |
2015 LOG(LS_INFO) << "VoiceEngine channel #" << channel | |
2016 << " is associated with channel #" << send_channel << "."; | |
2017 } | |
2018 | |
2019 recv_streams_.insert(std::make_pair(ssrc, new WebRtcAudioReceiveStream( | |
2020 channel, ssrc, receiver_reports_ssrc_, | |
2021 options_.combined_audio_video_bwe.value_or(false), sp.sync_label, | |
2022 recv_rtp_extensions_, call_))); | |
2023 | |
2024 SetNack(channel, nack_enabled_); | |
2025 SetPlayout(channel, playout_); | |
2026 | |
2027 return true; | |
2028 } | |
2029 | |
2030 bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32_t ssrc) { | |
2031 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2032 LOG(LS_INFO) << "RemoveRecvStream: " << ssrc; | |
2033 | |
2034 const auto it = recv_streams_.find(ssrc); | |
2035 if (it == recv_streams_.end()) { | |
2036 LOG(LS_WARNING) << "Attempting to remove stream with ssrc " << ssrc | |
2037 << " which doesn't exist."; | |
2038 return false; | |
2039 } | |
2040 | |
2041 // Deregister default channel, if that's the one being destroyed. | |
2042 if (IsDefaultRecvStream(ssrc)) { | |
2043 default_recv_ssrc_ = -1; | |
2044 } | |
2045 | |
2046 const int channel = it->second->channel(); | |
2047 | |
2048 // Clean up and delete the receive stream+channel. | |
2049 LOG(LS_INFO) << "Removing audio receive stream " << ssrc | |
2050 << " with VoiceEngine channel #" << channel << "."; | |
2051 it->second->SetRawAudioSink(nullptr); | |
2052 delete it->second; | |
2053 recv_streams_.erase(it); | |
2054 return DeleteVoEChannel(channel); | |
2055 } | |
2056 | |
2057 bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32_t ssrc, | |
2058 AudioRenderer* renderer) { | |
2059 auto it = send_streams_.find(ssrc); | |
2060 if (it == send_streams_.end()) { | |
2061 if (renderer) { | |
2062 // Return an error if trying to set a valid renderer with an invalid ssrc. | |
2063 LOG(LS_ERROR) << "SetLocalRenderer failed with ssrc "<< ssrc; | |
2064 return false; | |
2065 } | |
2066 | |
2067 // The channel has likely gone away; do nothing. | |
2068 return true; | |
2069 } | |
2070 | |
2071 if (renderer) { | |
2072 it->second->Start(renderer); | |
2073 } else { | |
2074 it->second->Stop(); | |
2075 } | |
2076 | |
2077 return true; | |
2078 } | |
2079 | |
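// Returns the (ssrc, output level) pairs of all receive streams that are | |
// currently producing audible output (level > 0). | |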
2080 bool WebRtcVoiceMediaChannel::GetActiveStreams( | |
2081 AudioInfo::StreamList* actives) { | |
2082 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2083 actives->clear(); | |
2084 for (const auto& ch : recv_streams_) { | |
2085 int level = GetOutputLevel(ch.second->channel()); | |
2086 if (level > 0) { | |
2087 actives->push_back(std::make_pair(ch.first, level)); | |
2088 } | |
2089 } | |
2090 return true; | |
2091 } | |
2092 | |
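// Returns the highest speech output level across all receive streams. | |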
2093 int WebRtcVoiceMediaChannel::GetOutputLevel() { | |
2094 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2095 int highest = 0; | |
2096 for (const auto& ch : recv_streams_) { | |
2097 highest = std::max(GetOutputLevel(ch.second->channel()), highest); | |
2098 } | |
2099 return highest; | |
2100 } | |
2101 | |
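// Returns the time since the last detected typing event, converted from the | |
// seconds reported by VoE to milliseconds, or -1 on error. | |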
2102 int WebRtcVoiceMediaChannel::GetTimeSinceLastTyping() { | |
2103 int ret; | |
2104 if (engine()->voe()->processing()->TimeSinceLastTyping(ret) == -1) { | |
2105 // In case of error, log the info and continue | |
2106 LOG_RTCERR0(TimeSinceLastTyping); | |
2107 ret = -1; | |
2108 } else { | |
2109 ret *= 1000; // We return ms, webrtc returns seconds. | |
2110 } | |
2111 return ret; | |
2112 } | |
2113 | |
2114 void WebRtcVoiceMediaChannel::SetTypingDetectionParameters(int time_window, | |
2115 int cost_per_typing, int reporting_threshold, int penalty_decay, | |
2116 int type_event_delay) { | |
2117 if (engine()->voe()->processing()->SetTypingDetectionParameters( | |
2118 time_window, cost_per_typing, | |
2119 reporting_threshold, penalty_decay, type_event_delay) == -1) { | |
2120 // In case of error, log the info and continue | |
2121 LOG_RTCERR5(SetTypingDetectionParameters, time_window, | |
2122 cost_per_typing, reporting_threshold, penalty_decay, | |
2123 type_event_delay); | |
2124 } | |
2125 } | |
2126 | |
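// Scales the playout volume of the receive channel for |ssrc|. An ssrc of 0 | |
// addresses the default receive stream; the value is cached in | |
// |default_recv_volume_| so it can be applied when that stream is created. | |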
2127 bool WebRtcVoiceMediaChannel::SetOutputVolume(uint32_t ssrc, double volume) { | |
2128 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2129 if (ssrc == 0) { | |
2130 default_recv_volume_ = volume; | |
2131 if (default_recv_ssrc_ == -1) { | |
2132 return true; | |
2133 } | |
2134 ssrc = static_cast<uint32_t>(default_recv_ssrc_); | |
2135 } | |
2136 int ch_id = GetReceiveChannelId(ssrc); | |
2137 if (ch_id < 0) { | |
2138 LOG(LS_WARNING) << "Cannot find channel for ssrc: " << ssrc; | |
2139 return false; | |
2140 } | |
2141 | |
2142 if (-1 == engine()->voe()->volume()->SetChannelOutputVolumeScaling(ch_id, | |
2143 volume)) { | |
2144 LOG_RTCERR2(SetChannelOutputVolumeScaling, ch_id, volume); | |
2145 return false; | |
2146 } | |
2147 LOG(LS_INFO) << "SetOutputVolume to " << volume | |
2148 << " for channel " << ch_id << " and ssrc " << ssrc; | |
2149 return true; | |
2150 } | |
2151 | |
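// DTMF events can only be sent if a telephone-event payload type was | |
// negotiated in SetSendCodecs(). InsertDtmf() additionally validates the | |
// event code and duration before forwarding the event to the send stream. | |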
2152 bool WebRtcVoiceMediaChannel::CanInsertDtmf() { | |
2153 return dtmf_payload_type_ ? true : false; | |
2154 } | |
2155 | |
2156 bool WebRtcVoiceMediaChannel::InsertDtmf(uint32_t ssrc, int event, | |
2157 int duration) { | |
2158 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2159 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::InsertDtmf"; | |
2160 if (!dtmf_payload_type_) { | |
2161 return false; | |
2162 } | |
2163 | |
2164 // Figure out which WebRtcAudioSendStream to send the event on. | |
2165 auto it = ssrc != 0 ? send_streams_.find(ssrc) : send_streams_.begin(); | |
2166 if (it == send_streams_.end()) { | |
2167 LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use."; | |
2168 return false; | |
2169 } | |
2170 if (event < kMinTelephoneEventCode || | |
2171 event > kMaxTelephoneEventCode) { | |
2172 LOG(LS_WARNING) << "DTMF event code " << event << " out of range."; | |
2173 return false; | |
2174 } | |
2175 if (duration < kMinTelephoneEventDuration || | |
2176 duration > kMaxTelephoneEventDuration) { | |
2177 LOG(LS_WARNING) << "DTMF event duration " << duration << " out of range."; | |
2178 return false; | |
2179 } | |
2180 return it->second->SendTelephoneEvent(*dtmf_payload_type_, event, duration); | |
2181 } | |
2182 | |
2183 void WebRtcVoiceMediaChannel::OnPacketReceived( | |
2184 rtc::Buffer* packet, const rtc::PacketTime& packet_time) { | |
2185 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2186 | |
2187 uint32_t ssrc = 0; | |
2188 if (!GetRtpSsrc(packet->data(), packet->size(), &ssrc)) { | |
2189 return; | |
2190 } | |
2191 | |
2192 // If we don't have a default channel, and the SSRC is unknown, create a | |
2193 // default channel. | |
2194 if (default_recv_ssrc_ == -1 && GetReceiveChannelId(ssrc) == -1) { | |
2195 StreamParams sp; | |
2196 sp.ssrcs.push_back(ssrc); | |
2197 LOG(LS_INFO) << "Creating default receive stream for SSRC=" << ssrc << "."; | |
2198 if (!AddRecvStream(sp)) { | |
2199 LOG(LS_WARNING) << "Could not create default receive stream."; | |
2200 return; | |
2201 } | |
2202 default_recv_ssrc_ = ssrc; | |
2203 SetOutputVolume(default_recv_ssrc_, default_recv_volume_); | |
2204 if (default_sink_) { | |
2205 rtc::scoped_ptr<webrtc::AudioSinkInterface> proxy_sink( | |
2206 new ProxySink(default_sink_.get())); | |
2207 SetRawAudioSink(default_recv_ssrc_, std::move(proxy_sink)); | |
2208 } | |
2209 } | |
2210 | |
2211 // Forward packet to Call. If the SSRC is unknown we'll return after this. | |
2212 const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp, | |
2213 packet_time.not_before); | |
2214 webrtc::PacketReceiver::DeliveryStatus delivery_result = | |
2215 call_->Receiver()->DeliverPacket(webrtc::MediaType::AUDIO, | |
2216 reinterpret_cast<const uint8_t*>(packet->data()), packet->size(), | |
2217 webrtc_packet_time); | |
2218 if (webrtc::PacketReceiver::DELIVERY_OK != delivery_result) { | |
2219 // If the SSRC is unknown here, route it to the default channel, if we have | |
2220 // one. See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5208 | |
2221 if (default_recv_ssrc_ == -1) { | |
2222 return; | |
2223 } else { | |
2224 ssrc = default_recv_ssrc_; | |
2225 } | |
2226 } | |
2227 | |
2228 // Find the channel to send this packet to. It must exist since webrtc::Call | |
2229 // was able to demux the packet. | |
2230 int channel = GetReceiveChannelId(ssrc); | |
2231 RTC_DCHECK(channel != -1); | |
2232 | |
2233 // Pass it off to the decoder. | |
2234 engine()->voe()->network()->ReceivedRTPPacket( | |
2235 channel, packet->data(), packet->size(), webrtc_packet_time); | |
2236 } | |
2237 | |
2238 void WebRtcVoiceMediaChannel::OnRtcpReceived( | |
2239 rtc::Buffer* packet, const rtc::PacketTime& packet_time) { | |
2240 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2241 | |
2242 // Forward packet to Call as well. | |
2243 const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp, | |
2244 packet_time.not_before); | |
2245 call_->Receiver()->DeliverPacket(webrtc::MediaType::AUDIO, | |
2246 reinterpret_cast<const uint8_t*>(packet->data()), packet->size(), | |
2247 webrtc_packet_time); | |
2248 | |
2249 // Sending channels need all RTCP packets with feedback information. | |
2250 // Even sender reports can contain attached report blocks. | |
2251 // Receiving channels need sender reports in order to create | |
2252 // correct receiver reports. | |
2253 int type = 0; | |
2254 if (!GetRtcpType(packet->data(), packet->size(), &type)) { | |
2255 LOG(LS_WARNING) << "Failed to parse type from received RTCP packet"; | |
2256 return; | |
2257 } | |
2258 | |
2259 // If it is a sender report, find the receive channel that is listening. | |
2260 if (type == kRtcpTypeSR) { | |
2261 uint32_t ssrc = 0; | |
2262 if (!GetRtcpSsrc(packet->data(), packet->size(), &ssrc)) { | |
2263 return; | |
2264 } | |
2265 int recv_channel_id = GetReceiveChannelId(ssrc); | |
2266 if (recv_channel_id != -1) { | |
2267 engine()->voe()->network()->ReceivedRTCPPacket( | |
2268 recv_channel_id, packet->data(), packet->size()); | |
2269 } | |
2270 } | |
2271 | |
2272 // An SR may contain an RR, and any RR entry may correspond to any one of | |
2273 // the send channels. So all RTCP packets must be forwarded to all send | |
2274 // channels. VoE will filter out RR internally. | |
2275 for (const auto& ch : send_streams_) { | |
2276 engine()->voe()->network()->ReceivedRTCPPacket( | |
2277 ch.second->channel(), packet->data(), packet->size()); | |
2278 } | |
2279 } | |
2280 | |
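// Mutes or unmutes the microphone input of the send channel for |ssrc|, and | |
// tells the audio processing module whether all send channels are now muted. | |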
2281 bool WebRtcVoiceMediaChannel::MuteStream(uint32_t ssrc, bool muted) { | |
2282 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2283 int channel = GetSendChannelId(ssrc); | |
2284 if (channel == -1) { | |
2285 LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use."; | |
2286 return false; | |
2287 } | |
2288 if (engine()->voe()->volume()->SetInputMute(channel, muted) == -1) { | |
2289 LOG_RTCERR2(SetInputMute, channel, muted); | |
2290 return false; | |
2291 } | |
2292 // We set the AGC to the mute state only when all the channels are muted. | |
2293 // This implementation is not ideal; instead we should signal the AGC when | |
2294 // the mic channel is muted/unmuted. We can't do it today because there | |
2295 // is no good way to know which stream maps to the mic channel. | |
2296 bool all_muted = muted; | |
2297 for (const auto& ch : send_streams_) { | |
2298 if (!all_muted) { | |
2299 break; | |
2300 } | |
2301 if (engine()->voe()->volume()->GetInputMute(ch.second->channel(), | |
2302 all_muted)) { | |
2303 LOG_RTCERR1(GetInputMute, ch.second->channel()); | |
2304 return false; | |
2305 } | |
2306 } | |
2307 | |
2308 webrtc::AudioProcessing* ap = engine()->voe()->base()->audio_processing(); | |
2309 if (ap) { | |
2310 ap->set_output_will_be_muted(all_muted); | |
2311 } | |
2312 return true; | |
2313 } | |
2314 | |
2315 // TODO(minyue): SetMaxSendBandwidth() may be renamed to | |
2316 // SetMaxSendBitrate() in the future. | |
2317 bool WebRtcVoiceMediaChannel::SetMaxSendBandwidth(int bps) { | |
2318 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetMaxSendBandwidth."; | |
2319 return SetSendBitrateInternal(bps); | |
2320 } | |
2321 | |
2322 bool WebRtcVoiceMediaChannel::SetSendBitrateInternal(int bps) { | |
2323 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSendBitrateInternal."; | |
2324 | |
2325 send_bitrate_setting_ = true; | |
2326 send_bitrate_bps_ = bps; | |
2327 | |
2328 if (!send_codec_) { | |
2329 LOG(LS_INFO) << "The send codec has not been set up yet. " | |
2330 << "The send bitrate setting will be applied later."; | |
2331 return true; | |
2332 } | |
2333 | |
2334 // Bitrate is auto by default. | |
2335 // TODO(bemasc): Fix this so that if SetMaxSendBandwidth(50) is followed by | |
2336 // SetMaxSendBandwidth(0), the second call removes the previous limit. | |
2337 if (bps <= 0) | |
2338 return true; | |
2339 | |
2340 webrtc::CodecInst codec = *send_codec_; | |
2341 bool is_multi_rate = WebRtcVoiceCodecs::IsCodecMultiRate(codec); | |
2342 | |
2343 if (is_multi_rate) { | |
2344 // If codec is multi-rate then just set the bitrate. | |
2345 codec.rate = bps; | |
2346 for (const auto& ch : send_streams_) { | |
2347 if (!SetSendCodec(ch.second->channel(), codec)) { | |
2348 LOG(LS_INFO) << "Failed to set codec " << codec.plname | |
2349 << " to bitrate " << bps << " bps."; | |
2350 return false; | |
2351 } | |
2352 } | |
2353 return true; | |
2354 } else { | |
2355 // If the codec is not multi-rate and |bps| is less than the fixed bitrate, | |
2356 // then fail. If the codec is not multi-rate and |bps| is greater than or | |
2357 // equal to the fixed bitrate, then ignore the request. | |
2358 if (bps < codec.rate) { | |
2359 LOG(LS_INFO) << "Failed to set codec " << codec.plname | |
2360 << " to bitrate " << bps << " bps" | |
2361 << ", requires at least " << codec.rate << " bps."; | |
2362 return false; | |
2363 } | |
2364 return true; | |
2365 } | |
2366 } | |
2367 | |
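// Copies per-sender and per-receiver statistics from the underlying | |
// webrtc::AudioSendStream and webrtc::AudioReceiveStream objects into |info|. | |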
2368 bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) { | |
2369 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2370 RTC_DCHECK(info); | |
2371 | |
2372 // Get SSRC and stats for each sender. | |
2373 RTC_DCHECK(info->senders.size() == 0); | |
2374 for (const auto& stream : send_streams_) { | |
2375 webrtc::AudioSendStream::Stats stats = stream.second->GetStats(); | |
2376 VoiceSenderInfo sinfo; | |
2377 sinfo.add_ssrc(stats.local_ssrc); | |
2378 sinfo.bytes_sent = stats.bytes_sent; | |
2379 sinfo.packets_sent = stats.packets_sent; | |
2380 sinfo.packets_lost = stats.packets_lost; | |
2381 sinfo.fraction_lost = stats.fraction_lost; | |
2382 sinfo.codec_name = stats.codec_name; | |
2383 sinfo.ext_seqnum = stats.ext_seqnum; | |
2384 sinfo.jitter_ms = stats.jitter_ms; | |
2385 sinfo.rtt_ms = stats.rtt_ms; | |
2386 sinfo.audio_level = stats.audio_level; | |
2387 sinfo.aec_quality_min = stats.aec_quality_min; | |
2388 sinfo.echo_delay_median_ms = stats.echo_delay_median_ms; | |
2389 sinfo.echo_delay_std_ms = stats.echo_delay_std_ms; | |
2390 sinfo.echo_return_loss = stats.echo_return_loss; | |
2391 sinfo.echo_return_loss_enhancement = stats.echo_return_loss_enhancement; | |
2392 sinfo.typing_noise_detected = | |
2393 (send_ == SEND_NOTHING ? false : stats.typing_noise_detected); | |
2394 info->senders.push_back(sinfo); | |
2395 } | |
2396 | |
2397 // Get SSRC and stats for each receiver. | |
2398 RTC_DCHECK(info->receivers.size() == 0); | |
2399 for (const auto& stream : recv_streams_) { | |
2400 webrtc::AudioReceiveStream::Stats stats = stream.second->GetStats(); | |
2401 VoiceReceiverInfo rinfo; | |
2402 rinfo.add_ssrc(stats.remote_ssrc); | |
2403 rinfo.bytes_rcvd = stats.bytes_rcvd; | |
2404 rinfo.packets_rcvd = stats.packets_rcvd; | |
2405 rinfo.packets_lost = stats.packets_lost; | |
2406 rinfo.fraction_lost = stats.fraction_lost; | |
2407 rinfo.codec_name = stats.codec_name; | |
2408 rinfo.ext_seqnum = stats.ext_seqnum; | |
2409 rinfo.jitter_ms = stats.jitter_ms; | |
2410 rinfo.jitter_buffer_ms = stats.jitter_buffer_ms; | |
2411 rinfo.jitter_buffer_preferred_ms = stats.jitter_buffer_preferred_ms; | |
2412 rinfo.delay_estimate_ms = stats.delay_estimate_ms; | |
2413 rinfo.audio_level = stats.audio_level; | |
2414 rinfo.expand_rate = stats.expand_rate; | |
2415 rinfo.speech_expand_rate = stats.speech_expand_rate; | |
2416 rinfo.secondary_decoded_rate = stats.secondary_decoded_rate; | |
2417 rinfo.accelerate_rate = stats.accelerate_rate; | |
2418 rinfo.preemptive_expand_rate = stats.preemptive_expand_rate; | |
2419 rinfo.decoding_calls_to_silence_generator = | |
2420 stats.decoding_calls_to_silence_generator; | |
2421 rinfo.decoding_calls_to_neteq = stats.decoding_calls_to_neteq; | |
2422 rinfo.decoding_normal = stats.decoding_normal; | |
2423 rinfo.decoding_plc = stats.decoding_plc; | |
2424 rinfo.decoding_cng = stats.decoding_cng; | |
2425 rinfo.decoding_plc_cng = stats.decoding_plc_cng; | |
2426 rinfo.capture_start_ntp_time_ms = stats.capture_start_ntp_time_ms; | |
2427 info->receivers.push_back(rinfo); | |
2428 } | |
2429 | |
2430 return true; | |
2431 } | |
2432 | |
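// Attaches |sink| to the receive stream for |ssrc|. An ssrc of 0 addresses | |
// the default receive stream: the sink is stored in |default_sink_| and a | |
// proxy is attached to whichever stream currently acts as the default. | |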
2433 void WebRtcVoiceMediaChannel::SetRawAudioSink( | |
2434 uint32_t ssrc, | |
2435 rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) { | |
2436 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2437 LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::SetRawAudioSink: ssrc:" << ssrc | |
2438 << " " << (sink ? "(ptr)" : "NULL"); | |
2439 if (ssrc == 0) { | |
2440 if (default_recv_ssrc_ != -1) { | |
2441 rtc::scoped_ptr<webrtc::AudioSinkInterface> proxy_sink( | |
2442 sink ? new ProxySink(sink.get()) : nullptr); | |
2443 SetRawAudioSink(default_recv_ssrc_, std::move(proxy_sink)); | |
2444 } | |
2445 default_sink_ = std::move(sink); | |
2446 return; | |
2447 } | |
2448 const auto it = recv_streams_.find(ssrc); | |
2449 if (it == recv_streams_.end()) { | |
2450 LOG(LS_WARNING) << "SetRawAudioSink: no recv stream " << ssrc; | |
2451 return; | |
2452 } | |
2453 it->second->SetRawAudioSink(std::move(sink)); | |
2454 } | |
2455 | |
2456 int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) { | |
2457 unsigned int ulevel = 0; | |
2458 int ret = engine()->voe()->volume()->GetSpeechOutputLevel(channel, ulevel); | |
2459 return (ret == 0) ? static_cast<int>(ulevel) : -1; | |
2460 } | |
2461 | |
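// The two lookups below map an SSRC to the underlying VoE channel id, | |
// returning -1 when no matching stream exists. | |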
2462 int WebRtcVoiceMediaChannel::GetReceiveChannelId(uint32_t ssrc) const { | |
2463 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2464 const auto it = recv_streams_.find(ssrc); | |
2465 if (it != recv_streams_.end()) { | |
2466 return it->second->channel(); | |
2467 } | |
2468 return -1; | |
2469 } | |
2470 | |
2471 int WebRtcVoiceMediaChannel::GetSendChannelId(uint32_t ssrc) const { | |
2472 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2473 const auto it = send_streams_.find(ssrc); | |
2474 if (it != send_streams_.end()) { | |
2475 return it->second->channel(); | |
2476 } | |
2477 return -1; | |
2478 } | |
2479 | |
2480 bool WebRtcVoiceMediaChannel::GetRedSendCodec(const AudioCodec& red_codec, | |
2481 const std::vector<AudioCodec>& all_codecs, webrtc::CodecInst* send_codec) { | |
2482 // Get the RED encodings from the parameter with no name. This may | |
2483 // change based on what is discussed on the Jingle list. | |
2484 // The encoding parameter is of the form "a/b"; we only support the case | |
2485 // where a == b. Verify this and parse out the value into red_pt. | |
2486 // If the parameter value is absent (as it will be until we wire up the | |
2487 // signaling of this message), use the second codec specified (i.e. the | |
2488 // one after "red") as the encoding parameter. | |
2489 int red_pt = -1; | |
2490 std::string red_params; | |
2491 CodecParameterMap::const_iterator it = red_codec.params.find(""); | |
2492 if (it != red_codec.params.end()) { | |
2493 red_params = it->second; | |
2494 std::vector<std::string> red_pts; | |
2495 if (rtc::split(red_params, '/', &red_pts) != 2 || | |
2496 red_pts[0] != red_pts[1] || | |
2497 !rtc::FromString(red_pts[0], &red_pt)) { | |
2498 LOG(LS_WARNING) << "RED params " << red_params << " not supported."; | |
2499 return false; | |
2500 } | |
2501 } else if (red_codec.params.empty()) { | |
2502 LOG(LS_WARNING) << "RED params not present, using defaults"; | |
2503 if (all_codecs.size() > 1) { | |
2504 red_pt = all_codecs[1].id; | |
2505 } | |
2506 } | |
2507 | |
2508 // Try to find red_pt in |all_codecs|. | |
2509 for (const AudioCodec& codec : all_codecs) { | |
2510 if (codec.id == red_pt) { | |
2511 // If we find the right codec, that will be the codec we pass to | |
2512 // SetSendCodec, with the desired payload type. | |
2513 if (WebRtcVoiceEngine::ToCodecInst(codec, send_codec)) { | |
2514 return true; | |
2515 } else { | |
2516 break; | |
2517 } | |
2518 } | |
2519 } | |
2520 LOG(LS_WARNING) << "RED params " << red_params << " are invalid."; | |
2521 return false; | |
2522 } | |
2523 | |
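// Starts or stops playout on a single VoE channel. Only a failure to start | |
// is treated as an error. | |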
2524 bool WebRtcVoiceMediaChannel::SetPlayout(int channel, bool playout) { | |
2525 if (playout) { | |
2526 LOG(LS_INFO) << "Starting playout for channel #" << channel; | |
2527 if (engine()->voe()->base()->StartPlayout(channel) == -1) { | |
2528 LOG_RTCERR1(StartPlayout, channel); | |
2529 return false; | |
2530 } | |
2531 } else { | |
2532 LOG(LS_INFO) << "Stopping playout for channel #" << channel; | |
2533 engine()->voe()->base()->StopPlayout(channel); | |
2534 } | |
2535 return true; | |
2536 } | |
2537 } // namespace cricket | |
2538 | |
2539 #endif // HAVE_WEBRTC_VOICE | |