OLD | NEW |
| (Empty) |
1 /* | |
2 * libjingle | |
3 * Copyright 2004 Google Inc. | |
4 * | |
5 * Redistribution and use in source and binary forms, with or without | |
6 * modification, are permitted provided that the following conditions are met: | |
7 * | |
8 * 1. Redistributions of source code must retain the above copyright notice, | |
9 * this list of conditions and the following disclaimer. | |
10 * 2. Redistributions in binary form must reproduce the above copyright notice, | |
11 * this list of conditions and the following disclaimer in the documentation | |
12 * and/or other materials provided with the distribution. | |
13 * 3. The name of the author may not be used to endorse or promote products | |
14 * derived from this software without specific prior written permission. | |
15 * | |
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED | |
17 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | |
18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO | |
19 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; | |
22 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, | |
23 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR | |
24 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | |
25 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
26 */ | |
27 | |
28 #ifdef HAVE_CONFIG_H | |
29 #include <config.h> | |
30 #endif | |
31 | |
32 #ifdef HAVE_WEBRTC_VOICE | |
33 | |
34 #include "talk/media/webrtc/webrtcvoiceengine.h" | |
35 | |
36 #include <algorithm> | |
37 #include <cstdio> | |
38 #include <string> | |
39 #include <vector> | |
40 | |
41 #include "talk/media/base/audioframe.h" | |
42 #include "talk/media/base/audiorenderer.h" | |
43 #include "talk/media/base/constants.h" | |
44 #include "talk/media/base/streamparams.h" | |
45 #include "talk/media/webrtc/webrtcmediaengine.h" | |
46 #include "talk/media/webrtc/webrtcvoe.h" | |
47 #include "webrtc/audio/audio_sink.h" | |
48 #include "webrtc/base/arraysize.h" | |
49 #include "webrtc/base/base64.h" | |
50 #include "webrtc/base/byteorder.h" | |
51 #include "webrtc/base/common.h" | |
52 #include "webrtc/base/helpers.h" | |
53 #include "webrtc/base/logging.h" | |
54 #include "webrtc/base/stringencode.h" | |
55 #include "webrtc/base/stringutils.h" | |
56 #include "webrtc/call/rtc_event_log.h" | |
57 #include "webrtc/common.h" | |
58 #include "webrtc/modules/audio_coding/acm2/rent_a_codec.h" | |
59 #include "webrtc/modules/audio_processing/include/audio_processing.h" | |
60 #include "webrtc/system_wrappers/include/field_trial.h" | |
61 #include "webrtc/system_wrappers/include/trace.h" | |
62 | |
63 namespace cricket { | |
64 namespace { | |
65 | |
66 const int kDefaultTraceFilter = webrtc::kTraceNone | webrtc::kTraceTerseInfo | | |
67 webrtc::kTraceWarning | webrtc::kTraceError | | |
68 webrtc::kTraceCritical; | |
69 const int kElevatedTraceFilter = kDefaultTraceFilter | webrtc::kTraceStateInfo | | |
70 webrtc::kTraceInfo; | |
71 | |
72 // On Windows Vista and newer, Microsoft introduced the concept of "Default | |
73 // Communications Device". This means that there are two types of default | |
74 // devices (old Wave Audio style default and Default Communications Device). | |
75 // | |
76 // On Windows systems which only support Wave Audio style default, uses either | |
77 // -1 or 0 to select the default device. | |
78 #ifdef WIN32 | |
79 const int kDefaultAudioDeviceId = -1; | |
80 #else | |
81 const int kDefaultAudioDeviceId = 0; | |
82 #endif | |
83 | |
84 // Parameter used for NACK. | |
85 // This value is equivalent to 5 seconds of audio data at 20 ms per packet. | |
86 const int kNackMaxPackets = 250; | |
87 | |
88 // Codec parameters for Opus. | |
89 // draft-spittka-payload-rtp-opus-03 | |
90 | |
91 // Recommended bitrates: | |
92 // 8-12 kb/s for NB speech, | |
93 // 16-20 kb/s for WB speech, | |
94 // 28-40 kb/s for FB speech, | |
95 // 48-64 kb/s for FB mono music, and | |
96 // 64-128 kb/s for FB stereo music. | |
97 // The current implementation applies the following values to mono signals, | |
98 // and multiplies them by 2 for stereo. | |
99 const int kOpusBitrateNb = 12000; | |
100 const int kOpusBitrateWb = 20000; | |
101 const int kOpusBitrateFb = 32000; | |
102 | |
103 // Opus bitrate should be in the range between 6000 and 510000. | |
104 const int kOpusMinBitrate = 6000; | |
105 const int kOpusMaxBitrate = 510000; | |
106 | |
107 // Default audio dscp value. | |
108 // See http://tools.ietf.org/html/rfc2474 for details. | |
109 // See also http://tools.ietf.org/html/draft-jennings-rtcweb-qos-00 | |
110 const rtc::DiffServCodePoint kAudioDscpValue = rtc::DSCP_EF; | |
111 | |
112 // Ensure we open the file in a writeable path on ChromeOS and Android. This | |
113 // workaround can be removed when it's possible to specify a filename for audio | |
114 // option based AEC dumps. | |
115 // | |
116 // TODO(grunell): Use a string in the options instead of hardcoding it here | |
117 // and let the embedder choose the filename (crbug.com/264223). | |
118 // | |
119 // NOTE(ajm): Don't use hardcoded paths on platforms not explicitly specified | |
120 // below. | |
121 #if defined(CHROMEOS) | |
122 const char kAecDumpByAudioOptionFilename[] = "/tmp/audio.aecdump"; | |
123 #elif defined(ANDROID) | |
124 const char kAecDumpByAudioOptionFilename[] = "/sdcard/audio.aecdump"; | |
125 #else | |
126 const char kAecDumpByAudioOptionFilename[] = "audio.aecdump"; | |
127 #endif | |
128 | |
129 // Constants from voice_engine_defines.h. | |
130 const int kMinTelephoneEventCode = 0; // RFC4733 (Section 2.3.1) | |
131 const int kMaxTelephoneEventCode = 255; | |
132 const int kMinTelephoneEventDuration = 100; | |
133 const int kMaxTelephoneEventDuration = 60000; // Actual limit is 2^16 | |
134 | |
135 class ProxySink : public webrtc::AudioSinkInterface { | |
136 public: | |
137 ProxySink(AudioSinkInterface* sink) : sink_(sink) { RTC_DCHECK(sink); } | |
138 | |
139 void OnData(const Data& audio) override { sink_->OnData(audio); } | |
140 | |
141 private: | |
142 webrtc::AudioSinkInterface* sink_; | |
143 }; | |
144 | |
145 bool ValidateStreamParams(const StreamParams& sp) { | |
146 if (sp.ssrcs.empty()) { | |
147 LOG(LS_ERROR) << "No SSRCs in stream parameters: " << sp.ToString(); | |
148 return false; | |
149 } | |
150 if (sp.ssrcs.size() > 1) { | |
151 LOG(LS_ERROR) << "Multiple SSRCs in stream parameters: " << sp.ToString(); | |
152 return false; | |
153 } | |
154 return true; | |
155 } | |
156 | |
157 // Dumps an AudioCodec in RFC 2327-ish format. | |
158 std::string ToString(const AudioCodec& codec) { | |
159 std::stringstream ss; | |
160 ss << codec.name << "/" << codec.clockrate << "/" << codec.channels | |
161 << " (" << codec.id << ")"; | |
162 return ss.str(); | |
163 } | |
164 | |
165 std::string ToString(const webrtc::CodecInst& codec) { | |
166 std::stringstream ss; | |
167 ss << codec.plname << "/" << codec.plfreq << "/" << codec.channels | |
168 << " (" << codec.pltype << ")"; | |
169 return ss.str(); | |
170 } | |
171 | |
172 bool IsCodec(const AudioCodec& codec, const char* ref_name) { | |
173 return (_stricmp(codec.name.c_str(), ref_name) == 0); | |
174 } | |
175 | |
176 bool IsCodec(const webrtc::CodecInst& codec, const char* ref_name) { | |
177 return (_stricmp(codec.plname, ref_name) == 0); | |
178 } | |
179 | |
180 bool FindCodec(const std::vector<AudioCodec>& codecs, | |
181 const AudioCodec& codec, | |
182 AudioCodec* found_codec) { | |
183 for (const AudioCodec& c : codecs) { | |
184 if (c.Matches(codec)) { | |
185 if (found_codec != NULL) { | |
186 *found_codec = c; | |
187 } | |
188 return true; | |
189 } | |
190 } | |
191 return false; | |
192 } | |
193 | |
194 bool VerifyUniquePayloadTypes(const std::vector<AudioCodec>& codecs) { | |
195 if (codecs.empty()) { | |
196 return true; | |
197 } | |
198 std::vector<int> payload_types; | |
199 for (const AudioCodec& codec : codecs) { | |
200 payload_types.push_back(codec.id); | |
201 } | |
202 std::sort(payload_types.begin(), payload_types.end()); | |
203 auto it = std::unique(payload_types.begin(), payload_types.end()); | |
204 return it == payload_types.end(); | |
205 } | |
206 | |
207 // Return true if codec.params[feature] == "1", false otherwise. | |
208 bool IsCodecFeatureEnabled(const AudioCodec& codec, const char* feature) { | |
209 int value; | |
210 return codec.GetParam(feature, &value) && value == 1; | |
211 } | |
212 | |
213 // Use params[kCodecParamMaxAverageBitrate] if it is defined, use codec.bitrate | |
214 // otherwise. If the value (either from params or codec.bitrate) <=0, use the | |
215 // default configuration. If the value is beyond feasible bit rate of Opus, | |
216 // clamp it. Returns the Opus bit rate for operation. | |
217 int GetOpusBitrate(const AudioCodec& codec, int max_playback_rate) { | |
218 int bitrate = 0; | |
219 bool use_param = true; | |
220 if (!codec.GetParam(kCodecParamMaxAverageBitrate, &bitrate)) { | |
221 bitrate = codec.bitrate; | |
222 use_param = false; | |
223 } | |
224 if (bitrate <= 0) { | |
225 if (max_playback_rate <= 8000) { | |
226 bitrate = kOpusBitrateNb; | |
227 } else if (max_playback_rate <= 16000) { | |
228 bitrate = kOpusBitrateWb; | |
229 } else { | |
230 bitrate = kOpusBitrateFb; | |
231 } | |
232 | |
233 if (IsCodecFeatureEnabled(codec, kCodecParamStereo)) { | |
234 bitrate *= 2; | |
235 } | |
236 } else if (bitrate < kOpusMinBitrate || bitrate > kOpusMaxBitrate) { | |
237 bitrate = (bitrate < kOpusMinBitrate) ? kOpusMinBitrate : kOpusMaxBitrate; | |
238 std::string rate_source = | |
239 use_param ? "Codec parameter \"maxaveragebitrate\"" : | |
240 "Supplied Opus bitrate"; | |
241 LOG(LS_WARNING) << rate_source | |
242 << " is invalid and is replaced by: " | |
243 << bitrate; | |
244 } | |
245 return bitrate; | |
246 } | |
247 | |
248 // Returns kOpusDefaultPlaybackRate if params[kCodecParamMaxPlaybackRate] is not | |
249 // defined. Returns the value of params[kCodecParamMaxPlaybackRate] otherwise. | |
250 int GetOpusMaxPlaybackRate(const AudioCodec& codec) { | |
251 int value; | |
252 if (codec.GetParam(kCodecParamMaxPlaybackRate, &value)) { | |
253 return value; | |
254 } | |
255 return kOpusDefaultMaxPlaybackRate; | |
256 } | |
257 | |
258 void GetOpusConfig(const AudioCodec& codec, webrtc::CodecInst* voe_codec, | |
259 bool* enable_codec_fec, int* max_playback_rate, | |
260 bool* enable_codec_dtx) { | |
261 *enable_codec_fec = IsCodecFeatureEnabled(codec, kCodecParamUseInbandFec); | |
262 *enable_codec_dtx = IsCodecFeatureEnabled(codec, kCodecParamUseDtx); | |
263 *max_playback_rate = GetOpusMaxPlaybackRate(codec); | |
264 | |
265 // If OPUS, change what we send according to the "stereo" codec | |
266 // parameter, and not the "channels" parameter. We set | |
267 // voe_codec.channels to 2 if "stereo=1" and 1 otherwise. If | |
268 // the bitrate is not specified, i.e. is <= zero, we set it to the | |
269 // appropriate default value for mono or stereo Opus. | |
270 | |
271 voe_codec->channels = IsCodecFeatureEnabled(codec, kCodecParamStereo) ? 2 : 1; | |
272 voe_codec->rate = GetOpusBitrate(codec, *max_playback_rate); | |
273 } | |
274 | |
275 webrtc::AudioState::Config MakeAudioStateConfig(VoEWrapper* voe_wrapper) { | |
276 webrtc::AudioState::Config config; | |
277 config.voice_engine = voe_wrapper->engine(); | |
278 return config; | |
279 } | |
280 | |
281 class WebRtcVoiceCodecs final { | |
282 public: | |
283 // TODO(solenberg): Do this filtering once off-line, add a simple AudioCodec | |
284 // list and add a test which verifies VoE supports the listed codecs. | |
285 static std::vector<AudioCodec> SupportedCodecs() { | |
286 LOG(LS_INFO) << "WebRtc VoiceEngine codecs:"; | |
287 std::vector<AudioCodec> result; | |
288 for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) { | |
289 // Change the sample rate of G722 to 8000 to match SDP. | |
290 MaybeFixupG722(&voe_codec, 8000); | |
291 // Skip uncompressed formats. | |
292 if (IsCodec(voe_codec, kL16CodecName)) { | |
293 continue; | |
294 } | |
295 | |
296 const CodecPref* pref = NULL; | |
297 for (size_t j = 0; j < arraysize(kCodecPrefs); ++j) { | |
298 if (IsCodec(voe_codec, kCodecPrefs[j].name) && | |
299 kCodecPrefs[j].clockrate == voe_codec.plfreq && | |
300 kCodecPrefs[j].channels == voe_codec.channels) { | |
301 pref = &kCodecPrefs[j]; | |
302 break; | |
303 } | |
304 } | |
305 | |
306 if (pref) { | |
307 // Use the payload type that we've configured in our pref table; | |
308 // use the offset in our pref table to determine the sort order. | |
309 AudioCodec codec( | |
310 pref->payload_type, voe_codec.plname, voe_codec.plfreq, | |
311 voe_codec.rate, voe_codec.channels, | |
312 static_cast<int>(arraysize(kCodecPrefs)) - (pref - kCodecPrefs)); | |
313 LOG(LS_INFO) << ToString(codec); | |
314 if (IsCodec(codec, kIsacCodecName)) { | |
315 // Indicate auto-bitrate in signaling. | |
316 codec.bitrate = 0; | |
317 } | |
318 if (IsCodec(codec, kOpusCodecName)) { | |
319 // Only add fmtp parameters that differ from the spec. | |
320 if (kPreferredMinPTime != kOpusDefaultMinPTime) { | |
321 codec.params[kCodecParamMinPTime] = | |
322 rtc::ToString(kPreferredMinPTime); | |
323 } | |
324 if (kPreferredMaxPTime != kOpusDefaultMaxPTime) { | |
325 codec.params[kCodecParamMaxPTime] = | |
326 rtc::ToString(kPreferredMaxPTime); | |
327 } | |
328 codec.SetParam(kCodecParamUseInbandFec, 1); | |
329 codec.AddFeedbackParam( | |
330 FeedbackParam(kRtcpFbParamTransportCc, kParamValueEmpty)); | |
331 | |
332 // TODO(hellner): Add ptime, sprop-stereo, and stereo | |
333 // when they can be set to values other than the default. | |
334 } | |
335 result.push_back(codec); | |
336 } else { | |
337 LOG(LS_WARNING) << "Unexpected codec: " << ToString(voe_codec); | |
338 } | |
339 } | |
340 // Make sure they are in local preference order. | |
341 std::sort(result.begin(), result.end(), &AudioCodec::Preferable); | |
342 return result; | |
343 } | |
344 | |
345 static bool ToCodecInst(const AudioCodec& in, | |
346 webrtc::CodecInst* out) { | |
347 for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) { | |
348 // Change the sample rate of G722 to 8000 to match SDP. | |
349 MaybeFixupG722(&voe_codec, 8000); | |
350 AudioCodec codec(voe_codec.pltype, voe_codec.plname, voe_codec.plfreq, | |
351 voe_codec.rate, voe_codec.channels, 0); | |
352 bool multi_rate = IsCodecMultiRate(voe_codec); | |
353 // Allow arbitrary rates for ISAC to be specified. | |
354 if (multi_rate) { | |
355 // Set codec.bitrate to 0 so the check for codec.Matches() passes. | |
356 codec.bitrate = 0; | |
357 } | |
358 if (codec.Matches(in)) { | |
359 if (out) { | |
360 // Fixup the payload type. | |
361 voe_codec.pltype = in.id; | |
362 | |
363 // Set bitrate if specified. | |
364 if (multi_rate && in.bitrate != 0) { | |
365 voe_codec.rate = in.bitrate; | |
366 } | |
367 | |
368 // Reset G722 sample rate to 16000 to match WebRTC. | |
369 MaybeFixupG722(&voe_codec, 16000); | |
370 | |
371 // Apply codec-specific settings. | |
372 if (IsCodec(codec, kIsacCodecName)) { | |
373 // If ISAC and an explicit bitrate is not specified, | |
374 // enable auto bitrate adjustment. | |
375 voe_codec.rate = (in.bitrate > 0) ? in.bitrate : -1; | |
376 } | |
377 *out = voe_codec; | |
378 } | |
379 return true; | |
380 } | |
381 } | |
382 return false; | |
383 } | |
384 | |
385 static bool IsCodecMultiRate(const webrtc::CodecInst& codec) { | |
386 for (size_t i = 0; i < arraysize(kCodecPrefs); ++i) { | |
387 if (IsCodec(codec, kCodecPrefs[i].name) && | |
388 kCodecPrefs[i].clockrate == codec.plfreq) { | |
389 return kCodecPrefs[i].is_multi_rate; | |
390 } | |
391 } | |
392 return false; | |
393 } | |
394 | |
395 // If the AudioCodec param kCodecParamPTime is set, then we will set it to | |
396 // codec pacsize if it's valid, or we will pick the next smallest value we | |
397 // support. | |
398 // TODO(Brave): Query supported packet sizes from ACM when the API is ready. | |
399 static bool SetPTimeAsPacketSize(webrtc::CodecInst* codec, int ptime_ms) { | |
400 for (const CodecPref& codec_pref : kCodecPrefs) { | |
401 if ((IsCodec(*codec, codec_pref.name) && | |
402 codec_pref.clockrate == codec->plfreq) || | |
403 IsCodec(*codec, kG722CodecName)) { | |
404 int packet_size_ms = SelectPacketSize(codec_pref, ptime_ms); | |
405 if (packet_size_ms) { | |
406 // Convert unit from milli-seconds to samples. | |
407 codec->pacsize = (codec->plfreq / 1000) * packet_size_ms; | |
408 return true; | |
409 } | |
410 } | |
411 } | |
412 return false; | |
413 } | |
414 | |
415 static const AudioCodec* GetPreferredCodec( | |
416 const std::vector<AudioCodec>& codecs, | |
417 webrtc::CodecInst* voe_codec, | |
418 int* red_payload_type) { | |
419 RTC_DCHECK(voe_codec); | |
420 RTC_DCHECK(red_payload_type); | |
421 // Select the preferred send codec (the first non-telephone-event/CN codec). | |
422 for (const AudioCodec& codec : codecs) { | |
423 *red_payload_type = -1; | |
424 if (IsCodec(codec, kDtmfCodecName) || IsCodec(codec, kCnCodecName)) { | |
425 // Skip telephone-event/CN codec, which will be handled later. | |
426 continue; | |
427 } | |
428 | |
429 // We'll use the first codec in the list to actually send audio data. | |
430 // Be sure to use the payload type requested by the remote side. | |
431 // "red", for RED audio, is a special case where the actual codec to be | |
432 // used is specified in params. | |
433 const AudioCodec* found_codec = &codec; | |
434 if (IsCodec(*found_codec, kRedCodecName)) { | |
435 // Parse out the RED parameters. If we fail, just ignore RED; | |
436 // we don't support all possible params/usage scenarios. | |
437 *red_payload_type = codec.id; | |
438 found_codec = GetRedSendCodec(*found_codec, codecs); | |
439 if (!found_codec) { | |
440 continue; | |
441 } | |
442 } | |
443 // Ignore codecs we don't know about. The negotiation step should prevent | |
444 // this, but double-check to be sure. | |
445 if (!ToCodecInst(*found_codec, voe_codec)) { | |
446 LOG(LS_WARNING) << "Unknown codec " << ToString(*found_codec); | |
447 continue; | |
448 } | |
449 return found_codec; | |
450 } | |
451 return nullptr; | |
452 } | |
453 | |
454 private: | |
455 static const int kMaxNumPacketSize = 6; | |
456 struct CodecPref { | |
457 const char* name; | |
458 int clockrate; | |
459 size_t channels; | |
460 int payload_type; | |
461 bool is_multi_rate; | |
462 int packet_sizes_ms[kMaxNumPacketSize]; | |
463 }; | |
464 // Note: keep the supported packet sizes in ascending order. | |
465 static const CodecPref kCodecPrefs[12]; | |
466 | |
467 static int SelectPacketSize(const CodecPref& codec_pref, int ptime_ms) { | |
468 int selected_packet_size_ms = codec_pref.packet_sizes_ms[0]; | |
469 for (int packet_size_ms : codec_pref.packet_sizes_ms) { | |
470 if (packet_size_ms && packet_size_ms <= ptime_ms) { | |
471 selected_packet_size_ms = packet_size_ms; | |
472 } | |
473 } | |
474 return selected_packet_size_ms; | |
475 } | |
476 | |
477 // Changes RTP timestamp rate of G722. This is due to the "bug" in the RFC | |
478 // which says that G722 should be advertised as 8 kHz although it is a 16 kHz | |
479 // codec. | |
480 static void MaybeFixupG722(webrtc::CodecInst* voe_codec, int new_plfreq) { | |
481 if (IsCodec(*voe_codec, kG722CodecName)) { | |
482 // If the ASSERT triggers, the codec definition in WebRTC VoiceEngine | |
483 // has changed, and this special case is no longer needed. | |
484 RTC_DCHECK(voe_codec->plfreq != new_plfreq); | |
485 voe_codec->plfreq = new_plfreq; | |
486 } | |
487 } | |
488 | |
489 static const AudioCodec* GetRedSendCodec( | |
490 const AudioCodec& red_codec, | |
491 const std::vector<AudioCodec>& all_codecs) { | |
492 // Get the RED encodings from the parameter with no name. This may | |
493 // change based on what is discussed on the Jingle list. | |
494 // The encoding parameter is of the form "a/b"; we only support where | |
495 // a == b. Verify this and parse out the value into red_pt. | |
496 // If the parameter value is absent (as it will be until we wire up the | |
497 // signaling of this message), use the second codec specified (i.e. the | |
498 // one after "red") as the encoding parameter. | |
499 int red_pt = -1; | |
500 std::string red_params; | |
501 CodecParameterMap::const_iterator it = red_codec.params.find(""); | |
502 if (it != red_codec.params.end()) { | |
503 red_params = it->second; | |
504 std::vector<std::string> red_pts; | |
505 if (rtc::split(red_params, '/', &red_pts) != 2 || | |
506 red_pts[0] != red_pts[1] || !rtc::FromString(red_pts[0], &red_pt)) { | |
507 LOG(LS_WARNING) << "RED params " << red_params << " not supported."; | |
508 return nullptr; | |
509 } | |
510 } else if (red_codec.params.empty()) { | |
511 LOG(LS_WARNING) << "RED params not present, using defaults"; | |
512 if (all_codecs.size() > 1) { | |
513 red_pt = all_codecs[1].id; | |
514 } | |
515 } | |
516 | |
517 // Try to find red_pt in |codecs|. | |
518 for (const AudioCodec& codec : all_codecs) { | |
519 if (codec.id == red_pt) { | |
520 return &codec; | |
521 } | |
522 } | |
523 LOG(LS_WARNING) << "RED params " << red_params << " are invalid."; | |
524 return nullptr; | |
525 } | |
526 }; | |
527 | |
528 const WebRtcVoiceCodecs::CodecPref WebRtcVoiceCodecs::kCodecPrefs[12] = { | |
529 { kOpusCodecName, 48000, 2, 111, true, { 10, 20, 40, 60 } }, | |
530 { kIsacCodecName, 16000, 1, 103, true, { 30, 60 } }, | |
531 { kIsacCodecName, 32000, 1, 104, true, { 30 } }, | |
532 // G722 should be advertised as 8000 Hz because of the RFC "bug". | |
533 { kG722CodecName, 8000, 1, 9, false, { 10, 20, 30, 40, 50, 60 } }, | |
534 { kIlbcCodecName, 8000, 1, 102, false, { 20, 30, 40, 60 } }, | |
535 { kPcmuCodecName, 8000, 1, 0, false, { 10, 20, 30, 40, 50, 60 } }, | |
536 { kPcmaCodecName, 8000, 1, 8, false, { 10, 20, 30, 40, 50, 60 } }, | |
537 { kCnCodecName, 32000, 1, 106, false, { } }, | |
538 { kCnCodecName, 16000, 1, 105, false, { } }, | |
539 { kCnCodecName, 8000, 1, 13, false, { } }, | |
540 { kRedCodecName, 8000, 1, 127, false, { } }, | |
541 { kDtmfCodecName, 8000, 1, 126, false, { } }, | |
542 }; | |
543 } // namespace { | |
544 | |
545 bool WebRtcVoiceEngine::ToCodecInst(const AudioCodec& in, | |
546 webrtc::CodecInst* out) { | |
547 return WebRtcVoiceCodecs::ToCodecInst(in, out); | |
548 } | |
549 | |
550 WebRtcVoiceEngine::WebRtcVoiceEngine() | |
551 : voe_wrapper_(new VoEWrapper()), | |
552 audio_state_(webrtc::AudioState::Create(MakeAudioStateConfig(voe()))) { | |
553 Construct(); | |
554 } | |
555 | |
556 WebRtcVoiceEngine::WebRtcVoiceEngine(VoEWrapper* voe_wrapper) | |
557 : voe_wrapper_(voe_wrapper) { | |
558 Construct(); | |
559 } | |
560 | |
561 void WebRtcVoiceEngine::Construct() { | |
562 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
563 LOG(LS_VERBOSE) << "WebRtcVoiceEngine::WebRtcVoiceEngine"; | |
564 | |
565 signal_thread_checker_.DetachFromThread(); | |
566 std::memset(&default_agc_config_, 0, sizeof(default_agc_config_)); | |
567 voe_config_.Set<webrtc::VoicePacing>(new webrtc::VoicePacing(true)); | |
568 | |
569 webrtc::Trace::set_level_filter(kDefaultTraceFilter); | |
570 webrtc::Trace::SetTraceCallback(this); | |
571 | |
572 // Load our audio codec list. | |
573 codecs_ = WebRtcVoiceCodecs::SupportedCodecs(); | |
574 } | |
575 | |
576 WebRtcVoiceEngine::~WebRtcVoiceEngine() { | |
577 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
578 LOG(LS_VERBOSE) << "WebRtcVoiceEngine::~WebRtcVoiceEngine"; | |
579 if (adm_) { | |
580 voe_wrapper_.reset(); | |
581 adm_->Release(); | |
582 adm_ = NULL; | |
583 } | |
584 webrtc::Trace::SetTraceCallback(nullptr); | |
585 } | |
586 | |
587 bool WebRtcVoiceEngine::Init(rtc::Thread* worker_thread) { | |
588 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
589 RTC_DCHECK(worker_thread == rtc::Thread::Current()); | |
590 LOG(LS_INFO) << "WebRtcVoiceEngine::Init"; | |
591 bool res = InitInternal(); | |
592 if (res) { | |
593 LOG(LS_INFO) << "WebRtcVoiceEngine::Init Done!"; | |
594 } else { | |
595 LOG(LS_ERROR) << "WebRtcVoiceEngine::Init failed"; | |
596 Terminate(); | |
597 } | |
598 return res; | |
599 } | |
600 | |
601 bool WebRtcVoiceEngine::InitInternal() { | |
602 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
603 // Temporarily turn logging level up for the Init call | |
604 webrtc::Trace::set_level_filter(kElevatedTraceFilter); | |
605 LOG(LS_INFO) << webrtc::VoiceEngine::GetVersionString(); | |
606 if (voe_wrapper_->base()->Init(adm_) == -1) { | |
607 LOG_RTCERR0_EX(Init, voe_wrapper_->error()); | |
608 return false; | |
609 } | |
610 webrtc::Trace::set_level_filter(kDefaultTraceFilter); | |
611 | |
612 // Save the default AGC configuration settings. This must happen before | |
613 // calling ApplyOptions or the default will be overwritten. | |
614 if (voe_wrapper_->processing()->GetAgcConfig(default_agc_config_) == -1) { | |
615 LOG_RTCERR0(GetAgcConfig); | |
616 return false; | |
617 } | |
618 | |
619 // Set default engine options. | |
620 { | |
621 AudioOptions options; | |
622 options.echo_cancellation = rtc::Optional<bool>(true); | |
623 options.auto_gain_control = rtc::Optional<bool>(true); | |
624 options.noise_suppression = rtc::Optional<bool>(true); | |
625 options.highpass_filter = rtc::Optional<bool>(true); | |
626 options.stereo_swapping = rtc::Optional<bool>(false); | |
627 options.audio_jitter_buffer_max_packets = rtc::Optional<int>(50); | |
628 options.audio_jitter_buffer_fast_accelerate = rtc::Optional<bool>(false); | |
629 options.typing_detection = rtc::Optional<bool>(true); | |
630 options.adjust_agc_delta = rtc::Optional<int>(0); | |
631 options.experimental_agc = rtc::Optional<bool>(false); | |
632 options.extended_filter_aec = rtc::Optional<bool>(false); | |
633 options.delay_agnostic_aec = rtc::Optional<bool>(false); | |
634 options.experimental_ns = rtc::Optional<bool>(false); | |
635 options.aec_dump = rtc::Optional<bool>(false); | |
636 if (!ApplyOptions(options)) { | |
637 return false; | |
638 } | |
639 } | |
640 | |
641 // Print our codec list again for the call diagnostic log | |
642 LOG(LS_INFO) << "WebRtc VoiceEngine codecs:"; | |
643 for (const AudioCodec& codec : codecs_) { | |
644 LOG(LS_INFO) << ToString(codec); | |
645 } | |
646 | |
647 SetDefaultDevices(); | |
648 | |
649 initialized_ = true; | |
650 return true; | |
651 } | |
652 | |
653 void WebRtcVoiceEngine::Terminate() { | |
654 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
655 LOG(LS_INFO) << "WebRtcVoiceEngine::Terminate"; | |
656 initialized_ = false; | |
657 | |
658 StopAecDump(); | |
659 | |
660 voe_wrapper_->base()->Terminate(); | |
661 } | |
662 | |
663 rtc::scoped_refptr<webrtc::AudioState> | |
664 WebRtcVoiceEngine::GetAudioState() const { | |
665 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
666 return audio_state_; | |
667 } | |
668 | |
669 VoiceMediaChannel* WebRtcVoiceEngine::CreateChannel(webrtc::Call* call, | |
670 const AudioOptions& options) { | |
671 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
672 return new WebRtcVoiceMediaChannel(this, options, call); | |
673 } | |
674 | |
675 bool WebRtcVoiceEngine::ApplyOptions(const AudioOptions& options_in) { | |
676 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
677 LOG(LS_INFO) << "ApplyOptions: " << options_in.ToString(); | |
678 AudioOptions options = options_in; // The options are modified below. | |
679 | |
680 // kEcConference is AEC with high suppression. | |
681 webrtc::EcModes ec_mode = webrtc::kEcConference; | |
682 webrtc::AecmModes aecm_mode = webrtc::kAecmSpeakerphone; | |
683 webrtc::AgcModes agc_mode = webrtc::kAgcAdaptiveAnalog; | |
684 webrtc::NsModes ns_mode = webrtc::kNsHighSuppression; | |
685 if (options.aecm_generate_comfort_noise) { | |
686 LOG(LS_VERBOSE) << "Comfort noise explicitly set to " | |
687 << *options.aecm_generate_comfort_noise | |
688 << " (default is false)."; | |
689 } | |
690 | |
691 #if defined(WEBRTC_IOS) | |
692 // On iOS, VPIO provides built-in EC and AGC. | |
693 options.echo_cancellation = rtc::Optional<bool>(false); | |
694 options.auto_gain_control = rtc::Optional<bool>(false); | |
695 LOG(LS_INFO) << "Always disable AEC and AGC on iOS. Use built-in instead."; | |
696 #elif defined(ANDROID) | |
697 ec_mode = webrtc::kEcAecm; | |
698 #endif | |
699 | |
700 #if defined(WEBRTC_IOS) || defined(ANDROID) | |
701 // Set the AGC mode for iOS as well despite disabling it above, to avoid | |
702 // unsupported configuration errors from webrtc. | |
703 agc_mode = webrtc::kAgcFixedDigital; | |
704 options.typing_detection = rtc::Optional<bool>(false); | |
705 options.experimental_agc = rtc::Optional<bool>(false); | |
706 options.extended_filter_aec = rtc::Optional<bool>(false); | |
707 options.experimental_ns = rtc::Optional<bool>(false); | |
708 #endif | |
709 | |
710 // Delay Agnostic AEC automatically turns on EC if not set except on iOS | |
711 // where the feature is not supported. | |
712 bool use_delay_agnostic_aec = false; | |
713 #if !defined(WEBRTC_IOS) | |
714 if (options.delay_agnostic_aec) { | |
715 use_delay_agnostic_aec = *options.delay_agnostic_aec; | |
716 if (use_delay_agnostic_aec) { | |
717 options.echo_cancellation = rtc::Optional<bool>(true); | |
718 options.extended_filter_aec = rtc::Optional<bool>(true); | |
719 ec_mode = webrtc::kEcConference; | |
720 } | |
721 } | |
722 #endif | |
723 | |
724 webrtc::VoEAudioProcessing* voep = voe_wrapper_->processing(); | |
725 | |
726 if (options.echo_cancellation) { | |
727 // Check if platform supports built-in EC. Currently only supported on | |
728 // Android and in combination with Java based audio layer. | |
729 // TODO(henrika): investigate possibility to support built-in EC also | |
730 // in combination with Open SL ES audio. | |
731 const bool built_in_aec = voe_wrapper_->hw()->BuiltInAECIsAvailable(); | |
732 if (built_in_aec) { | |
733 // Built-in EC exists on this device and use_delay_agnostic_aec is not | |
734 // overriding it. Enable/Disable it according to the echo_cancellation | |
735 // audio option. | |
736 const bool enable_built_in_aec = | |
737 *options.echo_cancellation && !use_delay_agnostic_aec; | |
738 if (voe_wrapper_->hw()->EnableBuiltInAEC(enable_built_in_aec) == 0 && | |
739 enable_built_in_aec) { | |
740 // Disable internal software EC if built-in EC is enabled, | |
741 // i.e., replace the software EC with the built-in EC. | |
742 options.echo_cancellation = rtc::Optional<bool>(false); | |
743 LOG(LS_INFO) << "Disabling EC since built-in EC will be used instead"; | |
744 } | |
745 } | |
746 if (voep->SetEcStatus(*options.echo_cancellation, ec_mode) == -1) { | |
747 LOG_RTCERR2(SetEcStatus, *options.echo_cancellation, ec_mode); | |
748 return false; | |
749 } else { | |
750 LOG(LS_INFO) << "Echo control set to " << *options.echo_cancellation | |
751 << " with mode " << ec_mode; | |
752 } | |
753 #if !defined(ANDROID) | |
754 // TODO(ajm): Remove the error return on Android from webrtc. | |
755 if (voep->SetEcMetricsStatus(*options.echo_cancellation) == -1) { | |
756 LOG_RTCERR1(SetEcMetricsStatus, *options.echo_cancellation); | |
757 return false; | |
758 } | |
759 #endif | |
760 if (ec_mode == webrtc::kEcAecm) { | |
761 bool cn = options.aecm_generate_comfort_noise.value_or(false); | |
762 if (voep->SetAecmMode(aecm_mode, cn) != 0) { | |
763 LOG_RTCERR2(SetAecmMode, aecm_mode, cn); | |
764 return false; | |
765 } | |
766 } | |
767 } | |
768 | |
769 if (options.auto_gain_control) { | |
770 const bool built_in_agc = voe_wrapper_->hw()->BuiltInAGCIsAvailable(); | |
771 if (built_in_agc) { | |
772 if (voe_wrapper_->hw()->EnableBuiltInAGC(*options.auto_gain_control) == | |
773 0 && | |
774 *options.auto_gain_control) { | |
775 // Disable internal software AGC if built-in AGC is enabled, | |
776 // i.e., replace the software AGC with the built-in AGC. | |
777 options.auto_gain_control = rtc::Optional<bool>(false); | |
778 LOG(LS_INFO) << "Disabling AGC since built-in AGC will be used instead"; | |
779 } | |
780 } | |
781 if (voep->SetAgcStatus(*options.auto_gain_control, agc_mode) == -1) { | |
782 LOG_RTCERR2(SetAgcStatus, *options.auto_gain_control, agc_mode); | |
783 return false; | |
784 } else { | |
785 LOG(LS_INFO) << "Auto gain set to " << *options.auto_gain_control | |
786 << " with mode " << agc_mode; | |
787 } | |
788 } | |
789 | |
790 if (options.tx_agc_target_dbov || options.tx_agc_digital_compression_gain || | |
791 options.tx_agc_limiter) { | |
792 // Override default_agc_config_. Generally, an unset option means "leave | |
793 // the VoE bits alone" in this function, so we want whatever is set to be | |
794 // stored as the new "default". If we didn't, then setting e.g. | |
795 // tx_agc_target_dbov would reset digital compression gain and limiter | |
796 // settings. | |
797 // Also, if we don't update default_agc_config_, then adjust_agc_delta | |
798 // would be an offset from the original values, and not whatever was set | |
799 // explicitly. | |
800 default_agc_config_.targetLeveldBOv = options.tx_agc_target_dbov.value_or( | |
801 default_agc_config_.targetLeveldBOv); | |
802 default_agc_config_.digitalCompressionGaindB = | |
803 options.tx_agc_digital_compression_gain.value_or( | |
804 default_agc_config_.digitalCompressionGaindB); | |
805 default_agc_config_.limiterEnable = | |
806 options.tx_agc_limiter.value_or(default_agc_config_.limiterEnable); | |
807 if (voe_wrapper_->processing()->SetAgcConfig(default_agc_config_) == -1) { | |
808 LOG_RTCERR3(SetAgcConfig, | |
809 default_agc_config_.targetLeveldBOv, | |
810 default_agc_config_.digitalCompressionGaindB, | |
811 default_agc_config_.limiterEnable); | |
812 return false; | |
813 } | |
814 } | |
815 | |
816 if (options.noise_suppression) { | |
817 const bool built_in_ns = voe_wrapper_->hw()->BuiltInNSIsAvailable(); | |
818 if (built_in_ns) { | |
819 if (voe_wrapper_->hw()->EnableBuiltInNS(*options.noise_suppression) == | |
820 0 && | |
821 *options.noise_suppression) { | |
822 // Disable internal software NS if built-in NS is enabled, | |
823 // i.e., replace the software NS with the built-in NS. | |
824 options.noise_suppression = rtc::Optional<bool>(false); | |
825 LOG(LS_INFO) << "Disabling NS since built-in NS will be used instead"; | |
826 } | |
827 } | |
828 if (voep->SetNsStatus(*options.noise_suppression, ns_mode) == -1) { | |
829 LOG_RTCERR2(SetNsStatus, *options.noise_suppression, ns_mode); | |
830 return false; | |
831 } else { | |
832 LOG(LS_INFO) << "Noise suppression set to " << *options.noise_suppression | |
833 << " with mode " << ns_mode; | |
834 } | |
835 } | |
836 | |
837 if (options.highpass_filter) { | |
838 LOG(LS_INFO) << "High pass filter enabled? " << *options.highpass_filter; | |
839 if (voep->EnableHighPassFilter(*options.highpass_filter) == -1) { | |
840 LOG_RTCERR1(SetHighpassFilterStatus, *options.highpass_filter); | |
841 return false; | |
842 } | |
843 } | |
844 | |
845 if (options.stereo_swapping) { | |
846 LOG(LS_INFO) << "Stereo swapping enabled? " << *options.stereo_swapping; | |
847 voep->EnableStereoChannelSwapping(*options.stereo_swapping); | |
848 if (voep->IsStereoChannelSwappingEnabled() != *options.stereo_swapping) { | |
849 LOG_RTCERR1(EnableStereoChannelSwapping, *options.stereo_swapping); | |
850 return false; | |
851 } | |
852 } | |
853 | |
854 if (options.audio_jitter_buffer_max_packets) { | |
855 LOG(LS_INFO) << "NetEq capacity is " | |
856 << *options.audio_jitter_buffer_max_packets; | |
857 voe_config_.Set<webrtc::NetEqCapacityConfig>( | |
858 new webrtc::NetEqCapacityConfig( | |
859 *options.audio_jitter_buffer_max_packets)); | |
860 } | |
861 | |
862 if (options.audio_jitter_buffer_fast_accelerate) { | |
863 LOG(LS_INFO) << "NetEq fast mode? " | |
864 << *options.audio_jitter_buffer_fast_accelerate; | |
865 voe_config_.Set<webrtc::NetEqFastAccelerate>( | |
866 new webrtc::NetEqFastAccelerate( | |
867 *options.audio_jitter_buffer_fast_accelerate)); | |
868 } | |
869 | |
870 if (options.typing_detection) { | |
871 LOG(LS_INFO) << "Typing detection is enabled? " | |
872 << *options.typing_detection; | |
873 if (voep->SetTypingDetectionStatus(*options.typing_detection) == -1) { | |
874 // In case of error, log the info and continue | |
875 LOG_RTCERR1(SetTypingDetectionStatus, *options.typing_detection); | |
876 } | |
877 } | |
878 | |
879 if (options.adjust_agc_delta) { | |
880 LOG(LS_INFO) << "Adjust agc delta is " << *options.adjust_agc_delta; | |
881 if (!AdjustAgcLevel(*options.adjust_agc_delta)) { | |
882 return false; | |
883 } | |
884 } | |
885 | |
886 if (options.aec_dump) { | |
887 LOG(LS_INFO) << "Aec dump is enabled? " << *options.aec_dump; | |
888 if (*options.aec_dump) | |
889 StartAecDump(kAecDumpByAudioOptionFilename); | |
890 else | |
891 StopAecDump(); | |
892 } | |
893 | |
894 webrtc::Config config; | |
895 | |
896 if (options.delay_agnostic_aec) | |
897 delay_agnostic_aec_ = options.delay_agnostic_aec; | |
898 if (delay_agnostic_aec_) { | |
899 LOG(LS_INFO) << "Delay agnostic aec is enabled? " << *delay_agnostic_aec_; | |
900 config.Set<webrtc::DelayAgnostic>( | |
901 new webrtc::DelayAgnostic(*delay_agnostic_aec_)); | |
902 } | |
903 | |
904 if (options.extended_filter_aec) { | |
905 extended_filter_aec_ = options.extended_filter_aec; | |
906 } | |
907 if (extended_filter_aec_) { | |
908 LOG(LS_INFO) << "Extended filter aec is enabled? " << *extended_filter_aec_; | |
909 config.Set<webrtc::ExtendedFilter>( | |
910 new webrtc::ExtendedFilter(*extended_filter_aec_)); | |
911 } | |
912 | |
913 if (options.experimental_ns) { | |
914 experimental_ns_ = options.experimental_ns; | |
915 } | |
916 if (experimental_ns_) { | |
917 LOG(LS_INFO) << "Experimental ns is enabled? " << *experimental_ns_; | |
918 config.Set<webrtc::ExperimentalNs>( | |
919 new webrtc::ExperimentalNs(*experimental_ns_)); | |
920 } | |
921 | |
922 // We check audioproc for the benefit of tests, since FakeWebRtcVoiceEngine | |
923 // returns NULL on audio_processing(). | |
924 webrtc::AudioProcessing* audioproc = voe_wrapper_->base()->audio_processing(); | |
925 if (audioproc) { | |
926 audioproc->SetExtraOptions(config); | |
927 } | |
928 | |
929 if (options.recording_sample_rate) { | |
930 LOG(LS_INFO) << "Recording sample rate is " | |
931 << *options.recording_sample_rate; | |
932 if (voe_wrapper_->hw()->SetRecordingSampleRate( | |
933 *options.recording_sample_rate)) { | |
934 LOG_RTCERR1(SetRecordingSampleRate, *options.recording_sample_rate); | |
935 } | |
936 } | |
937 | |
938 if (options.playout_sample_rate) { | |
939 LOG(LS_INFO) << "Playout sample rate is " << *options.playout_sample_rate; | |
940 if (voe_wrapper_->hw()->SetPlayoutSampleRate( | |
941 *options.playout_sample_rate)) { | |
942 LOG_RTCERR1(SetPlayoutSampleRate, *options.playout_sample_rate); | |
943 } | |
944 } | |
945 | |
946 return true; | |
947 } | |
948 | |
949 void WebRtcVoiceEngine::SetDefaultDevices() { | |
950 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
951 #if !defined(WEBRTC_IOS) | |
952 int in_id = kDefaultAudioDeviceId; | |
953 int out_id = kDefaultAudioDeviceId; | |
954 LOG(LS_INFO) << "Setting microphone to (id=" << in_id | |
955 << ") and speaker to (id=" << out_id << ")"; | |
956 | |
957 bool ret = true; | |
958 if (voe_wrapper_->hw()->SetRecordingDevice(in_id) == -1) { | |
959 LOG_RTCERR1(SetRecordingDevice, in_id); | |
960 ret = false; | |
961 } | |
962 webrtc::AudioProcessing* ap = voe()->base()->audio_processing(); | |
963 if (ap) { | |
964 ap->Initialize(); | |
965 } | |
966 | |
967 if (voe_wrapper_->hw()->SetPlayoutDevice(out_id) == -1) { | |
968 LOG_RTCERR1(SetPlayoutDevice, out_id); | |
969 ret = false; | |
970 } | |
971 | |
972 if (ret) { | |
973 LOG(LS_INFO) << "Set microphone to (id=" << in_id | |
974 << ") and speaker to (id=" << out_id << ")"; | |
975 } | |
976 #endif // !WEBRTC_IOS | |
977 } | |
978 | |
979 bool WebRtcVoiceEngine::GetOutputVolume(int* level) { | |
980 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
981 unsigned int ulevel; | |
982 if (voe_wrapper_->volume()->GetSpeakerVolume(ulevel) == -1) { | |
983 LOG_RTCERR1(GetSpeakerVolume, level); | |
984 return false; | |
985 } | |
986 *level = ulevel; | |
987 return true; | |
988 } | |
989 | |
990 bool WebRtcVoiceEngine::SetOutputVolume(int level) { | |
991 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
992 RTC_DCHECK(level >= 0 && level <= 255); | |
993 if (voe_wrapper_->volume()->SetSpeakerVolume(level) == -1) { | |
994 LOG_RTCERR1(SetSpeakerVolume, level); | |
995 return false; | |
996 } | |
997 return true; | |
998 } | |
999 | |
1000 int WebRtcVoiceEngine::GetInputLevel() { | |
1001 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1002 unsigned int ulevel; | |
1003 return (voe_wrapper_->volume()->GetSpeechInputLevel(ulevel) != -1) ? | |
1004 static_cast<int>(ulevel) : -1; | |
1005 } | |
1006 | |
1007 const std::vector<AudioCodec>& WebRtcVoiceEngine::codecs() { | |
1008 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | |
1009 return codecs_; | |
1010 } | |
1011 | |
1012 RtpCapabilities WebRtcVoiceEngine::GetCapabilities() const { | |
1013 RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); | |
1014 RtpCapabilities capabilities; | |
1015 capabilities.header_extensions.push_back(RtpHeaderExtension( | |
1016 kRtpAudioLevelHeaderExtension, kRtpAudioLevelHeaderExtensionDefaultId)); | |
1017 capabilities.header_extensions.push_back( | |
1018 RtpHeaderExtension(kRtpAbsoluteSenderTimeHeaderExtension, | |
1019 kRtpAbsoluteSenderTimeHeaderExtensionDefaultId)); | |
1020 if (webrtc::field_trial::FindFullName("WebRTC-Audio-SendSideBwe") == | |
1021 "Enabled") { | |
1022 capabilities.header_extensions.push_back(RtpHeaderExtension( | |
1023 kRtpTransportSequenceNumberHeaderExtension, | |
1024 kRtpTransportSequenceNumberHeaderExtensionDefaultId)); | |
1025 } | |
1026 return capabilities; | |
1027 } | |
1028 | |
1029 int WebRtcVoiceEngine::GetLastEngineError() { | |
1030 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1031 return voe_wrapper_->error(); | |
1032 } | |
1033 | |
1034 void WebRtcVoiceEngine::Print(webrtc::TraceLevel level, const char* trace, | |
1035 int length) { | |
1036 // Note: This callback can happen on any thread! | |
1037 rtc::LoggingSeverity sev = rtc::LS_VERBOSE; | |
1038 if (level == webrtc::kTraceError || level == webrtc::kTraceCritical) | |
1039 sev = rtc::LS_ERROR; | |
1040 else if (level == webrtc::kTraceWarning) | |
1041 sev = rtc::LS_WARNING; | |
1042 else if (level == webrtc::kTraceStateInfo || level == webrtc::kTraceInfo) | |
1043 sev = rtc::LS_INFO; | |
1044 else if (level == webrtc::kTraceTerseInfo) | |
1045 sev = rtc::LS_INFO; | |
1046 | |
1047 // Skip past boilerplate prefix text | |
1048 if (length < 72) { | |
1049 std::string msg(trace, length); | |
1050 LOG(LS_ERROR) << "Malformed webrtc log message: "; | |
1051 LOG_V(sev) << msg; | |
1052 } else { | |
1053 std::string msg(trace + 71, length - 72); | |
1054 LOG_V(sev) << "webrtc: " << msg; | |
1055 } | |
1056 } | |
1057 | |
1058 void WebRtcVoiceEngine::RegisterChannel(WebRtcVoiceMediaChannel* channel) { | |
1059 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1060 RTC_DCHECK(channel); | |
1061 channels_.push_back(channel); | |
1062 } | |
1063 | |
1064 void WebRtcVoiceEngine::UnregisterChannel(WebRtcVoiceMediaChannel* channel) { | |
1065 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1066 auto it = std::find(channels_.begin(), channels_.end(), channel); | |
1067 RTC_DCHECK(it != channels_.end()); | |
1068 channels_.erase(it); | |
1069 } | |
1070 | |
1071 // Adjusts the default AGC target level by the specified delta. | |
1072 // NB: If we start messing with other config fields, we'll want | |
1073 // to save the current webrtc::AgcConfig as well. | |
1074 bool WebRtcVoiceEngine::AdjustAgcLevel(int delta) { | |
1075 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1076 webrtc::AgcConfig config = default_agc_config_; | |
1077 config.targetLeveldBOv -= delta; | |
1078 | |
1079 LOG(LS_INFO) << "Adjusting AGC level from default -" | |
1080 << default_agc_config_.targetLeveldBOv << "dB to -" | |
1081 << config.targetLeveldBOv << "dB"; | |
1082 | |
1083 if (voe_wrapper_->processing()->SetAgcConfig(config) == -1) { | |
1084 LOG_RTCERR1(SetAgcConfig, config.targetLeveldBOv); | |
1085 return false; | |
1086 } | |
1087 return true; | |
1088 } | |
1089 | |
1090 bool WebRtcVoiceEngine::SetAudioDeviceModule(webrtc::AudioDeviceModule* adm) { | |
1091 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1092 if (initialized_) { | |
1093 LOG(LS_WARNING) << "SetAudioDeviceModule can not be called after Init."; | |
1094 return false; | |
1095 } | |
1096 if (adm_) { | |
1097 adm_->Release(); | |
1098 adm_ = NULL; | |
1099 } | |
1100 if (adm) { | |
1101 adm_ = adm; | |
1102 adm_->AddRef(); | |
1103 } | |
1104 return true; | |
1105 } | |
1106 | |
1107 bool WebRtcVoiceEngine::StartAecDump(rtc::PlatformFile file, | |
1108 int64_t max_size_bytes) { | |
1109 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1110 FILE* aec_dump_file_stream = rtc::FdopenPlatformFileForWriting(file); | |
1111 if (!aec_dump_file_stream) { | |
1112 LOG(LS_ERROR) << "Could not open AEC dump file stream."; | |
1113 if (!rtc::ClosePlatformFile(file)) | |
1114 LOG(LS_WARNING) << "Could not close file."; | |
1115 return false; | |
1116 } | |
1117 StopAecDump(); | |
1118 if (voe_wrapper_->base()->audio_processing()->StartDebugRecording( | |
1119 aec_dump_file_stream, max_size_bytes) != | |
1120 webrtc::AudioProcessing::kNoError) { | |
1121 LOG_RTCERR0(StartDebugRecording); | |
1122 fclose(aec_dump_file_stream); | |
1123 return false; | |
1124 } | |
1125 is_dumping_aec_ = true; | |
1126 return true; | |
1127 } | |
1128 | |
1129 void WebRtcVoiceEngine::StartAecDump(const std::string& filename) { | |
1130 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1131 if (!is_dumping_aec_) { | |
1132 // Start dumping AEC when we are not dumping. | |
1133 if (voe_wrapper_->base()->audio_processing()->StartDebugRecording( | |
1134 filename.c_str(), -1) != webrtc::AudioProcessing::kNoError) { | |
1135 LOG_RTCERR1(StartDebugRecording, filename.c_str()); | |
1136 } else { | |
1137 is_dumping_aec_ = true; | |
1138 } | |
1139 } | |
1140 } | |
1141 | |
1142 void WebRtcVoiceEngine::StopAecDump() { | |
1143 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1144 if (is_dumping_aec_) { | |
1145 // Stop dumping AEC when we are dumping. | |
1146 if (voe_wrapper_->base()->audio_processing()->StopDebugRecording() != | |
1147 webrtc::AudioProcessing::kNoError) { | |
1148 LOG_RTCERR0(StopDebugRecording); | |
1149 } | |
1150 is_dumping_aec_ = false; | |
1151 } | |
1152 } | |
1153 | |
1154 bool WebRtcVoiceEngine::StartRtcEventLog(rtc::PlatformFile file) { | |
1155 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1156 webrtc::RtcEventLog* event_log = voe_wrapper_->codec()->GetEventLog(); | |
1157 if (event_log) { | |
1158 return event_log->StartLogging(file); | |
1159 } | |
1160 LOG_RTCERR0(StartRtcEventLog); | |
1161 return false; | |
1162 } | |
1163 | |
1164 void WebRtcVoiceEngine::StopRtcEventLog() { | |
1165 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1166 webrtc::RtcEventLog* event_log = voe_wrapper_->codec()->GetEventLog(); | |
1167 if (event_log) { | |
1168 event_log->StopLogging(); | |
1169 return; | |
1170 } | |
1171 LOG_RTCERR0(StopRtcEventLog); | |
1172 } | |
1173 | |
1174 int WebRtcVoiceEngine::CreateVoEChannel() { | |
1175 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1176 return voe_wrapper_->base()->CreateChannel(voe_config_); | |
1177 } | |
1178 | |
1179 class WebRtcVoiceMediaChannel::WebRtcAudioSendStream | |
1180 : public AudioRenderer::Sink { | |
1181 public: | |
1182 WebRtcAudioSendStream(int ch, webrtc::AudioTransport* voe_audio_transport, | |
1183 uint32_t ssrc, const std::string& c_name, | |
1184 const std::vector<webrtc::RtpExtension>& extensions, | |
1185 webrtc::Call* call) | |
1186 : voe_audio_transport_(voe_audio_transport), | |
1187 call_(call), | |
1188 config_(nullptr) { | |
1189 RTC_DCHECK_GE(ch, 0); | |
1190 // TODO(solenberg): Once we're not using FakeWebRtcVoiceEngine anymore: | |
1191 // RTC_DCHECK(voe_audio_transport); | |
1192 RTC_DCHECK(call); | |
1193 audio_capture_thread_checker_.DetachFromThread(); | |
1194 config_.rtp.ssrc = ssrc; | |
1195 config_.rtp.c_name = c_name; | |
1196 config_.voe_channel_id = ch; | |
1197 RecreateAudioSendStream(extensions); | |
1198 } | |
1199 | |
1200 ~WebRtcAudioSendStream() override { | |
1201 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1202 Stop(); | |
1203 call_->DestroyAudioSendStream(stream_); | |
1204 } | |
1205 | |
1206 void RecreateAudioSendStream( | |
1207 const std::vector<webrtc::RtpExtension>& extensions) { | |
1208 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1209 if (stream_) { | |
1210 call_->DestroyAudioSendStream(stream_); | |
1211 stream_ = nullptr; | |
1212 } | |
1213 config_.rtp.extensions = extensions; | |
1214 RTC_DCHECK(!stream_); | |
1215 stream_ = call_->CreateAudioSendStream(config_); | |
1216 RTC_CHECK(stream_); | |
1217 } | |
1218 | |
1219 bool SendTelephoneEvent(int payload_type, uint8_t event, | |
1220 uint32_t duration_ms) { | |
1221 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1222 RTC_DCHECK(stream_); | |
1223 return stream_->SendTelephoneEvent(payload_type, event, duration_ms); | |
1224 } | |
1225 | |
1226 webrtc::AudioSendStream::Stats GetStats() const { | |
1227 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1228 RTC_DCHECK(stream_); | |
1229 return stream_->GetStats(); | |
1230 } | |
1231 | |
1232 // Starts the rendering by setting a sink to the renderer to get data | |
1233 // callback. | |
1234 // This method is called on the libjingle worker thread. | |
1235 // TODO(xians): Make sure Start() is called only once. | |
1236 void Start(AudioRenderer* renderer) { | |
1237 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1238 RTC_DCHECK(renderer); | |
1239 if (renderer_) { | |
1240 RTC_DCHECK(renderer_ == renderer); | |
1241 return; | |
1242 } | |
1243 renderer->SetSink(this); | |
1244 renderer_ = renderer; | |
1245 } | |
1246 | |
1247 // Stops rendering by setting the sink of the renderer to nullptr. No data | |
1248 // callback will be received after this method. | |
1249 // This method is called on the libjingle worker thread. | |
1250 void Stop() { | |
1251 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1252 if (renderer_) { | |
1253 renderer_->SetSink(nullptr); | |
1254 renderer_ = nullptr; | |
1255 } | |
1256 } | |
1257 | |
1258 // AudioRenderer::Sink implementation. | |
1259 // This method is called on the audio thread. | |
1260 void OnData(const void* audio_data, | |
1261 int bits_per_sample, | |
1262 int sample_rate, | |
1263 size_t number_of_channels, | |
1264 size_t number_of_frames) override { | |
1265 RTC_DCHECK(!worker_thread_checker_.CalledOnValidThread()); | |
1266 RTC_DCHECK(audio_capture_thread_checker_.CalledOnValidThread()); | |
1267 RTC_DCHECK(voe_audio_transport_); | |
1268 voe_audio_transport_->OnData(config_.voe_channel_id, | |
1269 audio_data, | |
1270 bits_per_sample, | |
1271 sample_rate, | |
1272 number_of_channels, | |
1273 number_of_frames); | |
1274 } | |
1275 | |
1276 // Callback from the |renderer_| when it is going away. In case Start() has | |
1277 // never been called, this callback won't be triggered. | |
1278 void OnClose() override { | |
1279 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1280 // Set |renderer_| to nullptr to make sure no more callback will get into | |
1281 // the renderer. | |
1282 renderer_ = nullptr; | |
1283 } | |
1284 | |
1285 // Accessor to the VoE channel ID. | |
1286 int channel() const { | |
1287 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1288 return config_.voe_channel_id; | |
1289 } | |
1290 | |
1291 private: | |
1292 rtc::ThreadChecker worker_thread_checker_; | |
1293 rtc::ThreadChecker audio_capture_thread_checker_; | |
1294 webrtc::AudioTransport* const voe_audio_transport_ = nullptr; | |
1295 webrtc::Call* call_ = nullptr; | |
1296 webrtc::AudioSendStream::Config config_; | |
1297 // The stream is owned by WebRtcAudioSendStream and may be reallocated if | |
1298 // configuration changes. | |
1299 webrtc::AudioSendStream* stream_ = nullptr; | |
1300 | |
1301 // Raw pointer to AudioRenderer owned by LocalAudioTrackHandler. | |
1302 // PeerConnection will make sure invalidating the pointer before the object | |
1303 // goes away. | |
1304 AudioRenderer* renderer_ = nullptr; | |
1305 | |
1306 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WebRtcAudioSendStream); | |
1307 }; | |
1308 | |
1309 class WebRtcVoiceMediaChannel::WebRtcAudioReceiveStream { | |
1310 public: | |
1311 WebRtcAudioReceiveStream(int ch, | |
1312 uint32_t remote_ssrc, | |
1313 uint32_t local_ssrc, | |
1314 bool use_transport_cc, | |
1315 const std::string& sync_group, | |
1316 const std::vector<webrtc::RtpExtension>& extensions, | |
1317 webrtc::Call* call) | |
1318 : call_(call), config_() { | |
1319 RTC_DCHECK_GE(ch, 0); | |
1320 RTC_DCHECK(call); | |
1321 config_.rtp.remote_ssrc = remote_ssrc; | |
1322 config_.rtp.local_ssrc = local_ssrc; | |
1323 config_.voe_channel_id = ch; | |
1324 config_.sync_group = sync_group; | |
1325 RecreateAudioReceiveStream(use_transport_cc, extensions); | |
1326 } | |
1327 | |
1328 ~WebRtcAudioReceiveStream() { | |
1329 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1330 call_->DestroyAudioReceiveStream(stream_); | |
1331 } | |
1332 | |
1333 void RecreateAudioReceiveStream( | |
1334 const std::vector<webrtc::RtpExtension>& extensions) { | |
1335 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1336 RecreateAudioReceiveStream(config_.rtp.transport_cc, extensions); | |
1337 } | |
1338 void RecreateAudioReceiveStream(bool use_transport_cc) { | |
1339 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1340 RecreateAudioReceiveStream(use_transport_cc, config_.rtp.extensions); | |
1341 } | |
1342 | |
1343 webrtc::AudioReceiveStream::Stats GetStats() const { | |
1344 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1345 RTC_DCHECK(stream_); | |
1346 return stream_->GetStats(); | |
1347 } | |
1348 | |
1349 int channel() const { | |
1350 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1351 return config_.voe_channel_id; | |
1352 } | |
1353 | |
1354 void SetRawAudioSink(rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) { | |
1355 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1356 stream_->SetSink(std::move(sink)); | |
1357 } | |
1358 | |
1359 private: | |
1360 void RecreateAudioReceiveStream( | |
1361 bool use_transport_cc, | |
1362 const std::vector<webrtc::RtpExtension>& extensions) { | |
1363 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1364 if (stream_) { | |
1365 call_->DestroyAudioReceiveStream(stream_); | |
1366 stream_ = nullptr; | |
1367 } | |
1368 config_.rtp.extensions = extensions; | |
1369 config_.rtp.transport_cc = use_transport_cc; | |
1370 RTC_DCHECK(!stream_); | |
1371 stream_ = call_->CreateAudioReceiveStream(config_); | |
1372 RTC_CHECK(stream_); | |
1373 } | |
1374 | |
1375 rtc::ThreadChecker worker_thread_checker_; | |
1376 webrtc::Call* call_ = nullptr; | |
1377 webrtc::AudioReceiveStream::Config config_; | |
1378 // The stream is owned by WebRtcAudioReceiveStream and may be reallocated if | |
1379 // configuration changes. | |
1380 webrtc::AudioReceiveStream* stream_ = nullptr; | |
1381 | |
1382 RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(WebRtcAudioReceiveStream); | |
1383 }; | |
1384 | |
1385 WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel(WebRtcVoiceEngine* engine, | |
1386 const AudioOptions& options, | |
1387 webrtc::Call* call) | |
1388 : engine_(engine), call_(call) { | |
1389 LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::WebRtcVoiceMediaChannel"; | |
1390 RTC_DCHECK(call); | |
1391 engine->RegisterChannel(this); | |
1392 SetOptions(options); | |
1393 } | |
1394 | |
1395 WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel() { | |
1396 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1397 LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::~WebRtcVoiceMediaChannel"; | |
1398 // TODO(solenberg): Should be able to delete the streams directly, without | |
1399 //                  going through RemoveSendStream()/RemoveRecvStream(), once | |
1400 //                  stream objects handle all (de)configuration. | |
1401 while (!send_streams_.empty()) { | |
1402 RemoveSendStream(send_streams_.begin()->first); | |
1403 } | |
1404 while (!recv_streams_.empty()) { | |
1405 RemoveRecvStream(recv_streams_.begin()->first); | |
1406 } | |
1407 engine()->UnregisterChannel(this); | |
1408 } | |
1409 | |
1410 bool WebRtcVoiceMediaChannel::SetSendParameters( | |
1411 const AudioSendParameters& params) { | |
1412 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1413 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSendParameters: " | |
1414 << params.ToString(); | |
1415 // TODO(pthatcher): Refactor this to be cleaner now that we have all the | |
1416 // information at once. | |
1417 | |
1418 if (!SetSendCodecs(params.codecs)) { | |
1419 return false; | |
1420 } | |
1421 | |
1422 if (!ValidateRtpExtensions(params.extensions)) { | |
1423 return false; | |
1424 } | |
1425 std::vector<webrtc::RtpExtension> filtered_extensions = | |
1426 FilterRtpExtensions(params.extensions, | |
1427 webrtc::RtpExtension::IsSupportedForAudio, true); | |
1428 if (send_rtp_extensions_ != filtered_extensions) { | |
1429 send_rtp_extensions_.swap(filtered_extensions); | |
1430 for (auto& it : send_streams_) { | |
1431 it.second->RecreateAudioSendStream(send_rtp_extensions_); | |
1432 } | |
1433 } | |
1434 | |
1435 if (!SetMaxSendBandwidth(params.max_bandwidth_bps)) { | |
1436 return false; | |
1437 } | |
1438 return SetOptions(params.options); | |
1439 } | |
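// Illustrative caller of SetSendParameters() (a hypothetical sketch, not part
// of this file; the variable names below are assumptions):
//   cricket::AudioSendParameters send_params;
//   send_params.codecs = negotiated_send_codecs;     // from codec negotiation
//   send_params.extensions = negotiated_extensions;  // RTP header extensions
//   send_params.max_bandwidth_bps = 64000;
//   send_params.options.dscp = rtc::Optional<bool>(true);
//   channel->SetSendParameters(send_params);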
1440 | |
1441 bool WebRtcVoiceMediaChannel::SetRecvParameters( | |
1442 const AudioRecvParameters& params) { | |
1443 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1444 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetRecvParameters: " | |
1445 << params.ToString(); | |
1446 // TODO(pthatcher): Refactor this to be cleaner now that we have all the | |
1447 // information at once. | |
1448 | |
1449 if (!SetRecvCodecs(params.codecs)) { | |
1450 return false; | |
1451 } | |
1452 | |
1453 if (!ValidateRtpExtensions(params.extensions)) { | |
1454 return false; | |
1455 } | |
1456 std::vector<webrtc::RtpExtension> filtered_extensions = | |
1457 FilterRtpExtensions(params.extensions, | |
1458 webrtc::RtpExtension::IsSupportedForAudio, false); | |
1459 if (recv_rtp_extensions_ != filtered_extensions) { | |
1460 recv_rtp_extensions_.swap(filtered_extensions); | |
1461 for (auto& it : recv_streams_) { | |
1462 it.second->RecreateAudioReceiveStream(recv_rtp_extensions_); | |
1463 } | |
1464 } | |
1465 return true; | |
1466 } | |
1467 | |
1468 bool WebRtcVoiceMediaChannel::SetOptions(const AudioOptions& options) { | |
1469 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1470 LOG(LS_INFO) << "Setting voice channel options: " | |
1471 << options.ToString(); | |
1472 | |
1473 // Check if the DSCP value has changed from the previous setting. | |
1474 bool dscp_option_changed = (options_.dscp != options.dscp); | |
1475 | |
1476 // We retain all of the existing options, and apply the given ones | |
1477 // on top. This means there is no way to "clear" options such that | |
1478 // they go back to the engine default. | |
1479 options_.SetAll(options); | |
1480 if (!engine()->ApplyOptions(options_)) { | |
1481 LOG(LS_WARNING) << | |
1482 "Failed to apply engine options during channel SetOptions."; | |
1483 return false; | |
1484 } | |
1485 | |
1486 if (dscp_option_changed) { | |
1487 rtc::DiffServCodePoint dscp = rtc::DSCP_DEFAULT; | |
1488 if (options_.dscp.value_or(false)) { | |
1489 dscp = kAudioDscpValue; | |
1490 } | |
1491 if (MediaChannel::SetDscp(dscp) != 0) { | |
1492 LOG(LS_WARNING) << "Failed to set DSCP settings for audio channel"; | |
1493 } | |
1494 } | |
1495 | |
1496 LOG(LS_INFO) << "Set voice channel options. Current options: " | |
1497 << options_.ToString(); | |
1498 return true; | |
1499 } | |
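// Illustrative note on the merge semantics above (a sketch, not part of the
// original file; the AudioOptions field names are assumptions):
//   AudioOptions a; a.echo_cancellation = rtc::Optional<bool>(true);
//   AudioOptions b; b.auto_gain_control = rtc::Optional<bool>(false);
//   channel->SetOptions(a);
//   channel->SetOptions(b);  // AEC remains enabled; AGC is now disabled.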
1500 | |
1501 bool WebRtcVoiceMediaChannel::SetRecvCodecs( | |
1502 const std::vector<AudioCodec>& codecs) { | |
1503 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1504 | |
1505 // Set the payload types to be used for incoming media. | |
1506 LOG(LS_INFO) << "Setting receive voice codecs."; | |
1507 | |
1508 if (!VerifyUniquePayloadTypes(codecs)) { | |
1509 LOG(LS_ERROR) << "Codec payload types overlap."; | |
1510 return false; | |
1511 } | |
1512 | |
1513 std::vector<AudioCodec> new_codecs; | |
1514 // Find all new codecs. We allow adding new codecs but don't allow changing | |
1515 // the payload type of codecs that are already configured, since we might | |
1516 // already be receiving packets with that payload type. | |
1517 for (const AudioCodec& codec : codecs) { | |
1518 AudioCodec old_codec; | |
1519 if (FindCodec(recv_codecs_, codec, &old_codec)) { | |
1520 if (old_codec.id != codec.id) { | |
1521 LOG(LS_ERROR) << codec.name << " payload type changed."; | |
1522 return false; | |
1523 } | |
1524 } else { | |
1525 new_codecs.push_back(codec); | |
1526 } | |
1527 } | |
1528 if (new_codecs.empty()) { | |
1529 // There are no new codecs to configure. Already configured codecs are | |
1530 // never removed. | |
1531 return true; | |
1532 } | |
1533 | |
1534 if (playout_) { | |
1535 // Receive codecs cannot be changed while playing, so we temporarily | |
1536 // pause playout. | |
1537 PausePlayout(); | |
1538 } | |
1539 | |
1540 bool result = true; | |
1541 for (const AudioCodec& codec : new_codecs) { | |
1542 webrtc::CodecInst voe_codec; | |
1543 if (WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) { | |
1544 LOG(LS_INFO) << ToString(codec); | |
1545 voe_codec.pltype = codec.id; | |
1546 for (const auto& ch : recv_streams_) { | |
1547 if (engine()->voe()->codec()->SetRecPayloadType( | |
1548 ch.second->channel(), voe_codec) == -1) { | |
1549 LOG_RTCERR2(SetRecPayloadType, ch.second->channel(), | |
1550 ToString(voe_codec)); | |
1551 result = false; | |
1552 } | |
1553 } | |
1554 } else { | |
1555 LOG(LS_WARNING) << "Unknown codec " << ToString(codec); | |
1556 result = false; | |
1557 break; | |
1558 } | |
1559 } | |
1560 if (result) { | |
1561 recv_codecs_ = codecs; | |
1562 } | |
1563 | |
1564 if (desired_playout_ && !playout_) { | |
1565 ResumePlayout(); | |
1566 } | |
1567 return result; | |
1568 } | |
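// Illustrative consequence of the payload type check above (the codec names
// and payload types are made-up values for this sketch): adding codecs is
// allowed, but re-mapping one that was already configured is rejected, e.g.
// if opus was first installed as PT 111:
//   SetRecvCodecs({opus/111, pcmu/0})  -> succeeds, PCMU is added.
//   SetRecvCodecs({opus/122})          -> fails, opus payload type changed.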
1569 | |
1570 bool WebRtcVoiceMediaChannel::SetSendCodecs( | |
1571 int channel, const std::vector<AudioCodec>& codecs) { | |
1572 // Disable VAD, NACK, RED and FEC unless we know the other side wants them. | |
1573 engine()->voe()->codec()->SetVADStatus(channel, false); | |
1574 engine()->voe()->rtp()->SetNACKStatus(channel, false, 0); | |
1575 engine()->voe()->rtp()->SetREDStatus(channel, false); | |
1576 engine()->voe()->codec()->SetFECStatus(channel, false); | |
1577 | |
1578 // Scan through the list to figure out the codec to use for sending, along | |
1579 // with the proper configuration for VAD. | |
1580 webrtc::CodecInst send_codec; | |
1581 memset(&send_codec, 0, sizeof(send_codec)); | |
1582 | |
1583 bool nack_enabled = nack_enabled_; | |
1584 bool enable_codec_fec = false; | |
1585 bool enable_opus_dtx = false; | |
1586 int opus_max_playback_rate = 0; | |
1587 int red_payload_type = -1; | |
1588 | |
1589 // Set send codec (the first non-telephone-event/CN codec) | |
1590 const AudioCodec* codec = WebRtcVoiceCodecs::GetPreferredCodec( | |
1591 codecs, &send_codec, &red_payload_type); | |
1592 if (codec) { | |
1593 if (red_payload_type != -1) { | |
1594 // Enable redundant encoding of the specified codec. Treat any | |
1595 // failure as a fatal internal error. | |
1596 LOG(LS_INFO) << "Enabling RED on channel " << channel; | |
1597 if (engine()->voe()->rtp()->SetREDStatus(channel, true, | |
1598 red_payload_type) == -1) { | |
1599 LOG_RTCERR3(SetREDStatus, channel, true, red_payload_type); | |
1600 return false; | |
1601 } | |
1602 } else { | |
1603 nack_enabled = HasNack(*codec); | |
1604 // If Opus is the send codec, determine its inband FEC, maximum | |
1605 // playback rate, and internal DTX settings. | |
1606 if (IsCodec(*codec, kOpusCodecName)) { | |
1607 GetOpusConfig(*codec, &send_codec, &enable_codec_fec, | |
1608 &opus_max_playback_rate, &enable_opus_dtx); | |
1609 } | |
1610 | |
1611 // Set packet size if the AudioCodec param kCodecParamPTime is set. | |
1612 int ptime_ms = 0; | |
1613 if (codec->GetParam(kCodecParamPTime, &ptime_ms)) { | |
1614 if (!WebRtcVoiceCodecs::SetPTimeAsPacketSize(&send_codec, ptime_ms)) { | |
1615 LOG(LS_WARNING) << "Failed to set packet size for codec " | |
1616 << send_codec.plname; | |
1617 return false; | |
1618 } | |
1619 } | |
1620 } | |
1621 } | |
1622 | |
1623 if (nack_enabled_ != nack_enabled) { | |
1624 SetNack(channel, nack_enabled); | |
1625 nack_enabled_ = nack_enabled; | |
1626 } | |
1627 if (!codec) { | |
1628 LOG(LS_WARNING) << "No usable send codec found in codec list."; | |
1629 return false; | |
1630 } | |
1631 | |
1632 // Set the codec immediately, since SetVADStatus() depends on whether | |
1633 // the current codec is mono or stereo. | |
1634 if (!SetSendCodec(channel, send_codec)) | |
1635 return false; | |
1636 | |
1637 // FEC should be enabled after SetSendCodec. | |
1638 if (enable_codec_fec) { | |
1639 LOG(LS_INFO) << "Attempt to enable codec internal FEC on channel " | |
1640 << channel; | |
1641 if (engine()->voe()->codec()->SetFECStatus(channel, true) == -1) { | |
1642 // Enable codec internal FEC. Treat any failure as a fatal internal error. | |
1643 LOG_RTCERR2(SetFECStatus, channel, true); | |
1644 return false; | |
1645 } | |
1646 } | |
1647 | |
1648 if (IsCodec(send_codec, kOpusCodecName)) { | |
1649 // DTX and the maximum playback rate must be set after SetSendCodec(), | |
1650 // because the current send codec has to be Opus. | |
1651 | |
1652 // Set Opus internal DTX. | |
1653 LOG(LS_INFO) << "Attempt to " | |
1654 << (enable_opus_dtx ? "enable" : "disable") | |
1655 << " Opus DTX on channel " | |
1656 << channel; | |
1657 if (engine()->voe()->codec()->SetOpusDtx(channel, enable_opus_dtx)) { | |
1658 LOG_RTCERR2(SetOpusDtx, channel, enable_opus_dtx); | |
1659 return false; | |
1660 } | |
1661 | |
1662 // If opus_max_playback_rate <= 0, the default maximum playback rate | |
1663 // (48 kHz) will be used. | |
1664 if (opus_max_playback_rate > 0) { | |
1665 LOG(LS_INFO) << "Attempt to set maximum playback rate to " | |
1666 << opus_max_playback_rate | |
1667 << " Hz on channel " | |
1668 << channel; | |
1669 if (engine()->voe()->codec()->SetOpusMaxPlaybackRate( | |
1670 channel, opus_max_playback_rate) == -1) { | |
1671 LOG_RTCERR2(SetOpusMaxPlaybackRate, channel, opus_max_playback_rate); | |
1672 return false; | |
1673 } | |
1674 } | |
1675 } | |
1676 | |
1677 // Always update the |send_codec_| to the currently set send codec. | |
1678 send_codec_.reset(new webrtc::CodecInst(send_codec)); | |
1679 | |
1680 if (send_bitrate_setting_) { | |
1681 SetSendBitrateInternal(send_bitrate_bps_); | |
1682 } | |
1683 | |
1684 // Loop through the codec list again to configure the CN codec. | |
1685 for (const AudioCodec& codec : codecs) { | |
1686 // Ignore codecs we don't know about. The negotiation step should prevent | |
1687 // this, but double-check to be sure. | |
1688 webrtc::CodecInst voe_codec; | |
1689 if (!WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) { | |
1690 LOG(LS_WARNING) << "Unknown codec " << ToString(codec); | |
1691 continue; | |
1692 } | |
1693 | |
1694 if (IsCodec(codec, kCnCodecName)) { | |
1695 // Turn voice activity detection/comfort noise on if supported. | |
1696 // Set the wideband CN payload type appropriately. | |
1697 // (narrowband always uses the static payload type 13). | |
1698 webrtc::PayloadFrequencies cn_freq; | |
1699 switch (codec.clockrate) { | |
1700 case 8000: | |
1701 cn_freq = webrtc::kFreq8000Hz; | |
1702 break; | |
1703 case 16000: | |
1704 cn_freq = webrtc::kFreq16000Hz; | |
1705 break; | |
1706 case 32000: | |
1707 cn_freq = webrtc::kFreq32000Hz; | |
1708 break; | |
1709 default: | |
1710 LOG(LS_WARNING) << "CN frequency " << codec.clockrate | |
1711 << " not supported."; | |
1712 continue; | |
1713 } | |
1714 // Set the CN payload type and the VAD status. | |
1715 // The CN payload type for 8000 Hz clockrate is fixed at 13. | |
1716 if (cn_freq != webrtc::kFreq8000Hz) { | |
1717 if (engine()->voe()->codec()->SetSendCNPayloadType( | |
1718 channel, codec.id, cn_freq) == -1) { | |
1719 LOG_RTCERR3(SetSendCNPayloadType, channel, codec.id, cn_freq); | |
1720 // TODO(ajm): This failure condition will be removed from VoE. | |
1721 // Restore the return here when we update to a new enough webrtc. | |
1722 // | |
1723 // Not returning false because SetSendCNPayloadType() will fail if | |
1724 // the channel is already sending. | |
1725 // This can happen if the remote description is applied twice, for | |
1726 // example in the case of ROAP on top of JSEP, where both sides will | |
1727 // send the offer. | |
1728 } | |
1729 } | |
1730 // Only turn on VAD if we have a CN payload type that matches the | |
1731 // clockrate for the codec we are going to use. | |
1732 if (codec.clockrate == send_codec.plfreq && send_codec.channels != 2) { | |
1733 // TODO(minyue): If CN frequency == 48000 Hz is allowed, consider the | |
1734 // interaction between VAD and Opus FEC. | |
1735 LOG(LS_INFO) << "Enabling VAD"; | |
1736 if (engine()->voe()->codec()->SetVADStatus(channel, true) == -1) { | |
1737 LOG_RTCERR2(SetVADStatus, channel, true); | |
1738 return false; | |
1739 } | |
1740 } | |
1741 } | |
1742 } | |
1743 return true; | |
1744 } | |
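// Illustrative summary of the CN/VAD handling above (payload type 96 below is
// just an example of a dynamically assigned value):
//   CN/8000  -> VAD enabled with the static payload type 13 (never remapped).
//   CN/16000 -> SetSendCNPayloadType(channel, 96, webrtc::kFreq16000Hz).
//   CN/32000 -> SetSendCNPayloadType(channel, 96, webrtc::kFreq32000Hz).
// VAD itself is only turned on when the CN clockrate matches the send codec's
// sampling rate and the send codec is mono.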
1745 | |
1746 bool WebRtcVoiceMediaChannel::SetSendCodecs( | |
1747 const std::vector<AudioCodec>& codecs) { | |
1748 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1749 // TODO(solenberg): Validate input - that payload types don't overlap and | |
1750 // are within range, and filter out codecs we don't support, | |
1751 // redundant codecs, etc. | |
1752 | |
1753 // Find the DTMF telephone event "codec" payload type. | |
1754 dtmf_payload_type_ = rtc::Optional<int>(); | |
1755 for (const AudioCodec& codec : codecs) { | |
1756 if (IsCodec(codec, kDtmfCodecName)) { | |
1757 dtmf_payload_type_ = rtc::Optional<int>(codec.id); | |
1758 break; | |
1759 } | |
1760 } | |
1761 | |
1762 // Cache the codecs in order to configure channels created later. | |
1763 send_codecs_ = codecs; | |
1764 for (const auto& ch : send_streams_) { | |
1765 if (!SetSendCodecs(ch.second->channel(), codecs)) { | |
1766 return false; | |
1767 } | |
1768 } | |
1769 | |
1770 // Set the NACK status on the receive channels, using the updated |nack_enabled_|. | |
1771 for (const auto& ch : recv_streams_) { | |
1772 SetNack(ch.second->channel(), nack_enabled_); | |
1773 } | |
1774 | |
1775 // Check if the transport cc feedback has changed on the preferred send codec, | |
1776 // and in that case reconfigure all receive streams. | |
1777 webrtc::CodecInst voe_codec; | |
1778 int red_payload_type; | |
1779 const AudioCodec* send_codec = WebRtcVoiceCodecs::GetPreferredCodec( | |
1780 send_codecs_, &voe_codec, &red_payload_type); | |
1781 if (send_codec) { | |
1782 bool transport_cc = HasTransportCc(*send_codec); | |
1783 if (transport_cc_enabled_ != transport_cc) { | |
1784 LOG(LS_INFO) << "Recreate all the receive streams because the send " | |
1785 "codec has changed."; | |
1786 transport_cc_enabled_ = transport_cc; | |
1787 for (auto& kv : recv_streams_) { | |
1788 RTC_DCHECK(kv.second != nullptr); | |
1789 kv.second->RecreateAudioReceiveStream(transport_cc_enabled_); | |
1790 } | |
1791 } | |
1792 } | |
1793 | |
1794 return true; | |
1795 } | |
1796 | |
1797 void WebRtcVoiceMediaChannel::SetNack(int channel, bool nack_enabled) { | |
1798 if (nack_enabled) { | |
1799 LOG(LS_INFO) << "Enabling NACK for channel " << channel; | |
1800 engine()->voe()->rtp()->SetNACKStatus(channel, true, kNackMaxPackets); | |
1801 } else { | |
1802 LOG(LS_INFO) << "Disabling NACK for channel " << channel; | |
1803 engine()->voe()->rtp()->SetNACKStatus(channel, false, 0); | |
1804 } | |
1805 } | |
1806 | |
1807 bool WebRtcVoiceMediaChannel::SetSendCodec( | |
1808 int channel, const webrtc::CodecInst& send_codec) { | |
1809 LOG(LS_INFO) << "Send channel " << channel << " selected voice codec " | |
1810 << ToString(send_codec) << ", bitrate=" << send_codec.rate; | |
1811 | |
1812 webrtc::CodecInst current_codec; | |
1813 if (engine()->voe()->codec()->GetSendCodec(channel, current_codec) == 0 && | |
1814 (send_codec == current_codec)) { | |
1815 // Codec is already configured, we can return without setting it again. | |
1816 return true; | |
1817 } | |
1818 | |
1819 if (engine()->voe()->codec()->SetSendCodec(channel, send_codec) == -1) { | |
1820 LOG_RTCERR2(SetSendCodec, channel, ToString(send_codec)); | |
1821 return false; | |
1822 } | |
1823 return true; | |
1824 } | |
1825 | |
1826 bool WebRtcVoiceMediaChannel::SetPlayout(bool playout) { | |
1827 desired_playout_ = playout; | |
1828 return ChangePlayout(desired_playout_); | |
1829 } | |
1830 | |
1831 bool WebRtcVoiceMediaChannel::PausePlayout() { | |
1832 return ChangePlayout(false); | |
1833 } | |
1834 | |
1835 bool WebRtcVoiceMediaChannel::ResumePlayout() { | |
1836 return ChangePlayout(desired_playout_); | |
1837 } | |
1838 | |
1839 bool WebRtcVoiceMediaChannel::ChangePlayout(bool playout) { | |
1840 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1841 if (playout_ == playout) { | |
1842 return true; | |
1843 } | |
1844 | |
1845 for (const auto& ch : recv_streams_) { | |
1846 if (!SetPlayout(ch.second->channel(), playout)) { | |
1847 LOG(LS_ERROR) << "SetPlayout " << playout << " on channel " | |
1848 << ch.second->channel() << " failed"; | |
1849 return false; | |
1850 } | |
1851 } | |
1852 playout_ = playout; | |
1853 return true; | |
1854 } | |
1855 | |
1856 bool WebRtcVoiceMediaChannel::SetSend(SendFlags send) { | |
1857 desired_send_ = send; | |
1858 if (!send_streams_.empty()) { | |
1859 return ChangeSend(desired_send_); | |
1860 } | |
1861 return true; | |
1862 } | |
1863 | |
1864 bool WebRtcVoiceMediaChannel::PauseSend() { | |
1865 return ChangeSend(SEND_NOTHING); | |
1866 } | |
1867 | |
1868 bool WebRtcVoiceMediaChannel::ResumeSend() { | |
1869 return ChangeSend(desired_send_); | |
1870 } | |
1871 | |
1872 bool WebRtcVoiceMediaChannel::ChangeSend(SendFlags send) { | |
1873 if (send_ == send) { | |
1874 return true; | |
1875 } | |
1876 | |
1877 // Apply channel specific options when channel is enabled for sending. | |
1878 if (send == SEND_MICROPHONE) { | |
1879 engine()->ApplyOptions(options_); | |
1880 } | |
1881 | |
1882 // Change the settings on each send channel. | |
1883 for (const auto& ch : send_streams_) { | |
1884 if (!ChangeSend(ch.second->channel(), send)) { | |
1885 return false; | |
1886 } | |
1887 } | |
1888 | |
1889 send_ = send; | |
1890 return true; | |
1891 } | |
1892 | |
1893 bool WebRtcVoiceMediaChannel::ChangeSend(int channel, SendFlags send) { | |
1894 if (send == SEND_MICROPHONE) { | |
1895 if (engine()->voe()->base()->StartSend(channel) == -1) { | |
1896 LOG_RTCERR1(StartSend, channel); | |
1897 return false; | |
1898 } | |
1899 } else { // SEND_NOTHING | |
1900 RTC_DCHECK(send == SEND_NOTHING); | |
1901 if (engine()->voe()->base()->StopSend(channel) == -1) { | |
1902 LOG_RTCERR1(StopSend, channel); | |
1903 return false; | |
1904 } | |
1905 } | |
1906 | |
1907 return true; | |
1908 } | |
1909 | |
1910 bool WebRtcVoiceMediaChannel::SetAudioSend(uint32_t ssrc, | |
1911 bool enable, | |
1912 const AudioOptions* options, | |
1913 AudioRenderer* renderer) { | |
1914 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1915 // TODO(solenberg): The state change should be fully rolled back if any one of | |
1916 // these calls fail. | |
1917 if (!SetLocalRenderer(ssrc, renderer)) { | |
1918 return false; | |
1919 } | |
1920 if (!MuteStream(ssrc, !enable)) { | |
1921 return false; | |
1922 } | |
1923 if (enable && options) { | |
1924 return SetOptions(*options); | |
1925 } | |
1926 return true; | |
1927 } | |
1928 | |
1929 int WebRtcVoiceMediaChannel::CreateVoEChannel() { | |
1930 int id = engine()->CreateVoEChannel(); | |
1931 if (id == -1) { | |
1932 LOG_RTCERR0(CreateVoEChannel); | |
1933 return -1; | |
1934 } | |
1935 if (engine()->voe()->network()->RegisterExternalTransport(id, *this) == -1) { | |
1936 LOG_RTCERR2(RegisterExternalTransport, id, this); | |
1937 engine()->voe()->base()->DeleteChannel(id); | |
1938 return -1; | |
1939 } | |
1940 return id; | |
1941 } | |
1942 | |
1943 bool WebRtcVoiceMediaChannel::DeleteVoEChannel(int channel) { | |
1944 if (engine()->voe()->network()->DeRegisterExternalTransport(channel) == -1) { | |
1945 LOG_RTCERR1(DeRegisterExternalTransport, channel); | |
1946 } | |
1947 if (engine()->voe()->base()->DeleteChannel(channel) == -1) { | |
1948 LOG_RTCERR1(DeleteChannel, channel); | |
1949 return false; | |
1950 } | |
1951 return true; | |
1952 } | |
1953 | |
1954 bool WebRtcVoiceMediaChannel::AddSendStream(const StreamParams& sp) { | |
1955 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
1956 LOG(LS_INFO) << "AddSendStream: " << sp.ToString(); | |
1957 | |
1958 uint32_t ssrc = sp.first_ssrc(); | |
1959 RTC_DCHECK(0 != ssrc); | |
1960 | |
1961 if (GetSendChannelId(ssrc) != -1) { | |
1962 LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc; | |
1963 return false; | |
1964 } | |
1965 | |
1966 // Create a new channel for sending audio data. | |
1967 int channel = CreateVoEChannel(); | |
1968 if (channel == -1) { | |
1969 return false; | |
1970 } | |
1971 | |
1972 // Save the channel to send_streams_, so that RemoveSendStream() can still | |
1973 // delete the channel in case failure happens below. | |
1974 webrtc::AudioTransport* audio_transport = | |
1975 engine()->voe()->base()->audio_transport(); | |
1976 send_streams_.insert(std::make_pair(ssrc, new WebRtcAudioSendStream( | |
1977 channel, audio_transport, ssrc, sp.cname, send_rtp_extensions_, call_))); | |
1978 | |
1979 // Set the current codecs to be used for the new channel. We need to do this | |
1980 // after adding the channel to send_streams_, because of how the max bitrate | |
1981 // is currently being configured by SetSendCodec(). | |
1982 if (!send_codecs_.empty() && !SetSendCodecs(channel, send_codecs_)) { | |
1983 RemoveSendStream(ssrc); | |
1984 return false; | |
1985 } | |
1986 | |
1987 // At this point the channel's local SSRC has been updated. If the channel is | |
1988 // the first send channel, make sure that all the receive channels are updated | |
1989 // with the same SSRC in order to send receiver reports. | |
1990 if (send_streams_.size() == 1) { | |
1991 receiver_reports_ssrc_ = ssrc; | |
1992 for (const auto& stream : recv_streams_) { | |
1993 int recv_channel = stream.second->channel(); | |
1994 if (engine()->voe()->rtp()->SetLocalSSRC(recv_channel, ssrc) != 0) { | |
1995 LOG_RTCERR2(SetLocalSSRC, recv_channel, ssrc); | |
1996 return false; | |
1997 } | |
1998 engine()->voe()->base()->AssociateSendChannel(recv_channel, channel); | |
1999 LOG(LS_INFO) << "VoiceEngine channel #" << recv_channel | |
2000 << " is associated with channel #" << channel << "."; | |
2001 } | |
2002 } | |
2003 | |
2004 return ChangeSend(channel, desired_send_); | |
2005 } | |
2006 | |
2007 bool WebRtcVoiceMediaChannel::RemoveSendStream(uint32_t ssrc) { | |
2008 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2009 LOG(LS_INFO) << "RemoveSendStream: " << ssrc; | |
2010 | |
2011 auto it = send_streams_.find(ssrc); | |
2012 if (it == send_streams_.end()) { | |
2013 LOG(LS_WARNING) << "Trying to remove a stream with ssrc " << ssrc | |
2014 << " which doesn't exist."; | |
2015 return false; | |
2016 } | |
2017 | |
2018 int channel = it->second->channel(); | |
2019 ChangeSend(channel, SEND_NOTHING); | |
2020 | |
2021 // Clean up and delete the send stream+channel. | |
2022 LOG(LS_INFO) << "Removing audio send stream " << ssrc | |
2023 << " with VoiceEngine channel #" << channel << "."; | |
2024 delete it->second; | |
2025 send_streams_.erase(it); | |
2026 if (!DeleteVoEChannel(channel)) { | |
2027 return false; | |
2028 } | |
2029 if (send_streams_.empty()) { | |
2030 ChangeSend(SEND_NOTHING); | |
2031 } | |
2032 return true; | |
2033 } | |
2034 | |
2035 bool WebRtcVoiceMediaChannel::AddRecvStream(const StreamParams& sp) { | |
2036 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2037 LOG(LS_INFO) << "AddRecvStream: " << sp.ToString(); | |
2038 | |
2039 if (!ValidateStreamParams(sp)) { | |
2040 return false; | |
2041 } | |
2042 | |
2043 const uint32_t ssrc = sp.first_ssrc(); | |
2044 if (ssrc == 0) { | |
2045 LOG(LS_WARNING) << "AddRecvStream with ssrc==0 is not supported."; | |
2046 return false; | |
2047 } | |
2048 | |
2049 // Remove the default receive stream if one had been created with this ssrc; | |
2050 // it will be recreated below. | |
2051 if (IsDefaultRecvStream(ssrc)) { | |
2052 RemoveRecvStream(ssrc); | |
2053 } | |
2054 | |
2055 if (GetReceiveChannelId(ssrc) != -1) { | |
2056 LOG(LS_ERROR) << "Stream already exists with ssrc " << ssrc; | |
2057 return false; | |
2058 } | |
2059 | |
2060 // Create a new channel for receiving audio data. | |
2061 const int channel = CreateVoEChannel(); | |
2062 if (channel == -1) { | |
2063 return false; | |
2064 } | |
2065 | |
2066 // Turn off all supported codecs. | |
2067 // TODO(solenberg): Remove once "no codecs" is the default state of a stream. | |
2068 for (webrtc::CodecInst voe_codec : webrtc::acm2::RentACodec::Database()) { | |
2069 voe_codec.pltype = -1; | |
2070 if (engine()->voe()->codec()->SetRecPayloadType(channel, voe_codec) == -1) { | |
2071 LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec)); | |
2072 DeleteVoEChannel(channel); | |
2073 return false; | |
2074 } | |
2075 } | |
2076 | |
2077 // Only enable those configured for this channel. | |
2078 for (const auto& codec : recv_codecs_) { | |
2079 webrtc::CodecInst voe_codec; | |
2080 if (WebRtcVoiceEngine::ToCodecInst(codec, &voe_codec)) { | |
2081 voe_codec.pltype = codec.id; | |
2082 if (engine()->voe()->codec()->SetRecPayloadType( | |
2083 channel, voe_codec) == -1) { | |
2084 LOG_RTCERR2(SetRecPayloadType, channel, ToString(voe_codec)); | |
2085 DeleteVoEChannel(channel); | |
2086 return false; | |
2087 } | |
2088 } | |
2089 } | |
2090 | |
2091 const int send_channel = GetSendChannelId(receiver_reports_ssrc_); | |
2092 if (send_channel != -1) { | |
2093 // Associate the receive channel with the first send channel (so the receive | |
2094 // channel can obtain RTT from the send channel). | |
2095 engine()->voe()->base()->AssociateSendChannel(channel, send_channel); | |
2096 LOG(LS_INFO) << "VoiceEngine channel #" << channel | |
2097 << " is associated with channel #" << send_channel << "."; | |
2098 } | |
2099 | |
2100 transport_cc_enabled_ = | |
2101 !send_codecs_.empty() ? HasTransportCc(send_codecs_[0]) : false; | |
2102 | |
2103 recv_streams_.insert(std::make_pair( | |
2104 ssrc, new WebRtcAudioReceiveStream(channel, ssrc, receiver_reports_ssrc_, | |
2105 transport_cc_enabled_, sp.sync_label, | |
2106 recv_rtp_extensions_, call_))); | |
2107 | |
2108 SetNack(channel, nack_enabled_); | |
2109 SetPlayout(channel, playout_); | |
2110 | |
2111 return true; | |
2112 } | |
2113 | |
2114 bool WebRtcVoiceMediaChannel::RemoveRecvStream(uint32_t ssrc) { | |
2115 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2116 LOG(LS_INFO) << "RemoveRecvStream: " << ssrc; | |
2117 | |
2118 const auto it = recv_streams_.find(ssrc); | |
2119 if (it == recv_streams_.end()) { | |
2120 LOG(LS_WARNING) << "Trying to remove a stream with ssrc " << ssrc | |
2121 << " which doesn't exist."; | |
2122 return false; | |
2123 } | |
2124 | |
2125 // Deregister default channel, if that's the one being destroyed. | |
2126 if (IsDefaultRecvStream(ssrc)) { | |
2127 default_recv_ssrc_ = -1; | |
2128 } | |
2129 | |
2130 const int channel = it->second->channel(); | |
2131 | |
2132 // Clean up and delete the receive stream+channel. | |
2133 LOG(LS_INFO) << "Removing audio receive stream " << ssrc | |
2134 << " with VoiceEngine channel #" << channel << "."; | |
2135 it->second->SetRawAudioSink(nullptr); | |
2136 delete it->second; | |
2137 recv_streams_.erase(it); | |
2138 return DeleteVoEChannel(channel); | |
2139 } | |
2140 | |
2141 bool WebRtcVoiceMediaChannel::SetLocalRenderer(uint32_t ssrc, | |
2142 AudioRenderer* renderer) { | |
2143 auto it = send_streams_.find(ssrc); | |
2144 if (it == send_streams_.end()) { | |
2145 if (renderer) { | |
2146 // Return an error if trying to set a valid renderer with an invalid ssrc. | |
2147 LOG(LS_ERROR) << "SetLocalRenderer failed with ssrc " << ssrc; | |
2148 return false; | |
2149 } | |
2150 | |
2151 // The channel has likely gone away; do nothing. | |
2152 return true; | |
2153 } | |
2154 | |
2155 if (renderer) { | |
2156 it->second->Start(renderer); | |
2157 } else { | |
2158 it->second->Stop(); | |
2159 } | |
2160 | |
2161 return true; | |
2162 } | |
2163 | |
2164 bool WebRtcVoiceMediaChannel::GetActiveStreams( | |
2165 AudioInfo::StreamList* actives) { | |
2166 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2167 actives->clear(); | |
2168 for (const auto& ch : recv_streams_) { | |
2169 int level = GetOutputLevel(ch.second->channel()); | |
2170 if (level > 0) { | |
2171 actives->push_back(std::make_pair(ch.first, level)); | |
2172 } | |
2173 } | |
2174 return true; | |
2175 } | |
2176 | |
2177 int WebRtcVoiceMediaChannel::GetOutputLevel() { | |
2178 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2179 int highest = 0; | |
2180 for (const auto& ch : recv_streams_) { | |
2181 highest = std::max(GetOutputLevel(ch.second->channel()), highest); | |
2182 } | |
2183 return highest; | |
2184 } | |
2185 | |
2186 int WebRtcVoiceMediaChannel::GetTimeSinceLastTyping() { | |
2187 int ret; | |
2188 if (engine()->voe()->processing()->TimeSinceLastTyping(ret) == -1) { | |
2189 // In case of error, log the info and continue | |
2190 LOG_RTCERR0(TimeSinceLastTyping); | |
2191 ret = -1; | |
2192 } else { | |
2193 ret *= 1000; // We return ms, webrtc returns seconds. | |
2194 } | |
2195 return ret; | |
2196 } | |
2197 | |
2198 void WebRtcVoiceMediaChannel::SetTypingDetectionParameters(int time_window, | |
2199 int cost_per_typing, int reporting_threshold, int penalty_decay, | |
2200 int type_event_delay) { | |
2201 if (engine()->voe()->processing()->SetTypingDetectionParameters( | |
2202 time_window, cost_per_typing, | |
2203 reporting_threshold, penalty_decay, type_event_delay) == -1) { | |
2204 // In case of error, log the info and continue | |
2205 LOG_RTCERR5(SetTypingDetectionParameters, time_window, | |
2206 cost_per_typing, reporting_threshold, penalty_decay, | |
2207 type_event_delay); | |
2208 } | |
2209 } | |
2210 | |
2211 bool WebRtcVoiceMediaChannel::SetOutputVolume(uint32_t ssrc, double volume) { | |
2212 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2213 if (ssrc == 0) { | |
2214 default_recv_volume_ = volume; | |
2215 if (default_recv_ssrc_ == -1) { | |
2216 return true; | |
2217 } | |
2218 ssrc = static_cast<uint32_t>(default_recv_ssrc_); | |
2219 } | |
2220 int ch_id = GetReceiveChannelId(ssrc); | |
2221 if (ch_id < 0) { | |
2222 LOG(LS_WARNING) << "Cannot find channel for ssrc:" << ssrc; | |
2223 return false; | |
2224 } | |
2225 | |
2226 if (-1 == engine()->voe()->volume()->SetChannelOutputVolumeScaling(ch_id, | |
2227 volume)) { | |
2228 LOG_RTCERR2(SetChannelOutputVolumeScaling, ch_id, volume); | |
2229 return false; | |
2230 } | |
2231 LOG(LS_INFO) << "SetOutputVolume to " << volume | |
2232 << " for channel " << ch_id << " and ssrc " << ssrc; | |
2233 return true; | |
2234 } | |
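// Illustrative behavior of the ssrc == 0 case above (a sketch, not part of
// this file): a volume set for ssrc 0 is cached and later applied to whatever
// unsignaled stream becomes the default receive stream:
//   channel->SetOutputVolume(0, 0.5);  // remembered in default_recv_volume_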
2235 | |
2236 bool WebRtcVoiceMediaChannel::CanInsertDtmf() { | |
2237 return dtmf_payload_type_ ? true : false; | |
2238 } | |
2239 | |
2240 bool WebRtcVoiceMediaChannel::InsertDtmf(uint32_t ssrc, int event, | |
2241 int duration) { | |
2242 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2243 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::InsertDtmf"; | |
2244 if (!dtmf_payload_type_) { | |
2245 return false; | |
2246 } | |
2247 | |
2248 // Figure out which WebRtcAudioSendStream to send the event on. | |
2249 auto it = ssrc != 0 ? send_streams_.find(ssrc) : send_streams_.begin(); | |
2250 if (it == send_streams_.end()) { | |
2251 LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use."; | |
2252 return false; | |
2253 } | |
2254 if (event < kMinTelephoneEventCode || | |
2255 event > kMaxTelephoneEventCode) { | |
2256 LOG(LS_WARNING) << "DTMF event code " << event << " out of range."; | |
2257 return false; | |
2258 } | |
2259 if (duration < kMinTelephoneEventDuration || | |
2260 duration > kMaxTelephoneEventDuration) { | |
2261 LOG(LS_WARNING) << "DTMF event duration " << duration << " out of range."; | |
2262 return false; | |
2263 } | |
2264 return it->second->SendTelephoneEvent(*dtmf_payload_type_, event, duration); | |
2265 } | |
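// Illustrative caller of InsertDtmf() (a hypothetical sketch; the duration is
// assumed to lie within kMin/kMaxTelephoneEventDuration): event code 5
// corresponds to the DTMF digit "5", and ssrc 0 selects the first send stream
// per the lookup above:
//   if (channel->CanInsertDtmf()) {
//     channel->InsertDtmf(0 /* first send stream */, 5, 160 /* ms */);
//   }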
2266 | |
2267 void WebRtcVoiceMediaChannel::OnPacketReceived( | |
2268 rtc::Buffer* packet, const rtc::PacketTime& packet_time) { | |
2269 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2270 | |
2271 uint32_t ssrc = 0; | |
2272 if (!GetRtpSsrc(packet->data(), packet->size(), &ssrc)) { | |
2273 return; | |
2274 } | |
2275 | |
2276 // If we don't have a default channel, and the SSRC is unknown, create a | |
2277 // default channel. | |
2278 if (default_recv_ssrc_ == -1 && GetReceiveChannelId(ssrc) == -1) { | |
2279 StreamParams sp; | |
2280 sp.ssrcs.push_back(ssrc); | |
2281 LOG(LS_INFO) << "Creating default receive stream for SSRC=" << ssrc << "."; | |
2282 if (!AddRecvStream(sp)) { | |
2283 LOG(LS_WARNING) << "Could not create default receive stream."; | |
2284 return; | |
2285 } | |
2286 default_recv_ssrc_ = ssrc; | |
2287 SetOutputVolume(default_recv_ssrc_, default_recv_volume_); | |
2288 if (default_sink_) { | |
2289 rtc::scoped_ptr<webrtc::AudioSinkInterface> proxy_sink( | |
2290 new ProxySink(default_sink_.get())); | |
2291 SetRawAudioSink(default_recv_ssrc_, std::move(proxy_sink)); | |
2292 } | |
2293 } | |
2294 | |
2295 // Forward packet to Call. If the SSRC is unknown we'll return after this. | |
2296 const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp, | |
2297 packet_time.not_before); | |
2298 webrtc::PacketReceiver::DeliveryStatus delivery_result = | |
2299 call_->Receiver()->DeliverPacket(webrtc::MediaType::AUDIO, | |
2300 reinterpret_cast<const uint8_t*>(packet->data()), packet->size(), | |
2301 webrtc_packet_time); | |
2302 if (webrtc::PacketReceiver::DELIVERY_OK != delivery_result) { | |
2303 // If the SSRC is unknown here, route it to the default channel, if we have | |
2304 // one. See: https://bugs.chromium.org/p/webrtc/issues/detail?id=5208 | |
2305 if (default_recv_ssrc_ == -1) { | |
2306 return; | |
2307 } else { | |
2308 ssrc = default_recv_ssrc_; | |
2309 } | |
2310 } | |
2311 | |
2312 // Find the channel to send this packet to. It must exist, since either | |
2313 // webrtc::Call demuxed the packet or we fell back to the default channel. | |
2314 int channel = GetReceiveChannelId(ssrc); | |
2315 RTC_DCHECK(channel != -1); | |
2316 | |
2317 // Pass it off to the decoder. | |
2318 engine()->voe()->network()->ReceivedRTPPacket( | |
2319 channel, packet->data(), packet->size(), webrtc_packet_time); | |
2320 } | |
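// Illustrative effect of the default-stream handling above (a sketch, not
// part of this file; SSRC 0x1234 is an arbitrary example): the first RTP
// packet with an unsignaled SSRC behaves roughly as if the remote side had
// signaled it:
//   StreamParams sp; sp.ssrcs.push_back(0x1234);
//   AddRecvStream(sp);  // becomes default_recv_ssrc_
// Later packets with other unknown SSRCs are then routed to that same default
// receive channel rather than creating additional streams.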
2321 | |
2322 void WebRtcVoiceMediaChannel::OnRtcpReceived( | |
2323 rtc::Buffer* packet, const rtc::PacketTime& packet_time) { | |
2324 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2325 | |
2326 // Forward packet to Call as well. | |
2327 const webrtc::PacketTime webrtc_packet_time(packet_time.timestamp, | |
2328 packet_time.not_before); | |
2329 call_->Receiver()->DeliverPacket(webrtc::MediaType::AUDIO, | |
2330 reinterpret_cast<const uint8_t*>(packet->data()), packet->size(), | |
2331 webrtc_packet_time); | |
2332 | |
2333 // Sending channels need all RTCP packets with feedback information. | |
2334 // Even sender reports can contain attached report blocks. | |
2335 // Receiving channels need sender reports in order to create | |
2336 // correct receiver reports. | |
2337 int type = 0; | |
2338 if (!GetRtcpType(packet->data(), packet->size(), &type)) { | |
2339 LOG(LS_WARNING) << "Failed to parse type from received RTCP packet"; | |
2340 return; | |
2341 } | |
2342 | |
2343 // If it is a sender report, find the receive channel that is listening. | |
2344 if (type == kRtcpTypeSR) { | |
2345 uint32_t ssrc = 0; | |
2346 if (!GetRtcpSsrc(packet->data(), packet->size(), &ssrc)) { | |
2347 return; | |
2348 } | |
2349 int recv_channel_id = GetReceiveChannelId(ssrc); | |
2350 if (recv_channel_id != -1) { | |
2351 engine()->voe()->network()->ReceivedRTCPPacket( | |
2352 recv_channel_id, packet->data(), packet->size()); | |
2353 } | |
2354 } | |
2355 | |
2356 // An SR may contain an RR, and any RR entry may correspond to any one of the | |
2357 // send channels. So all RTCP packets must be forwarded to all send channels. | |
2358 // VoE will filter out the RR internally. | |
2359 for (const auto& ch : send_streams_) { | |
2360 engine()->voe()->network()->ReceivedRTCPPacket( | |
2361 ch.second->channel(), packet->data(), packet->size()); | |
2362 } | |
2363 } | |
2364 | |
2365 bool WebRtcVoiceMediaChannel::MuteStream(uint32_t ssrc, bool muted) { | |
2366 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2367 int channel = GetSendChannelId(ssrc); | |
2368 if (channel == -1) { | |
2369 LOG(LS_WARNING) << "The specified ssrc " << ssrc << " is not in use."; | |
2370 return false; | |
2371 } | |
2372 if (engine()->voe()->volume()->SetInputMute(channel, muted) == -1) { | |
2373 LOG_RTCERR2(SetInputMute, channel, muted); | |
2374 return false; | |
2375 } | |
2376 // We set the AGC to the mute state only when all the channels are muted. | |
2377 // This implementation is not ideal; instead we should signal the AGC when | |
2378 // the mic channel is muted/unmuted. We can't do that today because there | |
2379 // is no good way to know which stream maps to the mic channel. | |
2380 bool all_muted = muted; | |
2381 for (const auto& ch : send_streams_) { | |
2382 if (!all_muted) { | |
2383 break; | |
2384 } | |
2385 if (engine()->voe()->volume()->GetInputMute(ch.second->channel(), | |
2386 all_muted)) { | |
2387 LOG_RTCERR1(GetInputMute, ch.second->channel()); | |
2388 return false; | |
2389 } | |
2390 } | |
2391 | |
2392 webrtc::AudioProcessing* ap = engine()->voe()->base()->audio_processing(); | |
2393 if (ap) { | |
2394 ap->set_output_will_be_muted(all_muted); | |
2395 } | |
2396 return true; | |
2397 } | |
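// Illustrative interaction with the shared audio processing module above (a
// sketch, not part of this file; ssrc_a/ssrc_b are placeholder SSRCs): with
// two send streams, the APM is only told the output will be muted once every
// stream reports muted input:
//   channel->MuteStream(ssrc_a, true);  // all_muted == false
//   channel->MuteStream(ssrc_b, true);  // all_muted == true -> output muted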
2398 | |
2399 // TODO(minyue): SetMaxSendBandwidth() may be renamed to | |
2400 // SetMaxSendBitrate() in the future. | |
2401 bool WebRtcVoiceMediaChannel::SetMaxSendBandwidth(int bps) { | |
2402 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetMaxSendBandwidth."; | |
2403 return SetSendBitrateInternal(bps); | |
2404 } | |
2405 | |
2406 bool WebRtcVoiceMediaChannel::SetSendBitrateInternal(int bps) { | |
2407 LOG(LS_INFO) << "WebRtcVoiceMediaChannel::SetSendBitrateInternal."; | |
2408 | |
2409 send_bitrate_setting_ = true; | |
2410 send_bitrate_bps_ = bps; | |
2411 | |
2412 if (!send_codec_) { | |
2413 LOG(LS_INFO) << "The send codec has not been set up yet. " | |
2414 << "The send bitrate setting will be applied later."; | |
2415 return true; | |
2416 } | |
2417 | |
2418 // Bitrate is auto by default. | |
2419 // TODO(bemasc): Fix this so that if SetMaxSendBandwidth(50) is followed by | |
2420 // SetMaxSendBandwidth(0), the second call removes the previous limit. | |
2421 if (bps <= 0) | |
2422 return true; | |
2423 | |
2424 webrtc::CodecInst codec = *send_codec_; | |
2425 bool is_multi_rate = WebRtcVoiceCodecs::IsCodecMultiRate(codec); | |
2426 | |
2427 if (is_multi_rate) { | |
2428 // If codec is multi-rate then just set the bitrate. | |
2429 codec.rate = bps; | |
2430 for (const auto& ch : send_streams_) { | |
2431 if (!SetSendCodec(ch.second->channel(), codec)) { | |
2432 LOG(LS_INFO) << "Failed to set codec " << codec.plname | |
2433 << " to bitrate " << bps << " bps."; | |
2434 return false; | |
2435 } | |
2436 } | |
2437 return true; | |
2438 } else { | |
2439 // If the codec is not multi-rate and |bps| is less than its fixed bitrate, | |
2440 // then fail. If |bps| equals or exceeds the fixed bitrate, then ignore the | |
2441 // request. | |
2442 if (bps < codec.rate) { | |
2443 LOG(LS_INFO) << "Failed to set codec " << codec.plname | |
2444 << " to bitrate " << bps << " bps" | |
2445 << ", requires at least " << codec.rate << " bps."; | |
2446 return false; | |
2447 } | |
2448 return true; | |
2449 } | |
2450 } | |
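// Illustrative behavior of the bitrate cap above (the cap values are made up;
// PCMU's fixed rate of 64000 bps is standard G.711):
//   SetMaxSendBandwidth(32000);  // Opus (multi-rate): codec.rate = 32000.
//                                // PCMU (fixed): fails, 32000 < 64000.
//   SetMaxSendBandwidth(96000);  // Opus: codec.rate = 96000.
//                                // PCMU: succeeds but is ignored.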
2451 | |
2452 bool WebRtcVoiceMediaChannel::GetStats(VoiceMediaInfo* info) { | |
2453 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2454 RTC_DCHECK(info); | |
2455 | |
2456 // Get SSRC and stats for each sender. | |
2457 RTC_DCHECK(info->senders.size() == 0); | |
2458 for (const auto& stream : send_streams_) { | |
2459 webrtc::AudioSendStream::Stats stats = stream.second->GetStats(); | |
2460 VoiceSenderInfo sinfo; | |
2461 sinfo.add_ssrc(stats.local_ssrc); | |
2462 sinfo.bytes_sent = stats.bytes_sent; | |
2463 sinfo.packets_sent = stats.packets_sent; | |
2464 sinfo.packets_lost = stats.packets_lost; | |
2465 sinfo.fraction_lost = stats.fraction_lost; | |
2466 sinfo.codec_name = stats.codec_name; | |
2467 sinfo.ext_seqnum = stats.ext_seqnum; | |
2468 sinfo.jitter_ms = stats.jitter_ms; | |
2469 sinfo.rtt_ms = stats.rtt_ms; | |
2470 sinfo.audio_level = stats.audio_level; | |
2471 sinfo.aec_quality_min = stats.aec_quality_min; | |
2472 sinfo.echo_delay_median_ms = stats.echo_delay_median_ms; | |
2473 sinfo.echo_delay_std_ms = stats.echo_delay_std_ms; | |
2474 sinfo.echo_return_loss = stats.echo_return_loss; | |
2475 sinfo.echo_return_loss_enhancement = stats.echo_return_loss_enhancement; | |
2476 sinfo.typing_noise_detected = | |
2477 (send_ == SEND_NOTHING ? false : stats.typing_noise_detected); | |
2478 info->senders.push_back(sinfo); | |
2479 } | |
2480 | |
2481 // Get SSRC and stats for each receiver. | |
2482 RTC_DCHECK(info->receivers.size() == 0); | |
2483 for (const auto& stream : recv_streams_) { | |
2484 webrtc::AudioReceiveStream::Stats stats = stream.second->GetStats(); | |
2485 VoiceReceiverInfo rinfo; | |
2486 rinfo.add_ssrc(stats.remote_ssrc); | |
2487 rinfo.bytes_rcvd = stats.bytes_rcvd; | |
2488 rinfo.packets_rcvd = stats.packets_rcvd; | |
2489 rinfo.packets_lost = stats.packets_lost; | |
2490 rinfo.fraction_lost = stats.fraction_lost; | |
2491 rinfo.codec_name = stats.codec_name; | |
2492 rinfo.ext_seqnum = stats.ext_seqnum; | |
2493 rinfo.jitter_ms = stats.jitter_ms; | |
2494 rinfo.jitter_buffer_ms = stats.jitter_buffer_ms; | |
2495 rinfo.jitter_buffer_preferred_ms = stats.jitter_buffer_preferred_ms; | |
2496 rinfo.delay_estimate_ms = stats.delay_estimate_ms; | |
2497 rinfo.audio_level = stats.audio_level; | |
2498 rinfo.expand_rate = stats.expand_rate; | |
2499 rinfo.speech_expand_rate = stats.speech_expand_rate; | |
2500 rinfo.secondary_decoded_rate = stats.secondary_decoded_rate; | |
2501 rinfo.accelerate_rate = stats.accelerate_rate; | |
2502 rinfo.preemptive_expand_rate = stats.preemptive_expand_rate; | |
2503 rinfo.decoding_calls_to_silence_generator = | |
2504 stats.decoding_calls_to_silence_generator; | |
2505 rinfo.decoding_calls_to_neteq = stats.decoding_calls_to_neteq; | |
2506 rinfo.decoding_normal = stats.decoding_normal; | |
2507 rinfo.decoding_plc = stats.decoding_plc; | |
2508 rinfo.decoding_cng = stats.decoding_cng; | |
2509 rinfo.decoding_plc_cng = stats.decoding_plc_cng; | |
2510 rinfo.capture_start_ntp_time_ms = stats.capture_start_ntp_time_ms; | |
2511 info->receivers.push_back(rinfo); | |
2512 } | |
2513 | |
2514 return true; | |
2515 } | |
2516 | |
2517 void WebRtcVoiceMediaChannel::SetRawAudioSink( | |
2518 uint32_t ssrc, | |
2519 rtc::scoped_ptr<webrtc::AudioSinkInterface> sink) { | |
2520 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2521 LOG(LS_VERBOSE) << "WebRtcVoiceMediaChannel::SetRawAudioSink: ssrc:" << ssrc | |
2522 << " " << (sink ? "(ptr)" : "NULL"); | |
2523 if (ssrc == 0) { | |
2524 if (default_recv_ssrc_ != -1) { | |
2525 rtc::scoped_ptr<webrtc::AudioSinkInterface> proxy_sink( | |
2526 sink ? new ProxySink(sink.get()) : nullptr); | |
2527 SetRawAudioSink(default_recv_ssrc_, std::move(proxy_sink)); | |
2528 } | |
2529 default_sink_ = std::move(sink); | |
2530 return; | |
2531 } | |
2532 const auto it = recv_streams_.find(ssrc); | |
2533 if (it == recv_streams_.end()) { | |
2534 LOG(LS_WARNING) << "SetRawAudioSink: no recv stream with ssrc " << ssrc; | |
2535 return; | |
2536 } | |
2537 it->second->SetRawAudioSink(std::move(sink)); | |
2538 } | |
2539 | |
2540 int WebRtcVoiceMediaChannel::GetOutputLevel(int channel) { | |
2541 unsigned int ulevel = 0; | |
2542 int ret = engine()->voe()->volume()->GetSpeechOutputLevel(channel, ulevel); | |
2543 return (ret == 0) ? static_cast<int>(ulevel) : -1; | |
2544 } | |
2545 | |
2546 int WebRtcVoiceMediaChannel::GetReceiveChannelId(uint32_t ssrc) const { | |
2547 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2548 const auto it = recv_streams_.find(ssrc); | |
2549 if (it != recv_streams_.end()) { | |
2550 return it->second->channel(); | |
2551 } | |
2552 return -1; | |
2553 } | |
2554 | |
2555 int WebRtcVoiceMediaChannel::GetSendChannelId(uint32_t ssrc) const { | |
2556 RTC_DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
2557 const auto it = send_streams_.find(ssrc); | |
2558 if (it != send_streams_.end()) { | |
2559 return it->second->channel(); | |
2560 } | |
2561 return -1; | |
2562 } | |
2563 | |
2564 bool WebRtcVoiceMediaChannel::SetPlayout(int channel, bool playout) { | |
2565 if (playout) { | |
2566 LOG(LS_INFO) << "Starting playout for channel #" << channel; | |
2567 if (engine()->voe()->base()->StartPlayout(channel) == -1) { | |
2568 LOG_RTCERR1(StartPlayout, channel); | |
2569 return false; | |
2570 } | |
2571 } else { | |
2572 LOG(LS_INFO) << "Stopping playout for channel #" << channel; | |
2573 engine()->voe()->base()->StopPlayout(channel); | |
2574 } | |
2575 return true; | |
2576 } | |
2577 } // namespace cricket | |
2578 | |
2579 #endif // HAVE_WEBRTC_VOICE | |