OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h" | 11 #include "webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h" |
12 | 12 |
13 #include <string.h> | 13 #include <string.h> |
14 | 14 |
15 #include "webrtc/base/logging.h" | |
15 #include "webrtc/base/trace_event.h" | 16 #include "webrtc/base/trace_event.h" |
16 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h" | 17 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h" |
17 #include "webrtc/modules/rtp_rtcp/source/byte_io.h" | 18 #include "webrtc/modules/rtp_rtcp/source/byte_io.h" |
18 #include "webrtc/system_wrappers/include/tick_util.h" | 19 #include "webrtc/system_wrappers/include/tick_util.h" |
19 | 20 |
20 namespace webrtc { | 21 namespace webrtc { |
21 | 22 |
22 static const int kDtmfFrequencyHz = 8000; | 23 static const int kDtmfFrequencyHz = 8000; |
23 | 24 |
24 RTPSenderAudio::RTPSenderAudio(Clock* clock, | 25 RTPSenderAudio::RTPSenderAudio(Clock* clock, |
(...skipping 13 matching lines...) Expand all Loading... | |
38 _dtmfLevel(0), | 39 _dtmfLevel(0), |
39 _dtmfTimeLastSent(0), | 40 _dtmfTimeLastSent(0), |
40 _dtmfTimestampLastSent(0), | 41 _dtmfTimestampLastSent(0), |
41 _REDPayloadType(-1), | 42 _REDPayloadType(-1), |
42 _inbandVADactive(false), | 43 _inbandVADactive(false), |
43 _cngNBPayloadType(-1), | 44 _cngNBPayloadType(-1), |
44 _cngWBPayloadType(-1), | 45 _cngWBPayloadType(-1), |
45 _cngSWBPayloadType(-1), | 46 _cngSWBPayloadType(-1), |
46 _cngFBPayloadType(-1), | 47 _cngFBPayloadType(-1), |
47 _lastPayloadType(-1), | 48 _lastPayloadType(-1), |
48 _audioLevel_dBov(0) {} | 49 _audioLevel_dBov(0), |
50 _firstPacketSent(false) {} | |
49 | 51 |
50 RTPSenderAudio::~RTPSenderAudio() {} | 52 RTPSenderAudio::~RTPSenderAudio() {} |
51 | 53 |
52 int RTPSenderAudio::AudioFrequency() const { | 54 int RTPSenderAudio::AudioFrequency() const { |
53 return kDtmfFrequencyHz; | 55 return kDtmfFrequencyHz; |
54 } | 56 } |
55 | 57 |
56 // set audio packet size, used to determine when it's time to send a DTMF packet | 58 // set audio packet size, used to determine when it's time to send a DTMF packet |
57 // in silence (CNG) | 59 // in silence (CNG) |
58 int32_t RTPSenderAudio::SetAudioPacketSize(uint16_t packetSizeSamples) { | 60 int32_t RTPSenderAudio::SetAudioPacketSize(uint16_t packetSizeSamples) { |
(...skipping 276 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
335 dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0]; | 337 dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0]; |
336 memcpy(dataBuffer + rtpHeaderLength, | 338 memcpy(dataBuffer + rtpHeaderLength, |
337 payloadData + fragmentation->fragmentationOffset[0], | 339 payloadData + fragmentation->fragmentationOffset[0], |
338 fragmentation->fragmentationLength[0]); | 340 fragmentation->fragmentationLength[0]); |
339 | 341 |
340 payloadSize = fragmentation->fragmentationLength[0]; | 342 payloadSize = fragmentation->fragmentationLength[0]; |
341 } else { | 343 } else { |
342 memcpy(dataBuffer + rtpHeaderLength, payloadData, payloadSize); | 344 memcpy(dataBuffer + rtpHeaderLength, payloadData, payloadSize); |
343 } | 345 } |
344 } | 346 } |
347 | |
348 bool firstPacket = false; | |
345 { | 349 { |
346 CriticalSectionScoped cs(_sendAudioCritsect.get()); | 350 CriticalSectionScoped cs(_sendAudioCritsect.get()); |
347 _lastPayloadType = payloadType; | 351 _lastPayloadType = payloadType; |
352 if (!_firstPacketSent) { | |
pthatcher1
2016/03/16 04:55:27
Should this be if(!_firstPacketSent) ?
mflodman
2016/03/16 14:30:54
Yes.
skvlad
2016/03/16 19:07:04
Good catch, fixed!
| |
353 firstPacket = true; | |
354 _firstPacketSent = true; | |
355 } | |
348 } | 356 } |
349 // Update audio level extension, if included. | 357 // Update audio level extension, if included. |
350 size_t packetSize = payloadSize + rtpHeaderLength; | 358 size_t packetSize = payloadSize + rtpHeaderLength; |
351 RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize); | 359 RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize); |
352 RTPHeader rtp_header; | 360 RTPHeader rtp_header; |
353 rtp_parser.Parse(&rtp_header); | 361 rtp_parser.Parse(&rtp_header); |
354 _rtpSender->UpdateAudioLevel(dataBuffer, packetSize, rtp_header, | 362 _rtpSender->UpdateAudioLevel(dataBuffer, packetSize, rtp_header, |
355 (frameType == kAudioFrameSpeech), | 363 (frameType == kAudioFrameSpeech), |
356 audio_level_dbov); | 364 audio_level_dbov); |
357 TRACE_EVENT_ASYNC_END2("webrtc", "Audio", captureTimeStamp, "timestamp", | 365 TRACE_EVENT_ASYNC_END2("webrtc", "Audio", captureTimeStamp, "timestamp", |
358 _rtpSender->Timestamp(), "seqnum", | 366 _rtpSender->Timestamp(), "seqnum", |
359 _rtpSender->SequenceNumber()); | 367 _rtpSender->SequenceNumber()); |
360 return _rtpSender->SendToNetwork(dataBuffer, payloadSize, rtpHeaderLength, | 368 int32_t sendResult = _rtpSender->SendToNetwork( |
361 TickTime::MillisecondTimestamp(), | 369 dataBuffer, payloadSize, rtpHeaderLength, |
362 kAllowRetransmission, | 370 TickTime::MillisecondTimestamp(), kAllowRetransmission, |
363 RtpPacketSender::kHighPriority); | 371 RtpPacketSender::kHighPriority); |
372 if (firstPacket) { | |
373 LOG(LS_INFO) << "First audio RTP packet sent"; | |
374 } | |
375 return sendResult; | |
364 } | 376 } |
365 | 377 |
366 // Audio level magnitude and voice activity flag are set for each RTP packet | 378 // Audio level magnitude and voice activity flag are set for each RTP packet |
367 int32_t RTPSenderAudio::SetAudioLevel(uint8_t level_dBov) { | 379 int32_t RTPSenderAudio::SetAudioLevel(uint8_t level_dBov) { |
368 if (level_dBov > 127) { | 380 if (level_dBov > 127) { |
369 return -1; | 381 return -1; |
370 } | 382 } |
371 CriticalSectionScoped cs(_sendAudioCritsect.get()); | 383 CriticalSectionScoped cs(_sendAudioCritsect.get()); |
372 _audioLevel_dBov = level_dBov; | 384 _audioLevel_dBov = level_dBov; |
373 return 0; | 385 return 0; |
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
455 dtmfTimeStamp, "seqnum", _rtpSender->SequenceNumber()); | 467 dtmfTimeStamp, "seqnum", _rtpSender->SequenceNumber()); |
456 retVal = _rtpSender->SendToNetwork( | 468 retVal = _rtpSender->SendToNetwork( |
457 dtmfbuffer, 4, 12, TickTime::MillisecondTimestamp(), | 469 dtmfbuffer, 4, 12, TickTime::MillisecondTimestamp(), |
458 kAllowRetransmission, RtpPacketSender::kHighPriority); | 470 kAllowRetransmission, RtpPacketSender::kHighPriority); |
459 sendCount--; | 471 sendCount--; |
460 } while (sendCount > 0 && retVal == 0); | 472 } while (sendCount > 0 && retVal == 0); |
461 | 473 |
462 return retVal; | 474 return retVal; |
463 } | 475 } |
464 } // namespace webrtc | 476 } // namespace webrtc |
OLD | NEW |