Index: webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
diff --git a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
index 6d0f7a4627bc49cad1680b11f34fb26090b9ff1e..234e43b120de15784d872001ef7aaa68c2e7cc3b 100644
--- a/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
+++ b/webrtc/modules/rtp_rtcp/source/rtp_sender_audio.cc
@@ -16,6 +16,7 @@
 #include "webrtc/base/trace_event.h"
 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 #include "webrtc/modules/rtp_rtcp/source/byte_io.h"
+#include "webrtc/modules/rtp_rtcp/source/rtp_packet_to_send.h"
 #include "webrtc/system_wrappers/include/tick_util.h"
 
 namespace webrtc {
@@ -153,7 +154,7 @@ int32_t RTPSenderAudio::SendAudio(FrameType frameType,
                                    size_t dataSize,
                                    const RTPFragmentationHeader* fragmentation) {
   // TODO(pwestin) Breakup function in smaller functions.
-  size_t payloadSize = dataSize;
+  size_t payload_size = dataSize;
   size_t maxPayloadLength = _rtpSender->MaxPayloadLength();
   uint16_t dtmfLengthMS = 0;
   uint8_t key = 0;
@@ -239,7 +240,7 @@ int32_t RTPSenderAudio::SendAudio(FrameType frameType,
     }
     return 0;
   }
-  if (payloadSize == 0 || payloadData == NULL) {
+  if (payload_size == 0 || payloadData == NULL) {
     if (frameType == kEmptyFrame) {
       // we don't send empty audio RTP packets
       // no error since we use it to drive DTMF when we use VAD
@@ -247,10 +248,9 @@ int32_t RTPSenderAudio::SendAudio(FrameType frameType,
     }
     return -1;
   }
-  uint8_t dataBuffer[IP_PACKET_SIZE];
+  std::unique_ptr<RtpPacketToSend> packet = _rtpSender->CreatePacket();
   bool markerBit = MarkerBit(frameType, payloadType);
-  int32_t rtpHeaderLength = 0;
   uint16_t timestampOffset = 0;
 
   if (red_payload_type >= 0 && fragmentation && !markerBit &&
@@ -258,23 +258,23 @@ int32_t RTPSenderAudio::SendAudio(FrameType frameType,
     // have we configured RED? use its payload type
     // we need to get the current timestamp to calc the diff
     uint32_t oldTimeStamp = _rtpSender->Timestamp();
-    rtpHeaderLength = _rtpSender->BuildRTPheader(dataBuffer, red_payload_type,
-                                                 markerBit, captureTimeStamp,
-                                                 _clock->TimeInMilliseconds());
-
+    _rtpSender->BuildRtpHeader(packet.get(), captureTimeStamp, true);
     timestampOffset = uint16_t(_rtpSender->Timestamp() - oldTimeStamp);
+    packet->SetPayloadType(red_payload_type);
   } else {
-    rtpHeaderLength = _rtpSender->BuildRTPheader(dataBuffer, payloadType,
-                                                 markerBit, captureTimeStamp,
-                                                 _clock->TimeInMilliseconds());
-  }
-  if (rtpHeaderLength <= 0) {
-    return -1;
+    _rtpSender->BuildRtpHeader(packet.get(), captureTimeStamp, true);
+    packet->SetPayloadType(payloadType);
   }
-  if (maxPayloadLength < (rtpHeaderLength + payloadSize)) {
+  packet->SetMarker(markerBit);
+  // Update audio level extension, if included.
+  packet->SetExtension<AudioLevel>(frameType == kAudioFrameSpeech,
+                                   audio_level_dbov);
+
+  if (maxPayloadLength < (packet->headers_size() + payload_size)) {
     // Too large payload buffer.
     return -1;
   }
+  size_t red_header_size = 0;
   if (red_payload_type >= 0 &&  // Have we configured RED?
       fragmentation && fragmentation->fragmentationVectorSize > 1 &&
       !markerBit) {
@@ -283,9 +283,14 @@ int32_t RTPSenderAudio::SendAudio(FrameType frameType,
         // we only support 2 codecs when using RED
         return -1;
       }
+      red_header_size = 5;
+      payload_size = fragmentation->fragmentationLength[0] +
+                     fragmentation->fragmentationLength[1];
+
+      uint8_t* payload =
+          packet->AllocatePayload(red_header_size + payload_size);
       // only 0x80 if we have multiple blocks
-      dataBuffer[rtpHeaderLength++] =
-          0x80 + fragmentation->fragmentationPlType[1];
+      payload[0] = 0x80 + fragmentation->fragmentationPlType[1];
       size_t blockLength = fragmentation->fragmentationLength[1];
 
       // sanity blockLength
@@ -293,44 +298,43 @@ int32_t RTPSenderAudio::SendAudio(FrameType frameType,
         return -1;
       }
       uint32_t REDheader = (timestampOffset << 10) + blockLength;
-      ByteWriter<uint32_t, 3>::WriteBigEndian(dataBuffer + rtpHeaderLength,
-                                              REDheader);
-      rtpHeaderLength += 3;
+      ByteWriter<uint32_t, 3>::WriteBigEndian(payload + 1, REDheader);
-      dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
+      payload[4] = fragmentation->fragmentationPlType[0];
       // copy the RED data
-      memcpy(dataBuffer + rtpHeaderLength,
+      memcpy(payload + red_header_size,
              payloadData + fragmentation->fragmentationOffset[1],
              fragmentation->fragmentationLength[1]);
       // copy the normal data
-      memcpy(
-          dataBuffer + rtpHeaderLength + fragmentation->fragmentationLength[1],
-          payloadData + fragmentation->fragmentationOffset[0],
-          fragmentation->fragmentationLength[0]);
-
-      payloadSize = fragmentation->fragmentationLength[0] +
-                    fragmentation->fragmentationLength[1];
+      memcpy(payload + red_header_size + fragmentation->fragmentationLength[1],
+             payloadData + fragmentation->fragmentationOffset[0],
+             fragmentation->fragmentationLength[0]);
     } else {
+      red_header_size = 1;
+      payload_size = fragmentation->fragmentationLength[0];
+      uint8_t* payload =
+          packet->AllocatePayload(red_header_size + payload_size);
       // silence for too long send only new data
-      dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
-      memcpy(dataBuffer + rtpHeaderLength,
+      payload[0] = fragmentation->fragmentationPlType[0];
+      memcpy(payload + red_header_size,
             payloadData + fragmentation->fragmentationOffset[0],
             fragmentation->fragmentationLength[0]);
-
-      payloadSize = fragmentation->fragmentationLength[0];
     }
   } else {
     if (fragmentation && fragmentation->fragmentationVectorSize > 0) {
       // use the fragment info if we have one
-      dataBuffer[rtpHeaderLength++] = fragmentation->fragmentationPlType[0];
-      memcpy(dataBuffer + rtpHeaderLength,
+      red_header_size = 1;
+      payload_size = fragmentation->fragmentationLength[0];
+      uint8_t* payload =
+          packet->AllocatePayload(red_header_size + payload_size);
+      payload[0] = fragmentation->fragmentationPlType[0];
+      memcpy(payload + red_header_size,
             payloadData + fragmentation->fragmentationOffset[0],
             fragmentation->fragmentationLength[0]);
-
-      payloadSize = fragmentation->fragmentationLength[0];
     } else {
-      memcpy(dataBuffer + rtpHeaderLength, payloadData, payloadSize);
+      uint8_t* payload = packet->AllocatePayload(payload_size);
+      memcpy(payload, payloadData, payload_size);
     }
   }
@@ -338,25 +342,12 @@ int32_t RTPSenderAudio::SendAudio(FrameType frameType,
     rtc::CritScope cs(&_sendAudioCritsect);
     _lastPayloadType = payloadType;
   }
-  // Update audio level extension, if included.
-  size_t packetSize = payloadSize + rtpHeaderLength;
-  RtpUtility::RtpHeaderParser rtp_parser(dataBuffer, packetSize);
-  RTPHeader rtp_header;
-  rtp_parser.Parse(&rtp_header);
-  _rtpSender->UpdateAudioLevel(dataBuffer, packetSize, rtp_header,
-                               (frameType == kAudioFrameSpeech),
-                               audio_level_dbov);
   TRACE_EVENT_ASYNC_END2("webrtc", "Audio", captureTimeStamp, "timestamp",
-                         _rtpSender->Timestamp(), "seqnum",
-                         _rtpSender->SequenceNumber());
-  int32_t send_result = _rtpSender->SendToNetwork(
-      dataBuffer, payloadSize, rtpHeaderLength,
-      TickTime::MillisecondTimestamp(), kAllowRetransmission,
-      RtpPacketSender::kHighPriority);
-  if (first_packet_sent_()) {
-    LOG(LS_INFO) << "First audio RTP packet sent to pacer";
-  }
-  return send_result;
+                         packet->Timestamp(), "seqnum",
+                         packet->SequenceNumber());
+  return _rtpSender->SendToNetwork(
+      std::move(packet), payload_size, TickTime::MillisecondTimestamp(),
+      kAllowRetransmission, RtpPacketSender::kHighPriority);
 }
 
 // Audio level magnitude and voice activity flag are set for each RTP packet
@@ -409,7 +400,6 @@ int32_t RTPSenderAudio::SendTelephoneEventPacket(bool ended,
                                                   uint32_t dtmfTimeStamp,
                                                   uint16_t duration,
                                                   bool markerBit) {
-  uint8_t dtmfbuffer[IP_PACKET_SIZE];
   uint8_t sendCount = 1;
   int32_t retVal = 0;
@@ -418,13 +408,11 @@ int32_t RTPSenderAudio::SendTelephoneEventPacket(bool ended,
     sendCount = 3;
   }
   do {
+    std::unique_ptr<RtpPacketToSend> packet = _rtpSender->CreatePacket();
     // Send DTMF data
-    _rtpSender->BuildRTPheader(dtmfbuffer, dtmf_payload_type, markerBit,
-                               dtmfTimeStamp, _clock->TimeInMilliseconds());
-
-    // reset CSRC and X bit
-    dtmfbuffer[0] &= 0xe0;
-
+    _rtpSender->BuildRtpHeader(packet.get(), dtmfTimeStamp, false);
+    packet->SetMarker(markerBit);
+    packet->SetPayloadType(dtmf_payload_type);
     // Create DTMF data
 
     /* From RFC 2833:
@@ -440,17 +428,18 @@ int32_t RTPSenderAudio::SendTelephoneEventPacket(bool ended,
     // First packet un-ended
     uint8_t E = ended ? 0x80 : 0x00;
 
+    uint8_t* dtmfbuffer = packet->AllocatePayload(4);
     // First byte is Event number, equals key number
-    dtmfbuffer[12] = _dtmfKey;
-    dtmfbuffer[13] = E | R | volume;
-    ByteWriter<uint16_t>::WriteBigEndian(dtmfbuffer + 14, duration);
+    dtmfbuffer[0] = _dtmfKey;
+    dtmfbuffer[1] = E | R | volume;
+    ByteWriter<uint16_t>::WriteBigEndian(dtmfbuffer + 2, duration);
     TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"),
                          "Audio::SendTelephoneEvent", "timestamp",
                          dtmfTimeStamp, "seqnum", _rtpSender->SequenceNumber());
     retVal = _rtpSender->SendToNetwork(
-        dtmfbuffer, 4, 12, TickTime::MillisecondTimestamp(),
+        std::move(packet), 4, TickTime::MillisecondTimestamp(),
         kAllowRetransmission, RtpPacketSender::kHighPriority);
     sendCount--;
   } while (sendCount > 0 && retVal == 0);
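
For context, the packet-building flow this patch introduces can be sketched as follows. This is an illustrative outline only, based on the calls visible in the diff above (CreatePacket(), BuildRtpHeader(), SetPayloadType(), SetMarker(), SetExtension<AudioLevel>(), AllocatePayload() and the RtpPacketToSend overload of SendToNetwork()); the helper function and its parameter list are hypothetical and not part of the patch.

// Illustrative sketch (not part of the patch): how a plain, non-RED audio
// payload would be packetized with the new RtpPacketToSend path. The helper
// name and its parameters are hypothetical.
#include <cstring>
#include <memory>
#include <utility>

#include "webrtc/modules/rtp_rtcp/source/rtp_packet_to_send.h"

int32_t SendPlainAudioPacket(RTPSender* rtp_sender,
                             int8_t payload_type,
                             bool marker_bit,
                             uint32_t capture_timestamp,
                             bool is_speech,
                             uint8_t audio_level_dbov,
                             const uint8_t* payload_data,
                             size_t payload_size) {
  // Allocate a packet and fill in the fixed RTP header fields.
  std::unique_ptr<RtpPacketToSend> packet = rtp_sender->CreatePacket();
  rtp_sender->BuildRtpHeader(packet.get(), capture_timestamp, true);
  packet->SetPayloadType(payload_type);
  packet->SetMarker(marker_bit);
  // Voice activity flag and audio level travel in the header extension.
  packet->SetExtension<AudioLevel>(is_speech, audio_level_dbov);
  // Reserve payload space inside the packet and copy the encoded audio.
  uint8_t* payload = packet->AllocatePayload(payload_size);
  memcpy(payload, payload_data, payload_size);
  // Hand the finished packet (with ownership) to the sender/pacer.
  return rtp_sender->SendToNetwork(
      std::move(packet), payload_size, TickTime::MillisecondTimestamp(),
      kAllowRetransmission, RtpPacketSender::kHighPriority);
}

Compared with the old dataBuffer/rtpHeaderLength bookkeeping, the packet object owns both header and payload, so SendToNetwork() now takes ownership via std::move() instead of a raw buffer plus a header length.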