| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 13 matching lines...) |
| 24 #include "webrtc/modules/rtp_rtcp/source/rtp_packet_to_send.h" | 24 #include "webrtc/modules/rtp_rtcp/source/rtp_packet_to_send.h" |
| 25 | 25 |
| 26 namespace webrtc { | 26 namespace webrtc { |
| 27 | 27 |
| 28 RTPSenderAudio::RTPSenderAudio(Clock* clock, RTPSender* rtp_sender) | 28 RTPSenderAudio::RTPSenderAudio(Clock* clock, RTPSender* rtp_sender) |
| 29 : clock_(clock), | 29 : clock_(clock), |
| 30 rtp_sender_(rtp_sender) {} | 30 rtp_sender_(rtp_sender) {} |
| 31 | 31 |
| 32 RTPSenderAudio::~RTPSenderAudio() {} | 32 RTPSenderAudio::~RTPSenderAudio() {} |
| 33 | 33 |
| 34 // set audio packet size, used to determine when it's time to send a DTMF packet | |
| 35 // in silence (CNG) | |
| 36 int32_t RTPSenderAudio::SetAudioPacketSize(uint16_t packet_size_samples) { | |
| 37 rtc::CritScope cs(&send_audio_critsect_); | |
| 38 packet_size_samples_ = packet_size_samples; | |
| 39 return 0; | |
| 40 } | |
| 41 | |
| 42 int32_t RTPSenderAudio::RegisterAudioPayload( | 34 int32_t RTPSenderAudio::RegisterAudioPayload( |
| 43 const char payloadName[RTP_PAYLOAD_NAME_SIZE], | 35 const char payloadName[RTP_PAYLOAD_NAME_SIZE], |
| 44 const int8_t payload_type, | 36 const int8_t payload_type, |
| 45 const uint32_t frequency, | 37 const uint32_t frequency, |
| 46 const size_t channels, | 38 const size_t channels, |
| 47 const uint32_t rate, | 39 const uint32_t rate, |
| 48 RtpUtility::Payload** payload) { | 40 RtpUtility::Payload** payload) { |
| 49 if (RtpUtility::StringCompare(payloadName, "cn", 2)) { | 41 if (RtpUtility::StringCompare(payloadName, "cn", 2)) { |
| 50 rtc::CritScope cs(&send_audio_critsect_); | 42 rtc::CritScope cs(&send_audio_critsect_); |
| 51 // we can have multiple CNG payload types | 43 // we can have multiple CNG payload types |
| (...skipping 73 matching lines...) |
| 125 } | 117 } |
| 126 return marker_bit; | 118 return marker_bit; |
| 127 } | 119 } |
| 128 | 120 |
| 129 bool RTPSenderAudio::SendAudio(FrameType frame_type, | 121 bool RTPSenderAudio::SendAudio(FrameType frame_type, |
| 130 int8_t payload_type, | 122 int8_t payload_type, |
| 131 uint32_t rtp_timestamp, | 123 uint32_t rtp_timestamp, |
| 132 const uint8_t* payload_data, | 124 const uint8_t* payload_data, |
| 133 size_t payload_size, | 125 size_t payload_size, |
| 134 const RTPFragmentationHeader* fragmentation) { | 126 const RTPFragmentationHeader* fragmentation) { |
| 127 // From RFC 4733: |
| 128 // A source has wide latitude as to how often it sends event updates. A |
| 129 // natural interval is the spacing between non-event audio packets. [...] |
| 130 // Alternatively, a source MAY decide to use a different spacing for event |
| 131 // updates, with a value of 50 ms RECOMMENDED. |
| 132 constexpr int kDtmfIntervalTimeMs = 50; |
| 135 uint8_t audio_level_dbov = 0; | 133 uint8_t audio_level_dbov = 0; |
| 136 uint16_t packet_size_samples = 0; | |
| 137 uint32_t dtmf_payload_freq = 0; | 134 uint32_t dtmf_payload_freq = 0; |
| 138 { | 135 { |
| 139 rtc::CritScope cs(&send_audio_critsect_); | 136 rtc::CritScope cs(&send_audio_critsect_); |
| 140 audio_level_dbov = audio_level_dbov_; | 137 audio_level_dbov = audio_level_dbov_; |
| 141 packet_size_samples = packet_size_samples_; | |
| 142 dtmf_payload_freq = dtmf_payload_freq_; | 138 dtmf_payload_freq = dtmf_payload_freq_; |
| 143 } | 139 } |
| 144 | 140 |
| 145 // Check if we have pending DTMFs to send | 141 // Check if we have pending DTMFs to send |
| 146 if (!dtmf_event_is_on_ && dtmf_queue_.PendingDtmf()) { | 142 if (!dtmf_event_is_on_ && dtmf_queue_.PendingDtmf()) { |
| 147 if ((clock_->TimeInMilliseconds() - dtmf_time_last_sent_) > 50) { | 143 if ((clock_->TimeInMilliseconds() - dtmf_time_last_sent_) > |
| 144 kDtmfIntervalTimeMs) { |
| 148 // New tone to play | 145 // New tone to play |
| 149 dtmf_timestamp_ = rtp_timestamp; | 146 dtmf_timestamp_ = rtp_timestamp; |
| 150 if (dtmf_queue_.NextDtmf(&dtmf_current_event_)) { | 147 if (dtmf_queue_.NextDtmf(&dtmf_current_event_)) { |
| 151 dtmf_event_first_packet_sent_ = false; | 148 dtmf_event_first_packet_sent_ = false; |
| 152 dtmf_length_samples_ = | 149 dtmf_length_samples_ = |
| 153 dtmf_current_event_.duration_ms * (dtmf_payload_freq / 1000); | 150 dtmf_current_event_.duration_ms * (dtmf_payload_freq / 1000); |
| 154 dtmf_event_is_on_ = true; | 151 dtmf_event_is_on_ = true; |
| 155 } | 152 } |
| 156 } | 153 } |
| 157 } | 154 } |
| 158 | 155 |
| 159 // A source MAY send events and coded audio packets for the same time | 156 // A source MAY send events and coded audio packets for the same time |
| 160 // but we don't support it | 157 // but we don't support it |
| 161 if (dtmf_event_is_on_) { | 158 if (dtmf_event_is_on_) { |
| 162 if (frame_type == kEmptyFrame) { | 159 if (frame_type == kEmptyFrame) { |
| 163 // kEmptyFrame is used to drive the DTMF when in CN mode | 160 // kEmptyFrame is used to drive the DTMF when in CN mode |
| 164 // it can be triggered more frequently than we want to send the | 161 // it can be triggered more frequently than we want to send the |
| 165 // DTMF packets. | 162 // DTMF packets. |
| 166 if (packet_size_samples > (rtp_timestamp - dtmf_timestamp_last_sent_)) { | 163 const unsigned int dtmf_interval_time_rtp = |
| 164 dtmf_payload_freq * kDtmfIntervalTimeMs / 1000; |
| 165 if ((rtp_timestamp - dtmf_timestamp_last_sent_) < |
| 166 dtmf_interval_time_rtp) { |
| 167 // not time to send yet | 167 // not time to send yet |
| 168 return true; | 168 return true; |
| 169 } | 169 } |
| 170 } | 170 } |
| 171 dtmf_timestamp_last_sent_ = rtp_timestamp; | 171 dtmf_timestamp_last_sent_ = rtp_timestamp; |
| 172 uint32_t dtmf_duration_samples = rtp_timestamp - dtmf_timestamp_; | 172 uint32_t dtmf_duration_samples = rtp_timestamp - dtmf_timestamp_; |
| 173 bool ended = false; | 173 bool ended = false; |
| 174 bool send = true; | 174 bool send = true; |
| 175 | 175 |
| 176 if (dtmf_length_samples_ > dtmf_duration_samples) { | 176 if (dtmf_length_samples_ > dtmf_duration_samples) { |
| (...skipping 163 matching lines...) |
| 340 TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "Audio::SendTelephoneEvent", | 340 TRACE_DISABLED_BY_DEFAULT("webrtc_rtp"), "Audio::SendTelephoneEvent", |
| 341 "timestamp", packet->Timestamp(), "seqnum", packet->SequenceNumber()); | 341 "timestamp", packet->Timestamp(), "seqnum", packet->SequenceNumber()); |
| 342 result = rtp_sender_->SendToNetwork(std::move(packet), kAllowRetransmission, | 342 result = rtp_sender_->SendToNetwork(std::move(packet), kAllowRetransmission, |
| 343 RtpPacketSender::kHighPriority); | 343 RtpPacketSender::kHighPriority); |
| 344 send_count--; | 344 send_count--; |
| 345 } while (send_count > 0 && result); | 345 } while (send_count > 0 && result); |
| 346 | 346 |
| 347 return result; | 347 return result; |
| 348 } | 348 } |
| 349 } // namespace webrtc | 349 } // namespace webrtc |
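
For reference, the NEW side replaces the old dependence on `packet_size_samples_` (removed along with `SetAudioPacketSize`) with a fixed 50 ms DTMF update interval expressed in RTP timestamp units. Below is a minimal standalone sketch of that arithmetic; the helper name `DtmfUpdateDue` and the `main()` harness are hypothetical and only mirror the comparison in the patched `SendAudio()`, they are not part of the patch.

```cpp
#include <cstdint>
#include <iostream>

// RFC 4733 RECOMMENDED spacing between DTMF event updates.
constexpr int kDtmfIntervalTimeMs = 50;

// Hypothetical helper mirroring the check in the patched SendAudio():
// an event update is due once at least 50 ms worth of RTP timestamp ticks
// have elapsed since the last update. Unsigned subtraction keeps the test
// correct across 32-bit RTP timestamp wrap-around.
bool DtmfUpdateDue(uint32_t rtp_timestamp,
                   uint32_t dtmf_timestamp_last_sent,
                   uint32_t dtmf_payload_freq) {
  const uint32_t dtmf_interval_time_rtp =
      dtmf_payload_freq * kDtmfIntervalTimeMs / 1000;
  return (rtp_timestamp - dtmf_timestamp_last_sent) >= dtmf_interval_time_rtp;
}

int main() {
  // At 8 kHz, 50 ms corresponds to 400 timestamp ticks.
  std::cout << DtmfUpdateDue(1000, 800, 8000) << "\n";  // 200 ticks -> 0 (wait)
  std::cout << DtmfUpdateDue(1200, 800, 8000) << "\n";  // 400 ticks -> 1 (send)
  // Wrap-around case: last update sent near the top of the 32-bit range.
  std::cout << DtmfUpdateDue(100, 0xFFFFFF00u, 8000) << "\n";  // 356 ticks -> 0
  return 0;
}
```

Because the interval is derived from `dtmf_payload_freq` rather than the audio packet size, the CN-driven `kEmptyFrame` path can fire at any rate and event updates are still paced at roughly 50 ms, matching the RFC 4733 recommendation quoted in the patch.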