OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h" | 11 #include "webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h" |
12 | 12 |
13 #include <string.h> | 13 #include <string.h> |
14 | 14 |
15 #include "webrtc/base/trace_event.h" | 15 #include "webrtc/base/trace_event.h" |
16 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h" | 16 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp_defines.h" |
17 #include "webrtc/modules/rtp_rtcp/source/byte_io.h" | 17 #include "webrtc/modules/rtp_rtcp/source/byte_io.h" |
18 #include "webrtc/system_wrappers/include/tick_util.h" | 18 #include "webrtc/system_wrappers/include/tick_util.h" |
19 | 19 |
20 namespace webrtc { | 20 namespace webrtc { |
21 | 21 |
22 static const int kDtmfFrequencyHz = 8000; | 22 static const int kDtmfFrequencyHz = 8000; |
23 | 23 |
24 RTPSenderAudio::RTPSenderAudio(Clock* clock, | 24 RTPSenderAudio::RTPSenderAudio(Clock* clock, RTPSender* rtpSender) |
25 RTPSender* rtpSender, | |
26 RtpAudioFeedback* audio_feedback) | |
27 : _clock(clock), | 25 : _clock(clock), |
28 _rtpSender(rtpSender), | 26 _rtpSender(rtpSender), |
29 _audioFeedback(audio_feedback), | |
30 _sendAudioCritsect(CriticalSectionWrapper::CreateCriticalSection()), | 27 _sendAudioCritsect(CriticalSectionWrapper::CreateCriticalSection()), |
31 _packetSizeSamples(160), | 28 _packetSizeSamples(160), |
32 _dtmfEventIsOn(false), | 29 _dtmfEventIsOn(false), |
33 _dtmfEventFirstPacketSent(false), | 30 _dtmfEventFirstPacketSent(false), |
34 _dtmfPayloadType(-1), | 31 _dtmfPayloadType(-1), |
35 _dtmfTimestamp(0), | 32 _dtmfTimestamp(0), |
36 _dtmfKey(0), | 33 _dtmfKey(0), |
37 _dtmfLengthSamples(0), | 34 _dtmfLengthSamples(0), |
38 _dtmfLevel(0), | 35 _dtmfLevel(0), |
39 _dtmfTimeLastSent(0), | 36 _dtmfTimeLastSent(0), |
(...skipping 111 matching lines...)
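The hunk above narrows the RTPSenderAudio constructor: the RtpAudioFeedback* parameter and the _audioFeedback member are gone (the matching OnPlayTelephoneEvent call is removed further down in SendAudio). A minimal sketch of an updated call site, assuming a Clock* and RTPSender* are already at hand as in the existing callers; the wrapper function here is hypothetical, not part of the CL:

#include "webrtc/modules/rtp_rtcp/source/rtp_sender_audio.h"

namespace webrtc {

// Illustrative only: shows the narrower two-argument constructor.
void CreateAudioSender(Clock* clock, RTPSender* rtp_sender) {
  // Before this change: RTPSenderAudio(clock, rtp_sender, audio_feedback).
  RTPSenderAudio audio_sender(clock, rtp_sender);
  (void)audio_sender;  // A real caller would keep this as a member.
}

}  // namespace webrtc
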
151 | 148 |
152 int32_t RTPSenderAudio::SendAudio(FrameType frameType, | 149 int32_t RTPSenderAudio::SendAudio(FrameType frameType, |
153 int8_t payloadType, | 150 int8_t payloadType, |
154 uint32_t captureTimeStamp, | 151 uint32_t captureTimeStamp, |
155 const uint8_t* payloadData, | 152 const uint8_t* payloadData, |
156 size_t dataSize, | 153 size_t dataSize, |
157 const RTPFragmentationHeader* fragmentation) { | 154 const RTPFragmentationHeader* fragmentation) { |
158 // TODO(pwestin) Breakup function in smaller functions. | 155 // TODO(pwestin) Breakup function in smaller functions. |
159 size_t payloadSize = dataSize; | 156 size_t payloadSize = dataSize; |
160 size_t maxPayloadLength = _rtpSender->MaxPayloadLength(); | 157 size_t maxPayloadLength = _rtpSender->MaxPayloadLength(); |
161 bool dtmfToneStarted = false; | |
162 uint16_t dtmfLengthMS = 0; | 158 uint16_t dtmfLengthMS = 0; |
163 uint8_t key = 0; | 159 uint8_t key = 0; |
164 int red_payload_type; | 160 int red_payload_type; |
165 uint8_t audio_level_dbov; | 161 uint8_t audio_level_dbov; |
166 int8_t dtmf_payload_type; | 162 int8_t dtmf_payload_type; |
167 uint16_t packet_size_samples; | 163 uint16_t packet_size_samples; |
168 { | 164 { |
169 CriticalSectionScoped cs(_sendAudioCritsect.get()); | 165 CriticalSectionScoped cs(_sendAudioCritsect.get()); |
170 red_payload_type = _REDPayloadType; | 166 red_payload_type = _REDPayloadType; |
171 audio_level_dbov = _audioLevel_dBov; | 167 audio_level_dbov = _audioLevel_dBov; |
172 dtmf_payload_type = _dtmfPayloadType; | 168 dtmf_payload_type = _dtmfPayloadType; |
173 packet_size_samples = _packetSizeSamples; | 169 packet_size_samples = _packetSizeSamples; |
174 } | 170 } |
175 | 171 |
176 // Check if we have pending DTMFs to send | 172 // Check if we have pending DTMFs to send |
177 if (!_dtmfEventIsOn && PendingDTMF()) { | 173 if (!_dtmfEventIsOn && PendingDTMF()) { |
178 int64_t delaySinceLastDTMF = | 174 int64_t delaySinceLastDTMF = |
179 _clock->TimeInMilliseconds() - _dtmfTimeLastSent; | 175 _clock->TimeInMilliseconds() - _dtmfTimeLastSent; |
180 | 176 |
181 if (delaySinceLastDTMF > 100) { | 177 if (delaySinceLastDTMF > 100) { |
182 // New tone to play | 178 // New tone to play |
183 _dtmfTimestamp = captureTimeStamp; | 179 _dtmfTimestamp = captureTimeStamp; |
184 if (NextDTMF(&key, &dtmfLengthMS, &_dtmfLevel) >= 0) { | 180 if (NextDTMF(&key, &dtmfLengthMS, &_dtmfLevel) >= 0) { |
185 _dtmfEventFirstPacketSent = false; | 181 _dtmfEventFirstPacketSent = false; |
186 _dtmfKey = key; | 182 _dtmfKey = key; |
187 _dtmfLengthSamples = (kDtmfFrequencyHz / 1000) * dtmfLengthMS; | 183 _dtmfLengthSamples = (kDtmfFrequencyHz / 1000) * dtmfLengthMS; |
188 dtmfToneStarted = true; | |
189 _dtmfEventIsOn = true; | 184 _dtmfEventIsOn = true; |
190 } | 185 } |
191 } | 186 } |
192 } | 187 } |
193 if (dtmfToneStarted) { | |
194 if (_audioFeedback) | |
195 _audioFeedback->OnPlayTelephoneEvent(key, dtmfLengthMS, _dtmfLevel); | |
196 } | |
197 | 188 |
198 // A source MAY send events and coded audio packets for the same time | 189 // A source MAY send events and coded audio packets for the same time |
199 // but we don't support it | 190 // but we don't support it |
200 if (_dtmfEventIsOn) { | 191 if (_dtmfEventIsOn) { |
201 if (frameType == kEmptyFrame) { | 192 if (frameType == kEmptyFrame) { |
202 // kEmptyFrame is used to drive the DTMF when in CN mode | 193 // kEmptyFrame is used to drive the DTMF when in CN mode |
203 // it can be triggered more frequently than we want to send the | 194 // it can be triggered more frequently than we want to send the |
204 // DTMF packets. | 195 // DTMF packets. |
205 if (packet_size_samples > (captureTimeStamp - _dtmfTimestampLastSent)) { | 196 if (packet_size_samples > (captureTimeStamp - _dtmfTimestampLastSent)) { |
206 // not time to send yet | 197 // not time to send yet |
(...skipping 248 matching lines...)
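The pacing check above keeps kEmptyFrame-driven DTMF from flooding the network: an event packet is only emitted once the capture timestamp has advanced by a full packet_size_samples (160 samples = 20 ms at the 8 kHz DTMF clock, so a 100 ms tone spans 800 samples). A standalone sketch of that arithmetic, with constants mirroring the defaults above; the timestamps are made up for illustration:

#include <cstdint>
#include <cstdio>

int main() {
  const uint16_t packet_size_samples = 160;  // 20 ms at the 8 kHz DTMF clock
  uint32_t dtmf_timestamp_last_sent = 0;

  // Pretend kEmptyFrame arrives every 10 ms (80 timestamp ticks): only every
  // second call passes the "time to send" test.
  for (uint32_t ts = 80; ts <= 480; ts += 80) {
    bool too_soon = packet_size_samples > (ts - dtmf_timestamp_last_sent);
    std::printf("ts=%u -> %s\n", ts, too_soon ? "skip" : "send DTMF packet");
    if (!too_soon)
      dtmf_timestamp_last_sent = ts;
  }
  return 0;
}
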
455 dtmfTimeStamp, "seqnum", _rtpSender->SequenceNumber()); | 446 dtmfTimeStamp, "seqnum", _rtpSender->SequenceNumber()); |
456 retVal = _rtpSender->SendToNetwork( | 447 retVal = _rtpSender->SendToNetwork( |
457 dtmfbuffer, 4, 12, TickTime::MillisecondTimestamp(), | 448 dtmfbuffer, 4, 12, TickTime::MillisecondTimestamp(), |
458 kAllowRetransmission, RtpPacketSender::kHighPriority); | 449 kAllowRetransmission, RtpPacketSender::kHighPriority); |
459 sendCount--; | 450 sendCount--; |
460 } while (sendCount > 0 && retVal == 0); | 451 } while (sendCount > 0 && retVal == 0); |
461 | 452 |
462 return retVal; | 453 return retVal; |
463 } | 454 } |
464 } // namespace webrtc | 455 } // namespace webrtc |
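The final hunk resends the end-of-event packet sendCount times (exiting early if SendToNetwork fails), in line with RFC 4733's recommendation to retransmit the terminating telephone-event packet for robustness. A hedged sketch of the 4-byte payload that sits behind the 12-byte RTP header in dtmfbuffer; the helper name and parameters are illustrative, not the CL's code:

#include <cstdint>

// RFC 4733 telephone-event payload: event code, E/R/volume byte, and a
// 16-bit big-endian duration in timestamp units (8 kHz samples here).
void WriteTelephoneEventPayload(uint8_t* payload,        // 4 bytes
                                uint8_t event,           // DTMF key, 0-15
                                bool end_of_event,
                                uint8_t level_dbov,      // 0-63, attenuation
                                uint16_t duration_samples) {
  payload[0] = event;
  payload[1] = static_cast<uint8_t>((end_of_event ? 0x80 : 0x00) |
                                    (level_dbov & 0x3F));
  payload[2] = static_cast<uint8_t>(duration_samples >> 8);
  payload[3] = static_cast<uint8_t>(duration_samples & 0xFF);
}
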