| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 451 matching lines...) |
| 462 } | 462 } |
| 463 if (payload_type_ == payload_type) { | 463 if (payload_type_ == payload_type) { |
| 464 if (!audio_configured_) { | 464 if (!audio_configured_) { |
| 465 *video_type = video_->VideoCodecType(); | 465 *video_type = video_->VideoCodecType(); |
| 466 } | 466 } |
| 467 return 0; | 467 return 0; |
| 468 } | 468 } |
| 469 std::map<int8_t, RtpUtility::Payload*>::iterator it = | 469 std::map<int8_t, RtpUtility::Payload*>::iterator it = |
| 470 payload_type_map_.find(payload_type); | 470 payload_type_map_.find(payload_type); |
| 471 if (it == payload_type_map_.end()) { | 471 if (it == payload_type_map_.end()) { |
| 472 LOG(LS_WARNING) << "Payload type " << payload_type << " not registered."; | 472 LOG(LS_WARNING) << "Payload type " << static_cast<int>(payload_type) |
| 473 << " not registered."; |
| 473 return -1; | 474 return -1; |
| 474 } | 475 } |
| 475 SetSendPayloadType(payload_type); | 476 SetSendPayloadType(payload_type); |
| 476 RtpUtility::Payload* payload = it->second; | 477 RtpUtility::Payload* payload = it->second; |
| 477 assert(payload); | 478 assert(payload); |
| 478 if (!payload->audio && !audio_configured_) { | 479 if (!payload->audio && !audio_configured_) { |
| 479 video_->SetVideoCodecType(payload->typeSpecific.Video.videoCodecType); | 480 video_->SetVideoCodecType(payload->typeSpecific.Video.videoCodecType); |
| 480 *video_type = payload->typeSpecific.Video.videoCodecType; | 481 *video_type = payload->typeSpecific.Video.videoCodecType; |
| 481 video_->SetMaxConfiguredBitrateVideo(payload->typeSpecific.Video.maxRate); | 482 video_->SetMaxConfiguredBitrateVideo(payload->typeSpecific.Video.maxRate); |
| 482 } | 483 } |
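The logging change above is needed because `payload_type` is an `int8_t`: streaming it into the log without a cast formats it as a character rather than a number, so payload type 111 would print as the letter 'o'. A minimal standalone sketch of the difference, using `std::ostringstream` in place of the WebRTC `LOG` macro:

```cpp
#include <cstdint>
#include <iostream>
#include <sstream>

int main() {
  int8_t payload_type = 111;  // Dynamic RTP payload types live in 96-127.

  std::ostringstream without_cast;
  without_cast << payload_type;  // Streams the character 'o' (ASCII 111).

  std::ostringstream with_cast;
  with_cast << static_cast<int>(payload_type);  // Streams the number "111".

  std::cout << "without cast: " << without_cast.str() << "\n"
            << "with cast:    " << with_cast.str() << "\n";
}
```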
| (...skipping 22 matching lines...) |
| 505 { | 506 { |
| 506 // Drop this packet if we're not sending media packets. | 507 // Drop this packet if we're not sending media packets. |
| 507 CriticalSectionScoped cs(send_critsect_.get()); | 508 CriticalSectionScoped cs(send_critsect_.get()); |
| 508 ssrc = ssrc_; | 509 ssrc = ssrc_; |
| 509 if (!sending_media_) { | 510 if (!sending_media_) { |
| 510 return 0; | 511 return 0; |
| 511 } | 512 } |
| 512 } | 513 } |
| 513 RtpVideoCodecTypes video_type = kRtpVideoGeneric; | 514 RtpVideoCodecTypes video_type = kRtpVideoGeneric; |
| 514 if (CheckPayloadType(payload_type, &video_type) != 0) { | 515 if (CheckPayloadType(payload_type, &video_type) != 0) { |
| 515 LOG(LS_ERROR) << "Don't send data with unknown payload type."; | 516 LOG(LS_ERROR) << "Don't send data with unknown payload type: " |
| 517 << static_cast<int>(payload_type) << "."; |
| 516 return -1; | 518 return -1; |
| 517 } | 519 } |
| 518 | 520 |
| 519 int32_t ret_val; | 521 int32_t ret_val; |
| 520 if (audio_configured_) { | 522 if (audio_configured_) { |
| 521 TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", capture_timestamp, | 523 TRACE_EVENT_ASYNC_STEP1("webrtc", "Audio", capture_timestamp, |
| 522 "Send", "type", FrameTypeToString(frame_type)); | 524 "Send", "type", FrameTypeToString(frame_type)); |
| 523 assert(frame_type == kAudioFrameSpeech || frame_type == kAudioFrameCN || | 525 assert(frame_type == kAudioFrameSpeech || frame_type == kAudioFrameCN || |
| 524 frame_type == kEmptyFrame); | 526 frame_type == kEmptyFrame); |
| 525 | 527 |
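A side note on the braces around the `CriticalSectionScoped` block at the top of `SendOutgoingData()`: the guard is an RAII lock, so the extra scope bounds how long `send_critsect_` is held, and the early `return 0` when media isn't being sent still releases it. A rough sketch of the same pattern with standard-library types (illustrative only, not the WebRTC lock classes):

```cpp
#include <cstdint>
#include <mutex>

class SenderSketch {
 public:
  int32_t SendOutgoingData() {
    uint32_t ssrc = 0;
    {
      // Lock held only inside these braces, mirroring CriticalSectionScoped.
      std::lock_guard<std::mutex> lock(send_mutex_);
      ssrc = ssrc_;
      if (!sending_media_)
        return 0;  // Early return still unlocks via the guard's destructor.
    }
    // ... packetize and hand off to the pacer without holding the lock ...
    (void)ssrc;
    return 0;
  }

 private:
  std::mutex send_mutex_;
  uint32_t ssrc_ = 0;
  bool sending_media_ = true;
};
```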
| (...skipping 192 matching lines...) |
| 718 RtpUtility::RtpHeaderParser rtp_parser(data_buffer, length); | 720 RtpUtility::RtpHeaderParser rtp_parser(data_buffer, length); |
| 719 RTPHeader header; | 721 RTPHeader header; |
| 720 if (!rtp_parser.Parse(header)) { | 722 if (!rtp_parser.Parse(header)) { |
| 721 assert(false); | 723 assert(false); |
| 722 return -1; | 724 return -1; |
| 723 } | 725 } |
| 724 // Convert from TickTime to Clock since capture_time_ms is based on | 726 // Convert from TickTime to Clock since capture_time_ms is based on |
| 725 // TickTime. | 727 // TickTime. |
| 726 int64_t corrected_capture_tims_ms = capture_time_ms + clock_delta_ms_; | 728 int64_t corrected_capture_tims_ms = capture_time_ms + clock_delta_ms_; |
| 727 paced_sender_->InsertPacket( | 729 paced_sender_->InsertPacket( |
| 728 RtpPacketSender::kHighPriority, header.ssrc, header.sequenceNumber, | 730 RtpPacketSender::kNormalPriority, header.ssrc, header.sequenceNumber, |
| 729 corrected_capture_tims_ms, length - header.headerLength, true); | 731 corrected_capture_tims_ms, length - header.headerLength, true); |
| 730 | 732 |
| 731 return length; | 733 return length; |
| 732 } | 734 } |
| 733 int rtx = kRtxOff; | 735 int rtx = kRtxOff; |
| 734 { | 736 { |
| 735 CriticalSectionScoped lock(send_critsect_.get()); | 737 CriticalSectionScoped lock(send_critsect_.get()); |
| 736 rtx = rtx_; | 738 rtx = rtx_; |
| 737 } | 739 } |
| 738 if (!PrepareAndSendPacket(data_buffer, length, capture_time_ms, | 740 if (!PrepareAndSendPacket(data_buffer, length, capture_time_ms, |
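The substantive change in this hunk is the pacer priority: a retransmission that is re-inserted into the paced sender now goes in at `kNormalPriority` instead of `kHighPriority`, so it no longer jumps ahead of higher-priority traffic. A conceptual sketch of why the priority matters (this is not the real `webrtc::PacedSender`, just a priority-ordered queue standing in for it):

```cpp
#include <cstdint>
#include <functional>
#include <queue>
#include <vector>

enum class PacketPriority { kHigh = 0, kNormal = 1, kLow = 2 };

struct QueuedPacket {
  PacketPriority priority;
  uint16_t sequence_number;
  bool operator>(const QueuedPacket& other) const {
    return priority > other.priority;  // Lower enum value drains first.
  }
};

int main() {
  // Min-heap on priority: top() is the next packet the pacing budget pays for.
  std::priority_queue<QueuedPacket, std::vector<QueuedPacket>,
                      std::greater<QueuedPacket>>
      pacer_queue;
  pacer_queue.push({PacketPriority::kHigh, 1});    // e.g. audio
  pacer_queue.push({PacketPriority::kNormal, 2});  // e.g. this retransmission
  // pacer_queue.top() is the high-priority packet; the retransmission waits.
  return 0;
}
```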
| (...skipping 259 matching lines...) |
| 998 uint8_t pt_fec; | 1000 uint8_t pt_fec; |
| 999 video_->GenericFECStatus(fec_enabled, pt_red, pt_fec); | 1001 video_->GenericFECStatus(fec_enabled, pt_red, pt_fec); |
| 1000 return fec_enabled && | 1002 return fec_enabled && |
| 1001 header.payloadType == pt_red && | 1003 header.payloadType == pt_red && |
| 1002 buffer[header.headerLength] == pt_fec; | 1004 buffer[header.headerLength] == pt_fec; |
| 1003 } | 1005 } |
| 1004 | 1006 |
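The `IsFecPacket()` check just above reads one byte past the RTP header: with RFC 2198 redundancy (RED), that byte is the RED block header, and for the final (primary) block the F bit is 0 and the low 7 bits carry the encapsulated payload type. A sketch of the same test with the bit layout spelled out (a hypothetical helper, not part of RTPSender):

```cpp
#include <cstddef>
#include <cstdint>

bool LooksLikeFecInRed(const uint8_t* rtp_packet, size_t rtp_header_length,
                       uint8_t rtp_payload_type, uint8_t pt_red,
                       uint8_t pt_fec) {
  if (rtp_payload_type != pt_red)
    return false;                                      // Not RED-encapsulated.
  const uint8_t red_header = rtp_packet[rtp_header_length];
  const bool final_block = (red_header & 0x80) == 0;   // F bit clear.
  return final_block && (red_header & 0x7F) == pt_fec; // FEC inside RED.
}
```

The reviewed code compares the whole byte (`buffer[header.headerLength] == pt_fec`), which is equivalent as long as the F bit is 0, i.e. the FEC block is the only or last block in the RED packet.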
| 1005 size_t RTPSender::TimeToSendPadding(size_t bytes) { | 1007 size_t RTPSender::TimeToSendPadding(size_t bytes) { |
| 1006 if (bytes == 0) | 1008 if (bytes == 0) |
| 1007 return 0; | 1009 return 0; |
| 1010 if (audio_configured_) |
| 1011 return 0; |
| 1008 { | 1012 { |
| 1009 CriticalSectionScoped cs(send_critsect_.get()); | 1013 CriticalSectionScoped cs(send_critsect_.get()); |
| 1010 if (!sending_media_) | 1014 if (!sending_media_) |
| 1011 return 0; | 1015 return 0; |
| 1012 } | 1016 } |
| 1013 size_t bytes_sent = TrySendRedundantPayloads(bytes); | 1017 size_t bytes_sent = TrySendRedundantPayloads(bytes); |
| 1014 if (bytes_sent < bytes) | 1018 if (bytes_sent < bytes) |
| 1015 bytes_sent += SendPadData(bytes - bytes_sent, false, 0, 0); | 1019 bytes_sent += SendPadData(bytes - bytes_sent, false, 0, 0); |
| 1016 return bytes_sent; | 1020 return bytes_sent; |
| 1017 } | 1021 } |
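`TimeToSendPadding()` now bails out early for audio senders, and for video it still fills the requested budget in two steps: re-send real payloads over RTX first, then top up with pad-only packets. A small sketch of that budget split, with stub functions standing in for `TrySendRedundantPayloads()` and `SendPadData()` (the 300-byte history figure is made up for the example):

```cpp
#include <algorithm>
#include <cstddef>

size_t TrySendRedundantPayloadsStub(size_t bytes) {
  const size_t resendable_history = 300;  // Pretend 300 bytes can be resent.
  return std::min(bytes, resendable_history);
}

size_t SendPadDataStub(size_t bytes) { return bytes; }  // Pads exactly as asked.

size_t TimeToSendPaddingSketch(size_t bytes, bool audio_configured,
                               bool sending_media) {
  if (bytes == 0 || audio_configured || !sending_media)
    return 0;  // New early-out: padding is only generated for video senders.
  size_t bytes_sent = TrySendRedundantPayloadsStub(bytes);
  if (bytes_sent < bytes)
    bytes_sent += SendPadDataStub(bytes - bytes_sent);
  return bytes_sent;  // A 500-byte request: 300 redundant + 200 pad bytes.
}
```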
| (...skipping 885 matching lines...) |
| 1903 CriticalSectionScoped lock(send_critsect_.get()); | 1907 CriticalSectionScoped lock(send_critsect_.get()); |
| 1904 | 1908 |
| 1905 RtpState state; | 1909 RtpState state; |
| 1906 state.sequence_number = sequence_number_rtx_; | 1910 state.sequence_number = sequence_number_rtx_; |
| 1907 state.start_timestamp = start_timestamp_; | 1911 state.start_timestamp = start_timestamp_; |
| 1908 | 1912 |
| 1909 return state; | 1913 return state; |
| 1910 } | 1914 } |
| 1911 | 1915 |
| 1912 } // namespace webrtc | 1916 } // namespace webrtc |