| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2014 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 22 matching lines...) |
| 33 static const size_t kFuAHeaderSize = 2; | 33 static const size_t kFuAHeaderSize = 2; |
| 34 static const size_t kLengthFieldSize = 2; | 34 static const size_t kLengthFieldSize = 2; |
| 35 static const size_t kStapAHeaderSize = kNalHeaderSize + kLengthFieldSize; | 35 static const size_t kStapAHeaderSize = kNalHeaderSize + kLengthFieldSize; |
| 36 | 36 |
| 37 // Bit masks for FU (A and B) indicators. | 37 // Bit masks for FU (A and B) indicators. |
| 38 enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F }; | 38 enum NalDefs { kFBit = 0x80, kNriMask = 0x60, kTypeMask = 0x1F }; |
| 39 | 39 |
| 40 // Bit masks for FU (A and B) headers. | 40 // Bit masks for FU (A and B) headers. |
| 41 enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 }; | 41 enum FuDefs { kSBit = 0x80, kEBit = 0x40, kRBit = 0x20 }; |
| 42 | 42 |
| 43 void ParseSingleNalu(RtpDepacketizer::ParsedPayload* parsed_payload, | 43 bool ParseSingleNalu(RtpDepacketizer::ParsedPayload* parsed_payload, |
| 44 const uint8_t* payload_data, | 44 const uint8_t* payload_data, |
| 45 size_t payload_data_length) { | 45 size_t payload_data_length) { |
| 46 parsed_payload->type.Video.width = 0; | 46 parsed_payload->type.Video.width = 0; |
| 47 parsed_payload->type.Video.height = 0; | 47 parsed_payload->type.Video.height = 0; |
| 48 parsed_payload->type.Video.codec = kRtpVideoH264; | 48 parsed_payload->type.Video.codec = kRtpVideoH264; |
| 49 parsed_payload->type.Video.isFirstPacket = true; | 49 parsed_payload->type.Video.isFirstPacket = true; |
| 50 RTPVideoHeaderH264* h264_header = | 50 RTPVideoHeaderH264* h264_header = |
| 51 &parsed_payload->type.Video.codecHeader.H264; | 51 &parsed_payload->type.Video.codecHeader.H264; |
| 52 | 52 |
| 53 const uint8_t* nalu_start = payload_data + kNalHeaderSize; | 53 const uint8_t* nalu_start = payload_data + kNalHeaderSize; |
| 54 size_t nalu_length = payload_data_length - kNalHeaderSize; | 54 size_t nalu_length = payload_data_length - kNalHeaderSize; |
| 55 uint8_t nal_type = payload_data[0] & kTypeMask; | 55 uint8_t nal_type = payload_data[0] & kTypeMask; |
| 56 if (nal_type == kStapA) { | 56 if (nal_type == kStapA) { |
| 57 // Skip the StapA header (StapA nal type + length). | 57 // Skip the StapA header (StapA nal type + length). |
| 58 if (payload_data_length <= kStapAHeaderSize) { |
| 59 LOG(LS_ERROR) << "StapA header truncated."; |
| 60 return false; |
| 61 } |
| 58 nal_type = payload_data[kStapAHeaderSize] & kTypeMask; | 62 nal_type = payload_data[kStapAHeaderSize] & kTypeMask; |
| 59 nalu_start += kStapAHeaderSize; | 63 nalu_start += kStapAHeaderSize; |
| 60 nalu_length -= kStapAHeaderSize; | 64 nalu_length -= kStapAHeaderSize; |
| 61 h264_header->packetization_type = kH264StapA; | 65 h264_header->packetization_type = kH264StapA; |
| 62 } else { | 66 } else { |
| 63 h264_header->packetization_type = kH264SingleNalu; | 67 h264_header->packetization_type = kH264SingleNalu; |
| 64 } | 68 } |
| 65 h264_header->nalu_type = nal_type; | 69 h264_header->nalu_type = nal_type; |
| 66 | 70 |
| 67 // We can read resolution out of sps packets. | 71 // We can read resolution out of sps packets. |
| 68 if (nal_type == kSps) { | 72 if (nal_type == kSps) { |
| 69 H264SpsParser parser(nalu_start, nalu_length); | 73 H264SpsParser parser(nalu_start, nalu_length); |
| 70 if (parser.Parse()) { | 74 if (parser.Parse()) { |
| 71 parsed_payload->type.Video.width = parser.width(); | 75 parsed_payload->type.Video.width = parser.width(); |
| 72 parsed_payload->type.Video.height = parser.height(); | 76 parsed_payload->type.Video.height = parser.height(); |
| 73 } | 77 } |
| 74 } | 78 } |
| 75 switch (nal_type) { | 79 switch (nal_type) { |
| 76 case kSps: | 80 case kSps: |
| 77 case kPps: | 81 case kPps: |
| 78 case kIdr: | 82 case kIdr: |
| 79 parsed_payload->frame_type = kVideoFrameKey; | 83 parsed_payload->frame_type = kVideoFrameKey; |
| 80 break; | 84 break; |
| 81 default: | 85 default: |
| 82 parsed_payload->frame_type = kVideoFrameDelta; | 86 parsed_payload->frame_type = kVideoFrameDelta; |
| 83 break; | 87 break; |
| 84 } | 88 } |
| 89 return true; |
| 85 } | 90 } |
| 86 | 91 |
| 87 void ParseFuaNalu(RtpDepacketizer::ParsedPayload* parsed_payload, | 92 bool ParseFuaNalu(RtpDepacketizer::ParsedPayload* parsed_payload, |
| 88 const uint8_t* payload_data, | 93 const uint8_t* payload_data, |
| 89 size_t payload_data_length, | 94 size_t payload_data_length, |
| 90 size_t* offset) { | 95 size_t* offset) { |
| 96 if (payload_data_length < kFuAHeaderSize) { |
| 97 LOG(LS_ERROR) << "FU-A NAL units truncated."; |
| 98 return false; |
| 99 } |
| 91 uint8_t fnri = payload_data[0] & (kFBit | kNriMask); | 100 uint8_t fnri = payload_data[0] & (kFBit | kNriMask); |
| 92 uint8_t original_nal_type = payload_data[1] & kTypeMask; | 101 uint8_t original_nal_type = payload_data[1] & kTypeMask; |
| 93 bool first_fragment = (payload_data[1] & kSBit) > 0; | 102 bool first_fragment = (payload_data[1] & kSBit) > 0; |
| 94 | 103 |
| 95 uint8_t original_nal_header = fnri | original_nal_type; | 104 uint8_t original_nal_header = fnri | original_nal_type; |
| 96 if (first_fragment) { | 105 if (first_fragment) { |
| 97 *offset = kNalHeaderSize; | 106 *offset = kNalHeaderSize; |
| 98 uint8_t* payload = const_cast<uint8_t*>(payload_data + *offset); | 107 uint8_t* payload = const_cast<uint8_t*>(payload_data + *offset); |
| 99 payload[0] = original_nal_header; | 108 payload[0] = original_nal_header; |
| 100 } else { | 109 } else { |
| 101 *offset = kFuAHeaderSize; | 110 *offset = kFuAHeaderSize; |
| 102 } | 111 } |
| 103 | 112 |
| 104 if (original_nal_type == kIdr) { | 113 if (original_nal_type == kIdr) { |
| 105 parsed_payload->frame_type = kVideoFrameKey; | 114 parsed_payload->frame_type = kVideoFrameKey; |
| 106 } else { | 115 } else { |
| 107 parsed_payload->frame_type = kVideoFrameDelta; | 116 parsed_payload->frame_type = kVideoFrameDelta; |
| 108 } | 117 } |
| 109 parsed_payload->type.Video.width = 0; | 118 parsed_payload->type.Video.width = 0; |
| 110 parsed_payload->type.Video.height = 0; | 119 parsed_payload->type.Video.height = 0; |
| 111 parsed_payload->type.Video.codec = kRtpVideoH264; | 120 parsed_payload->type.Video.codec = kRtpVideoH264; |
| 112 parsed_payload->type.Video.isFirstPacket = first_fragment; | 121 parsed_payload->type.Video.isFirstPacket = first_fragment; |
| 113 RTPVideoHeaderH264* h264_header = | 122 RTPVideoHeaderH264* h264_header = |
| 114 &parsed_payload->type.Video.codecHeader.H264; | 123 &parsed_payload->type.Video.codecHeader.H264; |
| 115 h264_header->packetization_type = kH264FuA; | 124 h264_header->packetization_type = kH264FuA; |
| 116 h264_header->nalu_type = original_nal_type; | 125 h264_header->nalu_type = original_nal_type; |
| 126 return true; |
| 117 } | 127 } |
| 118 } // namespace | 128 } // namespace |
| 119 | 129 |
| 120 RtpPacketizerH264::RtpPacketizerH264(FrameType frame_type, | 130 RtpPacketizerH264::RtpPacketizerH264(FrameType frame_type, |
| 121 size_t max_payload_len) | 131 size_t max_payload_len) |
| 122 : payload_data_(NULL), | 132 : payload_data_(NULL), |
| 123 payload_size_(0), | 133 payload_size_(0), |
| 124 max_payload_len_(max_payload_len), | 134 max_payload_len_(max_payload_len), |
| 125 frame_type_(frame_type) { | 135 frame_type_(frame_type) { |
| 126 } | 136 } |
| (...skipping 192 matching lines...) |
| 319 assert(parsed_payload != NULL); | 329 assert(parsed_payload != NULL); |
| 320 if (payload_data_length == 0) { | 330 if (payload_data_length == 0) { |
| 321 LOG(LS_ERROR) << "Empty payload."; | 331 LOG(LS_ERROR) << "Empty payload."; |
| 322 return false; | 332 return false; |
| 323 } | 333 } |
| 324 | 334 |
| 325 uint8_t nal_type = payload_data[0] & kTypeMask; | 335 uint8_t nal_type = payload_data[0] & kTypeMask; |
| 326 size_t offset = 0; | 336 size_t offset = 0; |
| 327 if (nal_type == kFuA) { | 337 if (nal_type == kFuA) { |
| 328 // Fragmented NAL units (FU-A). | 338 // Fragmented NAL units (FU-A). |
| 329 ParseFuaNalu(parsed_payload, payload_data, payload_data_length, &offset); | 339 if (!ParseFuaNalu( |
| 340 parsed_payload, payload_data, payload_data_length, &offset)) { |
| 341 return false; |
| 342 } |
| 330 } else { | 343 } else { |
| 331 // We handle STAP-A and single NALUs the same way here. The jitter buffer | 344 // We handle STAP-A and single NALUs the same way here. The jitter buffer |
| 332 // will depacketize the STAP-A into NAL units later. | 345 // will depacketize the STAP-A into NAL units later. |
| 333 ParseSingleNalu(parsed_payload, payload_data, payload_data_length); | 346 if (!ParseSingleNalu(parsed_payload, payload_data, payload_data_length)) |
| 347 return false; |
| 334 } | 348 } |
| 335 | 349 |
| 336 parsed_payload->payload = payload_data + offset; | 350 parsed_payload->payload = payload_data + offset; |
| 337 parsed_payload->payload_length = payload_data_length - offset; | 351 parsed_payload->payload_length = payload_data_length - offset; |
| 338 return true; | 352 return true; |
| 339 } | 353 } |
| 340 } // namespace webrtc | 354 } // namespace webrtc |
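A note on the FU-A handling above, for reviewers less familiar with RFC 6184: the first fragment of a fragmented NAL unit carries only the FU indicator and FU header on the wire, so the depacketizer rebuilds the original one-byte NAL header in place from the indicator's F/NRI bits and the FU header's type bits, then strips a single byte; later fragments strip both FU bytes. A minimal standalone sketch of that bit manipulation (names are illustrative, not the WebRTC API), assuming the same masks the patch uses (kFBit/kNriMask/kTypeMask, kSBit/kEBit):

#include <cstddef>
#include <cstdint>

// Sketch only: an FU-A payload per RFC 6184 is laid out as
//   byte 0: FU indicator  (F | NRI | type == 28)
//   byte 1: FU header     (S | E | R | original NAL type)
//   byte 2...: fragment data
struct FuAInfo {
  bool valid;
  bool first_fragment;          // S bit set.
  bool last_fragment;           // E bit set.
  uint8_t original_nal_header;  // F/NRI from the indicator + original type.
};

FuAInfo InspectFuA(const uint8_t* payload, size_t length) {
  FuAInfo info = {false, false, false, 0};
  if (length < 2)  // Same guard the patch adds (kFuAHeaderSize).
    return info;
  uint8_t fnri = payload[0] & 0xE0;           // F bit + NRI bits.
  uint8_t original_type = payload[1] & 0x1F;  // Type of the fragmented NALU.
  info.valid = true;
  info.first_fragment = (payload[1] & 0x80) != 0;  // S bit.
  info.last_fragment = (payload[1] & 0x40) != 0;   // E bit.
  info.original_nal_header = fnri | original_type;
  return info;
}

On the first fragment the patch writes this reconstructed header over the FU header byte and sets *offset to kNalHeaderSize, so the downstream jitter buffer sees a well-formed NAL unit starting with the original header.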
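Similarly, the new STAP-A guard only requires payload_data_length > kStapAHeaderSize, i.e. that the first 2-byte length field and at least the first byte of the first aggregated NALU are present before nal_type is re-read at payload_data[kStapAHeaderSize]; actually splitting the aggregate is deferred to the jitter buffer, as the comment in Parse() notes. For reference, a hedged sketch of walking STAP-A aggregation units as RFC 6184 defines them (each unit is a 16-bit big-endian size followed by that many NALU bytes); this is illustrative only, not the project's depacketizer:

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Walks the aggregation units of a STAP-A payload: one NAL header byte
// (type 24), then repeated (2-byte big-endian length, NALU) pairs.
// Returns false on any truncation, in the spirit of the patch's checks.
bool SplitStapA(const uint8_t* payload, size_t length,
                std::vector<std::pair<const uint8_t*, size_t> >* nalus) {
  if (length <= 1)  // Need the STAP-A NAL header plus at least one unit.
    return false;
  size_t offset = 1;  // Skip the STAP-A NAL header.
  while (offset < length) {
    if (length - offset < 2)  // Truncated length field.
      return false;
    size_t nalu_size = (payload[offset] << 8) | payload[offset + 1];
    offset += 2;
    if (nalu_size == 0 || length - offset < nalu_size)  // Truncated NALU.
      return false;
    nalus->push_back(std::make_pair(payload + offset, nalu_size));
    offset += nalu_size;
  }
  return true;
}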