Index: webrtc/modules/interface/module_common_types.h
diff --git a/webrtc/modules/interface/module_common_types.h b/webrtc/modules/interface/module_common_types.h
index 1202eee0435c3774b7daf45a2eb20384dc6d4205..62fe69481afbb585817c413b68b6507d35688723 100644
--- a/webrtc/modules/interface/module_common_types.h
+++ b/webrtc/modules/interface/module_common_types.h
@@ -32,8 +32,15 @@ struct RTPAudioHeader {
 };
 
 const int16_t kNoPictureId = -1;
+const int16_t kMaxOneBytePictureId = 0x7F;    // 7 bits
+const int16_t kMaxTwoBytePictureId = 0x7FFF;  // 15 bits
 const int16_t kNoTl0PicIdx = -1;
 const uint8_t kNoTemporalIdx = 0xFF;
+const uint8_t kNoSpatialIdx = 0xFF;
+const uint8_t kNoGofIdx = 0xFF;
+const size_t kMaxVp9RefPics = 3;
+const size_t kMaxVp9FramesInGof = 16;
+const size_t kMaxVp9NumberOfSpatialLayers = 8;
 const int kNoKeyIdx = -1;
 
 struct RTPVideoHeaderVP8 {
@@ -62,6 +69,80 @@ struct RTPVideoHeaderVP8 {
                                // in a VP8 partition. Otherwise false
 };
 
+struct GofInfoVP9 {
+  void CopyGofInfoVP9(const GofInfoVP9& src) {
+    num_frames_in_gof = src.num_frames_in_gof;
+    for (size_t i = 0; i < num_frames_in_gof; ++i) {
+      temporal_idx[i] = src.temporal_idx[i];
+      temporal_up_switch[i] = src.temporal_up_switch[i];
+      num_ref_pics[i] = src.num_ref_pics[i];
+      for (size_t r = 0; r < num_ref_pics[i]; ++r) {
+        pid_diff[i][r] = src.pid_diff[i][r];
+      }
+    }
+  }
+
+  size_t num_frames_in_gof;
+  uint8_t temporal_idx[kMaxVp9FramesInGof];
+  bool temporal_up_switch[kMaxVp9FramesInGof];
+  size_t num_ref_pics[kMaxVp9FramesInGof];
+  int16_t pid_diff[kMaxVp9FramesInGof][kMaxVp9RefPics];
+};
+
+struct RTPVideoHeaderVP9 {
+  void InitRTPVideoHeaderVP9() {
+    inter_pic_predicted = false;
+    flexible_mode = false;
+    beginning_of_frame = false;
+    end_of_frame = false;
+    ss_data_available = false;
+    picture_id = kNoPictureId;
+    max_picture_id = kMaxTwoBytePictureId;
+    tl0_pic_idx = kNoTl0PicIdx;
+    temporal_idx = kNoTemporalIdx;
+    spatial_idx = kNoSpatialIdx;
+    temporal_up_switch = false;
+    inter_layer_predicted = false;
+    gof_idx = kNoGofIdx;
+    num_ref_pics = 0;
+  }
+
+  bool inter_pic_predicted;  // This layer frame is dependent on previously
+                             // coded frame(s).
+  bool flexible_mode;        // This frame is in flexible mode.
+  bool beginning_of_frame;   // True if this packet is the first in a VP9 layer
+                             // frame.
+  bool end_of_frame;  // True if this packet is the last in a VP9 layer frame.
+  bool ss_data_available;    // True if SS data is available in this payload
+                             // descriptor.
+  int16_t picture_id;        // PictureID index, 15 bits;
+                             // kNoPictureId if PictureID does not exist.
+  int16_t max_picture_id;    // Maximum picture ID index; either 0x7F or 0x7FFF;
+  int16_t tl0_pic_idx;       // TL0PIC_IDX, 8 bits;
+                             // kNoTl0PicIdx means no value provided.
+  uint8_t temporal_idx;      // Temporal layer index, or kNoTemporalIdx.
+  uint8_t spatial_idx;       // Spatial layer index, or kNoSpatialIdx.
+  bool temporal_up_switch;   // True if upswitch to higher frame rate is possible
+                             // starting from this frame.
+  bool inter_layer_predicted;  // Frame is dependent on directly lower spatial
+                               // layer frame.
+
+  uint8_t gof_idx;  // Index to predefined temporal frame info in SS data.
+
+  size_t num_ref_pics;  // Number of reference pictures used by this layer
+                        // frame.
+  int16_t pid_diff[kMaxVp9RefPics];  // P_DIFF signaled to derive the PictureID
+                                     // of the reference pictures.
+  int16_t ref_picture_id[kMaxVp9RefPics];  // PictureID of reference pictures.
+
+  // SS data.
+  size_t num_spatial_layers;
+  bool spatial_layer_resolution_present;
+  uint16_t width[kMaxVp9NumberOfSpatialLayers];
+  uint16_t height[kMaxVp9NumberOfSpatialLayers];
+  GofInfoVP9 gof;
+};
+
 // The packetization types that we support: single, aggregated, and fragmented.
 enum H264PacketizationTypes {
   kH264SingleNalu,  // This packet contains a single NAL unit.
@@ -85,6 +166,7 @@ struct RTPVideoHeaderH264 {
 
 union RTPVideoTypeHeader {
   RTPVideoHeaderVP8 VP8;
+  RTPVideoHeaderVP9 VP9;
   RTPVideoHeaderH264 H264;
 };
 
@@ -92,6 +174,7 @@ enum RtpVideoCodecTypes {
   kRtpVideoNone,
   kRtpVideoGeneric,
   kRtpVideoVp8,
+  kRtpVideoVp9,
   kRtpVideoH264
 };
 // Since RTPVideoHeader is used as a member of a union, it can't have a
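
For context, the sketch below (not part of the patch) shows one way the new structs could be filled in by a VP9 depacketizer when a key-frame packet carries scalability structure (SS) data. BuildExampleVp9Header and all of the literal values are hypothetical; only the struct, field, and constant names come from the header above, and the real WebRTC payload code may populate these fields differently.

// Illustrative only; assumes the header added by this patch.
#include "webrtc/modules/interface/module_common_types.h"

webrtc::RTPVideoHeaderVP9 BuildExampleVp9Header() {
  webrtc::RTPVideoHeaderVP9 vp9;
  vp9.InitRTPVideoHeaderVP9();  // Reset every field to its "no value" default.

  // Single packet holding an entire non-predicted (key) layer frame.
  vp9.beginning_of_frame = true;
  vp9.end_of_frame = true;
  vp9.inter_pic_predicted = false;
  vp9.picture_id = 0;    // 15-bit PictureID; wraps at kMaxTwoBytePictureId.
  vp9.temporal_idx = 0;
  vp9.spatial_idx = 0;

  // SS data: one spatial layer with an explicit resolution.
  vp9.ss_data_available = true;
  vp9.num_spatial_layers = 1;
  vp9.spatial_layer_resolution_present = true;
  vp9.width[0] = 640;
  vp9.height[0] = 360;

  // A two-frame group of frames: temporal layer 0 followed by an
  // up-switchable temporal layer 1, each referencing one earlier frame.
  webrtc::GofInfoVP9 gof;
  gof.num_frames_in_gof = 2;
  gof.temporal_idx[0] = 0;
  gof.temporal_up_switch[0] = false;
  gof.num_ref_pics[0] = 1;
  gof.pid_diff[0][0] = 2;  // Refers to the frame two PictureIDs back.
  gof.temporal_idx[1] = 1;
  gof.temporal_up_switch[1] = true;
  gof.num_ref_pics[1] = 1;
  gof.pid_diff[1][0] = 1;  // Refers to the immediately preceding frame.
  vp9.gof.CopyGofInfoVP9(gof);  // Deep-copies only the populated entries.

  return vp9;
}

The explicit InitRTPVideoHeaderVP9() and CopyGofInfoVP9() helpers, rather than a constructor and assignment operator, appear to follow from the constraint noted at the end of the file: RTPVideoHeaderVP9 becomes a member of the RTPVideoTypeHeader union, so it has to stay trivially constructible and copyable.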