Index: webrtc/modules/video_coding/video_sender_unittest.cc
diff --git a/webrtc/modules/video_coding/video_sender_unittest.cc b/webrtc/modules/video_coding/video_sender_unittest.cc
index e15e87285a2486e1885973a0462d18204bd5e8b7..50283fc8c9039078c7f3e36c4bb3f69f3212147f 100644
--- a/webrtc/modules/video_coding/video_sender_unittest.cc
+++ b/webrtc/modules/video_coding/video_sender_unittest.cc
@@ -86,19 +86,19 @@
   std::unique_ptr<VideoFrame> frame_;
 };
 
-class PacketizationCallback : public VCMPacketizationCallback {
+class EncodedImageCallbackImpl : public EncodedImageCallback {
  public:
-  explicit PacketizationCallback(Clock* clock)
+  explicit EncodedImageCallbackImpl(Clock* clock)
       : clock_(clock), start_time_ms_(clock_->TimeInMilliseconds()) {}
 
-  virtual ~PacketizationCallback() {}
-
-  int32_t SendData(uint8_t payload_type,
-                   const EncodedImage& encoded_image,
-                   const RTPFragmentationHeader* fragmentation_header,
-                   const RTPVideoHeader* rtp_video_header) override {
-    assert(rtp_video_header);
-    frame_data_.push_back(FrameData(encoded_image._length, *rtp_video_header));
+  virtual ~EncodedImageCallbackImpl() {}
+
+  int32_t Encoded(const EncodedImage& encoded_image,
+                  const CodecSpecificInfo* codec_specific_info,
+                  const RTPFragmentationHeader* fragmentation) override {
+    assert(codec_specific_info);
+    frame_data_.push_back(
+        FrameData(encoded_image._length, *codec_specific_info));
     return 0;
   }
 
@@ -130,11 +130,12 @@
   struct FrameData {
     FrameData() {}
 
-    FrameData(size_t payload_size, const RTPVideoHeader& rtp_video_header)
-        : payload_size(payload_size), rtp_video_header(rtp_video_header) {}
+    FrameData(size_t payload_size, const CodecSpecificInfo& codec_specific_info)
+        : payload_size(payload_size),
+          codec_specific_info(codec_specific_info) {}
 
     size_t payload_size;
-    RTPVideoHeader rtp_video_header;
+    CodecSpecificInfo codec_specific_info;
   };
 
   int64_t interval_ms() {
@@ -146,9 +147,9 @@
   int CountFramesWithinTemporalLayer(int temporal_layer) {
     int frames = 0;
     for (size_t i = 0; i < frame_data_.size(); ++i) {
-      EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
+      EXPECT_EQ(kVideoCodecVP8, frame_data_[i].codec_specific_info.codecType);
       const uint8_t temporal_idx =
-          frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
+          frame_data_[i].codec_specific_info.codecSpecific.VP8.temporalIdx;
       if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
         frames++;
     }
@@ -158,9 +159,9 @@
   size_t SumPayloadBytesWithinTemporalLayer(int temporal_layer) {
     size_t payload_size = 0;
     for (size_t i = 0; i < frame_data_.size(); ++i) {
-      EXPECT_EQ(kRtpVideoVp8, frame_data_[i].rtp_video_header.codec);
+      EXPECT_EQ(kVideoCodecVP8, frame_data_[i].codec_specific_info.codecType);
       const uint8_t temporal_idx =
-          frame_data_[i].rtp_video_header.codecHeader.VP8.temporalIdx;
+          frame_data_[i].codec_specific_info.codecSpecific.VP8.temporalIdx;
       if (temporal_idx <= temporal_layer || temporal_idx == kNoTemporalIdx)
         payload_size += frame_data_[i].payload_size;
     }
@@ -176,12 +177,11 @@
  protected:
   // Note: simulated clock starts at 1 seconds, since parts of webrtc use 0 as
   // a special case (e.g. frame rate in media optimization).
-  TestVideoSender() : clock_(1000), packetization_callback_(&clock_) {}
+  TestVideoSender() : clock_(1000), encoded_frame_callback_(&clock_) {}
 
   void SetUp() override {
     sender_.reset(
-        new VideoSender(&clock_, &post_encode_callback_, nullptr, nullptr));
-    EXPECT_EQ(0, sender_->RegisterTransportCallback(&packetization_callback_));
+        new VideoSender(&clock_, &encoded_frame_callback_, nullptr, nullptr));
   }
 
   void AddFrame() {
@@ -190,8 +190,7 @@
   }
 
   SimulatedClock clock_;
-  PacketizationCallback packetization_callback_;
-  MockEncodedImageCallback post_encode_callback_;
+  EncodedImageCallbackImpl encoded_frame_callback_;
   // Used by subclassing tests, need to outlive sender_.
   std::unique_ptr<VideoEncoder> encoder_;
   std::unique_ptr<VideoSender> sender_;
@@ -415,8 +414,6 @@
   void InsertFrames(float framerate, float seconds) {
     for (int i = 0; i < seconds * framerate; ++i) {
       clock_.AdvanceTimeMilliseconds(1000.0f / framerate);
-      EXPECT_CALL(post_encode_callback_, Encoded(_, NULL, NULL))
-          .WillOnce(Return(0));
       AddFrame();
       // SetChannelParameters needs to be called frequently to propagate
       // framerate from the media optimization into the encoder.
@@ -435,10 +432,10 @@
     // It appears that this 5 seconds simulation is needed to allow
     // bitrate and framerate to stabilize.
     InsertFrames(framerate, short_simulation_interval);
-    packetization_callback_.Reset();
+    encoded_frame_callback_.Reset();
 
     InsertFrames(framerate, long_simulation_interval);
-    return packetization_callback_.CalculateVp8StreamInfo();
+    return encoded_frame_callback_.CalculateVp8StreamInfo();
   }
 
  protected: