| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 75 matching lines...) | |
| 86 // To keep track of spatial resize actions by encoder. | 86 // To keep track of spatial resize actions by encoder. |
| 87 last_encoder_frame_width_ = config_.codec_settings->width; | 87 last_encoder_frame_width_ = config_.codec_settings->width; |
| 88 last_encoder_frame_height_ = config_.codec_settings->height; | 88 last_encoder_frame_height_ = config_.codec_settings->height; |
| 89 | 89 |
| 90 // Setup required callbacks for the encoder/decoder: | 90 // Setup required callbacks for the encoder/decoder: |
| 91 encode_callback_ = new VideoProcessorEncodeCompleteCallback(this); | 91 encode_callback_ = new VideoProcessorEncodeCompleteCallback(this); |
| 92 decode_callback_ = new VideoProcessorDecodeCompleteCallback(this); | 92 decode_callback_ = new VideoProcessorDecodeCompleteCallback(this); |
| 93 int32_t register_result = | 93 int32_t register_result = |
| 94 encoder_->RegisterEncodeCompleteCallback(encode_callback_); | 94 encoder_->RegisterEncodeCompleteCallback(encode_callback_); |
| 95 if (register_result != WEBRTC_VIDEO_CODEC_OK) { | 95 if (register_result != WEBRTC_VIDEO_CODEC_OK) { |
| 96 fprintf(stderr, "Failed to register encode complete callback, return code: " | 96 fprintf(stderr, |
| 97 "%d\n", register_result); | 97 "Failed to register encode complete callback, return code: " |
| 98 "%d\n", |
| 99 register_result); |
| 98 return false; | 100 return false; |
| 99 } | 101 } |
| 100 register_result = decoder_->RegisterDecodeCompleteCallback(decode_callback_); | 102 register_result = decoder_->RegisterDecodeCompleteCallback(decode_callback_); |
| 101 if (register_result != WEBRTC_VIDEO_CODEC_OK) { | 103 if (register_result != WEBRTC_VIDEO_CODEC_OK) { |
| 102 fprintf(stderr, "Failed to register decode complete callback, return code: " | 104 fprintf(stderr, |
| 103 "%d\n", register_result); | 105 "Failed to register decode complete callback, return code: " |
| 106 "%d\n", |
| 107 register_result); |
| 104 return false; | 108 return false; |
| 105 } | 109 } |
| 106 // Init the encoder and decoder | 110 // Init the encoder and decoder |
| 107 uint32_t nbr_of_cores = 1; | 111 uint32_t nbr_of_cores = 1; |
| 108 if (!config_.use_single_core) { | 112 if (!config_.use_single_core) { |
| 109 nbr_of_cores = CpuInfo::DetectNumberOfCores(); | 113 nbr_of_cores = CpuInfo::DetectNumberOfCores(); |
| 110 } | 114 } |
| 111 int32_t init_result = | 115 int32_t init_result = |
| 112 encoder_->InitEncode(config_.codec_settings, nbr_of_cores, | 116 encoder_->InitEncode(config_.codec_settings, nbr_of_cores, |
| 113 config_.networking_config.max_payload_size_in_bytes); | 117 config_.networking_config.max_payload_size_in_bytes); |
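The hunk above is pure clang-format churn, but the pattern it reflows is worth spelling out: register a callback, check the status code, and only then initialize the encoder with either one core or however many the machine reports. A minimal standalone sketch of that pattern; `Encoder`, `kCodecOk`, and the 1440-byte payload cap are stand-ins for the real WebRTC types and constants, not the actual API:

```cpp
#include <cstddef>
#include <cstdio>
#include <thread>

constexpr int kCodecOk = 0;  // stand-in for WEBRTC_VIDEO_CODEC_OK

struct Encoder {
  int RegisterEncodeCompleteCallback(void* /*cb*/) { return kCodecOk; }
  int InitEncode(unsigned /*cores*/, std::size_t /*max_payload*/) {
    return kCodecOk;
  }
};

bool InitWithChecks(Encoder* encoder, bool use_single_core) {
  // Mirror the original: check every status code and bail out with a
  // message on stderr rather than asserting.
  if (encoder->RegisterEncodeCompleteCallback(nullptr) != kCodecOk) {
    std::fprintf(stderr, "Failed to register encode complete callback\n");
    return false;
  }
  // The original calls CpuInfo::DetectNumberOfCores(); hardware_concurrency()
  // is a portable stand-in, but it may return 0 when detection fails.
  unsigned cores = use_single_core ? 1u : std::thread::hardware_concurrency();
  if (cores == 0) cores = 1;
  return encoder->InitEncode(cores, 1440) == kCodecOk;
}

int main() {
  Encoder encoder;
  return InitWithChecks(&encoder, /*use_single_core=*/false) ? 0 : 1;
}
```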
| (...skipping 25 matching lines...) | |
| 139 | 143 |
| 140 VideoProcessorImpl::~VideoProcessorImpl() { | 144 VideoProcessorImpl::~VideoProcessorImpl() { |
| 141 delete[] source_buffer_; | 145 delete[] source_buffer_; |
| 142 delete[] last_successful_frame_buffer_; | 146 delete[] last_successful_frame_buffer_; |
| 143 encoder_->RegisterEncodeCompleteCallback(NULL); | 147 encoder_->RegisterEncodeCompleteCallback(NULL); |
| 144 delete encode_callback_; | 148 delete encode_callback_; |
| 145 decoder_->RegisterDecodeCompleteCallback(NULL); | 149 decoder_->RegisterDecodeCompleteCallback(NULL); |
| 146 delete decode_callback_; | 150 delete decode_callback_; |
| 147 } | 151 } |
| 148 | 152 |
| 149 | |
| 150 void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) { | 153 void VideoProcessorImpl::SetRates(int bit_rate, int frame_rate) { |
| 151 int set_rates_result = encoder_->SetRates(bit_rate, frame_rate); | 154 int set_rates_result = encoder_->SetRates(bit_rate, frame_rate); |
| 152 assert(set_rates_result >= 0); | 155 assert(set_rates_result >= 0); |
| 153 if (set_rates_result < 0) { | 156 if (set_rates_result < 0) { |
| 154 fprintf(stderr, "Failed to update encoder with new rate %d, " | 157 fprintf(stderr, |
| 155 "return code: %d\n", bit_rate, set_rates_result); | 158 "Failed to update encoder with new rate %d, " |
| 159 "return code: %d\n", |
| 160 bit_rate, set_rates_result); |
| 156 } | 161 } |
| 157 num_dropped_frames_ = 0; | 162 num_dropped_frames_ = 0; |
| 158 num_spatial_resizes_ = 0; | 163 num_spatial_resizes_ = 0; |
| 159 } | 164 } |
| 160 | 165 |
| 161 size_t VideoProcessorImpl::EncodedFrameSize() { | 166 size_t VideoProcessorImpl::EncodedFrameSize() { |
| 162 return encoded_frame_size_; | 167 return encoded_frame_size_; |
| 163 } | 168 } |
| 164 | 169 |
| 165 FrameType VideoProcessorImpl::EncodedFrameType() { | 170 FrameType VideoProcessorImpl::EncodedFrameType() { |
| 166 return encoded_frame_type_; | 171 return encoded_frame_type_; |
| 167 } | 172 } |
| 168 | 173 |
| 169 int VideoProcessorImpl::NumberDroppedFrames() { | 174 int VideoProcessorImpl::NumberDroppedFrames() { |
| 170 return num_dropped_frames_; | 175 return num_dropped_frames_; |
| 171 } | 176 } |
| 172 | 177 |
| 173 int VideoProcessorImpl::NumberSpatialResizes() { | 178 int VideoProcessorImpl::NumberSpatialResizes() { |
| 174 return num_spatial_resizes_; | 179 return num_spatial_resizes_; |
| 175 } | 180 } |
| 176 | 181 |
| 177 bool VideoProcessorImpl::ProcessFrame(int frame_number) { | 182 bool VideoProcessorImpl::ProcessFrame(int frame_number) { |
| 178 assert(frame_number >=0); | 183 assert(frame_number >= 0); |
| 179 if (!initialized_) { | 184 if (!initialized_) { |
| 180 fprintf(stderr, "Attempting to use uninitialized VideoProcessor!\n"); | 185 fprintf(stderr, "Attempting to use uninitialized VideoProcessor!\n"); |
| 181 return false; | 186 return false; |
| 182 } | 187 } |
| 183 // |prev_time_stamp_| is used for getting number of dropped frames. | 188 // |prev_time_stamp_| is used for getting number of dropped frames. |
| 184 if (frame_number == 0) { | 189 if (frame_number == 0) { |
| 185 prev_time_stamp_ = -1; | 190 prev_time_stamp_ = -1; |
| 186 } | 191 } |
| 187 if (frame_reader_->ReadFrame(source_buffer_)) { | 192 if (frame_reader_->ReadFrame(source_buffer_)) { |
| 188 // Copy the source frame to the newly read frame data. | 193 // Copy the source frame to the newly read frame data. |
| 189 source_frame_.CreateFrame(source_buffer_, | 194 source_frame_.CreateFrame(source_buffer_, config_.codec_settings->width, |
| 190 config_.codec_settings->width, | 195 config_.codec_settings->height, kVideoRotation_0); |
| 191 config_.codec_settings->height, | |
| 192 kVideoRotation_0); | |
| 193 | 196 |
| 194 // Ensure we have a new statistics data object we can fill: | 197 // Ensure we have a new statistics data object we can fill: |
| 195 FrameStatistic& stat = stats_->NewFrame(frame_number); | 198 FrameStatistic& stat = stats_->NewFrame(frame_number); |
| 196 | 199 |
| 197 encode_start_ = TickTime::Now(); | 200 encode_start_ = TickTime::Now(); |
| 198 // Use the frame number as "timestamp" to identify frames | 201 // Use the frame number as "timestamp" to identify frames |
| 199 source_frame_.set_timestamp(frame_number); | 202 source_frame_.set_timestamp(frame_number); |
| 200 | 203 |
| 201 // Decide if we're going to force a keyframe: | 204 // Decide if we're going to force a keyframe: |
| 202 std::vector<FrameType> frame_types(1, kVideoFrameDelta); | 205 std::vector<FrameType> frame_types(1, kVideoFrameDelta); |
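One detail in the rows above does real work: `set_timestamp(frame_number)` smuggles the source frame number through the codec in the RTP timestamp field, which is how the asynchronous encode callback later knows which frame it is looking at. A toy round-trip of that convention, with `Frame` and `EncodedFrame` as stand-ins for the real `VideoFrame`/`EncodedImage` types:

```cpp
#include <cassert>

struct Frame { int timestamp = 0; };         // stand-in for VideoFrame
struct EncodedFrame { int timestamp = 0; };  // stand-in for EncodedImage

// Codecs carry the input timestamp through to the output unchanged; that
// pass-through is the property the processor relies on.
EncodedFrame Encode(const Frame& in) { return EncodedFrame{in.timestamp}; }

int main() {
  Frame f;
  f.timestamp = 42;           // frame number used as the "timestamp"
  EncodedFrame e = Encode(f);
  assert(e.timestamp == 42);  // the callback recovers the frame number
  return 0;
}
```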
| (...skipping 14 matching lines...) | |
| 217 } | 220 } |
| 218 stat.encode_return_code = encode_result; | 221 stat.encode_return_code = encode_result; |
| 219 return true; | 222 return true; |
| 220 } else { | 223 } else { |
| 221 return false; // we've reached the last frame | 224 return false; // we've reached the last frame |
| 222 } | 225 } |
| 223 } | 226 } |
| 224 | 227 |
| 225 void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) { | 228 void VideoProcessorImpl::FrameEncoded(const EncodedImage& encoded_image) { |
| 226 // Timestamp is frame number, so this gives us #dropped frames. | 229 // Timestamp is frame number, so this gives us #dropped frames. |
| 227 int num_dropped_from_prev_encode = encoded_image._timeStamp - | 230 int num_dropped_from_prev_encode = |
| 228 prev_time_stamp_ - 1; | 231 encoded_image._timeStamp - prev_time_stamp_ - 1; |
| 229 num_dropped_frames_ += num_dropped_from_prev_encode; | 232 num_dropped_frames_ += num_dropped_from_prev_encode; |
| 230 prev_time_stamp_ = encoded_image._timeStamp; | 233 prev_time_stamp_ = encoded_image._timeStamp; |
| 231 if (num_dropped_from_prev_encode > 0) { | 234 if (num_dropped_from_prev_encode > 0) { |
| 232 // For dropped frames, we write out the last decoded frame to avoid getting | 235 // For dropped frames, we write out the last decoded frame to avoid getting |
| 233 // out of sync for the computation of PSNR and SSIM. | 236 // out of sync for the computation of PSNR and SSIM. |
| 234 for (int i = 0; i < num_dropped_from_prev_encode; i++) { | 237 for (int i = 0; i < num_dropped_from_prev_encode; i++) { |
| 235 frame_writer_->WriteFrame(last_successful_frame_buffer_); | 238 frame_writer_->WriteFrame(last_successful_frame_buffer_); |
| 236 } | 239 } |
| 237 } | 240 } |
| 238 // Frame is not dropped, so update the encoded frame size | 241 // Frame is not dropped, so update the encoded frame size |
| 239 // (encoder callback is only called for non-zero length frames). | 242 // (encoder callback is only called for non-zero length frames). |
| 240 encoded_frame_size_ = encoded_image._length; | 243 encoded_frame_size_ = encoded_image._length; |
| 241 | 244 |
| 242 encoded_frame_type_ = encoded_image._frameType; | 245 encoded_frame_type_ = encoded_image._frameType; |
| 243 | 246 |
| 244 TickTime encode_stop = TickTime::Now(); | 247 TickTime encode_stop = TickTime::Now(); |
| 245 int frame_number = encoded_image._timeStamp; | 248 int frame_number = encoded_image._timeStamp; |
| 246 FrameStatistic& stat = stats_->stats_[frame_number]; | 249 FrameStatistic& stat = stats_->stats_[frame_number]; |
| 247 stat.encode_time_in_us = GetElapsedTimeMicroseconds(encode_start_, | 250 stat.encode_time_in_us = |
| 248 encode_stop); | 251 GetElapsedTimeMicroseconds(encode_start_, encode_stop); |
| 249 stat.encoding_successful = true; | 252 stat.encoding_successful = true; |
| 250 stat.encoded_frame_length_in_bytes = encoded_image._length; | 253 stat.encoded_frame_length_in_bytes = encoded_image._length; |
| 251 stat.frame_number = encoded_image._timeStamp; | 254 stat.frame_number = encoded_image._timeStamp; |
| 252 stat.frame_type = encoded_image._frameType; | 255 stat.frame_type = encoded_image._frameType; |
| 253 stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_; | 256 stat.bit_rate_in_kbps = encoded_image._length * bit_rate_factor_; |
| 254 stat.total_packets = encoded_image._length / | 257 stat.total_packets = |
| 255 config_.networking_config.packet_size_in_bytes + 1; | 258 encoded_image._length / config_.networking_config.packet_size_in_bytes + |
| | 259 1; |
| 256 | 260 |
| 257 // Perform packet loss if the criterion is fulfilled: | 261 // Perform packet loss if the criterion is fulfilled: |
| 258 bool exclude_this_frame = false; | 262 bool exclude_this_frame = false; |
| 259 // Only keyframes can be excluded | 263 // Only keyframes can be excluded |
| 260 if (encoded_image._frameType == kVideoFrameKey) { | 264 if (encoded_image._frameType == kVideoFrameKey) { |
| 261 switch (config_.exclude_frame_types) { | 265 switch (config_.exclude_frame_types) { |
| 262 case kExcludeOnlyFirstKeyFrame: | 266 case kExcludeOnlyFirstKeyFrame: |
| 263 if (!first_key_frame_has_been_excluded_) { | 267 if (!first_key_frame_has_been_excluded_) { |
| 264 first_key_frame_has_been_excluded_ = true; | 268 first_key_frame_has_been_excluded_ = true; |
| 265 exclude_this_frame = true; | 269 exclude_this_frame = true; |
| 266 } | 270 } |
| 267 break; | 271 break; |
| 268 case kExcludeAllKeyFrames: | 272 case kExcludeAllKeyFrames: |
| 269 exclude_this_frame = true; | 273 exclude_this_frame = true; |
| 270 break; | 274 break; |
| 271 default: | 275 default: |
| 272 assert(false); | 276 assert(false); |
| 273 } | 277 } |
| 274 } | 278 } |
| 275 rtc::scoped_ptr<uint8_t[]> copied_buffer(new uint8_t[encoded_image._length]); | 279 rtc::scoped_ptr<uint8_t[]> copied_buffer(new uint8_t[encoded_image._length]); |
| 276 memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length); | 280 memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length); |
| 277 EncodedImage copied_image; | 281 EncodedImage copied_image; |
| 278 memcpy(&copied_image, &encoded_image, sizeof(copied_image)); | 282 memcpy(&copied_image, &encoded_image, sizeof(copied_image)); |
| 279 copied_image._size = copied_image._length; | 283 copied_image._size = copied_image._length; |
| 280 copied_image._buffer = copied_buffer.get(); | 284 copied_image._buffer = copied_buffer.get(); |
| 281 if (!exclude_this_frame) { | 285 if (!exclude_this_frame) { |
| 282 stat.packets_dropped = | 286 stat.packets_dropped = |
| 283 packet_manipulator_->ManipulatePackets(&copied_image); | 287 packet_manipulator_->ManipulatePackets(&copied_image); |
| 284 } | 288 } |
| 285 | 289 |
| 286 // Keep track of whether frames are lost due to packet loss so we can tell | 290 // Keep track of whether frames are lost due to packet loss so we can tell |
| 287 // this to the encoder (this is handled by the RTP logic in the full stack) | 291 // this to the encoder (this is handled by the RTP logic in the full stack) |
| 288 decode_start_ = TickTime::Now(); | 292 decode_start_ = TickTime::Now(); |
| 289 // TODO(kjellander): Pass fragmentation header to the decoder when | 293 // TODO(kjellander): Pass fragmentation header to the decoder when |
| 290 // CL 172001 has been submitted and PacketManipulator supports this. | 294 // CL 172001 has been submitted and PacketManipulator supports this. |
| 291 int32_t decode_result = | 295 int32_t decode_result = |
| 292 decoder_->Decode(copied_image, last_frame_missing_, NULL); | 296 decoder_->Decode(copied_image, last_frame_missing_, NULL); |
| 293 stat.decode_return_code = decode_result; | 297 stat.decode_return_code = decode_result; |
| 294 if (decode_result != WEBRTC_VIDEO_CODEC_OK) { | 298 if (decode_result != WEBRTC_VIDEO_CODEC_OK) { |
| 295 // Write the last successful frame to the output file to avoid getting it out | 299 // Write the last successful frame to the output file to avoid getting it out |
| 296 // of sync with the source file for SSIM and PSNR comparisons: | 300 // of sync with the source file for SSIM and PSNR comparisons: |
| 297 frame_writer_->WriteFrame(last_successful_frame_buffer_); | 301 frame_writer_->WriteFrame(last_successful_frame_buffer_); |
| 298 } | 302 } |
| 299 // save status for losses so we can inform the decoder for the next frame: | 303 // save status for losses so we can inform the decoder for the next frame: |
| 300 last_frame_missing_ = copied_image._length == 0; | 304 last_frame_missing_ = copied_image._length == 0; |
| 301 } | 305 } |
| 302 | 306 |
| 303 void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) { | 307 void VideoProcessorImpl::FrameDecoded(const VideoFrame& image) { |
| 304 TickTime decode_stop = TickTime::Now(); | 308 TickTime decode_stop = TickTime::Now(); |
| 305 int frame_number = image.timestamp(); | 309 int frame_number = image.timestamp(); |
| 306 // Report stats | 310 // Report stats |
| 307 FrameStatistic& stat = stats_->stats_[frame_number]; | 311 FrameStatistic& stat = stats_->stats_[frame_number]; |
| 308 stat.decode_time_in_us = GetElapsedTimeMicroseconds(decode_start_, | 312 stat.decode_time_in_us = |
| 309 decode_stop); | 313 GetElapsedTimeMicroseconds(decode_start_, decode_stop); |
| 310 stat.decoding_successful = true; | 314 stat.decoding_successful = true; |
| 311 | 315 |
| 312 // Check for resize action (either down or up): | 316 // Check for resize action (either down or up): |
| 313 if (static_cast<int>(image.width()) != last_encoder_frame_width_ || | 317 if (static_cast<int>(image.width()) != last_encoder_frame_width_ || |
| 314 static_cast<int>(image.height()) != last_encoder_frame_height_ ) { | 318 static_cast<int>(image.height()) != last_encoder_frame_height_) { |
| 315 ++num_spatial_resizes_; | 319 ++num_spatial_resizes_; |
| 316 last_encoder_frame_width_ = image.width(); | 320 last_encoder_frame_width_ = image.width(); |
| 317 last_encoder_frame_height_ = image.height(); | 321 last_encoder_frame_height_ = image.height(); |
| 318 } | 322 } |
| 319 // Check if codec size is different from native/original size, and if so, | 323 // Check if codec size is different from native/original size, and if so, |
| 320 // upsample back to original size: needed for PSNR and SSIM computations. | 324 // upsample back to original size: needed for PSNR and SSIM computations. |
| 321 if (image.width() != config_.codec_settings->width || | 325 if (image.width() != config_.codec_settings->width || |
| 322 image.height() != config_.codec_settings->height) { | 326 image.height() != config_.codec_settings->height) { |
| 323 VideoFrame up_image; | 327 VideoFrame up_image; |
| 324 int ret_val = scaler_.Set(image.width(), image.height(), | 328 int ret_val = scaler_.Set( |
| 325 config_.codec_settings->width, | 329 image.width(), image.height(), config_.codec_settings->width, |
| 326 config_.codec_settings->height, | 330 config_.codec_settings->height, kI420, kI420, kScaleBilinear); |
| 327 kI420, kI420, kScaleBilinear); | |
| 328 assert(ret_val >= 0); | 331 assert(ret_val >= 0); |
| 329 if (ret_val < 0) { | 332 if (ret_val < 0) { |
| 330 fprintf(stderr, "Failed to set scalar for frame: %d, return code: %d\n", | 333 fprintf(stderr, "Failed to set scalar for frame: %d, return code: %d\n", |
| 331 frame_number, ret_val); | 334 frame_number, ret_val); |
| 332 } | 335 } |
| 333 ret_val = scaler_.Scale(image, &up_image); | 336 ret_val = scaler_.Scale(image, &up_image); |
| 334 assert(ret_val >= 0); | 337 assert(ret_val >= 0); |
| 335 if (ret_val < 0) { | 338 if (ret_val < 0) { |
| 336 fprintf(stderr, "Failed to scale frame: %d, return code: %d\n", | 339 fprintf(stderr, "Failed to scale frame: %d, return code: %d\n", |
| 337 frame_number, ret_val); | 340 frame_number, ret_val); |
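Two bits of arithmetic in `FrameEncoded()` above deserve a closer look: the dropped-frame count derived from consecutive timestamps, and the packet count, which as written (`length / packet_size + 1`) counts one packet too many whenever the length is an exact multiple of the packet size. A standalone sketch; `PacketCount()` shows plain ceiling division as an alternative, not what this patch does:

```cpp
#include <cassert>
#include <cstddef>

// Because the timestamp is the source frame number, the gap between two
// consecutive encode callbacks is exactly the number of dropped frames.
int DroppedSincePrev(int cur_timestamp, int prev_timestamp) {
  return cur_timestamp - prev_timestamp - 1;  // adjacent frames drop none
}

// Ceiling division: no off-by-one when length divides evenly.
std::size_t PacketCount(std::size_t length, std::size_t packet_size) {
  return (length + packet_size - 1) / packet_size;
}

int main() {
  assert(DroppedSincePrev(5, 2) == 2);   // frames 3 and 4 were dropped
  assert(DroppedSincePrev(1, 0) == 0);   // consecutive frames
  assert(PacketCount(2000, 1000) == 2);  // exact multiple: 2, not 3
  assert(PacketCount(2001, 1000) == 3);
  return 0;
}
```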
| (...skipping 21 matching lines...) | |
| 359 | 362 |
| 360 bool write_success = frame_writer_->WriteFrame(image_buffer.get()); | 363 bool write_success = frame_writer_->WriteFrame(image_buffer.get()); |
| 361 assert(write_success); | 364 assert(write_success); |
| 362 if (!write_success) { | 365 if (!write_success) { |
| 363 fprintf(stderr, "Failed to write frame %d to disk!", frame_number); | 366 fprintf(stderr, "Failed to write frame %d to disk!", frame_number); |
| 364 } | 367 } |
| 365 } | 368 } |
| 366 } | 369 } |
| 367 | 370 |
| 368 int VideoProcessorImpl::GetElapsedTimeMicroseconds( | 371 int VideoProcessorImpl::GetElapsedTimeMicroseconds( |
| 369 const webrtc::TickTime& start, const webrtc::TickTime& stop) { | 372 const webrtc::TickTime& start, |
| | 373 const webrtc::TickTime& stop) { |
| 370 uint64_t encode_time = (stop - start).Microseconds(); | 374 uint64_t encode_time = (stop - start).Microseconds(); |
| 371 assert(encode_time < | 375 assert(encode_time < |
| 372 static_cast<unsigned int>(std::numeric_limits<int>::max())); | 376 static_cast<unsigned int>(std::numeric_limits<int>::max())); |
| 373 return static_cast<int>(encode_time); | 377 return static_cast<int>(encode_time); |
| 374 } | 378 } |
| 375 | 379 |
| 376 const char* ExcludeFrameTypesToStr(ExcludeFrameTypes e) { | 380 const char* ExcludeFrameTypesToStr(ExcludeFrameTypes e) { |
| 377 switch (e) { | 381 switch (e) { |
| 378 case kExcludeOnlyFirstKeyFrame: | 382 case kExcludeOnlyFirstKeyFrame: |
| 379 return "ExcludeOnlyFirstKeyFrame"; | 383 return "ExcludeOnlyFirstKeyFrame"; |
| (...skipping 17 matching lines...) | |
| 397 return "ULPFEC"; | 401 return "ULPFEC"; |
| 398 case kVideoCodecUnknown: | 402 case kVideoCodecUnknown: |
| 399 return "Unknown"; | 403 return "Unknown"; |
| 400 default: | 404 default: |
| 401 assert(false); | 405 assert(false); |
| 402 return "Unknown"; | 406 return "Unknown"; |
| 403 } | 407 } |
| 404 } | 408 } |
| 405 | 409 |
| 406 // Callbacks | 410 // Callbacks |
| 407 int32_t | 411 int32_t VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded( |
| 408 VideoProcessorImpl::VideoProcessorEncodeCompleteCallback::Encoded( | |
| 409 const EncodedImage& encoded_image, | 412 const EncodedImage& encoded_image, |
| 410 const webrtc::CodecSpecificInfo* codec_specific_info, | 413 const webrtc::CodecSpecificInfo* codec_specific_info, |
| 411 const webrtc::RTPFragmentationHeader* fragmentation) { | 414 const webrtc::RTPFragmentationHeader* fragmentation) { |
| 412 video_processor_->FrameEncoded(encoded_image); // Forward to parent class. | 415 video_processor_->FrameEncoded(encoded_image); // Forward to parent class. |
| 413 return 0; | 416 return 0; |
| 414 } | 417 } |
| 415 int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded( | 418 int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded( |
| 416 VideoFrame& image) { | 419 VideoFrame& image) { |
| 417 video_processor_->FrameDecoded(image); // forward to parent class | 420 video_processor_->FrameDecoded(image); // forward to parent class |
| 418 return 0; | 421 return 0; |
| 419 } | 422 } |
| 420 | 423 |
| 421 } // namespace test | 424 } // namespace test |
| 422 } // namespace webrtc | 425 } // namespace webrtc |
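The two `Encoded()`/`Decoded()` implementations at the end forward straight to the owning `VideoProcessorImpl`: a small nested adapter class implements the codec's callback interface and holds a back-pointer, so the processor never has to inherit from the interface itself. A simplified sketch of that pattern, with the interface and owner as stand-in types:

```cpp
// Stand-in for the codec's callback interface (e.g. EncodedImageCallback).
struct EncodedImageCallback {
  virtual ~EncodedImageCallback() = default;
  virtual int Encoded(int frame_id) = 0;
};

class Processor {
 public:
  void FrameEncoded(int /*frame_id*/) { /* gather stats, feed decoder... */ }

  // Nested adapter: holds a back-pointer and forwards every callback.
  class EncodeCallback : public EncodedImageCallback {
   public:
    explicit EncodeCallback(Processor* owner) : owner_(owner) {}
    int Encoded(int frame_id) override {
      owner_->FrameEncoded(frame_id);  // forward to parent
      return 0;
    }

   private:
    Processor* const owner_;
  };
};

int main() {
  Processor p;
  Processor::EncodeCallback cb(&p);
  return cb.Encoded(7);  // returns 0 after forwarding to p.FrameEncoded(7)
}
```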