Chromium Code Reviews

Unified Diff: webrtc/video/video_quality_test.cc

Issue 1397363002: Revert of Adding support for simulcast and spatial layers into VideoQualityTest (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Created 5 years, 2 months ago
Index: webrtc/video/video_quality_test.cc
diff --git a/webrtc/video/video_quality_test.cc b/webrtc/video/video_quality_test.cc
index 9feca167e77d4d7b3d013dd52db6309a486915b6..1513b818cedd24fb1cbb6898409c359d1fbf72af 100644
--- a/webrtc/video/video_quality_test.cc
+++ b/webrtc/video/video_quality_test.cc
@@ -12,7 +12,6 @@
#include <algorithm>
#include <deque>
#include <map>
-#include <sstream>
#include <vector>
#include "testing/gtest/include/gtest/gtest.h"
@@ -46,22 +45,18 @@
public EncodedFrameObserver,
public EncodingTimeObserver {
public:
- VideoAnalyzer(test::LayerFilteringTransport* transport,
+ VideoAnalyzer(Transport* transport,
const std::string& test_label,
double avg_psnr_threshold,
double avg_ssim_threshold,
int duration_frames,
- FILE* graph_data_output_file,
- const std::string &graph_title,
- uint32_t ssrc_to_analyze)
+ FILE* graph_data_output_file)
: input_(nullptr),
transport_(transport),
receiver_(nullptr),
send_stream_(nullptr),
test_label_(test_label),
graph_data_output_file_(graph_data_output_file),
- graph_title_(graph_title),
- ssrc_to_analyze_(ssrc_to_analyze),
frames_to_process_(duration_frames),
frames_recorded_(0),
frames_processed_(0),
@@ -156,9 +151,6 @@
RTPHeader header;
parser->Parse(packet, length, &header);
- int64_t current_time =
- Clock::GetRealTimeClock()->CurrentNtpInMilliseconds();
- bool result = transport_->SendRtp(packet, length, options);
{
rtc::CritScope lock(&crit_);
if (rtp_timestamp_delta_ == 0) {
@@ -166,14 +158,13 @@
first_send_frame_.Reset();
}
uint32_t timestamp = header.timestamp - rtp_timestamp_delta_;
- send_times_[timestamp] = current_time;
- if (!transport_->DiscardedLastPacket() &&
- header.ssrc == ssrc_to_analyze_) {
- encoded_frame_sizes_[timestamp] +=
- length - (header.headerLength + header.paddingLength);
- }
- }
- return result;
+ send_times_[timestamp] =
+ Clock::GetRealTimeClock()->CurrentNtpInMilliseconds();
+ encoded_frame_sizes_[timestamp] +=
+ length - (header.headerLength + header.paddingLength);
+ }
+
+ return transport_->SendRtp(packet, length, options);
}
bool SendRtcp(const uint8_t* packet, size_t length) override {
@@ -203,11 +194,6 @@
VideoFrame reference_frame = frames_.front();
frames_.pop_front();
assert(!reference_frame.IsZeroSize());
- if (send_timestamp == reference_frame.timestamp() - 1) {
- // TODO(ivica): Make this work for > 2 streams.
- // Look at rtp_sender.c:RTPSender::BuildRTPHeader.
- ++send_timestamp;
- }
EXPECT_EQ(reference_frame.timestamp(), send_timestamp);
assert(reference_frame.timestamp() == send_timestamp);
@@ -261,7 +247,7 @@
}
VideoCaptureInput* input_;
- test::LayerFilteringTransport* transport_;
+ Transport* transport_;
PacketReceiver* receiver_;
VideoSendStream* send_stream_;
@@ -336,13 +322,8 @@
int64_t recv_time_ms = recv_times_[reference.timestamp()];
recv_times_.erase(reference.timestamp());
- // TODO(ivica): Make this work for > 2 streams.
- auto it = encoded_frame_sizes_.find(reference.timestamp());
- if (it == encoded_frame_sizes_.end())
- it = encoded_frame_sizes_.find(reference.timestamp() - 1);
- size_t encoded_size = it == encoded_frame_sizes_.end() ? 0 : it->second;
- if (it != encoded_frame_sizes_.end())
- encoded_frame_sizes_.erase(it);
+ size_t encoded_size = encoded_frame_sizes_[reference.timestamp()];
+ encoded_frame_sizes_.erase(reference.timestamp());
VideoFrame reference_copy;
VideoFrame render_copy;
@@ -530,7 +511,7 @@
return A.input_time_ms < B.input_time_ms;
});
- fprintf(out, "%s\n", graph_title_.c_str());
+ fprintf(out, "%s\n", test_label_.c_str());
fprintf(out, "%" PRIuS "\n", samples_.size());
fprintf(out,
"dropped "
@@ -568,8 +549,6 @@
const std::string test_label_;
FILE* const graph_data_output_file_;
- const std::string graph_title_;
- const uint32_t ssrc_to_analyze_;
std::vector<Sample> samples_ GUARDED_BY(comparison_lock_);
std::map<int64_t, int> samples_encode_time_ms_ GUARDED_BY(comparison_lock_);
test::Statistics sender_time_ GUARDED_BY(comparison_lock_);
@@ -609,173 +588,30 @@
const rtc::scoped_ptr<EventWrapper> done_;
};
-
VideoQualityTest::VideoQualityTest() : clock_(Clock::GetRealTimeClock()) {}
+void VideoQualityTest::ValidateParams(const Params& params) {
+ RTC_CHECK_GE(params.common.max_bitrate_bps, params.common.target_bitrate_bps);
+ RTC_CHECK_GE(params.common.target_bitrate_bps, params.common.min_bitrate_bps);
+ RTC_CHECK_LT(params.common.tl_discard_threshold,
+ params.common.num_temporal_layers);
+}
+
void VideoQualityTest::TestBody() {}
-std::string VideoQualityTest::GenerateGraphTitle() const {
- std::stringstream ss;
- ss << params_.common.codec;
- ss << " (" << params_.common.target_bitrate_bps / 1000 << "kbps";
- ss << ", " << params_.common.fps << " FPS";
- if (params_.screenshare.scroll_duration)
- ss << ", " << params_.screenshare.scroll_duration << "s scroll";
- if (params_.ss.streams.size() > 1)
- ss << ", Stream #" << params_.ss.selected_stream;
- if (params_.ss.num_spatial_layers > 1)
- ss << ", Layer #" << params_.ss.selected_sl;
- ss << ")";
- return ss.str();
-}
-
-void VideoQualityTest::CheckParams() {
- // Add a default stream in none specified.
- if (params_.ss.streams.empty())
- params_.ss.streams.push_back(VideoQualityTest::DefaultVideoStream(params_));
- if (params_.ss.num_spatial_layers == 0)
- params_.ss.num_spatial_layers = 1;
-
- // TODO(ivica): Should max_bitrate_bps == -1 represent inf max bitrate, as it
- // does in some parts of the code?
- RTC_CHECK_GE(params_.common.max_bitrate_bps,
- params_.common.target_bitrate_bps);
- RTC_CHECK_GE(params_.common.target_bitrate_bps,
- params_.common.min_bitrate_bps);
- RTC_CHECK_LT(params_.common.selected_tl,
- params_.common.num_temporal_layers);
- RTC_CHECK_LT(params_.ss.selected_stream, params_.ss.streams.size());
- for (const VideoStream& stream : params_.ss.streams) {
- RTC_CHECK_GE(stream.min_bitrate_bps, 0);
- RTC_CHECK_GE(stream.target_bitrate_bps, stream.min_bitrate_bps);
- RTC_CHECK_GE(stream.max_bitrate_bps, stream.target_bitrate_bps);
- RTC_CHECK_EQ(static_cast<int>(stream.temporal_layer_thresholds_bps.size()),
- params_.common.num_temporal_layers - 1);
- }
- // TODO(ivica): Should we check if the sum of all streams/layers is equal to
- // the total bitrate? We anyway have to update them in the case bitrate
- // estimator changes the total bitrates.
- RTC_CHECK_GE(params_.ss.num_spatial_layers, 1);
- RTC_CHECK_LT(params_.ss.selected_sl, params_.ss.num_spatial_layers);
- RTC_CHECK(params_.ss.spatial_layers.empty() ||
- params_.ss.spatial_layers.size() ==
- static_cast<size_t>(params_.ss.num_spatial_layers));
- if (params_.common.codec == "VP8") {
- RTC_CHECK_EQ(params_.ss.num_spatial_layers, 1);
- } else if (params_.common.codec == "VP9") {
- RTC_CHECK_EQ(params_.ss.streams.size(), 1u);
- }
-}
-
-// Static.
-std::vector<int> VideoQualityTest::ParseCSV(const std::string &str) {
- // Parse comma separated nonnegative integers, where some elements may be
- // empty. The empty values are replaced with -1.
- // E.g. "10,-20,,30,40" --> {10, 20, -1, 30,40}
- // E.g. ",,10,,20," --> {-1, -1, 10, -1, 20, -1}
- std::vector<int> result;
- if (str.empty()) return result;
-
- const char* p = str.c_str();
- int value = -1;
- int pos;
- while (*p) {
- if (*p == ',') {
- result.push_back(value);
- value = -1;
- ++p;
- continue;
- }
- RTC_CHECK_EQ(sscanf(p, "%d%n", &value, &pos), 1)
- << "Unexpected non-number value.";
- p += pos;
- }
- result.push_back(value);
- return result;
-}
-
-// Static.
-VideoStream VideoQualityTest::DefaultVideoStream(const Params& params) {
- VideoStream stream;
- stream.width = params.common.width;
- stream.height = params.common.height;
- stream.max_framerate = params.common.fps;
- stream.min_bitrate_bps = params.common.min_bitrate_bps;
- stream.target_bitrate_bps = params.common.target_bitrate_bps;
- stream.max_bitrate_bps = params.common.max_bitrate_bps;
- stream.max_qp = 52;
- if (params.common.num_temporal_layers == 2)
- stream.temporal_layer_thresholds_bps.push_back(stream.target_bitrate_bps);
- return stream;
-}
-
-// Static.
-void VideoQualityTest::FillScalabilitySettings(
- Params* params,
- const std::vector<std::string>& stream_descriptors,
- size_t selected_stream,
- int num_spatial_layers,
- int selected_sl,
- const std::vector<std::string>& sl_descriptors) {
- // Read VideoStream and SpatialLayer elements from a list of comma separated
- // lists. To use a default value for an element, use -1 or leave empty.
- // Validity checks performed in CheckParams.
-
- RTC_CHECK(params->ss.streams.empty());
- for (auto descriptor : stream_descriptors) {
- if (descriptor.empty())
- continue;
- std::vector<int> v = VideoQualityTest::ParseCSV(descriptor);
- VideoStream stream(VideoQualityTest::DefaultVideoStream(*params));
- if (v[0] != -1) stream.width = static_cast<size_t>(v[0]);
- if (v[1] != -1) stream.height = static_cast<size_t>(v[1]);
- if (v[2] != -1) stream.max_framerate = v[2];
- if (v[3] != -1) stream.min_bitrate_bps = v[3];
- if (v[4] != -1) stream.target_bitrate_bps = v[4];
- if (v[5] != -1) stream.max_bitrate_bps = v[5];
- if (v.size() > 6 && v[6] != -1) stream.max_qp = v[6];
- if (v.size() > 7) {
- stream.temporal_layer_thresholds_bps.clear();
- stream.temporal_layer_thresholds_bps.insert(
- stream.temporal_layer_thresholds_bps.end(), v.begin() + 7, v.end());
- } else {
- // Automatic TL thresholds for more than two layers not supported.
- RTC_CHECK_LE(params->common.num_temporal_layers, 2);
- }
- params->ss.streams.push_back(stream);
- }
- params->ss.selected_stream = selected_stream;
-
- params->ss.num_spatial_layers = num_spatial_layers ? num_spatial_layers : 1;
- params->ss.selected_sl = selected_sl;
- RTC_CHECK(params->ss.spatial_layers.empty());
- for (auto descriptor : sl_descriptors) {
- if (descriptor.empty())
- continue;
- std::vector<int> v = VideoQualityTest::ParseCSV(descriptor);
- RTC_CHECK_GT(v[2], 0);
-
- SpatialLayer layer;
- layer.scaling_factor_num = v[0] == -1 ? 1 : v[0];
- layer.scaling_factor_den = v[1] == -1 ? 1 : v[1];
- layer.target_bitrate_bps = v[2];
- params->ss.spatial_layers.push_back(layer);
- }
-}
-
-void VideoQualityTest::SetupCommon(Transport* send_transport,
- Transport* recv_transport) {
- if (params_.logs)
+void VideoQualityTest::SetupFullStack(const Params& params,
+ Transport* send_transport,
+ Transport* recv_transport) {
+ if (params.logs)
trace_to_stderr_.reset(new test::TraceToStderr);
- size_t num_streams = params_.ss.streams.size();
- CreateSendConfig(num_streams, send_transport);
+ CreateSendConfig(1, send_transport);
int payload_type;
- if (params_.common.codec == "VP8") {
+ if (params.common.codec == "VP8") {
encoder_.reset(VideoEncoder::Create(VideoEncoder::kVp8));
payload_type = kPayloadTypeVP8;
- } else if (params_.common.codec == "VP9") {
+ } else if (params.common.codec == "VP9") {
encoder_.reset(VideoEncoder::Create(VideoEncoder::kVp9));
payload_type = kPayloadTypeVP9;
} else {
@@ -783,15 +619,15 @@
return;
}
send_config_.encoder_settings.encoder = encoder_.get();
- send_config_.encoder_settings.payload_name = params_.common.codec;
+ send_config_.encoder_settings.payload_name = params.common.codec;
send_config_.encoder_settings.payload_type = payload_type;
+
send_config_.rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ send_config_.rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[0]);
send_config_.rtp.rtx.payload_type = kSendRtxPayloadType;
- for (size_t i = 0; i < num_streams; ++i)
- send_config_.rtp.rtx.ssrcs.push_back(kSendRtxSsrcs[i]);
send_config_.rtp.extensions.clear();
- if (params_.common.send_side_bwe) {
+ if (params.common.send_side_bwe) {
send_config_.rtp.extensions.push_back(RtpExtension(
RtpExtension::kTransportSequenceNumber, kTransportSeqExtensionId));
} else {
@@ -799,41 +635,49 @@
RtpExtension(RtpExtension::kAbsSendTime, kAbsSendTimeExtensionId));
}
- encoder_config_.min_transmit_bitrate_bps = params_.common.min_transmit_bps;
- encoder_config_.streams = params_.ss.streams;
- encoder_config_.spatial_layers = params_.ss.spatial_layers;
+ // Automatically fill out streams[0] with params.
+ VideoStream* stream = &encoder_config_.streams[0];
+ stream->width = params.common.width;
+ stream->height = params.common.height;
+ stream->min_bitrate_bps = params.common.min_bitrate_bps;
+ stream->target_bitrate_bps = params.common.target_bitrate_bps;
+ stream->max_bitrate_bps = params.common.max_bitrate_bps;
+ stream->max_framerate = static_cast<int>(params.common.fps);
+
+ stream->temporal_layer_thresholds_bps.clear();
+ if (params.common.num_temporal_layers > 1) {
+ stream->temporal_layer_thresholds_bps.push_back(stream->target_bitrate_bps);
+ }
CreateMatchingReceiveConfigs(recv_transport);
- for (size_t i = 0; i < num_streams; ++i) {
- receive_configs_[i].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
- receive_configs_[i].rtp.rtx[kSendRtxPayloadType].ssrc = kSendRtxSsrcs[i];
- receive_configs_[i].rtp.rtx[kSendRtxPayloadType].payload_type =
- kSendRtxPayloadType;
- }
+ receive_configs_[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
+ receive_configs_[0].rtp.rtx[kSendRtxPayloadType].ssrc = kSendRtxSsrcs[0];
+ receive_configs_[0].rtp.rtx[kSendRtxPayloadType].payload_type =
+ kSendRtxPayloadType;
+
+ encoder_config_.min_transmit_bitrate_bps = params.common.min_transmit_bps;
}
-void VideoQualityTest::SetupScreenshare() {
- RTC_CHECK(params_.screenshare.enabled);
+void VideoQualityTest::SetupScreenshare(const Params& params) {
+ RTC_CHECK(params.screenshare.enabled);
// Fill out codec settings.
encoder_config_.content_type = VideoEncoderConfig::ContentType::kScreen;
- if (params_.common.codec == "VP8") {
+ if (params.common.codec == "VP8") {
codec_settings_.VP8 = VideoEncoder::GetDefaultVp8Settings();
codec_settings_.VP8.denoisingOn = false;
codec_settings_.VP8.frameDroppingOn = false;
codec_settings_.VP8.numberOfTemporalLayers =
- static_cast<unsigned char>(params_.common.num_temporal_layers);
+ static_cast<unsigned char>(params.common.num_temporal_layers);
encoder_config_.encoder_specific_settings = &codec_settings_.VP8;
- } else if (params_.common.codec == "VP9") {
+ } else if (params.common.codec == "VP9") {
codec_settings_.VP9 = VideoEncoder::GetDefaultVp9Settings();
codec_settings_.VP9.denoisingOn = false;
codec_settings_.VP9.frameDroppingOn = false;
codec_settings_.VP9.numberOfTemporalLayers =
- static_cast<unsigned char>(params_.common.num_temporal_layers);
+ static_cast<unsigned char>(params.common.num_temporal_layers);
encoder_config_.encoder_specific_settings = &codec_settings_.VP9;
- codec_settings_.VP9.numberOfSpatialLayers =
- static_cast<unsigned char>(params_.ss.num_spatial_layers);
}
// Setup frame generator.
@@ -845,127 +689,105 @@
slides.push_back(test::ResourcePath("photo_1850_1110", "yuv"));
slides.push_back(test::ResourcePath("difficult_photo_1850_1110", "yuv"));
- if (params_.screenshare.scroll_duration == 0) {
+ if (params.screenshare.scroll_duration == 0) {
// Cycle image every slide_change_interval seconds.
frame_generator_.reset(test::FrameGenerator::CreateFromYuvFile(
slides, kWidth, kHeight,
- params_.screenshare.slide_change_interval * params_.common.fps));
+ params.screenshare.slide_change_interval * params.common.fps));
} else {
- RTC_CHECK_LE(params_.common.width, kWidth);
- RTC_CHECK_LE(params_.common.height, kHeight);
- RTC_CHECK_GT(params_.screenshare.slide_change_interval, 0);
- const int kPauseDurationMs = (params_.screenshare.slide_change_interval -
- params_.screenshare.scroll_duration) * 1000;
- RTC_CHECK_LE(params_.screenshare.scroll_duration,
- params_.screenshare.slide_change_interval);
-
- frame_generator_.reset(
- test::FrameGenerator::CreateScrollingInputFromYuvFiles(
- clock_, slides, kWidth, kHeight, params_.common.width,
- params_.common.height, params_.screenshare.scroll_duration * 1000,
- kPauseDurationMs));
+ RTC_CHECK_LE(params.common.width, kWidth);
+ RTC_CHECK_LE(params.common.height, kHeight);
+ RTC_CHECK_GT(params.screenshare.slide_change_interval, 0);
+ const int kPauseDurationMs = (params.screenshare.slide_change_interval -
+ params.screenshare.scroll_duration) * 1000;
+ RTC_CHECK_LE(params.screenshare.scroll_duration,
+ params.screenshare.slide_change_interval);
+
+ if (params.screenshare.scroll_duration) {
+ frame_generator_.reset(
+ test::FrameGenerator::CreateScrollingInputFromYuvFiles(
+ clock_, slides, kWidth, kHeight, params.common.width,
+ params.common.height, params.screenshare.scroll_duration * 1000,
+ kPauseDurationMs));
+ } else {
+ frame_generator_.reset(test::FrameGenerator::CreateFromYuvFile(
+ slides, kWidth, kHeight,
+ params.screenshare.slide_change_interval * params.common.fps));
+ }
}
}
-void VideoQualityTest::CreateCapturer(VideoCaptureInput* input) {
- if (params_.screenshare.enabled) {
+void VideoQualityTest::CreateCapturer(const Params& params,
+ VideoCaptureInput* input) {
+ if (params.screenshare.enabled) {
test::FrameGeneratorCapturer *frame_generator_capturer =
new test::FrameGeneratorCapturer(
- clock_, input, frame_generator_.release(), params_.common.fps);
+ clock_, input, frame_generator_.release(), params.common.fps);
EXPECT_TRUE(frame_generator_capturer->Init());
capturer_.reset(frame_generator_capturer);
} else {
- if (params_.video.clip_name.empty()) {
+ if (params.video.clip_name.empty()) {
capturer_.reset(test::VideoCapturer::Create(
- input, params_.common.width, params_.common.height,
- params_.common.fps, clock_));
+ input, params.common.width, params.common.height, params.common.fps,
+ clock_));
} else {
capturer_.reset(test::FrameGeneratorCapturer::CreateFromYuvFile(
- input, test::ResourcePath(params_.video.clip_name, "yuv"),
- params_.common.width, params_.common.height, params_.common.fps,
+ input, test::ResourcePath(params.video.clip_name, "yuv"),
+ params.common.width, params.common.height, params.common.fps,
clock_));
ASSERT_TRUE(capturer_.get() != nullptr)
- << "Could not create capturer for " << params_.video.clip_name
+ << "Could not create capturer for " << params.video.clip_name
<< ".yuv. Is this resource file present?";
}
}
}
-void VideoQualityTest::RunWithAnalyzer(const Params& _params) {
- params_ = _params;
-
+void VideoQualityTest::RunWithAnalyzer(const Params& params) {
// TODO(ivica): Merge with RunWithRenderer and use a flag / argument to
// differentiate between the analyzer and the renderer case.
- CheckParams();
+ ValidateParams(params);
FILE* graph_data_output_file = nullptr;
- if (!params_.analyzer.graph_data_output_filename.empty()) {
+ if (!params.analyzer.graph_data_output_filename.empty()) {
graph_data_output_file =
- fopen(params_.analyzer.graph_data_output_filename.c_str(), "w");
+ fopen(params.analyzer.graph_data_output_filename.c_str(), "w");
RTC_CHECK(graph_data_output_file != nullptr)
<< "Can't open the file "
- << params_.analyzer.graph_data_output_filename << "!";
- }
+ << params.analyzer.graph_data_output_filename << "!";
+ }
+
test::LayerFilteringTransport send_transport(
- params_.pipe, kPayloadTypeVP8, kPayloadTypeVP9,
- params_.common.selected_tl, params_.ss.selected_sl);
- test::DirectTransport recv_transport(params_.pipe);
-
- std::string graph_title = params_.analyzer.graph_title;
- if (graph_title.empty())
- graph_title = VideoQualityTest::GenerateGraphTitle();
-
- // In the case of different resolutions, the functions calculating PSNR and
- // SSIM return -1.0, instead of a positive value as usual. VideoAnalyzer
- // aborts if the average psnr/ssim are below the given threshold, which is
- // 0.0 by default. Setting the thresholds to -1.1 prevents the unnecessary
- // abort.
- VideoStream& selected_stream =
- params_.ss.streams[params_.ss.selected_stream];
- int selected_sl = params_.ss.selected_sl != -1
- ? params_.ss.selected_sl : params_.ss.num_spatial_layers - 1;
- bool disable_quality_check =
- selected_stream.width != params_.common.width ||
- selected_stream.height != params_.common.height ||
- (!params_.ss.spatial_layers.empty() &&
- params_.ss.spatial_layers[selected_sl].scaling_factor_num !=
- params_.ss.spatial_layers[selected_sl].scaling_factor_den);
- if (disable_quality_check) {
- fprintf(stderr,
- "Warning: Calculating PSNR and SSIM for downsized resolution "
- "not implemented yet! Skipping PSNR and SSIM calculations!");
- }
-
+ params.pipe, kPayloadTypeVP8, kPayloadTypeVP9,
+ static_cast<uint8_t>(params.common.tl_discard_threshold), 0);
+ test::DirectTransport recv_transport(params.pipe);
VideoAnalyzer analyzer(
- &send_transport, params_.analyzer.test_label,
- disable_quality_check ? -1.1 : params_.analyzer.avg_psnr_threshold,
- disable_quality_check ? -1.1 : params_.analyzer.avg_ssim_threshold,
- params_.analyzer.test_durations_secs * params_.common.fps,
- graph_data_output_file, graph_title,
- kSendSsrcs[params_.ss.selected_stream]);
+ &send_transport, params.analyzer.test_label,
+ params.analyzer.avg_psnr_threshold, params.analyzer.avg_ssim_threshold,
+ params.analyzer.test_durations_secs * params.common.fps,
+ graph_data_output_file);
Call::Config call_config;
- call_config.bitrate_config = params_.common.call_bitrate_config;
+ call_config.bitrate_config = params.common.call_bitrate_config;
CreateCalls(call_config, call_config);
analyzer.SetReceiver(receiver_call_->Receiver());
send_transport.SetReceiver(&analyzer);
recv_transport.SetReceiver(sender_call_->Receiver());
- SetupCommon(&analyzer, &recv_transport);
+ SetupFullStack(params, &analyzer, &recv_transport);
send_config_.encoding_time_observer = &analyzer;
- receive_configs_[params_.ss.selected_stream].renderer = &analyzer;
+ receive_configs_[0].renderer = &analyzer;
for (auto& config : receive_configs_)
config.pre_decode_callback = &analyzer;
- if (params_.screenshare.enabled)
- SetupScreenshare();
+ if (params.screenshare.enabled)
+ SetupScreenshare(params);
CreateStreams();
analyzer.input_ = send_stream_->Input();
analyzer.send_stream_ = send_stream_;
- CreateCapturer(&analyzer);
+ CreateCapturer(params, &analyzer);
send_stream_->Start();
for (size_t i = 0; i < receive_streams_.size(); ++i)
@@ -988,52 +810,41 @@
fclose(graph_data_output_file);
}
-void VideoQualityTest::RunWithVideoRenderer(const Params& _params) {
- params_ = _params;
- CheckParams();
+void VideoQualityTest::RunWithVideoRenderer(const Params& params) {
+ ValidateParams(params);
rtc::scoped_ptr<test::VideoRenderer> local_preview(
- test::VideoRenderer::Create("Local Preview", params_.common.width,
- params_.common.height));
- size_t stream_id = params_.ss.selected_stream;
- char title[32];
- if (params_.ss.streams.size() == 1) {
- sprintf(title, "Loopback Video");
- } else {
- sprintf(title, "Loopback Video - Stream #%" PRIuS, stream_id);
- }
+ test::VideoRenderer::Create("Local Preview", params.common.width,
+ params.common.height));
rtc::scoped_ptr<test::VideoRenderer> loopback_video(
- test::VideoRenderer::Create(
- title, params_.ss.streams[stream_id].width,
- params_.ss.streams[stream_id].height));
+ test::VideoRenderer::Create("Loopback Video", params.common.width,
+ params.common.height));
// TODO(ivica): Remove bitrate_config and use the default Call::Config(), to
// match the full stack tests.
Call::Config call_config;
- call_config.bitrate_config = params_.common.call_bitrate_config;
+ call_config.bitrate_config = params.common.call_bitrate_config;
rtc::scoped_ptr<Call> call(Call::Create(call_config));
test::LayerFilteringTransport transport(
- params_.pipe, kPayloadTypeVP8, kPayloadTypeVP9,
- params_.common.selected_tl, params_.ss.selected_sl);
-
+ params.pipe, kPayloadTypeVP8, kPayloadTypeVP9,
+ static_cast<uint8_t>(params.common.tl_discard_threshold), 0);
// TODO(ivica): Use two calls to be able to merge with RunWithAnalyzer or at
// least share as much code as possible. That way this test would also match
// the full stack tests better.
transport.SetReceiver(call->Receiver());
- SetupCommon(&transport, &transport);
-
+ SetupFullStack(params, &transport, &transport);
send_config_.local_renderer = local_preview.get();
- receive_configs_[stream_id].renderer = loopback_video.get();
-
- if (params_.screenshare.enabled)
- SetupScreenshare();
+ receive_configs_[0].renderer = loopback_video.get();
+
+ if (params.screenshare.enabled)
+ SetupScreenshare(params);
send_stream_ = call->CreateVideoSendStream(send_config_, encoder_config_);
VideoReceiveStream* receive_stream =
- call->CreateVideoReceiveStream(receive_configs_[stream_id]);
- CreateCapturer(send_stream_->Input());
+ call->CreateVideoReceiveStream(receive_configs_[0]);
+ CreateCapturer(params, send_stream_->Input());
receive_stream->Start();
send_stream_->Start();
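
For reviewers skimming this revert: the deleted CheckParams/ParseCSV/FillScalabilitySettings block is what let callers describe simulcast streams and spatial layers as comma-separated descriptors (width, height, fps, min/target/max bitrate, max_qp, optional temporal-layer thresholds), with empty fields falling back to defaults. Below is a minimal standalone sketch of that descriptor parsing, assuming only the behavior documented in the removed comments; it is an illustration, not the WebRTC helper itself, and ParseDescriptor is a hypothetical name.

    // Sketch of the comma-separated descriptor format removed by this revert:
    // empty fields become -1 ("use the default"), e.g.
    //   "1280,720,,,,1500000" -> {1280, 720, -1, -1, -1, 1500000}
    //   ",,10,,20,"           -> {-1, -1, 10, -1, 20, -1}
    // Illustration only; not the actual VideoQualityTest::ParseCSV.
    #include <cstdio>
    #include <sstream>
    #include <string>
    #include <vector>

    std::vector<int> ParseDescriptor(const std::string& str) {
      std::vector<int> result;
      std::stringstream ss(str);
      std::string field;
      while (std::getline(ss, field, ',')) {
        // An empty field means "keep the default", encoded as -1.
        result.push_back(field.empty() ? -1 : std::stoi(field));
      }
      if (!str.empty() && str.back() == ',')
        result.push_back(-1);  // A trailing comma is a trailing default field.
      return result;
    }

    int main() {
      for (int v : ParseDescriptor("1280,720,,,,1500000"))
        std::printf("%d ", v);
      std::printf("\n");
      return 0;
    }

With the revert applied, VideoQualityTest is back to configuring a single stream directly from Params::common (see the streams[0] block in SetupFullStack above) rather than from per-stream descriptors.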
