Index: telemetry/telemetry/benchmark_runner.py
diff --git a/telemetry/telemetry/benchmark_runner.py b/telemetry/telemetry/benchmark_runner.py
index 74c2348557a2493a68df3455e04297e09e4b04d2..c37f7e3a5e24d7f419f8756ffc62454f073e9592 100644
--- a/telemetry/telemetry/benchmark_runner.py
+++ b/telemetry/telemetry/benchmark_runner.py
@@ -33,13 +33,12 @@ from py_utils import discover
 # is a viable one for Chrome Telemetry tests. This is done by seeing at least
 # one all-green test run. As this happens for each bot, we'll add it to this
 # whitelist, making it eligible to run only BattOr power tests.
-GOOD_POWER_PERF_BOT_WHITELIST = [
+GOOD_POWER_PERF_BOT_WHITELIST = [\
   "Mac Power Dual-GPU Perf",
-  "Mac Power Low-End Perf"
-]
+  "Mac Power Low-End Perf"]

-DEFAULT_LOG_FORMAT = (
+DEFAULT_LOG_FORMAT = (\
   '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
   '%(message)s')
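Note that Python joins lines implicitly inside (), [] and {}, so the trailing
backslashes this patch adds after open brackets are redundant, though legal.
For context, this is roughly how a format string like DEFAULT_LOG_FORMAT gets
wired up at startup -- a minimal sketch, not code from this patch; the
basicConfig call and log level are assumptions:

    import logging

    DEFAULT_LOG_FORMAT = (
        '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
        '%(message)s')

    # Install the format on the root logger; each %(...)s field is filled
    # in from the LogRecord of every message.
    logging.basicConfig(level=logging.INFO, format=DEFAULT_LOG_FORMAT)
    logging.info('sharding benchmarks')
    # e.g.: (INFO) 2017-03-01 12:00:00,000 example.<module>:12 sharding benchmarks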
@@ -64,9 +63,9 @@ def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
     print >> output_pipe, 'No benchmarks found!'
     return

-  bad_benchmark = next(
+  bad_benchmark = next(\
       (b for b in benchmarks if not issubclass(b, benchmark.Benchmark)), None)
-  assert bad_benchmark is None, (
+  assert bad_benchmark is None, (\
       '|benchmarks| param contains non benchmark class: %s' % bad_benchmark)

   # Align the benchmark names to the longest one.
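The bad_benchmark check above uses the two-argument form of next(): given a
generator and a default, it returns the first yielded item, or the default
when the generator is empty. A standalone sketch of the idiom (class names
are illustrative, not from telemetry):

    class Benchmark(object):
      pass

    class PageCycler(Benchmark):
      pass

    class NotABenchmark(object):
      pass

    candidates = [PageCycler, NotABenchmark]
    # First element failing the issubclass test, else None.
    bad = next((c for c in candidates if not issubclass(c, Benchmark)), None)
    assert bad is NotABenchmark  # the first offender is surfaced for the error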
@@ -250,8 +249,8 @@ def _ScriptName():
 def _MatchingCommands(string, commands):
-  return [command for command in commands
-          if command.Name().startswith(string)]
+  return [command for command in commands \
+          if command.Name().startswith(string)]


 @decorators.Cache
 def _Benchmarks(environment):
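_MatchingCommands does simple prefix matching so abbreviated subcommands
(e.g. 'ru' for 'run') resolve to command classes; callers can treat a
multi-element result as an ambiguous abbreviation. A self-contained sketch --
the Run and List classes here are stand-ins, only the Name() classmethod
mirrors the real command interface:

    def _MatchingCommands(string, commands):
      return [command for command in commands
              if command.Name().startswith(string)]

    class Run(object):
      @classmethod
      def Name(cls):
        return 'run'

    class List(object):
      @classmethod
      def Name(cls):
        return 'list'

    print(_MatchingCommands('ru', [Run, List]))  # [Run] -- unique match
    print(_MatchingCommands('', [Run, List]))    # everything matches ''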
@@ -321,11 +320,10 @@ def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
   if os.environ.get('BUILDBOT_BUILDERNAME') in GOOD_POWER_PERF_BOT_WHITELIST:
     only_run_battor_benchmarks = True

-  output = {
+  output = {\
     'version': 1,
     'steps': {
-    }
-  }
+    }}
   for benchmark_class in benchmark_classes:
     # Filter out benchmarks in tools/perf/contrib/ directory
     # This is a terrible hack but we should no longer need this
@@ -352,20 +350,18 @@ def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
     device_affinity = bot_utils.GetDeviceAffinity(num_shards, base_name)

-    output['steps'][base_name] = {
-      'cmd': ' '.join(base_cmd + [
+    output['steps'][base_name] = {\
+      'cmd': ' '.join(base_cmd + [\
           '--browser=%s' % possible_browser.browser_type]),
       'device_affinity': device_affinity,
-      'perf_dashboard_id': perf_dashboard_id,
-    }
+      'perf_dashboard_id': perf_dashboard_id}
     if (possible_reference_browser and
         _IsBenchmarkEnabled(benchmark_class, possible_reference_browser)):
-      output['steps'][base_name + '.reference'] = {
-        'cmd': ' '.join(base_cmd + [
+      output['steps'][base_name + '.reference'] = {\
+        'cmd': ' '.join(base_cmd + [\
             '--browser=reference', '--output-trace-tag=_ref']),
         'device_affinity': device_affinity,
-        'perf_dashboard_id': perf_dashboard_id,
-      }
+        'perf_dashboard_id': perf_dashboard_id}

   return json.dumps(output, indent=2, sort_keys=True)
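Because the reshuffled dict literals carry the same keys and values, the JSON
emitted by _GetJsonBenchmarkList is unchanged: json.dumps(output, indent=2,
sort_keys=True) still serializes it deterministically, with "steps" sorted
before "version". For a single benchmark the output looks roughly like this
(the step name, command string, and shard number are illustrative, not taken
from this patch):

    {
      "steps": {
        "smoothness.top_25": {
          "cmd": "src/tools/perf/run_benchmark smoothness.top_25 --browser=release",
          "device_affinity": 3,
          "perf_dashboard_id": "smoothness.top_25"
        },
        "smoothness.top_25.reference": {
          "cmd": "src/tools/perf/run_benchmark smoothness.top_25 --browser=reference --output-trace-tag=_ref",
          "device_affinity": 3,
          "perf_dashboard_id": "smoothness.top_25"
        }
      },
      "version": 1
    }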