Chromium Code Reviews

Side by Side Diff: telemetry/telemetry/benchmark_runner.py

Issue 2978643002: Removing bad-continuation param and fixing resulting errors. Fixed indentation errors, in telemetry… (Closed)
Patch Set: Created 3 years, 5 months ago
 # Copyright 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Parses the command line, discovers the appropriate benchmarks, and runs them.

 Handles benchmark configuration, but all the logic for
 actually running the benchmark is in Benchmark and PageRunner."""

 import argparse
(...skipping 15 matching lines...)
 from py_utils import discover

 # Right now, we only have one of each of our power perf bots. This means that
 # all eligible Telemetry benchmarks are run unsharded, which results in very
 # long (12h) cycle times. We'd like to reduce the number of tests that we run
 # on each bot drastically until we get more of the same hardware to shard tests
 # with, but we can't do so until we've verified that the hardware configuration
 # is a viable one for Chrome Telemetry tests. This is done by seeing at least
 # one all-green test run. As this happens for each bot, we'll add it to this
 # whitelist, making it eligible to run only BattOr power tests.
-GOOD_POWER_PERF_BOT_WHITELIST = [
-    "Mac Power Dual-GPU Perf",
-    "Mac Power Low-End Perf"
-]
+GOOD_POWER_PERF_BOT_WHITELIST = [
+    "Mac Power Dual-GPU Perf",
+    "Mac Power Low-End Perf"]


 DEFAULT_LOG_FORMAT = (
     '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
     '%(message)s')

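For reference, a minimal sketch of what this format produces once main() below
feeds it to logging.basicConfig (the timestamp, module name, and line number in
the sample output are illustrative):

    import logging

    logging.basicConfig(format=DEFAULT_LOG_FORMAT)
    logging.warning('benchmark run starting')
    # Prints something like:
    # (WARNING) 2017-07-12 14:03:07,123 example.<module>:4 benchmark run starting
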
 def _IsBenchmarkEnabled(benchmark_class, possible_browser):
   return (issubclass(benchmark_class, benchmark.Benchmark) and
           decorators.IsBenchmarkEnabled(benchmark_class, possible_browser))


 def PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
   """ Print benchmarks that are not filtered in the same order of benchmarks in
   the |benchmarks| list.

   Args:
     benchmarks: the list of benchmarks to be printed (in the same order of the
       list).
     possible_browser: the possible_browser instance that's used for checking
       which benchmarks are enabled.
     output_pipe: the stream in which benchmarks are printed on.
   """
   if not benchmarks:
     print >> output_pipe, 'No benchmarks found!'
     return

   bad_benchmark = next(
       (b for b in benchmarks if not issubclass(b, benchmark.Benchmark)), None)
   assert bad_benchmark is None, (
       '|benchmarks| param contains non benchmark class: %s' % bad_benchmark)

   # Align the benchmark names to the longest one.
   format_string = ' %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
   disabled_benchmarks = []

   print >> output_pipe, 'Available benchmarks %sare:' % (
       'for %s ' % possible_browser.browser_type if possible_browser else '')

   # Sort the benchmarks by benchmark name.
(...skipping 163 matching lines...)
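The format_string line above uses two rounds of %-interpolation: each '%%'
survives the first substitution as a literal '%', so interpolating the longest
name length (say 17) turns ' %%-%ds %%s' into ' %-17s %s', which left-justifies
every benchmark name to a common column. A short sketch with made-up names:

    names = ['smoothness.top_25', 'speedometer']   # hypothetical names
    width = max(len(n) for n in names)             # 17
    format_string = ' %%-%ds %%s' % width          # ' %-17s %s'
    for name in names:
      print format_string % (name, 'supported')
    # Output:
    #  smoothness.top_25 supported
    #  speedometer       supported
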

   def Run(self, args):
     return min(255, self._benchmark().Run(args))


 def _ScriptName():
   return os.path.basename(sys.argv[0])


 def _MatchingCommands(string, commands):
   return [command for command in commands
           if command.Name().startswith(string)]

 @decorators.Cache
 def _Benchmarks(environment):
   benchmarks = []
   for search_dir in environment.benchmark_dirs:
     benchmarks += discover.DiscoverClasses(search_dir,
                                            environment.top_level_dir,
                                            benchmark.Benchmark,
                                            index_by_class_name=True).values()
   return benchmarks
(...skipping 49 matching lines...)
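_Benchmarks is wrapped in @decorators.Cache, so class discovery runs once per
process and later calls reuse the result. A minimal sketch of that memoization
pattern (illustrative only; telemetry's decorators.Cache may differ in detail):

    import functools

    def Cache(obj):
      """Memoizes a function's return value, keyed on its arguments."""
      cache = {}
      @functools.wraps(obj)
      def Cacher(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        if key not in cache:
          cache[key] = obj(*args, **kwargs)
        return cache[key]
      return Cacher
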
         ...
       }
     }
   """
   # TODO(charliea): Remove this once we have more power perf bots.
   only_run_battor_benchmarks = False
   print 'Environment variables: ', os.environ
   if os.environ.get('BUILDBOT_BUILDERNAME') in GOOD_POWER_PERF_BOT_WHITELIST:
     only_run_battor_benchmarks = True

-  output = {
-      'version': 1,
-      'steps': {
-      }
-  }
+  output = {
+      'version': 1,
+      'steps': {
+      }}
   for benchmark_class in benchmark_classes:
     # Filter out benchmarks in tools/perf/contrib/ directory
     # This is a terrible hack but we should no longer need this
     # _GetJsonBenchmarkList once all the perf bots are moved to swarming
     # (crbug.com/715565)
     if ('contrib' in
         os.path.abspath(sys.modules[benchmark_class.__module__].__file__)):
       continue

     if not _IsBenchmarkEnabled(benchmark_class, possible_browser):
       continue

     base_name = benchmark_class.Name()
     # TODO(charliea): Remove this once we have more power perf bots.
     # Only run battor power benchmarks to reduce the cycle time of this bot.
     # TODO(rnephew): Enable media.* and power.* tests when Mac BattOr issue
     # is solved.
     if only_run_battor_benchmarks and not base_name.startswith('battor'):
       continue
     base_cmd = [sys.executable, os.path.realpath(sys.argv[0]),
                 '-v', '--output-format=chartjson', '--upload-results',
                 base_name]
     perf_dashboard_id = base_name

     device_affinity = bot_utils.GetDeviceAffinity(num_shards, base_name)

-    output['steps'][base_name] = {
-        'cmd': ' '.join(base_cmd + [
-            '--browser=%s' % possible_browser.browser_type]),
-        'device_affinity': device_affinity,
-        'perf_dashboard_id': perf_dashboard_id,
-    }
+    output['steps'][base_name] = {
+        'cmd': ' '.join(base_cmd + [
+            '--browser=%s' % possible_browser.browser_type]),
+        'device_affinity': device_affinity,
+        'perf_dashboard_id': perf_dashboard_id}
     if (possible_reference_browser and
         _IsBenchmarkEnabled(benchmark_class, possible_reference_browser)):
-      output['steps'][base_name + '.reference'] = {
-          'cmd': ' '.join(base_cmd + [
-              '--browser=reference', '--output-trace-tag=_ref']),
-          'device_affinity': device_affinity,
-          'perf_dashboard_id': perf_dashboard_id,
-      }
+      output['steps'][base_name + '.reference'] = {
+          'cmd': ' '.join(base_cmd + [
+              '--browser=reference', '--output-trace-tag=_ref']),
+          'device_affinity': device_affinity,
+          'perf_dashboard_id': perf_dashboard_id}

   return json.dumps(output, indent=2, sort_keys=True)
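For concreteness, a hypothetical slice of the JSON this returns for a single
benchmark (the benchmark name, interpreter path, script path, shard index, and
browser type are invented for illustration; the key layout follows the code
above, with sort_keys=True ordering "steps" before "version"):

    {
      "steps": {
        "battor.steady_state": {
          "cmd": "/usr/bin/python /path/to/run_benchmark -v --output-format=chartjson --upload-results battor.steady_state --browser=release",
          "device_affinity": 3,
          "perf_dashboard_id": "battor.steady_state"
        }
      },
      "version": 1
    }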


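The device_affinity value pins each benchmark to a fixed shard so its results
stay comparable across builds. A plausible hash-based sketch of what
bot_utils.GetDeviceAffinity computes (an assumption; the real implementation
may differ):

    import hashlib

    def GetDeviceAffinity(num_shards, base_name):
      # Stable digest of the benchmark name, reduced modulo the shard count,
      # so a given benchmark always lands on the same device.
      return int(hashlib.sha1(base_name).hexdigest(), 16) % num_shards
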

 def main(environment, extra_commands=None, **log_config_kwargs):
   # The log level is set in browser_options.
   log_config_kwargs.pop('level', None)
   log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
   logging.basicConfig(**log_config_kwargs)
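Everything left in log_config_kwargs after popping 'level' is forwarded to
logging.basicConfig, so an embedder can redirect logs without touching this
file. A sketch of such an entry point, assuming telemetry's ProjectConfig as
the environment object (the paths and the 'run.log' filename are hypothetical):

    import sys

    from telemetry import benchmark_runner
    from telemetry import project_config

    if __name__ == '__main__':
      # 'filename' is passed through to logging.basicConfig.
      config = project_config.ProjectConfig(
          top_level_dir='/path/to/perf',
          benchmark_dirs=['/path/to/perf/benchmarks'])
      sys.exit(benchmark_runner.main(config, filename='run.log'))
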
(...skipping 54 matching lines...)
   command.ProcessCommandLineArgs(parser, options, environment)

   if command == Help:
     command_instance = command(all_commands)
   else:
     command_instance = command()
   if isinstance(command_instance, command_line.OptparseCommand):
     return command_instance.Run(options)
   else:
     return command_instance.Run(options, args)