Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(560)

Side by Side Diff: tools_webrtc/valgrind/chrome_tests.py

Issue 2945753002: Roll chromium_revision b032878ebd..e438353b8b (480186:480311) (Closed)
Patch Set: Updated .gni Created 3 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « tools_webrtc/valgrind/chrome_tests.bat ('k') | tools_webrtc/valgrind/chrome_tests.sh » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 #!/usr/bin/env python
2 # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
3 #
4 # Use of this source code is governed by a BSD-style license
5 # that can be found in the LICENSE file in the root of the source
6 # tree. An additional intellectual property rights grant can be found
7 # in the file PATENTS. All contributing project authors may
8 # be found in the AUTHORS file in the root of the source tree.
9
10 ''' Runs various chrome tests through valgrind_test.py.'''
11
12 import glob
13 import logging
14 import multiprocessing
15 import optparse
16 import os
17 import stat
18 import subprocess
19 import sys
20
21 import logging_utils
22 import path_utils
23
24 import common
25 import valgrind_test
26
# Raised when --test names a test that is not in ChromeTests._test_list.
class TestNotFound(Exception): pass

# Raised when a gtest filter is supplied both via --gtest_filter and via the
# "test:filter" form of --test.
class MultipleGTestFiltersSpecified(Exception): pass

# Raised when no build directory was given and none could be auto-detected.
class BuildDirNotFound(Exception): pass

# Raised when more than one candidate build directory was auto-detected.
class BuildDirAmbiguous(Exception): pass

# Raised when the requested test executable is missing from the build dir.
class ExecutableNotFound(Exception): pass

# Raised when the executable is instrumented with an incompatible tool
# (e.g. an ASan-built binary run under a Valgrind-based tool).
class BadBinary(Exception): pass
38
class ChromeTests:
  """Maps test names to runnable valgrind_test.py invocations."""

  # Tools that run tests much slower than native execution; the generic
  # ".gtest.txt" exclude files are applied only for these
  # (see _AppendGtestFilter).
  SLOW_TOOLS = ["memcheck", "drmemory"]
  # Default number of layout tests run per chunk (see TestLayout).
  LAYOUT_TESTS_DEFAULT_CHUNK_SIZE = 300
42
  def __init__(self, options, args, test):
    """Validates the requested test and locates the build directory.

    Args:
      options: parsed optparse options.
      args: remaining positional command-line arguments.
      test: test name, optionally in "test:gtest_filter" form.

    Raises:
      TestNotFound: if `test` is not a key of _test_list.
      MultipleGTestFiltersSpecified: if a filter was given both ways.
    """
    # "name:filter" embeds a gtest filter directly in the test name.
    if ':' in test:
      (self._test, self._gtest_filter) = test.split(':', 1)
    else:
      self._test = test
      self._gtest_filter = options.gtest_filter

    if self._test not in self._test_list:
      raise TestNotFound("Unknown test: %s" % test)

    if options.gtest_filter and options.gtest_filter != self._gtest_filter:
      raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
                                          "and --test %s" % test)

    self._options = options
    self._args = args

    script_dir = path_utils.ScriptDir()
    # Compute the top of the tree (the "source dir") from the script dir (where
    # this script lives). We assume that the script dir is in tools/valgrind/
    # relative to the top of the tree.
    self._source_dir = os.path.dirname(os.path.dirname(script_dir))
    # since this path is used for string matching, make sure it's always
    # an absolute Unix-style path
    self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
    valgrind_test_script = os.path.join(script_dir, "valgrind_test.py")
    self._command_preamble = ["--source-dir=%s" % (self._source_dir)]

    # Auto-detect the build directory when not given explicitly: probe the
    # known output locations and require exactly one match.
    if not self._options.build_dir:
      dirs = [
        os.path.join(self._source_dir, "xcodebuild", "Debug"),
        os.path.join(self._source_dir, "out", "Debug"),
        os.path.join(self._source_dir, "build", "Debug"),
      ]
      build_dir = [d for d in dirs if os.path.isdir(d)]
      if len(build_dir) > 1:
        raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
                                "%s\nPlease specify just one "
                                "using --build-dir" % ", ".join(build_dir))
      elif build_dir:
        self._options.build_dir = build_dir[0]
      else:
        self._options.build_dir = None

    if self._options.build_dir:
      # NOTE(review): this abspath result is computed but never used — the
      # preamble below passes the original (possibly relative) path. Confirm
      # whether --build-dir was meant to be absolutized here.
      build_dir = os.path.abspath(self._options.build_dir)
      self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
90
91 def _EnsureBuildDirFound(self):
92 if not self._options.build_dir:
93 raise BuildDirNotFound("Oops, couldn't find a build dir, please "
94 "specify it manually using --build-dir")
95
96 def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
97 '''Generates the default command array that most tests will use.'''
98 if exe and common.IsWindows():
99 exe += '.exe'
100
101 cmd = list(self._command_preamble)
102
103 # Find all suppressions matching the following pattern:
104 # tools/valgrind/TOOL/suppressions[_PLATFORM].txt
105 # and list them with --suppressions= prefix.
106 script_dir = path_utils.ScriptDir()
107 tool_name = tool.ToolName();
108 suppression_file = os.path.join(script_dir, tool_name, "suppressions.txt")
109 if os.path.exists(suppression_file):
110 cmd.append("--suppressions=%s" % suppression_file)
111 # Platform-specific suppression
112 for platform in common.PlatformNames():
113 platform_suppression_file = \
114 os.path.join(script_dir, tool_name, 'suppressions_%s.txt' % platform)
115 if os.path.exists(platform_suppression_file):
116 cmd.append("--suppressions=%s" % platform_suppression_file)
117
118 if tool_name == "drmemory":
119 if self._options.drmemory_ops:
120 # prepending " " to avoid Dr. Memory's option confusing optparse
121 cmd += ["--drmemory_ops", " " + self._options.drmemory_ops]
122
123 if self._options.valgrind_tool_flags:
124 cmd += self._options.valgrind_tool_flags.split(" ")
125 if self._options.keep_logs:
126 cmd += ["--keep_logs"]
127 if valgrind_test_args != None:
128 for arg in valgrind_test_args:
129 cmd.append(arg)
130 if exe:
131 self._EnsureBuildDirFound()
132 exe_path = os.path.join(self._options.build_dir, exe)
133 if not os.path.exists(exe_path):
134 raise ExecutableNotFound("Couldn't find '%s'" % exe_path)
135
136 # Make sure we don't try to test ASan-built binaries
137 # with other dynamic instrumentation-based tools.
138 # TODO(timurrrr): also check TSan and MSan?
139 # `nm` might not be available, so use try-except.
140 try:
141 # Do not perform this check on OS X, as 'nm' on 10.6 can't handle
142 # binaries built with Clang 3.5+.
143 if not common.IsMac():
144 nm_output = subprocess.check_output(["nm", exe_path])
145 if nm_output.find("__asan_init") != -1:
146 raise BadBinary("You're trying to run an executable instrumented "
147 "with AddressSanitizer under %s. Please provide "
148 "an uninstrumented executable." % tool_name)
149 except OSError:
150 pass
151
152 cmd.append(exe_path)
153 # Valgrind runs tests slowly, so slow tests hurt more; show elapased time
154 # so we can find the slowpokes.
155 cmd.append("--gtest_print_time")
156 # Built-in test launcher for gtest-based executables runs tests using
157 # multiple process by default. Force the single-process mode back.
158 cmd.append("--single-process-tests")
159 if self._options.gtest_repeat:
160 cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
161 if self._options.gtest_shuffle:
162 cmd.append("--gtest_shuffle")
163 if self._options.gtest_break_on_failure:
164 cmd.append("--gtest_break_on_failure")
165 if self._options.test_launcher_bot_mode:
166 cmd.append("--test-launcher-bot-mode")
167 if self._options.test_launcher_total_shards is not None:
168 cmd.append("--test-launcher-total-shards=%d"
169 % self._options.test_launcher_total_shards)
170 if self._options.test_launcher_shard_index is not None:
171 cmd.append("--test-launcher-shard-index=%d"
172 % self._options.test_launcher_shard_index)
173 return cmd
174
175 def Run(self):
176 ''' Runs the test specified by command-line argument --test '''
177 logging.info("running test %s" % (self._test))
178 return self._test_list[self._test](self)
179
180 def _AppendGtestFilter(self, tool, name, cmd):
181 '''Append an appropriate --gtest_filter flag to the googletest binary
182 invocation.
183 If the user passed their own filter mentioning only one test, just use
184 it. Otherwise, filter out tests listed in the appropriate gtest_exclude
185 files.
186 '''
187 if (self._gtest_filter and
188 ":" not in self._gtest_filter and
189 "?" not in self._gtest_filter and
190 "*" not in self._gtest_filter):
191 cmd.append("--gtest_filter=%s" % self._gtest_filter)
192 return
193
194 filters = []
195 gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
196
197 gtest_filter_files = [
198 os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
199 # Use ".gtest.txt" files only for slow tools, as they now contain
200 # Valgrind- and Dr.Memory-specific filters.
201 # TODO(glider): rename the files to ".gtest_slow.txt"
202 if tool.ToolName() in ChromeTests.SLOW_TOOLS:
203 gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
204 for platform_suffix in common.PlatformNames():
205 gtest_filter_files += [
206 os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
207 os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
208 (tool.ToolName(), platform_suffix))]
209 logging.info("Reading gtest exclude filter files:")
210 for filename in gtest_filter_files:
211 # strip the leading absolute path (may be very long on the bot)
212 # and the following / or \.
213 readable_filename = filename.replace("\\", "/") # '\' on Windows
214 readable_filename = readable_filename.replace(self._source_dir, "")[1:]
215 if not os.path.exists(filename):
216 logging.info(" \"%s\" - not found" % readable_filename)
217 continue
218 logging.info(" \"%s\" - OK" % readable_filename)
219 f = open(filename, 'r')
220 for line in f.readlines():
221 if line.startswith("#") or line.startswith("//") or line.isspace():
222 continue
223 line = line.rstrip()
224 test_prefixes = ["FLAKY", "FAILS"]
225 for p in test_prefixes:
226 # Strip prefixes from the test names.
227 line = line.replace(".%s_" % p, ".")
228 # Exclude the original test name.
229 filters.append(line)
230 if line[-2:] != ".*":
231 # List all possible prefixes if line doesn't end with ".*".
232 for p in test_prefixes:
233 filters.append(line.replace(".", ".%s_" % p))
234 # Get rid of duplicates.
235 filters = set(filters)
236 gtest_filter = self._gtest_filter
237 if len(filters):
238 if gtest_filter:
239 gtest_filter += ":"
240 if gtest_filter.find("-") < 0:
241 gtest_filter += "-"
242 else:
243 gtest_filter = "-"
244 gtest_filter += ":".join(filters)
245 if gtest_filter:
246 cmd.append("--gtest_filter=%s" % gtest_filter)
247
248 @staticmethod
249 def ShowTests():
250 test_to_names = {}
251 for name, test_function in ChromeTests._test_list.iteritems():
252 test_to_names.setdefault(test_function, []).append(name)
253
254 name_to_aliases = {}
255 for names in test_to_names.itervalues():
256 names.sort(key=lambda name: len(name))
257 name_to_aliases[names[0]] = names[1:]
258
259 print
260 print "Available tests:"
261 print "----------------"
262 for name, aliases in sorted(name_to_aliases.iteritems()):
263 if aliases:
264 print " {} (aka {})".format(name, ', '.join(aliases))
265 else:
266 print " {}".format(name)
267
268 def SetupLdPath(self, requires_build_dir):
269 if requires_build_dir:
270 self._EnsureBuildDirFound()
271 elif not self._options.build_dir:
272 return
273
274 # Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
275 if (os.getenv("LD_LIBRARY_PATH")):
276 os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
277 self._options.build_dir))
278 else:
279 os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
280
281 def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
282 tool = valgrind_test.CreateTool(self._options.valgrind_tool)
283 cmd = self._DefaultCommand(tool, name, valgrind_test_args)
284 self._AppendGtestFilter(tool, name, cmd)
285 cmd.extend(['--test-tiny-timeout=1000'])
286 if cmd_args:
287 cmd.extend(cmd_args)
288
289 self.SetupLdPath(True)
290 return tool.Run(cmd, module)
291
292 def RunCmdLine(self):
293 tool = valgrind_test.CreateTool(self._options.valgrind_tool)
294 cmd = self._DefaultCommand(tool, None, self._args)
295 self.SetupLdPath(False)
296 return tool.Run(cmd, None)
297
  # -------------------------------------------------------------------------
  # Per-suite entry points. Each runs one gtest binary through SimpleTest();
  # the first argument is the gtest_exclude file-name prefix, the second is
  # the executable name inside the build directory.
  # -------------------------------------------------------------------------

  def TestAccessibility(self):
    return self.SimpleTest("accessibility", "accessibility_unittests")

  def TestAddressInput(self):
    return self.SimpleTest("addressinput", "libaddressinput_unittests")

  def TestAngle(self):
    return self.SimpleTest("angle", "angle_unittests")

  def TestAppList(self):
    return self.SimpleTest("app_list", "app_list_unittests")

  def TestAsh(self):
    return self.SimpleTest("ash", "ash_unittests")

  def TestAura(self):
    return self.SimpleTest("aura", "aura_unittests")

  def TestBase(self):
    return self.SimpleTest("base", "base_unittests")

  def TestBlinkHeap(self):
    return self.SimpleTest("blink_heap", "blink_heap_unittests")

  def TestBlinkPlatform(self):
    return self.SimpleTest("blink_platform", "blink_platform_unittests")

  def TestCacheInvalidation(self):
    return self.SimpleTest("cacheinvalidation", "cacheinvalidation_unittests")

  def TestCast(self):
    return self.SimpleTest("chrome", "cast_unittests")

  def TestCC(self):
    return self.SimpleTest("cc", "cc_unittests",
                           cmd_args=[
                               "--cc-layer-tree-test-long-timeout"])

  def TestChromeApp(self):
    return self.SimpleTest("chrome_app", "chrome_app_unittests")

  def TestChromeElf(self):
    return self.SimpleTest("chrome_elf", "chrome_elf_unittests")

  def TestChromeDriver(self):
    return self.SimpleTest("chromedriver", "chromedriver_unittests")

  def TestChromeOS(self):
    return self.SimpleTest("chromeos", "chromeos_unittests")

  def TestComponents(self):
    return self.SimpleTest("components", "components_unittests")

  def TestCompositor(self):
    return self.SimpleTest("compositor", "compositor_unittests")

  def TestContent(self):
    return self.SimpleTest("content", "content_unittests")

  def TestCourgette(self):
    return self.SimpleTest("courgette", "courgette_unittests")

  def TestCrypto(self):
    return self.SimpleTest("crypto", "crypto_unittests")

  def TestDevice(self):
    return self.SimpleTest("device", "device_unittests")

  def TestDisplay(self):
    return self.SimpleTest("display", "display_unittests")

  def TestEvents(self):
    return self.SimpleTest("events", "events_unittests")

  def TestExtensions(self):
    return self.SimpleTest("extensions", "extensions_unittests")

  def TestFFmpegRegressions(self):
    return self.SimpleTest("chrome", "ffmpeg_regression_tests")

  def TestGCM(self):
    return self.SimpleTest("gcm", "gcm_unit_tests")

  def TestGfx(self):
    return self.SimpleTest("gfx", "gfx_unittests")

  def TestGin(self):
    return self.SimpleTest("gin", "gin_unittests")

  def TestGoogleApis(self):
    return self.SimpleTest("google_apis", "google_apis_unittests")

  def TestGPU(self):
    return self.SimpleTest("gpu", "gpu_unittests")

  def TestIpc(self):
    # --trace_children: ipc_tests spawns child processes that must also run
    # under the tool.
    return self.SimpleTest("ipc", "ipc_tests",
                           valgrind_test_args=["--trace_children"])

  def TestInstallerUtil(self):
    return self.SimpleTest("installer_util", "installer_util_unittests")

  def TestInstallStatic(self):
    return self.SimpleTest("install_static", "install_static_unittests")

  def TestJingle(self):
    return self.SimpleTest("chrome", "jingle_unittests")

  def TestKeyboard(self):
    return self.SimpleTest("keyboard", "keyboard_unittests")

  def TestLatency(self):
    return self.SimpleTest("latency", "latency_unittests")

  def TestMedia(self):
    return self.SimpleTest("chrome", "media_unittests")

  def TestMessageCenter(self):
    return self.SimpleTest("message_center", "message_center_unittests")

  def TestMidi(self):
    return self.SimpleTest("chrome", "midi_unittests")

  def TestMojoCommon(self):
    return self.SimpleTest("mojo_common", "mojo_common_unittests")

  def TestMojoPublicBindings(self):
    return self.SimpleTest("mojo_public_bindings",
                           "mojo_public_bindings_unittests")

  def TestMojoPublicSystem(self):
    return self.SimpleTest("mojo_public_system",
                           "mojo_public_system_unittests")

  def TestMojoPublicSysPerf(self):
    return self.SimpleTest("mojo_public_sysperf",
                           "mojo_public_system_perftests")

  def TestMojoSystem(self):
    return self.SimpleTest("mojo_system", "mojo_system_unittests")

  def TestNet(self):
    return self.SimpleTest("net", "net_unittests")

  def TestNetPerf(self):
    return self.SimpleTest("net", "net_perftests")

  def TestPhoneNumber(self):
    return self.SimpleTest("phonenumber", "libphonenumber_unittests")

  def TestPPAPI(self):
    return self.SimpleTest("chrome", "ppapi_unittests")

  def TestPrinting(self):
    return self.SimpleTest("chrome", "printing_unittests")

  def TestRemoting(self):
    # Remoting tests drive UI actions, so they get longer UI-action timeouts.
    return self.SimpleTest("chrome", "remoting_unittests",
                           cmd_args=[
                               "--ui-test-action-timeout=60000",
                               "--ui-test-action-max-timeout=150000"])

  def TestSkia(self):
    return self.SimpleTest("skia", "skia_unittests")

  def TestSql(self):
    return self.SimpleTest("chrome", "sql_unittests")

  def TestStorage(self):
    return self.SimpleTest("storage", "storage_unittests")

  def TestLinuxSandbox(self):
    return self.SimpleTest("sandbox", "sandbox_linux_unittests")

  def TestUnit(self):
    # http://crbug.com/51716
    # Disabling all unit tests
    # Problems reappeared after r119922
    if common.IsMac() and (self._options.valgrind_tool == "memcheck"):
      logging.warning("unit_tests are disabled for memcheck on MacOS.")
      return 0;
    return self.SimpleTest("chrome", "unit_tests")

  def TestUIBaseUnit(self):
    return self.SimpleTest("chrome", "ui_base_unittests")

  def TestUIChromeOS(self):
    return self.SimpleTest("chrome", "ui_chromeos_unittests")

  def TestURL(self):
    return self.SimpleTest("chrome", "url_unittests")

  def TestViews(self):
    return self.SimpleTest("views", "views_unittests")
492
493
  # Shared argument sets for the browser-level suites below.
  # Valgrind timeouts are in seconds.
  UI_VALGRIND_ARGS = ["--timeout=14400", "--trace_children", "--indirect"]
  # UI test timeouts are in milliseconds.
  UI_TEST_ARGS = ["--ui-test-action-timeout=60000",
                  "--ui-test-action-max-timeout=150000",
                  "--no-sandbox"]

  # TODO(thestig) fine-tune these values.
  # Valgrind timeouts are in seconds.
  BROWSER_VALGRIND_ARGS = ["--timeout=50000", "--trace_children", "--indirect"]
  # Browser test timeouts are in milliseconds.
  BROWSER_TEST_ARGS = ["--ui-test-action-timeout=400000",
                       "--ui-test-action-max-timeout=800000",
                       "--no-sandbox"]

  def TestBrowser(self):
    return self.SimpleTest("chrome", "browser_tests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestContentBrowser(self):
    return self.SimpleTest("content", "content_browsertests",
                           valgrind_test_args=self.BROWSER_VALGRIND_ARGS,
                           cmd_args=self.BROWSER_TEST_ARGS)

  def TestInteractiveUI(self):
    return self.SimpleTest("chrome", "interactive_ui_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=self.UI_TEST_ARGS)

  def TestSyncIntegration(self):
    return self.SimpleTest("chrome", "sync_integration_tests",
                           valgrind_test_args=self.UI_VALGRIND_ARGS,
                           cmd_args=(["--ui-test-action-max-timeout=450000"]))
528
  def TestLayoutChunk(self, chunk_num, chunk_size):
    """Run one chunk of the webkit layout tests under the selected tool.

    Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size) from the
    list of tests, wrapping around to the beginning of the list at the end.
    If chunk_size is zero, runs all tests in the list once. If a text file
    is given as argument, it is used as the list of tests.

    Returns the tool's exit status.
    """
    # Exactly one of (chunk mode, explicit args) must be in effect.
    assert((chunk_size == 0) != (len(self._args) == 0))
    # Build the ginormous commandline in 'cmd'.
    # It's going to be roughly
    #  python valgrind_test.py ...
    # but we'll use the --indirect flag to valgrind_test.py
    # to avoid valgrinding python.
    # Start by building the valgrind_test.py commandline.
    tool = valgrind_test.CreateTool(self._options.valgrind_tool)
    cmd = self._DefaultCommand(tool)
    cmd.append("--trace_children")
    cmd.append("--indirect_webkit_layout")
    cmd.append("--ignore_exit_code")
    # Now build script_cmd, the run-webkits-tests commandline.
    # Store each chunk in its own directory so that we can find the data later
    chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
    out_dir = os.path.join(path_utils.ScriptDir(), "latest")
    out_dir = os.path.join(out_dir, chunk_dir)
    # Reuse the output directory if it exists, clearing old result files.
    if os.path.exists(out_dir):
      old_files = glob.glob(os.path.join(out_dir, "*.txt"))
      for f in old_files:
        os.remove(f)
    else:
      os.makedirs(out_dir)
    script = os.path.join(self._source_dir, "third_party", "WebKit", "Tools",
                          "Scripts", "run-webkit-tests")
    # http://crbug.com/260627: After the switch to content_shell from DRT, each
    # test now brings up 3 processes.  Under Valgrind, they become memory bound
    # and can eventually OOM if we don't reduce the total count.
    # It'd be nice if content_shell automatically throttled the startup of new
    # tests if we're low on memory.
    jobs = max(1, int(multiprocessing.cpu_count() * 0.3))
    script_cmd = ["python", script, "-v",
                  # run a separate DumpRenderTree for each test
                  "--batch-size=1",
                  "--fully-parallel",
                  "--child-processes=%d" % jobs,
                  "--time-out-ms=800000",
                  "--no-retry-failures",  # retrying takes too much time
                  # http://crbug.com/176908: Don't launch a browser when done.
                  "--no-show-results",
                  "--nocheck-sys-deps",
                  "--additional-driver-flag=--no-sandbox"]
    # Pass build mode to run-webkit-tests.  We aren't passed it directly,
    # so parse it out of build_dir.  run-webkit-tests can only handle
    # the two values "Release" and "Debug".
    # TODO(Hercules): unify how all our scripts pass around build mode
    # (--mode / --target / --build-dir / --debug)
    if self._options.build_dir:
      build_root, mode = os.path.split(self._options.build_dir)
      script_cmd.extend(["--build-directory", build_root, "--target", mode])
    if (chunk_size > 0):
      script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
    if len(self._args):
      # if the arg is a txt file, then treat it as a list of tests
      if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
        script_cmd.append("--test-list=%s" % self._args[0])
      else:
        script_cmd.extend(self._args)
    self._AppendGtestFilter(tool, "layout", script_cmd)
    # Now run script_cmd with the wrapper in cmd
    cmd.extend(["--"])
    cmd.extend(script_cmd)

    # Layout tests often times fail quickly, but the buildbot remains green.
    # Detect this situation when running with the default chunk size.
    if chunk_size == self.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE:
      min_runtime_in_seconds=120
    else:
      min_runtime_in_seconds=0
    ret = tool.Run(cmd, "layout", min_runtime_in_seconds=min_runtime_in_seconds)
    return ret
605
606
607 def TestLayout(self):
608 # A "chunk file" is maintained in the local directory so that each test
609 # runs a slice of the layout tests of size chunk_size that increments with
610 # each run. Since tests can be added and removed from the layout tests at
611 # any time, this is not going to give exact coverage, but it will allow us
612 # to continuously run small slices of the layout tests under valgrind rather
613 # than having to run all of them in one shot.
614 chunk_size = self._options.num_tests
615 if chunk_size == 0 or len(self._args):
616 return self.TestLayoutChunk(0, 0)
617 chunk_num = 0
618 chunk_file = os.path.join("valgrind_layout_chunk.txt")
619 logging.info("Reading state from " + chunk_file)
620 try:
621 f = open(chunk_file)
622 if f:
623 chunk_str = f.read()
624 if len(chunk_str):
625 chunk_num = int(chunk_str)
626 # This should be enough so that we have a couple of complete runs
627 # of test data stored in the archive (although note that when we loop
628 # that we almost guaranteed won't be at the end of the test list)
629 if chunk_num > 10000:
630 chunk_num = 0
631 f.close()
632 except IOError, (errno, strerror):
633 logging.error("error reading from file %s (%d, %s)" % (chunk_file,
634 errno, strerror))
635 # Save the new chunk size before running the tests. Otherwise if a
636 # particular chunk hangs the bot, the chunk number will never get
637 # incremented and the bot will be wedged.
638 logging.info("Saving state to " + chunk_file)
639 try:
640 f = open(chunk_file, "w")
641 chunk_num += 1
642 f.write("%d" % chunk_num)
643 f.close()
644 except IOError, (errno, strerror):
645 logging.error("error writing to file %s (%d, %s)" % (chunk_file, errno,
646 strerror))
647 # Since we're running small chunks of the layout tests, it's important to
648 # mark the ones that have errors in them. These won't be visible in the
649 # summary list for long, but will be useful for someone reviewing this bot.
650 return self.TestLayoutChunk(chunk_num, chunk_size)
651
  # The known list of tests.
  # Recognise the original abbreviations as well as full executable names.
  # Maps every accepted --test name (short alias or executable name) to the
  # bound method that runs it; __init__ validates against this table and
  # Run() dispatches through it.
  _test_list = {
    "cmdline" : RunCmdLine,
    "addressinput": TestAddressInput,
    "libaddressinput_unittests": TestAddressInput,
    "accessibility": TestAccessibility,
    "angle": TestAngle,          "angle_unittests": TestAngle,
    "app_list": TestAppList,     "app_list_unittests": TestAppList,
    "ash": TestAsh,              "ash_unittests": TestAsh,
    "aura": TestAura,            "aura_unittests": TestAura,
    "base": TestBase,            "base_unittests": TestBase,
    "blink_heap": TestBlinkHeap,
    "blink_platform": TestBlinkPlatform,
    "browser": TestBrowser,      "browser_tests": TestBrowser,
    "cacheinvalidation": TestCacheInvalidation,
    "cacheinvalidation_unittests": TestCacheInvalidation,
    "cast": TestCast,            "cast_unittests": TestCast,
    "cc": TestCC,                "cc_unittests": TestCC,
    "chrome_app": TestChromeApp,
    "chrome_elf": TestChromeElf,
    "chromedriver": TestChromeDriver,
    "chromeos": TestChromeOS,    "chromeos_unittests": TestChromeOS,
    "components": TestComponents,"components_unittests": TestComponents,
    "compositor": TestCompositor,"compositor_unittests": TestCompositor,
    "content": TestContent,      "content_unittests": TestContent,
    "content_browsertests": TestContentBrowser,
    "courgette": TestCourgette,  "courgette_unittests": TestCourgette,
    "crypto": TestCrypto,        "crypto_unittests": TestCrypto,
    "device": TestDevice,        "device_unittests": TestDevice,
    "display": TestDisplay,      "display_unittests": TestDisplay,
    "events": TestEvents,        "events_unittests": TestEvents,
    "extensions": TestExtensions, "extensions_unittests": TestExtensions,
    "ffmpeg_regression_tests": TestFFmpegRegressions,
    "gcm": TestGCM,              "gcm_unit_tests": TestGCM,
    "gin": TestGin,              "gin_unittests": TestGin,
    "gfx": TestGfx,              "gfx_unittests": TestGfx,
    "google_apis": TestGoogleApis,
    "gpu": TestGPU,              "gpu_unittests": TestGPU,
    "ipc": TestIpc,              "ipc_tests": TestIpc,
    "installer_util": TestInstallerUtil,
    "installer_util_unittests": TestInstallerUtil,
    "install_static_unittests": TestInstallStatic,
    "interactive_ui": TestInteractiveUI,
    "jingle": TestJingle,        "jingle_unittests": TestJingle,
    "keyboard": TestKeyboard,    "keyboard_unittests": TestKeyboard,
    "latency": TestLatency,      "latency_unittests": TestLatency,
    "layout": TestLayout,        "layout_tests": TestLayout,
    "media": TestMedia,          "media_unittests": TestMedia,
    "message_center": TestMessageCenter,
    "message_center_unittests" : TestMessageCenter,
    "midi": TestMidi,            "midi_unittests": TestMidi,
    "mojo_common": TestMojoCommon,
    "mojo_common_unittests": TestMojoCommon,
    "mojo_system": TestMojoSystem,
    "mojo_system_unittests": TestMojoSystem,
    "mojo_public_system": TestMojoPublicSystem,
    "mojo_public_system_unittests": TestMojoPublicSystem,
    "mojo_public_bindings": TestMojoPublicBindings,
    "mojo_public_bindings_unittests": TestMojoPublicBindings,
    "mojo_public_sysperf": TestMojoPublicSysPerf,
    "net": TestNet,              "net_unittests": TestNet,
    "net_perf": TestNetPerf,     "net_perftests": TestNetPerf,
    "phonenumber": TestPhoneNumber,
    "libphonenumber_unittests": TestPhoneNumber,
    "ppapi": TestPPAPI,          "ppapi_unittests": TestPPAPI,
    "printing": TestPrinting,    "printing_unittests": TestPrinting,
    "remoting": TestRemoting,    "remoting_unittests": TestRemoting,
    "sandbox": TestLinuxSandbox, "sandbox_linux_unittests": TestLinuxSandbox,
    "skia": TestSkia,            "skia_unittests": TestSkia,
    "sql": TestSql,              "sql_unittests": TestSql,
    "storage": TestStorage,      "storage_unittests": TestStorage,
    "sync_integration_tests": TestSyncIntegration,
    "sync_integration": TestSyncIntegration,
    "ui_base_unit": TestUIBaseUnit, "ui_base_unittests": TestUIBaseUnit,
    "ui_chromeos": TestUIChromeOS, "ui_chromeos_unittests": TestUIChromeOS,
    "unit": TestUnit,            "unit_tests": TestUnit,
    "url": TestURL,              "url_unittests": TestURL,
    "views": TestViews,          "views_unittests": TestViews,
    "webkit": TestLayout,
  }
733
734
735 def _main():
736 parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
737 "[-t <test> ...]")
738
739 parser.add_option("--help-tests", dest="help_tests", action="store_true",
740 default=False, help="List all available tests")
741 parser.add_option("-b", "--build-dir",
742 help="the location of the compiler output")
743 parser.add_option("--target", help="Debug or Release")
744 parser.add_option("-t", "--test", action="append", default=[],
745 help="which test to run, supports test:gtest_filter format "
746 "as well.")
747 parser.add_option("--baseline", action="store_true", default=False,
748 help="generate baseline data instead of validating")
749 parser.add_option("-f", "--force", action="store_true", default=False,
750 help="run a broken test anyway")
751 parser.add_option("--gtest_filter",
752 help="additional arguments to --gtest_filter")
753 parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
754 parser.add_option("--gtest_shuffle", action="store_true", default=False,
755 help="Randomize tests' orders on every iteration.")
756 parser.add_option("--gtest_break_on_failure", action="store_true",
757 default=False,
758 help="Drop in to debugger on assertion failure. Also "
759 "useful for forcing tests to exit with a stack dump "
760 "on the first assertion failure when running with "
761 "--gtest_repeat=-1")
762 parser.add_option("-v", "--verbose", action="store_true", default=False,
763 help="verbose output - enable debug log messages")
764 parser.add_option("--tool", dest="valgrind_tool", default="memcheck",
765 help="specify a valgrind tool to run the tests under")
766 parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
767 help="specify custom flags for the selected valgrind tool")
768 parser.add_option("--keep_logs", action="store_true", default=False,
769 help="store memory tool logs in the <tool>.logs directory "
770 "instead of /tmp.\nThis can be useful for tool "
771 "developers/maintainers.\nPlease note that the <tool>"
772 ".logs directory will be clobbered on tool startup.")
773 parser.add_option("-n", "--num_tests", type="int",
774 default=ChromeTests.LAYOUT_TESTS_DEFAULT_CHUNK_SIZE,
775 help="for layout tests: # of subtests per run. 0 for all.")
776 parser.add_option("--test-launcher-bot-mode", action="store_true",
777 help="run the tests with --test-launcher-bot-mode")
778 parser.add_option("--test-launcher-total-shards", type=int,
779 help="run the tests with --test-launcher-total-shards")
780 parser.add_option("--test-launcher-shard-index", type=int,
781 help="run the tests with --test-launcher-shard-index")
782 parser.add_option("--drmemory_ops",
783 help="extra options passed to Dr. Memory")
784
785 options, args = parser.parse_args()
786
787 # Bake target into build_dir.
788 if options.target and options.build_dir:
789 assert (options.target !=
790 os.path.basename(os.path.dirname(options.build_dir)))
791 options.build_dir = os.path.join(os.path.abspath(options.build_dir),
792 options.target)
793
794 if options.verbose:
795 logging_utils.config_root(logging.DEBUG)
796 else:
797 logging_utils.config_root()
798
799 if options.help_tests:
800 ChromeTests.ShowTests()
801 return 0
802
803 if not options.test:
804 parser.error("--test not specified")
805
806 if len(options.test) != 1 and options.gtest_filter:
807 parser.error("--gtest_filter and multiple tests don't make sense together")
808
809 BROKEN_TESTS = {
810 'drmemory_light': [
811 'addressinput',
812 'aura',
813 'base_unittests',
814 'cc',
815 'components', # x64 only?
816 'content',
817 'gfx',
818 'mojo_public_bindings',
819 ],
820 'drmemory_full': [
821 'addressinput',
822 'aura',
823 'base_unittests',
824 'blink_heap',
825 'blink_platform',
826 'browser_tests',
827 'cast',
828 'cc',
829 'chromedriver',
830 'compositor',
831 'content',
832 'content_browsertests',
833 'device',
834 'events',
835 'extensions',
836 'gfx',
837 'google_apis',
838 'gpu',
839 'ipc_tests',
840 'jingle',
841 'keyboard',
842 'media',
843 'midi',
844 'mojo_common',
845 'mojo_public_bindings',
846 'mojo_public_sysperf',
847 'mojo_public_system',
848 'mojo_system',
849 'net',
850 'remoting',
851 'unit',
852 'url',
853 ],
854 }
855
856 for t in options.test:
857 if t in BROKEN_TESTS[options.valgrind_tool] and not options.force:
858 logging.info("Skipping broken %s test %s -- see crbug.com/633693" %
859 (options.valgrind_tool, t))
860 return 0
861
862 tests = ChromeTests(options, args, t)
863 ret = tests.Run()
864 if ret: return ret
865 return 0
866
867
if __name__ == "__main__":
  # Propagate _main()'s return value as the process exit status.
  sys.exit(_main())
OLDNEW
« no previous file with comments | « tools_webrtc/valgrind/chrome_tests.bat ('k') | tools_webrtc/valgrind/chrome_tests.sh » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698