OLD | NEW |
(Empty) | |
| 1 # Copyright (c) 2016 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. |
| 4 |
| 5 import json |
| 6 import os |
| 7 import re |
| 8 import tempfile |
| 9 |
| 10 |
# These labels should match the ones output by gtest's JSON.
# XCTestLogParser.TEST_STATUS_MAP translates the parser's internal statuses
# ('OK', 'failed', 'timeout', 'warning') onto a subset of these labels;
# TriesForTest() falls back to TEST_UNKNOWN_LABEL for unmapped statuses.
TEST_UNKNOWN_LABEL = 'UNKNOWN'
TEST_SUCCESS_LABEL = 'SUCCESS'
TEST_FAILURE_LABEL = 'FAILURE'
TEST_CRASH_LABEL = 'CRASH'
TEST_TIMEOUT_LABEL = 'TIMEOUT'
TEST_WARNING_LABEL = 'WARNING'
| 18 |
| 19 |
class XCTestLogParser(object):
  """This helper class processes XCTest test output.

  Feed log lines one at a time to ProcessLine(); query the accumulated
  per-test results afterwards with PassedTests(), FailedTests(),
  FailureDescription(), etc.
  """

  def __init__(self):
    # State tracking for log parsing.
    self.completed = False
    self._current_test = ''
    self._failure_description = []
    self._current_report_hash = ''
    self._current_report = []
    self._parsing_failures = False

    # Line number currently being processed; used in parse-error messages.
    self._line_number = 0

    # List of parsing errors, as human-readable strings.
    self._internal_error_lines = []

    # Tests are stored here as 'test.name': (status, [description]).
    # The status should be one of ('started', 'OK', 'failed', 'timeout',
    # 'warning'). Warning indicates that a test did not pass when run in
    # parallel with other tests but passed when run alone. The description is
    # a list of lines detailing the test's error, as reported in the log.
    self._test_status = {}

    # This may be either text or a number. It will be used in the phrase
    # '%s disabled' or '%s flaky' on the waterfall display.
    self._disabled_tests = 0
    self._flaky_tests = 0

    # Matches an Objective-C style test name: '-[TestCaseClass testMethod]'.
    # All fragments are raw strings: the previous non-raw fragments relied on
    # invalid escape sequences (e.g. '\s'), which trigger
    # DeprecationWarning/SyntaxWarning on modern Python. Match behavior is
    # unchanged ('\'' in a regex matches a literal quote).
    test_name_regexp = r'\-\[(\w+)\s(\w+)\]'
    self._test_name = re.compile(test_name_regexp)
    self._test_start = re.compile(
        r'Test Case \'' + test_name_regexp + r'\' started\.')
    # NOTE: in the two patterns below, the trailing '\)?.' makes the closing
    # paren optional and then matches any single character, so slight
    # variations of the "(1.23 seconds)." suffix are tolerated.
    self._test_ok = re.compile(
        r'Test Case \'' + test_name_regexp +
        r'\' passed\s+\(\d+\.\d+\s+seconds\)?.')
    self._test_fail = re.compile(
        r'Test Case \'' + test_name_regexp +
        r'\' failed\s+\(\d+\.\d+\s+seconds\)?.')
    self._test_passed = re.compile(r'\*\*\s+TEST\s+EXECUTE\s+SUCCEEDED\s+\*\*')
    self._retry_message = re.compile('RETRYING FAILED TESTS:')
    self.retrying_failed = False

    # Maps this parser's internal statuses to the module-level gtest labels.
    self.TEST_STATUS_MAP = {
        'OK': TEST_SUCCESS_LABEL,
        'failed': TEST_FAILURE_LABEL,
        'timeout': TEST_TIMEOUT_LABEL,
        'warning': TEST_WARNING_LABEL
    }

  def GetCurrentTest(self):
    """Returns the name of the test currently being parsed, or ''."""
    return self._current_test

  def _StatusOfTest(self, test):
    """Returns the status code for the given test, or 'not known'."""
    test_status = self._test_status.get(test, ('not known', []))
    return test_status[0]

  def _TestsByStatus(self, status, include_fails, include_flaky):
    """Returns list of tests with the given status.

    Args:
      status: one of the internal status strings ('started', 'OK', 'failed',
          'timeout', 'warning').
      include_fails: If False, tests containing 'FAILS_' anywhere in their
          names will be excluded from the list.
      include_flaky: If False, tests containing 'FLAKY_' anywhere in their
          names will be excluded from the list.
    """
    test_list = [name for name in self._test_status
                 if self._StatusOfTest(name) == status]

    if not include_fails:
      test_list = [name for name in test_list if 'FAILS_' not in name]
    if not include_flaky:
      test_list = [name for name in test_list if 'FLAKY_' not in name]

    return test_list

  def _RecordError(self, line, reason):
    """Records a log line that produced a parsing error.

    Args:
      line: text of the line at which the error occurred.
      reason: a string describing the error.
    """
    self._internal_error_lines.append('%s: %s [%s]' %
                                      (self._line_number, line.strip(), reason))

  def RunningTests(self):
    """Returns list of tests that appear to be currently running."""
    return self._TestsByStatus('started', True, True)

  def ParsingErrors(self):
    """Returns a list of lines that have caused parsing errors."""
    return self._internal_error_lines

  def ClearParsingErrors(self):
    """Clears the currently stored parsing errors."""
    self._internal_error_lines = ['Cleared.']

  def PassedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that passed."""
    return self._TestsByStatus('OK', include_fails, include_flaky)

  def FailedTests(self, include_fails=False, include_flaky=False):
    """Returns list of tests that failed, timed out, or didn't finish
    (crashed).

    This list will be incorrect until the complete log has been processed,
    because it will show currently running tests as having failed.

    Args:
      include_fails: If true, all failing tests with FAILS_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
      include_flaky: If true, all failing tests with FLAKY_ in their names will
          be included. Otherwise, they will only be included if they crashed or
          timed out.
    """
    return (self._TestsByStatus('failed', include_fails, include_flaky) +
            self._TestsByStatus('timeout', True, True) +
            self._TestsByStatus('warning', include_fails, include_flaky) +
            self.RunningTests())

  def TriesForTest(self, test):
    """Returns a list containing the state for all tries of the given test.

    This parser doesn't support retries so a single result is returned.
    """
    return [self.TEST_STATUS_MAP.get(self._StatusOfTest(test),
                                     TEST_UNKNOWN_LABEL)]

  def FailureDescription(self, test):
    """Returns a list containing the failure description for the given test.

    If the test didn't fail or timeout, returns [].
    """
    test_status = self._test_status.get(test, ('', []))
    return ['%s: ' % test] + test_status[1]

  def CompletedWithoutFailure(self):
    """Returns True if all tests completed and no tests failed unexpectedly."""
    return self.completed

  def ProcessLine(self, line):
    """This is called once with each line of the test log."""

    # Track line number for error messages.
    self._line_number += 1

    # Some tests (net_unittests in particular) run subprocesses which can write
    # stuff to shared stdout buffer. Sometimes such output appears between new
    # line and gtest directives ('[ RUN ]', etc) which breaks the parser.
    # Code below tries to detect such cases and recognize a mixed line as two
    # separate lines.

    # List of regexps that the parser expects to find at the start of a line
    # but which can be somewhere in the middle.
    gtest_regexps = [
        self._test_start,
        self._test_ok,
        self._test_fail,
        self._test_passed,
    ]

    for regexp in gtest_regexps:
      match = regexp.search(line)
      if match:
        break

    if not match or match.start() == 0:
      self._ProcessLine(line)
    else:
      # A directive was found mid-line: split and parse both halves.
      self._ProcessLine(line[:match.start()])
      self._ProcessLine(line[match.start():])

  def _ProcessLine(self, line):
    """Parses the line and changes the state of parsed tests accordingly.

    Will recognize newly started tests, OK or FAILED statuses, timeouts, etc.
    """

    # Is it a line declaring all tests passed?
    results = self._test_passed.match(line)
    if results:
      self.completed = True
      self._current_test = ''
      return

    # Is it the start of a test?
    results = self._test_start.match(line)
    if results:
      if self._current_test:
        # The previous test never reported a result before a new one started;
        # record it as a timeout.
        if self._test_status[self._current_test][0] == 'started':
          self._test_status[self._current_test] = (
              'timeout', self._failure_description)
      test_name = '%s.%s' % (results.group(1), results.group(2))
      self._test_status[test_name] = ('started', ['Did not complete.'])
      self._current_test = test_name
      if self.retrying_failed:
        # Alias the stored description list so subsequent log lines are
        # appended to it, and mark where the retry's output begins.
        self._failure_description = self._test_status[test_name][1]
        self._failure_description.extend(['', 'RETRY OUTPUT:', ''])
      else:
        self._failure_description = []
      return

    # Is it a test success line?
    results = self._test_ok.match(line)
    if results:
      test_name = '%s.%s' % (results.group(1), results.group(2))
      status = self._StatusOfTest(test_name)
      if status != 'started':
        self._RecordError(line, 'success while in status %s' % status)
      if self.retrying_failed:
        # Passing on retry means the test is flaky; downgrade to a warning.
        self._test_status[test_name] = ('warning', self._failure_description)
      else:
        self._test_status[test_name] = ('OK', [])
      self._failure_description = []
      self._current_test = ''
      return

    # Is it a test failure line?
    results = self._test_fail.match(line)
    if results:
      test_name = '%s.%s' % (results.group(1), results.group(2))
      status = self._StatusOfTest(test_name)
      if status not in ('started', 'failed', 'timeout'):
        self._RecordError(line, 'failure while in status %s' % status)
      # Don't overwrite the failure description when a failing test is listed a
      # second time in the summary, or if it was already recorded as timing
      # out.
      if status not in ('failed', 'timeout'):
        self._test_status[test_name] = ('failed', self._failure_description)
      self._failure_description = []
      self._current_test = ''
      return

    # Is it the start of the retry tests?
    results = self._retry_message.match(line)
    if results:
      self.retrying_failed = True
      return

    # Random line: if we're in a test, collect it for the failure description.
    # Tests may run simultaneously, so this might be off, but it's worth a try.
    # This also won't work if a test times out before it begins running.
    if self._current_test:
      self._failure_description.append(line)
OLD | NEW |