# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging

from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures

_log = logging.getLogger(__name__)
class TestRunResults(object):
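    """Aggregates the results of a single run of the layout tests.

    Tracks per-test results, counts of expected and unexpected outcomes,
    and the set of slow tests as individual results are recorded via add().
    """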
    def __init__(self, expectations, num_tests):
        self.total = num_tests
        self.remaining = self.total
        self.expectations = expectations
        self.expected = 0
        self.unexpected = 0
        self.unexpected_failures = 0
        self.unexpected_crashes = 0
        self.unexpected_timeouts = 0
        self.tests_by_expectation = {}
        self.tests_by_timeline = {}
        self.results_by_name = {}  # Map of test name to the last result for the test.
        self.all_results = []  # All results from a run, including every iteration of every test.
        self.unexpected_results_by_name = {}
        self.failures_by_name = {}
        self.total_failures = 0
        self.expected_skips = 0
        for expectation in test_expectations.TestExpectations.EXPECTATIONS.values():
            self.tests_by_expectation[expectation] = set()
        for timeline in test_expectations.TestExpectations.TIMELINES.values():
            self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
        self.slow_tests = set()
        self.interrupted = False
    def add(self, test_result, expected, test_is_slow):
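        """Records a single test result.

        'expected' indicates whether the result matched its expectation;
        'test_is_slow' indicates whether the test is marked as slow.
        """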
        self.tests_by_expectation[test_result.type].add(test_result.test_name)
        self.results_by_name[test_result.test_name] = test_result
        if test_result.type != test_expectations.SKIP:
            self.all_results.append(test_result)
        self.remaining -= 1
        if len(test_result.failures):
            self.total_failures += 1
            self.failures_by_name[test_result.test_name] = test_result.failures
        if expected:
            self.expected += 1
            if test_result.type == test_expectations.SKIP:
                self.expected_skips += 1
        else:
            self.unexpected_results_by_name[test_result.test_name] = test_result
            self.unexpected += 1
            if len(test_result.failures):
                self.unexpected_failures += 1
            if test_result.type == test_expectations.CRASH:
                self.unexpected_crashes += 1
            elif test_result.type == test_expectations.TIMEOUT:
                self.unexpected_timeouts += 1
        if test_is_slow:
            self.slow_tests.add(test_result.test_name)
class RunDetails(object):
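    """Bundles the exit code with the initial, retry, and summarized results of a run."""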
    def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
        self.exit_code = exit_code
        self.summarized_results = summarized_results
        self.initial_results = initial_results
        self.retry_results = retry_results
        self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
def _interpret_test_failures(failures):
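    """Maps a list of TestFailure objects onto the is_* flags and image diff
    percentage used in the per-test JSON results dictionary."""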
    test_dict = {}
    failure_types = [type(failure) for failure in failures]
    # FIXME: get rid of all these is_* values once there is a 1:1 map between
    # TestFailure type and test_expectations.EXPECTATION.
    if test_failures.FailureMissingAudio in failure_types:
        test_dict['is_missing_audio'] = True
    if test_failures.FailureMissingResult in failure_types:
        test_dict['is_missing_text'] = True
    if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
        test_dict['is_missing_image'] = True
    if 'image_diff_percent' not in test_dict:
        for failure in failures:
            if isinstance(failure, test_failures.FailureImageHashMismatch) or isinstance(failure, test_failures.FailureReftestMismatch):
                test_dict['image_diff_percent'] = failure.diff_percent
    return test_dict
def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry):
- """Returns a dictionary containing a summary of the test runs, with the following fields:
- 'version': a version indicator
- 'fixable': The number of fixable tests (NOW - PASS)
- 'skipped': The number of skipped tests (NOW & SKIPPED)
- 'num_regressions': The number of non-flaky failures
- 'num_flaky': The number of flaky failures
- 'num_missing': The number of tests with missing results
- 'num_passes': The number of unexpected passes
- 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
- """
    results = {}
    results['version'] = 3

    tbe = initial_results.tests_by_expectation
    tbt = initial_results.tests_by_timeline
    results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
    results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])

    num_passes = 0
    num_flaky = 0
    num_missing = 0
    num_regressions = 0
    keywords = {}
    for expectation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
        keywords[expectation_enum] = expectation_string.upper()
    for modifier_string, modifier_enum in test_expectations.TestExpectations.MODIFIERS.iteritems():
        keywords[modifier_enum] = modifier_string.upper()
    tests = {}
    for test_name, result in initial_results.results_by_name.iteritems():
        # Note that if a test crashed in the original run, we ignore
        # whether or not it crashed when we retried it (if we retried it),
        # and always consider the result not flaky.
        expected = expectations.get_expectations_string(test_name)
        result_type = result.type
        actual = [keywords[result_type]]

        if result_type == test_expectations.SKIP:
            continue

        test_dict = {}
        if result.has_stderr:
            test_dict['has_stderr'] = True

        if result.reftest_type:
            test_dict.update(reftest_type=list(result.reftest_type))

        if expectations.has_modifier(test_name, test_expectations.WONTFIX):
            test_dict['wontfix'] = True

        if result_type == test_expectations.PASS:
            num_passes += 1
            # FIXME: include passing tests that have stderr output.
            if expected == 'PASS':
                continue
        elif result_type == test_expectations.CRASH:
            if test_name in initial_results.unexpected_results_by_name:
                num_regressions += 1
        elif result_type == test_expectations.MISSING:
            if test_name in initial_results.unexpected_results_by_name:
                num_missing += 1
        elif test_name in initial_results.unexpected_results_by_name:
            if retry_results and test_name not in retry_results.unexpected_results_by_name:
                actual.extend(expectations.get_expectations_string(test_name).split(" "))
                num_flaky += 1
            elif retry_results:
                retry_result_type = retry_results.unexpected_results_by_name[test_name].type
                if result_type != retry_result_type:
                    if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and retry_result_type == test_expectations.IMAGE_PLUS_TEXT:
                        num_regressions += 1
                    else:
                        num_flaky += 1
                        actual.append(keywords[retry_result_type])
                else:
                    num_regressions += 1
            else:
                num_regressions += 1

        test_dict['expected'] = expected
        test_dict['actual'] = " ".join(actual)
        test_dict.update(_interpret_test_failures(result.failures))

        if retry_results:
            retry_result = retry_results.unexpected_results_by_name.get(test_name)
            if retry_result:
                test_dict.update(_interpret_test_failures(retry_result.failures))

        # Store tests hierarchically by directory, e.g.
        #     foo/bar/baz.html: test_dict
        #     foo/bar/baz1.html: test_dict
        # becomes
        #     foo: {
        #         bar: {
        #             baz.html: test_dict,
        #             baz1.html: test_dict
        #         }
        #     }
        parts = test_name.split('/')
        current_map = tests
        for i, part in enumerate(parts):
            if i == (len(parts) - 1):
                current_map[part] = test_dict
                break
            if part not in current_map:
                current_map[part] = {}
            current_map = current_map[part]
    results['tests'] = tests
    results['num_passes'] = num_passes
    results['num_flaky'] = num_flaky
    results['num_missing'] = num_missing
    results['num_regressions'] = num_regressions
    results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
    results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
    results['layout_tests_dir'] = port_obj.layout_tests_dir()
    results['has_wdiff'] = port_obj.wdiff_available()
    results['has_pretty_patch'] = port_obj.pretty_patch_available()
    results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
    try:
        # We only use the svn revision for Trac links in the results.html file.
        # Don't do this by default since it takes >100ms.
        # FIXME: Do we really need to populate this both here and in the json_results_generator?
        if port_obj.get_option("builder_name"):
            port_obj.host.initialize_scm()
            results['revision'] = port_obj.host.scm().head_svn_revision()
    except Exception, e:
        _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
        # Handle cases where we're running outside of version control.
        import traceback
        _log.debug('Failed to learn head svn revision:')
        _log.debug(traceback.format_exc())
        results['revision'] = ""

    return results