# run_webkit_tests_integrationtest.py
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import codecs
import json
import logging
import os
import platform
import Queue
import re
import StringIO
import sys
import thread
import time
import threading

import unittest2 as unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.port import Port
from webkitpy.port import test
from webkitpy.test.skip import skip_if
from webkitpy.tool.mocktool import MockOptions


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
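    """Build an (options, parsed_args) pair for run_webkit_tests.

    Unless overridden by extra_args, the mock 'test' port and a single child
    process are selected; unless tests_included is true, a default set of
    test paths (including a glob) is appended.
    """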
    extra_args = extra_args or []
    args = []
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if not '--child-processes' in extra_args:
        args.extend(['--child-processes', 1])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
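    """Run the tests and return True if the run exited with code 0."""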
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
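    """Run the tests with output captured; return (run_details, logging_stream, user)."""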
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
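    """Run the tests under OutputCapture; return (run_details, logging_stream)."""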
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)


def get_tests_run(args, host=None):
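    """Return the names of the tests that ran, in the order they ran."""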
    results = get_test_results(args, host)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
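    """Group the names of the tests that ran into batches, starting a new batch whenever the worker pid changes."""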
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        # Track the pid so the comparison above actually detects worker changes.
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches


def get_test_results(args, host=None):
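    """Run the tests and return the TestResults from the initial run plus any retry."""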
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
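    """Strip the ADD_RESULTS(...); JSONP wrapper and parse the JSON payload."""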
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results


class StreamTestingMixin(object):
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):
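    """End-to-end tests that drive run_webkit_tests.run() against the mock 'test' port."""
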
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)

        one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
            details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name),
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)

        # Ensure the image diff percentage is in the results.
        self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Ensure the results were written out and displayed.
        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_max_locked_shards(self):
        # Tests for the default of using one locked shard even in the case of more than one child process.
        if not self.should_test_processes:
            return

        save_env_webkit_test_max_locked_shards = None
        if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
            save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
            del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
        _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
        try:
            self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
        finally:
            if save_env_webkit_test_max_locked_shards:
                os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether they are in-process or out. Inline exceptions work as normal,
        # which allows us to get the full stack trace and traceback from the
        # worker. The downside to this is that it could be any error, but this
        # is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which have a string
        # capture of the stack that can be printed, but don't display properly
        # in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_full_results_html(self):
        # FIXME: verify html?
        details, _, _ = logging_run(['--full-results-html'])
        self.assertEqual(details.exit_code, 0)

    def test_hung_thread(self):
        details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
        # Note that hang.html is marked as WontFix and all WontFix tests are
        # expected to Pass, so that actually running them generates an "unexpected" error.
        self.assertEqual(details.exit_code, 1)
        self.assertNotEmpty(err)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run, ['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(KeyboardInterrupt, logging_run,
                ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, -1)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations
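        # (Below: 2 tests x --repeat-each 4 x --iterations 2 = 16 total runs.)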
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected.\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk
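        # ('--run-chunk', '1:4' selects the zero-based chunk 1 with 4 tests
        # per chunk, which maps to all_tests_run[4:8] below.)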
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        details, _, _ = logging_run(['--run-singly'], tests_included=True)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1)  # failures/expected/hang.html actually passes w/ --run-singly.

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                         'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, -1)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regressions":1') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_pixel_test_directories(self):
        host = MockHost()
  415. """Both tests have faling checksum. We include only the first in pixel tests so only that should fail."""
        args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE"'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        class CustomExitCodePort(test.TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(host, options=options)
        details, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host, port_obj=test_port)
        self.assertEqual(details.exit_code, 2)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        expected_crash_log = mock_crash_report
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
        # Currently CrashLog uploading only works on Darwin.
        if not self._platform.is_mac():
            return
        mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
        host = MockHost()
        host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        logging_run(['failures/unexpected/web-process-crash-with-stderr.html'], tests_included=True, host=host)
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # By returning False, we know that the incremental results were generated and then deleted.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEqual(details.exit_code, 1)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1})
        self.assertFalse(json["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These next tests test that we run the tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other tests,
        # so we have to test both.
        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_tolerance(self):
        class ImageDiffTestPort(test.TestPort):
            def diff_image(self, expected_contents, actual_contents, tolerance=None):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1, None)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            host = MockHost()
            test_port = ImageDiffTestPort(host, options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
        self.assertTrue(json_string.find('"num_regressions":4') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    def test_no_http_and_force(self):
        # See test_run_force, using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'])
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'])
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))

    def test_platform_tests_are_found(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
        # aren't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_wdiff'], False)
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.
        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())


class EndToEndTest(unittest.TestCase):
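    """Tests that inspect the summarized full_results.json produced by a complete run."""
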
    def test_reftest_with_two_notrefs(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="]})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="]})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
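    """Tests for the baseline-writing modes: --reset-results, --new-baseline, and missing results."""
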
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that the file_list contains the baselines."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.
    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 10)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
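    """Smoke tests that mock versions of the real ports can complete a passing run.

    All are currently disabled: the disabled_ prefix keeps unittest from picking them up.
    """
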
    def assert_mock_port_works(self, port_name, args=[]):
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_chromium_mac_lion(self):
        self.assert_mock_port_works('chromium-mac-lion')

    def disabled_test_chromium_mac_lion_in_test_shell_mode(self):
        self.assert_mock_port_works('chromium-mac-lion', args=['--additional-drt-flag=--test-shell'])

    def disabled_test_qt_linux(self):
        self.assert_mock_port_works('qt-linux')

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')


class MainTest(unittest.TestCase):
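    """Tests for run_webkit_tests.main()'s exit codes and exception handling."""
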
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):
            class FakeRunDetails(object):
                exit_code = -1
            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            assert False

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, -1)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn