# runtests.py
  1. #!/usr/bin/env python
  2. #-------------------------------------------------------------------------------------------------------
  3. # Copyright (C) Microsoft. All rights reserved.
  4. # Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  5. #-------------------------------------------------------------------------------------------------------
  6. from __future__ import print_function
  7. from datetime import datetime
  8. from multiprocessing import Pool, Manager
  9. from threading import Timer
  10. import sys
  11. import os
  12. import glob
  13. import subprocess as SP
  14. import traceback
  15. import argparse
  16. import xml.etree.ElementTree as ET
  17. import re
  18. import time
  19. # handle command line args
  20. parser = argparse.ArgumentParser(
  21. description='ChakraCore *nix Test Script',
  22. formatter_class=argparse.RawDescriptionHelpFormatter,
  23. epilog='''\
  24. Samples:
  25. test all folders:
  26. runtests.py
  27. test only Array:
  28. runtests.py Array
  29. test a single file:
  30. runtests.py Basics/hello.js
  31. ''')
  32. DEFAULT_TIMEOUT = 60
  33. SLOW_TIMEOUT = 180
  34. parser.add_argument('folders', metavar='folder', nargs='*',
  35. help='folder subset to run tests')
  36. parser.add_argument('-b', '--binary', metavar='bin', help='ch full path')
  37. parser.add_argument('-d', '--debug', action='store_true',
  38. help='use debug build');
  39. parser.add_argument('-t', '--test', action='store_true', help='use test build')
  40. parser.add_argument('--variants', metavar='variant', nargs='+',
  41. help='run specified test variants')
  42. parser.add_argument('--include-slow', action='store_true',
  43. help='include slow tests (timeout ' + str(SLOW_TIMEOUT) + ' seconds)')
  44. parser.add_argument('--only-slow', action='store_true',
  45. help='run only slow tests')
  46. parser.add_argument('--nightly', action='store_true',
  47. help='run as nightly tests')
  48. parser.add_argument('--tag', nargs='*',
  49. help='select tests with given tags')
  50. parser.add_argument('--not-tag', nargs='*',
  51. help='exclude tests with given tags')
  52. parser.add_argument('--flags', default='',
  53. help='global test flags to ch')
  54. parser.add_argument('--timeout', type=int, default=DEFAULT_TIMEOUT,
  55. help='test timeout (default ' + str(DEFAULT_TIMEOUT) + ' seconds)')
  56. parser.add_argument('--swb', action='store_true', help='use binary from VcBuild.SWB to run the test')
  57. parser.add_argument('-l', '--logfile', metavar='logfile', help='file to log results to', default=None)
  58. parser.add_argument('--x86', action='store_true', help='use x86 build')
  59. parser.add_argument('--x64', action='store_true', help='use x64 build')
  60. args = parser.parse_args()
  61. test_root = os.path.dirname(os.path.realpath(__file__))
  62. repo_root = os.path.dirname(test_root)
  63. # arch: x86, x64
  64. arch = 'x86' if args.x86 else ('x64' if args.x64 else None)
  65. if arch == None:
  66. arch = os.environ.get('_BuildArch', 'x86')
  67. if sys.platform != 'win32':
  68. arch = 'x64' # xplat: hard code arch == x64
  69. arch_alias = 'amd64' if arch == 'x64' else None
  70. # flavor: debug, test, release
  71. type_flavor = {'chk':'Debug', 'test':'Test', 'fre':'Release'}
  72. flavor = 'Debug' if args.debug else ('Test' if args.test else None)
  73. if flavor == None:
  74. print("ERROR: Test build target wasn't defined.")
  75. print("Try '-t' (test build) or '-d' (debug build).")
  76. sys.exit(1)
  77. flavor_alias = 'chk' if flavor == 'Debug' else 'fre'
  78. # test variants
  79. if not args.variants:
  80. args.variants = ['interpreted', 'dynapogo']
  81. # binary: full ch path
  82. binary = args.binary
  83. if binary == None:
  84. if sys.platform == 'win32':
  85. build = "VcBuild.SWB" if args.swb else "VcBuild"
  86. binary = 'Build\\' + build + '\\bin\\{}_{}\\ch.exe'.format(arch, flavor)
  87. else:
  88. binary = 'out/{0}/ch'.format(flavor)
  89. binary = os.path.join(repo_root, binary)
  90. if not os.path.isfile(binary):
  91. print('{} not found. Did you run ./build.sh already?'.format(binary))
  92. sys.exit(1)
  93. # global tags/not_tags
  94. tags = set(args.tag or [])
  95. not_tags = set(args.not_tag or []).union(['fail', 'exclude_' + arch])
  96. if arch_alias:
  97. not_tags.add('exclude_' + arch_alias)
  98. if flavor_alias:
  99. not_tags.add('exclude_' + flavor_alias)
  100. if args.only_slow:
  101. tags.add('Slow')
  102. elif not args.include_slow:
  103. not_tags.add('Slow')
  104. elif args.include_slow and args.timeout == DEFAULT_TIMEOUT:
  105. args.timeout = SLOW_TIMEOUT
  106. not_tags.add('exclude_nightly' if args.nightly else 'nightly')
  107. # xplat: temp hard coded to exclude unsupported tests
  108. if sys.platform != 'win32':
  109. not_tags.add('exclude_xplat')
  110. not_tags.add('Intl')
  111. if sys.platform == 'darwin':
  112. not_tags.add('exclude_mac')
  113. not_compile_flags = set(['-simdjs']) \
  114. if sys.platform != 'win32' else None
  115. # use tags/not_tags/not_compile_flags as case-insensitive
  116. def lower_set(s):
  117. return set([x.lower() for x in s] if s else [])
# Normalize all three filter sets to lower-case once, so every later
# membership/subset test is effectively case-insensitive.
tags = lower_set(tags)
not_tags = lower_set(not_tags)
not_compile_flags = lower_set(not_compile_flags)
  121. # split tags text into tags set
  122. _empty_set = set()
  123. def split_tags(text):
  124. return set(x.strip() for x in text.lower().split(',')) if text \
  125. else _empty_set
  126. class LogFile(object):
  127. def __init__(self, log_file_path = None):
  128. self.file = None
  129. if log_file_path is None:
  130. # Set up the log file paths
  131. # Make sure the right directory exists and the log file doesn't
  132. log_file_name = "testrun.{0}{1}.log".format(arch, flavor)
  133. log_file_directory = os.path.join(repo_root, "test", "logs")
  134. if not os.path.exists(log_file_directory):
  135. os.mkdir(log_file_directory)
  136. self.log_file_path = os.path.join(log_file_directory, log_file_name)
  137. if os.path.exists(self.log_file_path):
  138. os.remove(self.log_file_path)
  139. else:
  140. self.log_file_path = log_file_path
  141. self.file = open(self.log_file_path, "w")
  142. def log(self, args):
  143. self.file.write(args)
  144. def __del__(self):
  145. if not (self.file is None):
  146. self.file.close()
# Create the global log file only in the main process; worker processes
# re-import this module and must not truncate/recreate the log.
if __name__ == '__main__':
    log_file = LogFile(args.logfile)
  149. def log_message(msg = ""):
  150. log_file.log(msg + "\n")
def print_and_log(msg=""):
    """Echo *msg* to stdout and also record it in the log file."""
    print(msg)
    log_message(msg)
  154. # remove carriage returns at end of line to avoid platform difference
  155. def normalize_new_line(text):
  156. return re.sub(b'[\r]+\n', b'\n', text)
# A test simply contains a collection of test attributes.
# Misc attributes added by test run:
#   id            unique counter to identify a test
#   filename      full path of test file
#   elapsed_time  elapsed time when running the test
class Test(dict):
    # Attribute writes/deletes go straight to the dict entries,
    # so test.foo = x is equivalent to test['foo'] = x.
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    # support dot syntax for normal attribute access
    def __getattr__(self, key):
        # Non-dunder names read the dict, defaulting to None for missing
        # keys. Dunder names fall through to the superclass lookup — dict
        # defines no __getattr__, so this raises AttributeError, which keeps
        # protocol probes (copy/pickle) behaving normally.
        return super(Test, self).__getattr__(key) if key.startswith('__') \
            else self.get(key)

    # mark start of this test run, to compute elapsed_time
    def start(self):
        self.start_time = datetime.now()

    # mark end of this test run, compute elapsed_time
    def done(self):
        # only the first done() records the duration (elapsed_time is None
        # until set, via __getattr__'s .get default)
        if not self.elapsed_time:
            self.elapsed_time = (datetime.now() - self.start_time)\
                .total_seconds()
  178. # records pass_count/fail_count
  179. class PassFailCount(object):
  180. def __init__(self):
  181. self.pass_count = 0
  182. self.fail_count = 0
  183. def __str__(self):
  184. return 'passed {}, failed {}'.format(self.pass_count, self.fail_count)
  185. def total_count(self):
  186. return self.pass_count + self.fail_count
  187. # records total and individual folder's pass_count/fail_count
  188. class TestResult(PassFailCount):
  189. def __init__(self):
  190. super(self.__class__, self).__init__()
  191. self.folders = {}
  192. def _get_folder_result(self, folder):
  193. r = self.folders.get(folder)
  194. if not r:
  195. r = PassFailCount()
  196. self.folders[folder] = r
  197. return r
  198. def log(self, filename, fail=False):
  199. folder = os.path.basename(os.path.dirname(filename))
  200. r = self._get_folder_result(folder)
  201. if fail:
  202. r.fail_count += 1
  203. self.fail_count += 1
  204. else:
  205. r.pass_count += 1
  206. self.pass_count += 1
  207. # test variants:
  208. # interpreted: -maxInterpretCount:1 -maxSimpleJitRunCount:1 -bgjit-
  209. # dynapogo: -forceNative -off:simpleJit -bgJitDelay:0
  210. class TestVariant(object):
  211. def __init__(self, name, compile_flags=[], variant_not_tags=[]):
  212. self.name = name
  213. self.compile_flags = \
  214. ['-WERExceptionSupport', '-ExtendedErrorStackForTestHost',
  215. '-BaselineMode'] + compile_flags
  216. self._compile_flags_has_expansion = self._has_expansion(compile_flags)
  217. self.tags = tags.copy()
  218. self.not_tags = not_tags.union(variant_not_tags).union(
  219. ['{}_{}'.format(x, name) for x in ('fails','exclude')])
  220. self.msg_queue = Manager().Queue() # messages from multi processes
  221. self.test_result = TestResult()
  222. self.test_count = 0
  223. self._print_lines = [] # _print lines buffer
  224. self._last_len = 0
  225. @staticmethod
  226. def _has_expansion(flags):
  227. return any(re.match('.*\${.*}', f) for f in flags)
  228. @staticmethod
  229. def _expand(flag, test):
  230. return re.sub('\${id}', str(test.id), flag)
  231. def _expand_compile_flags(self, test):
  232. if self._compile_flags_has_expansion:
  233. return [self._expand(flag, test) for flag in self.compile_flags]
  234. return self.compile_flags
  235. # check if this test variant should run a given test
  236. def _should_test(self, test):
  237. tags = split_tags(test.get('tags'))
  238. if not tags.isdisjoint(self.not_tags):
  239. return False
  240. if self.tags and not self.tags.issubset(tags):
  241. return False
  242. if not_compile_flags: # exclude unsupported compile-flags if any
  243. flags = test.get('compile-flags')
  244. if flags and \
  245. not not_compile_flags.isdisjoint(flags.lower().split()):
  246. return False
  247. return True
  248. # print output from multi-process run, to be sent with result message
  249. def _print(self, line):
  250. self._print_lines.append(str(line))
  251. # queue a test result from multi-process runs
  252. def _log_result(self, test, fail):
  253. output = '\n'.join(self._print_lines) # collect buffered _print output
  254. self._print_lines = []
  255. self.msg_queue.put((test.filename, fail, test.elapsed_time, output))
  256. # (on main process) process one queued message
  257. def _process_msg(self, msg):
  258. filename, fail, elapsed_time, output = msg
  259. self.test_result.log(filename, fail=fail)
  260. line = '[{}/{} {:4.2f}] {} -> {}'.format(
  261. self.test_result.total_count(),
  262. self.test_count,
  263. elapsed_time,
  264. 'Failed' if fail else 'Passed',
  265. self._short_name(filename))
  266. padding = self._last_len - len(line)
  267. print(line + ' ' * padding, end='\n' if fail else '\r')
  268. log_message(line)
  269. self._last_len = len(line) if not fail else 0
  270. if len(output) > 0:
  271. print_and_log(output)
  272. # get a shorter test file path for display only
  273. def _short_name(self, filename):
  274. folder = os.path.basename(os.path.dirname(filename))
  275. return os.path.join(folder, os.path.basename(filename))
  276. # (on main process) wait and process one queued message
  277. def _process_one_msg(self):
  278. self._process_msg(self.msg_queue.get())
  279. # log a failed test with details
  280. def _show_failed(self, test, flags, exit_code, output,
  281. expected_output=None, timedout=False):
  282. if timedout:
  283. self._print('ERROR: Test timed out!')
  284. self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
  285. if expected_output == None or timedout:
  286. self._print("\nOutput:")
  287. self._print("----------------------------")
  288. self._print(output.decode('utf-8'))
  289. self._print("----------------------------")
  290. else:
  291. lst_output = output.split(b'\n')
  292. lst_expected = expected_output.split(b'\n')
  293. ln = min(len(lst_output), len(lst_expected))
  294. for i in range(0, ln):
  295. if lst_output[i] != lst_expected[i]:
  296. self._print("Output: (at line " + str(i) + ")")
  297. self._print("----------------------------")
  298. self._print(lst_output[i])
  299. self._print("----------------------------")
  300. self._print("Expected Output:")
  301. self._print("----------------------------")
  302. self._print(lst_expected[i])
  303. self._print("----------------------------")
  304. break
  305. self._print("exit code: {}".format(exit_code))
  306. self._log_result(test, fail=True)
  307. # temp: try find real file name on hard drive if case mismatch
  308. def _check_file(self, folder, filename):
  309. path = os.path.join(folder, filename)
  310. if os.path.isfile(path):
  311. return path # file exists on disk
  312. filename_lower = filename.lower()
  313. files = os.listdir(folder)
  314. for i in range(len(files)):
  315. if files[i].lower() == filename_lower:
  316. self._print('\nWARNING: {} should be {}\n'.format(
  317. path, files[i]))
  318. return os.path.join(folder, files[i])
  319. # cann't find the file, just return the path and let it error out
  320. return path
  321. # run one test under this variant
  322. def test_one(self, test):
  323. try:
  324. test.start()
  325. self._run_one_test(test)
  326. except Exception:
  327. test.done()
  328. self._print(traceback.format_exc())
  329. self._log_result(test, fail=True)
  330. # internally perform one test run
  331. def _run_one_test(self, test):
  332. folder = test.folder
  333. js_file = test.filename = self._check_file(folder, test.files)
  334. js_output = b''
  335. working_path = os.path.dirname(js_file)
  336. flags = test.get('compile-flags') or ''
  337. flags = self._expand_compile_flags(test) + \
  338. args.flags.split() + \
  339. flags.split()
  340. cmd = [binary] + flags + [os.path.basename(js_file)]
  341. test.start()
  342. proc = SP.Popen(cmd, stdout=SP.PIPE, stderr=SP.STDOUT, cwd=working_path)
  343. timeout_data = [proc, False]
  344. def timeout_func(timeout_data):
  345. timeout_data[0].kill()
  346. timeout_data[1] = True
  347. timeout = test.get('timeout', args.timeout) # test override or default
  348. timer = Timer(timeout, timeout_func, [timeout_data])
  349. try:
  350. timer.start()
  351. js_output = normalize_new_line(proc.communicate()[0])
  352. exit_code = proc.wait()
  353. finally:
  354. timer.cancel()
  355. test.done()
  356. # shared _show_failed args
  357. fail_args = { 'test': test, 'flags': flags,
  358. 'exit_code': exit_code, 'output': js_output };
  359. # check timed out
  360. if (timeout_data[1]):
  361. return self._show_failed(timedout=True, **fail_args)
  362. # check ch failed
  363. if exit_code != 0:
  364. return self._show_failed(**fail_args)
  365. # check output
  366. if 'baseline' not in test:
  367. # output lines must be 'pass' or 'passed' or empty
  368. lines = (line.lower() for line in js_output.split(b'\n'))
  369. if any(line != b'' and line != b'pass' and line != b'passed'
  370. for line in lines):
  371. return self._show_failed(**fail_args)
  372. else:
  373. baseline = test.get('baseline')
  374. if baseline:
  375. # perform baseline comparison
  376. baseline = self._check_file(working_path, baseline)
  377. with open(baseline, 'rb') as bs_file:
  378. baseline_output = bs_file.read()
  379. # Cleanup carriage return
  380. # todo: remove carriage return at the end of the line
  381. # or better fix ch to output same on all platforms
  382. expected_output = normalize_new_line(baseline_output)
  383. if expected_output != js_output:
  384. return self._show_failed(
  385. expected_output=expected_output, **fail_args)
  386. # passed
  387. self._log_result(test, fail=False)
  388. # run tests under this variant, using given multiprocessing Pool
  389. def _run(self, tests, pool):
  390. print_and_log('\n############# Starting {} variant #############'\
  391. .format(self.name))
  392. if self.tags:
  393. print_and_log(' tags: {}'.format(self.tags))
  394. for x in self.not_tags:
  395. print_and_log(' exclude: {}'.format(x))
  396. print_and_log()
  397. # filter tests to run
  398. tests = [x for x in tests if self._should_test(x)]
  399. self.test_count += len(tests)
  400. # run tests in parallel
  401. result = pool.map_async(run_one, [(self,test) for test in tests])
  402. while self.test_result.total_count() != self.test_count:
  403. self._process_one_msg()
  404. # print test result summary
  405. def print_summary(self):
  406. print_and_log('\n######## Logs for {} variant ########'\
  407. .format(self.name))
  408. for folder, result in sorted(self.test_result.folders.items()):
  409. print_and_log('{}: {}'.format(folder, result))
  410. print_and_log("----------------------------")
  411. print_and_log('Total: {}'.format(self.test_result))
  412. # run all tests from testLoader
  413. def run(self, testLoader, pool, sequential_pool):
  414. tests, sequential_tests = [], []
  415. for folder in testLoader.folders():
  416. if folder.tags.isdisjoint(self.not_tags):
  417. dest = tests if not folder.is_sequential else sequential_tests
  418. dest += folder.tests
  419. if tests:
  420. self._run(tests, pool)
  421. if sequential_tests:
  422. self._run(sequential_tests, sequential_pool)
  423. # global run one test function for multiprocessing, used by TestVariant
  424. def run_one(data):
  425. try:
  426. variant, test = data
  427. variant.test_one(test)
  428. except Exception:
  429. print('ERROR: Unhandled exception!!!')
  430. traceback.print_exc()
  431. # A test folder contains a list of tests and maybe some tags.
  432. class TestFolder(object):
  433. def __init__(self, tests, tags=_empty_set):
  434. self.tests = tests
  435. self.tags = tags
  436. self.is_sequential = 'sequential' in tags
# TestLoader loads all tests
class TestLoader(object):
    """Loads tests for the requested paths, honoring rlexedirs.xml /
    rlexe.xml metadata found in the test tree."""

    def __init__(self, paths):
        # paths: folders, or individual .js files, to load tests from
        self._folder_tags = self._load_folder_tags()
        self._test_id = 0
        self._folders = []
        for path in paths:
            if os.path.isfile(path):
                folder, file = os.path.dirname(path), os.path.basename(path)
            else:
                folder, file = path, None
            ftags = self._get_folder_tags(folder)
            if ftags != None: # Only honor entries listed in rlexedirs.xml
                tests = self._load_tests(folder, file)
                self._folders.append(TestFolder(tests, ftags))

    def folders(self):
        """Return the loaded TestFolder list."""
        return self._folders

    # load folder/tags info from test_root/rlexedirs.xml
    @staticmethod
    def _load_folder_tags():
        xmlpath = os.path.join(test_root, 'rlexedirs.xml')
        try:
            xml = ET.parse(xmlpath).getroot()
        except IOError:
            print_and_log('ERROR: failed to read {}'.format(xmlpath))
            exit(-1)
        folder_tags = {}
        for x in xml:
            d = x.find('default')
            key = d.find('files').text.lower() # avoid case mismatch
            tags = d.find('tags')
            folder_tags[key] = \
                split_tags(tags.text) if tags != None else _empty_set
        return folder_tags

    # get folder tags if any
    def _get_folder_tags(self, folder):
        # keys in rlexedirs.xml are folder base names, compared lower-cased
        key = os.path.basename(os.path.normpath(folder)).lower()
        return self._folder_tags.get(key)

    def _next_test_id(self):
        # monotonically increasing id, used for per-test ${id} expansion
        self._test_id += 1
        return self._test_id

    # load all tests in folder using rlexe.xml file
    def _load_tests(self, folder, file):
        try:
            xmlpath = os.path.join(folder, 'rlexe.xml')
            xml = ET.parse(xmlpath).getroot()
        except IOError:
            return []

        # apply <override> children when <condition>'s check_tag matches
        def test_override(condition, check_tag, check_value, test):
            target = condition.find(check_tag)
            if target != None and target.text == check_value:
                for override in condition.find('override'):
                    test[override.tag] = override.text

        # build one Test from a <test> element's <default> children
        def load_test(testXml):
            test = Test(folder=folder)
            for c in testXml.find('default'):
                if c.tag == 'timeout': # timeout seconds
                    test[c.tag] = int(c.text)
                elif c.tag == 'tags' and c.tag in test: # merge multiple <tags>
                    test[c.tag] = test[c.tag] + ',' + c.text
                else:
                    test[c.tag] = c.text
            condition = testXml.find('condition')
            if condition != None:
                test_override(condition, 'target', arch_alias, test)
            return test

        tests = [load_test(x) for x in xml]
        if file != None:
            # a single file was requested: narrow to it, or synthesize a
            # bare test entry if the .js file isn't listed in rlexe.xml
            tests = [x for x in tests if x.files == file]
            if len(tests) == 0 and self.is_jsfile(file):
                tests = [Test(folder=folder, files=file, baseline='')]
        for test in tests: # assign unique test.id
            test.id = self._next_test_id()
        return tests

    @staticmethod
    def is_jsfile(path):
        return os.path.splitext(path)[1] == '.js'
def main():
    """Load tests, run each requested variant, print the summary.

    Returns 0 on success, 1 if any variant recorded a failure.
    """
    # Set the right timezone, the tests need Pacific Standard Time
    # TODO: Windows. time.tzset only supports Unix
    if hasattr(time, 'tzset'):
        os.environ['TZ'] = 'US/Pacific'
        time.tzset()

    # By default run all tests (every directory under test_root)
    if len(args.folders) == 0:
        files = (os.path.join(test_root, x) for x in os.listdir(test_root))
        args.folders = [f for f in sorted(files) if not os.path.isfile(f)]

    # load all tests
    testLoader = TestLoader(args.folders)

    # test variants; only those named in args.variants are kept
    variants = [x for x in [
        TestVariant('interpreted', [
            '-maxInterpretCount:1', '-maxSimpleJitRunCount:1', '-bgjit-',
            '-dynamicprofilecache:profile.dpl.${id}'
        ]),
        TestVariant('dynapogo', [
            '-forceNative', '-off:simpleJit', '-bgJitDelay:0',
            '-dynamicprofileinput:profile.dpl.${id}'
        ]),
        TestVariant('disable_jit', [
            '-nonative'
        ], [
            'exclude_interpreted', 'fails_interpreted', 'require_backend'
        ])
    ] if x.name in args.variants]

    # rm profile.dpl.* left over from previous runs (interpreted writes
    # them; dynapogo reads them)
    for f in glob.glob(test_root + '/*/profile.dpl.*'):
        os.remove(f)

    # run each variant: one parallel pool plus a single-worker pool for
    # folders tagged 'sequential'
    pool, sequential_pool = Pool(), Pool(1)
    start_time = datetime.now()
    for variant in variants:
        variant.run(testLoader, pool, sequential_pool)
    elapsed_time = datetime.now() - start_time

    # print summary
    for variant in variants:
        variant.print_summary()

    print()
    failed = any(variant.test_result.fail_count > 0 for variant in variants)
    print('[{}] {}'.format(
        str(elapsed_time), 'Success!' if not failed else 'Failed!'))
    return 1 if failed else 0
# Script entry point: exit status mirrors main()'s pass/fail result.
if __name__ == '__main__':
    sys.exit(main())