runtests.py 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718
  1. #!/usr/bin/env python
  2. #-------------------------------------------------------------------------------------------------------
  3. # Copyright (C) Microsoft. All rights reserved.
  4. # Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  5. #-------------------------------------------------------------------------------------------------------
  6. from __future__ import print_function
  7. from datetime import datetime
  8. from multiprocessing import Pool, Manager, cpu_count
  9. from threading import Timer
  10. import sys
  11. import os
  12. import glob
  13. import subprocess as SP
  14. import traceback
  15. import argparse
  16. import xml.etree.ElementTree as ET
  17. import re
  18. import time
# handle command line args
# NOTE: arguments are registered in the order they should appear in --help.
parser = argparse.ArgumentParser(
    description='ChakraCore *nix Test Script',
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog='''\
Samples:
test all folders:
runtests.py
test only Array:
runtests.py Array
test a single file:
runtests.py Basics/hello.js
''')

# Per-test time limits in seconds; --include-slow bumps the default limit
# to SLOW_TIMEOUT when no explicit --timeout was given (see below).
DEFAULT_TIMEOUT = 60
SLOW_TIMEOUT = 180

parser.add_argument('folders', metavar='folder', nargs='*',
    help='folder subset to run tests')
parser.add_argument('-b', '--binary', metavar='bin',
    help='ch full path')
parser.add_argument('-v', '--verbose', action='store_true',
    help='increase verbosity of output')
parser.add_argument('--sanitize', metavar='sanitizers',
    help='ignore tests known to be broken with these sanitizers')
parser.add_argument('-d', '--debug', action='store_true',
    help='use debug build');
parser.add_argument('-t', '--test', '--test-build', action='store_true',
    help='use test build')
parser.add_argument('--static', action='store_true',
    help='mark that we are testing a static build')
parser.add_argument('--variants', metavar='variant', nargs='+',
    help='run specified test variants')
parser.add_argument('--include-slow', action='store_true',
    help='include slow tests (timeout ' + str(SLOW_TIMEOUT) + ' seconds)')
parser.add_argument('--only-slow', action='store_true',
    help='run only slow tests')
parser.add_argument('--nightly', action='store_true',
    help='run as nightly tests')
parser.add_argument('--tag', nargs='*',
    help='select tests with given tags')
parser.add_argument('--not-tag', nargs='*',
    help='exclude tests with given tags')
parser.add_argument('--flags', default='',
    help='global test flags to ch')
parser.add_argument('--timeout', type=int, default=DEFAULT_TIMEOUT,
    help='test timeout (default ' + str(DEFAULT_TIMEOUT) + ' seconds)')
parser.add_argument('--swb', action='store_true',
    help='use binary from VcBuild.SWB to run the test')
parser.add_argument('-l', '--logfile', metavar='logfile',
    help='file to log results to', default=None)
parser.add_argument('--x86', action='store_true',
    help='use x86 build')
parser.add_argument('--x64', action='store_true',
    help='use x64 build')
parser.add_argument('-j', '--processcount', metavar='processcount', type=int,
    help='number of parallel threads to use')
parser.add_argument('--warn-on-timeout', action='store_true',
    help='warn when a test times out instead of labelling it as an error immediately')
parser.add_argument('--override-test-root', type=str,
    help='change the base directory for the tests (where rlexedirs will be sought)')
args = parser.parse_args()
  79. test_root = os.path.dirname(os.path.realpath(__file__))
  80. repo_root = os.path.dirname(test_root)
  81. # new test root
  82. if args.override_test_root:
  83. test_root = os.path.realpath(args.override_test_root)
  84. # arch: x86, x64
  85. arch = 'x86' if args.x86 else ('x64' if args.x64 else None)
  86. if arch == None:
  87. arch = os.environ.get('_BuildArch', 'x86')
  88. if sys.platform != 'win32':
  89. arch = 'x64' # xplat: hard code arch == x64
  90. arch_alias = 'amd64' if arch == 'x64' else None
  91. # flavor: debug, test, release
  92. type_flavor = {'chk':'Debug', 'test':'Test', 'fre':'Release'}
  93. flavor = 'Debug' if args.debug else ('Test' if args.test else None)
  94. if flavor == None:
  95. print("ERROR: Test build target wasn't defined.")
  96. print("Try '-t' (test build) or '-d' (debug build).")
  97. sys.exit(1)
  98. flavor_alias = 'chk' if flavor == 'Debug' else 'fre'
  99. # test variants
  100. if not args.variants:
  101. args.variants = ['interpreted', 'dynapogo']
  102. # binary: full ch path
  103. binary = args.binary
  104. if binary == None:
  105. if sys.platform == 'win32':
  106. build = "VcBuild.SWB" if args.swb else "VcBuild"
  107. binary = 'Build\\' + build + '\\bin\\{}_{}\\ch.exe'.format(arch, flavor)
  108. else:
  109. binary = 'out/{0}/ch'.format(flavor)
  110. binary = os.path.join(repo_root, binary)
  111. if not os.path.isfile(binary):
  112. print('{} not found. Did you run ./build.sh already?'.format(binary))
  113. sys.exit(1)
  114. # global tags/not_tags
  115. tags = set(args.tag or [])
  116. not_tags = set(args.not_tag or []).union(['fail', 'exclude_' + arch])
  117. if arch_alias:
  118. not_tags.add('exclude_' + arch_alias)
  119. if flavor_alias:
  120. not_tags.add('exclude_' + flavor_alias)
  121. if args.only_slow:
  122. tags.add('Slow')
  123. elif not args.include_slow:
  124. not_tags.add('Slow')
  125. elif args.include_slow and args.timeout == DEFAULT_TIMEOUT:
  126. args.timeout = SLOW_TIMEOUT
  127. not_tags.add('exclude_nightly' if args.nightly else 'nightly')
  128. # verbosity
  129. verbose = False
  130. if args.verbose:
  131. verbose = True
  132. print("Emitting verbose output...")
  133. # xplat: temp hard coded to exclude unsupported tests
  134. if sys.platform != 'win32':
  135. not_tags.add('exclude_xplat')
  136. not_tags.add('Intl')
  137. not_tags.add('require_simd')
  138. if args.sanitize != None:
  139. not_tags.add('exclude_sanitize_'+args.sanitize)
  140. if args.static != None:
  141. not_tags.add('exclude_static')
  142. if sys.platform == 'darwin':
  143. not_tags.add('exclude_mac')
  144. not_compile_flags = None
  145. # use -j flag to specify number of parallel processes
  146. processcount = cpu_count()
  147. if args.processcount != None:
  148. processcount = int(args.processcount)
  149. # handle warn on timeout
  150. warn_on_timeout = False
  151. if args.warn_on_timeout == True:
  152. warn_on_timeout = True
  153. # use tags/not_tags/not_compile_flags as case-insensitive
  154. def lower_set(s):
  155. return set([x.lower() for x in s] if s else [])
# Normalize the global filter sets once, so every later membership check can
# assume lower-case entries.
tags = lower_set(tags)
not_tags = lower_set(not_tags)
not_compile_flags = lower_set(not_compile_flags)
  159. # split tags text into tags set
  160. _empty_set = set()
  161. def split_tags(text):
  162. return set(x.strip() for x in text.lower().split(',')) if text \
  163. else _empty_set
  164. class LogFile(object):
  165. def __init__(self, log_file_path = None):
  166. self.file = None
  167. if log_file_path is None:
  168. # Set up the log file paths
  169. # Make sure the right directory exists and the log file doesn't
  170. log_file_name = "testrun.{0}{1}.log".format(arch, flavor)
  171. log_file_directory = os.path.join(repo_root, "test", "logs")
  172. if not os.path.exists(log_file_directory):
  173. os.mkdir(log_file_directory)
  174. self.log_file_path = os.path.join(log_file_directory, log_file_name)
  175. if os.path.exists(self.log_file_path):
  176. os.remove(self.log_file_path)
  177. else:
  178. self.log_file_path = log_file_path
  179. self.file = open(self.log_file_path, "w")
  180. def log(self, args):
  181. self.file.write(args)
  182. def __del__(self):
  183. if not (self.file is None):
  184. self.file.close()
if __name__ == '__main__':
    # Only the main process opens the log file; worker processes buffer
    # their output (TestVariant._print) and send it back over the queue.
    log_file = LogFile(args.logfile)

# Append one line to the log (main process only; relies on global log_file).
def log_message(msg = ""):
    log_file.log(msg + "\n")

# Echo a line to stdout and to the log.
def print_and_log(msg = ""):
    print(msg)
    log_message(msg)
  192. # remove carriage returns at end of line to avoid platform difference
  193. def normalize_new_line(text):
  194. return re.sub(b'[\r]+\n', b'\n', text)
# A test simply contains a collection of test attributes.
# Misc attributes added by test run:
#   id            unique counter to identify a test
#   filename      full path of test file
#   elapsed_time  elapsed time when running the test
#
class Test(dict):
    # Tests are plain dicts with attribute-style access, which keeps them
    # cheap to pickle across the multiprocessing boundary.
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    # support dot syntax for normal attribute access
    def __getattr__(self, key):
        # Dunder lookups fall through to the superclass (dict defines no
        # __getattr__, so this raises AttributeError — the normal answer
        # for protocol probes such as copy/pickle); all other names are a
        # plain dict lookup defaulting to None for missing keys.
        return super(Test, self).__getattr__(key) if key.startswith('__') \
            else self.get(key)

    # mark start of this test run, to compute elapsed_time
    def start(self):
        self.start_time = datetime.now()

    # mark end of this test run, compute elapsed_time
    def done(self):
        # First done() wins: test_one() calls done() again on failure after
        # _run_one_test() may already have recorded the elapsed time.
        if not self.elapsed_time:
            self.elapsed_time = (datetime.now() - self.start_time)\
                .total_seconds()
  216. # records pass_count/fail_count
  217. class PassFailCount(object):
  218. def __init__(self):
  219. self.pass_count = 0
  220. self.fail_count = 0
  221. def __str__(self):
  222. return 'passed {}, failed {}'.format(self.pass_count, self.fail_count)
  223. def total_count(self):
  224. return self.pass_count + self.fail_count
  225. # records total and individual folder's pass_count/fail_count
  226. class TestResult(PassFailCount):
  227. def __init__(self):
  228. super(self.__class__, self).__init__()
  229. self.folders = {}
  230. def _get_folder_result(self, folder):
  231. r = self.folders.get(folder)
  232. if not r:
  233. r = PassFailCount()
  234. self.folders[folder] = r
  235. return r
  236. def log(self, filename, fail=False):
  237. folder = os.path.basename(os.path.dirname(filename))
  238. r = self._get_folder_result(folder)
  239. if fail:
  240. r.fail_count += 1
  241. self.fail_count += 1
  242. else:
  243. r.pass_count += 1
  244. self.pass_count += 1
# test variants:
#   interpreted: -maxInterpretCount:1 -maxSimpleJitRunCount:1 -bgjit-
#   dynapogo: -forceNative -off:simpleJit -bgJitDelay:0
class TestVariant(object):
    """One named ch flag configuration under which the suite is run.

    Each variant owns its own tag filters, result tallies, and a Manager
    queue over which worker processes ship per-test results back to the
    main process.
    """

    def __init__(self, name, compile_flags=[], variant_not_tags=[]):
        # NOTE(review): the mutable [] defaults are shared between calls,
        # but they are only read here (never mutated), so safe in practice.
        self.name = name
        self.compile_flags = \
            ['-WERExceptionSupport', '-ExtendedErrorStackForTestHost',
            '-BaselineMode'] + compile_flags
        self._compile_flags_has_expansion = self._has_expansion(compile_flags)
        self.tags = tags.copy()
        self.not_tags = not_tags.union(variant_not_tags).union(
            ['{}_{}'.format(x, name) for x in ('fails','exclude')])
        self.msg_queue = Manager().Queue() # messages from multi processes
        self.test_result = TestResult()
        self.test_count = 0
        self._print_lines = [] # _print lines buffer
        self._last_len = 0

    # True if any flag contains a ${...} placeholder needing per-test expansion
    @staticmethod
    def _has_expansion(flags):
        return any(re.match('.*\${.*}', f) for f in flags)

    # substitute ${id} in a flag with this test's unique id
    @staticmethod
    def _expand(flag, test):
        return re.sub('\${id}', str(test.id), flag)

    def _expand_compile_flags(self, test):
        # per-test ${...} expansion; skipped entirely when no flag needs it
        if self._compile_flags_has_expansion:
            return [self._expand(flag, test) for flag in self.compile_flags]
        return self.compile_flags

    # check if this test variant should run a given test
    def _should_test(self, test):
        tags = split_tags(test.get('tags'))
        if not tags.isdisjoint(self.not_tags):
            return False
        if self.tags and not self.tags.issubset(tags):
            return False
        if not_compile_flags: # exclude unsupported compile-flags if any
            flags = test.get('compile-flags')
            if flags and \
                    not not_compile_flags.isdisjoint(flags.lower().split()):
                return False
        return True

    # print output from multi-process run, to be sent with result message
    def _print(self, line):
        self._print_lines.append(str(line))

    # queue a test result from multi-process runs
    def _log_result(self, test, fail):
        output = '\n'.join(self._print_lines) # collect buffered _print output
        self._print_lines = []
        self.msg_queue.put((test.filename, fail, test.elapsed_time, output))

    # (on main process) process one queued message
    def _process_msg(self, msg):
        filename, fail, elapsed_time, output = msg
        self.test_result.log(filename, fail=fail)
        line = '[{}/{} {:4.2f}] {} -> {}'.format(
            self.test_result.total_count(),
            self.test_count,
            elapsed_time,
            'Failed' if fail else 'Passed',
            self._short_name(filename))
        # '\r' overwrites the previous progress line for passes; failures
        # get a newline so they stay visible on the console
        padding = self._last_len - len(line)
        print(line + ' ' * padding, end='\n' if fail else '\r')
        log_message(line)
        self._last_len = len(line) if not fail else 0
        if len(output) > 0:
            print_and_log(output)

    # get a shorter test file path for display only
    def _short_name(self, filename):
        folder = os.path.basename(os.path.dirname(filename))
        return os.path.join(folder, os.path.basename(filename))

    # (on main process) wait and process one queued message
    def _process_one_msg(self):
        self._process_msg(self.msg_queue.get())

    # log a failed test with details
    def _show_failed(self, test, flags, exit_code, output,
            expected_output=None, timedout=False):
        if timedout:
            if warn_on_timeout:
                self._print('WARNING: Test timed out!')
            else:
                self._print('ERROR: Test timed out!')
        self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
        if expected_output == None or timedout:
            self._print("\nOutput:")
            self._print("----------------------------")
            self._print(output.decode('utf-8'))
            self._print("----------------------------")
        else:
            # show only the first line differing from the baseline
            # NOTE(review): if one output is a strict prefix of the other,
            # no line differs within the common length and no diff is shown
            lst_output = output.split(b'\n')
            lst_expected = expected_output.split(b'\n')
            ln = min(len(lst_output), len(lst_expected))
            for i in range(0, ln):
                if lst_output[i] != lst_expected[i]:
                    self._print("Output: (at line " + str(i) + ")")
                    self._print("----------------------------")
                    self._print(lst_output[i])
                    self._print("----------------------------")
                    self._print("Expected Output:")
                    self._print("----------------------------")
                    self._print(lst_expected[i])
                    self._print("----------------------------")
                    break
        self._print("exit code: {}".format(exit_code))
        # a timeout under --warn-on-timeout is reported but counted as a pass
        if warn_on_timeout and timedout:
            self._log_result(test, fail=False)
        else:
            self._log_result(test, fail=True)

    # temp: try find real file name on hard drive if case mismatch
    def _check_file(self, folder, filename):
        path = os.path.join(folder, filename)
        if os.path.isfile(path):
            return path # file exists on disk
        filename_lower = filename.lower()
        files = os.listdir(folder)
        for i in range(len(files)):
            if files[i].lower() == filename_lower:
                self._print('\nWARNING: {} should be {}\n'.format(
                    path, files[i]))
                return os.path.join(folder, files[i])
        # can't find the file, just return the path and let it error out
        return path

    # run one test under this variant
    def test_one(self, test):
        try:
            test.start()
            self._run_one_test(test)
        except Exception:
            test.done()
            self._print(traceback.format_exc())
            self._log_result(test, fail=True)

    # internally perform one test run
    def _run_one_test(self, test):
        folder = test.folder
        js_file = test.filename = self._check_file(folder, test.files)
        js_output = b''
        working_path = os.path.dirname(js_file)
        flags = test.get('compile-flags') or ''
        flags = self._expand_compile_flags(test) + \
            args.flags.split() + \
            flags.split()
        cmd = [binary] + flags + [os.path.basename(js_file)]
        test.start()
        proc = SP.Popen(cmd, stdout=SP.PIPE, stderr=SP.STDOUT, cwd=working_path)
        # timeout_data[1] records whether the watchdog killed the process
        timeout_data = [proc, False]
        def timeout_func(timeout_data):
            timeout_data[0].kill()
            timeout_data[1] = True
        timeout = test.get('timeout', args.timeout) # test override or default
        timer = Timer(timeout, timeout_func, [timeout_data])
        try:
            timer.start()
            js_output = normalize_new_line(proc.communicate()[0])
            exit_code = proc.wait()
        finally:
            timer.cancel()
        test.done()
        # shared _show_failed args
        fail_args = { 'test': test, 'flags': flags,
            'exit_code': exit_code, 'output': js_output };
        # check timed out
        if (timeout_data[1]):
            return self._show_failed(timedout=True, **fail_args)
        # check ch failed
        if exit_code != 0:
            return self._show_failed(**fail_args)
        # check output
        if 'baseline' not in test:
            # output lines must be 'pass' or 'passed' or empty
            lines = (line.lower() for line in js_output.split(b'\n'))
            if any(line != b'' and line != b'pass' and line != b'passed'
                    for line in lines):
                return self._show_failed(**fail_args)
        else:
            baseline = test.get('baseline')
            if baseline:
                # perform baseline comparison
                baseline = self._check_file(working_path, baseline)
                with open(baseline, 'rb') as bs_file:
                    baseline_output = bs_file.read()
                # Cleanup carriage return
                # todo: remove carriage return at the end of the line
                #       or better fix ch to output same on all platforms
                expected_output = normalize_new_line(baseline_output)
                if expected_output != js_output:
                    return self._show_failed(
                        expected_output=expected_output, **fail_args)
        # passed
        if verbose:
            self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
        self._log_result(test, fail=False)

    # run tests under this variant, using given multiprocessing Pool
    def _run(self, tests, pool):
        print_and_log('\n############# Starting {} variant #############'\
            .format(self.name))
        if self.tags:
            print_and_log(' tags: {}'.format(self.tags))
        for x in self.not_tags:
            print_and_log(' exclude: {}'.format(x))
        print_and_log()
        # filter tests to run
        tests = [x for x in tests if self._should_test(x)]
        self.test_count += len(tests)
        # run tests in parallel; results stream back through msg_queue and
        # are drained here until every dispatched test has reported
        result = pool.map_async(run_one, [(self,test) for test in tests])
        while self.test_result.total_count() != self.test_count:
            self._process_one_msg()

    # print test result summary
    def print_summary(self):
        print_and_log('\n######## Logs for {} variant ########'\
            .format(self.name))
        for folder, result in sorted(self.test_result.folders.items()):
            print_and_log('{}: {}'.format(folder, result))
        print_and_log("----------------------------")
        print_and_log('Total: {}'.format(self.test_result))

    # run all tests from testLoader
    def run(self, testLoader, pool, sequential_pool):
        tests, sequential_tests = [], []
        for folder in testLoader.folders():
            if folder.tags.isdisjoint(self.not_tags):
                # 'sequential' folders go to the one-worker pool
                dest = tests if not folder.is_sequential else sequential_tests
                dest += folder.tests
        if tests:
            self._run(tests, pool)
        if sequential_tests:
            self._run(sequential_tests, sequential_pool)
  469. # global run one test function for multiprocessing, used by TestVariant
  470. def run_one(data):
  471. try:
  472. variant, test = data
  473. variant.test_one(test)
  474. except Exception:
  475. print('ERROR: Unhandled exception!!!')
  476. traceback.print_exc()
# A test folder contains a list of tests and maybe some tags.
class TestFolder(object):
    def __init__(self, tests, tags=_empty_set):
        # tests: list of Test objects loaded from this folder's rlexe.xml
        # tags: folder-level tag set from rlexedirs.xml (the shared
        #       _empty_set default is only read, never mutated)
        self.tests = tests
        self.tags = tags
        # folders tagged 'sequential' must not run in the parallel pool
        self.is_sequential = 'sequential' in tags
# TestLoader loads all tests
class TestLoader(object):
    """Discovers test folders (rlexedirs.xml) and their tests (rlexe.xml).

    *paths* may mix directories and single .js files; a file restricts the
    load to the matching test entries in its folder.
    """

    def __init__(self, paths):
        self._folder_tags = self._load_folder_tags()
        self._test_id = 0     # monotonically increasing unique test id
        self._folders = []    # list of TestFolder
        for path in paths:
            if os.path.isfile(path):
                folder, file = os.path.dirname(path), os.path.basename(path)
            else:
                folder, file = path, None
            ftags = self._get_folder_tags(folder)
            if ftags != None: # Only honor entries listed in rlexedirs.xml
                tests = self._load_tests(folder, file)
                self._folders.append(TestFolder(tests, ftags))

    def folders(self):
        return self._folders

    # load folder/tags info from test_root/rlexedirs.xml
    @staticmethod
    def _load_folder_tags():
        xmlpath = os.path.join(test_root, 'rlexedirs.xml')
        try:
            xml = ET.parse(xmlpath).getroot()
        except IOError:
            print_and_log('ERROR: failed to read {}'.format(xmlpath))
            exit(-1)
        folder_tags = {}
        for x in xml:
            d = x.find('default')
            key = d.find('files').text.lower() # avoid case mismatch
            tags = d.find('tags')
            folder_tags[key] = \
                split_tags(tags.text) if tags != None else _empty_set
        return folder_tags

    # get folder tags if any
    def _get_folder_tags(self, folder):
        key = os.path.basename(os.path.normpath(folder)).lower()
        return self._folder_tags.get(key)

    def _next_test_id(self):
        self._test_id += 1
        return self._test_id

    # load all tests in folder using rlexe.xml file
    def _load_tests(self, folder, file):
        try:
            xmlpath = os.path.join(folder, 'rlexe.xml')
            xml = ET.parse(xmlpath).getroot()
        except IOError:
            # no rlexe.xml -> no tests in this folder
            return []

        # apply <override> children when <condition>'s check_tag matches
        # check_value (used for per-target overrides, e.g. amd64)
        def test_override(condition, check_tag, check_value, test):
            target = condition.find(check_tag)
            if target != None and target.text == check_value:
                for override in condition.find('override'):
                    test[override.tag] = override.text

        # build one Test from a <test> element's <default> children
        def load_test(testXml):
            test = Test(folder=folder)
            for c in testXml.find('default'):
                if c.tag == 'timeout': # timeout seconds
                    test[c.tag] = int(c.text)
                elif c.tag == 'tags' and c.tag in test: # merge multiple <tags>
                    test[c.tag] = test[c.tag] + ',' + c.text
                else:
                    test[c.tag] = c.text
            condition = testXml.find('condition')
            if condition != None:
                test_override(condition, 'target', arch_alias, test)
            return test

        tests = [load_test(x) for x in xml]
        if file != None:
            # a specific file was requested: keep only matching entries, or
            # synthesize a bare test if the .js file is not listed at all
            tests = [x for x in tests if x.files == file]
            if len(tests) == 0 and self.is_jsfile(file):
                tests = [Test(folder=folder, files=file, baseline='')]
        for test in tests: # assign unique test.id
            test.id = self._next_test_id()
        return tests

    @staticmethod
    def is_jsfile(path):
        return os.path.splitext(path)[1] == '.js'
  560. def main():
  561. # Set the right timezone, the tests need Pacific Standard Time
  562. # TODO: Windows. time.tzset only supports Unix
  563. if hasattr(time, 'tzset'):
  564. os.environ['TZ'] = 'US/Pacific'
  565. time.tzset()
  566. # By default run all tests
  567. if len(args.folders) == 0:
  568. files = (os.path.join(test_root, x) for x in os.listdir(test_root))
  569. args.folders = [f for f in sorted(files) if not os.path.isfile(f)]
  570. # load all tests
  571. testLoader = TestLoader(args.folders)
  572. # test variants
  573. variants = [x for x in [
  574. TestVariant('interpreted', [
  575. '-maxInterpretCount:1', '-maxSimpleJitRunCount:1', '-bgjit-',
  576. '-dynamicprofilecache:profile.dpl.${id}'
  577. ]),
  578. TestVariant('dynapogo', [
  579. '-forceNative', '-off:simpleJit', '-bgJitDelay:0',
  580. '-dynamicprofileinput:profile.dpl.${id}'
  581. ]),
  582. TestVariant('disable_jit', [
  583. '-nonative'
  584. ], [
  585. 'exclude_interpreted', 'fails_interpreted', 'require_backend'
  586. ])
  587. ] if x.name in args.variants]
  588. # rm profile.dpl.*
  589. for f in glob.glob(test_root + '/*/profile.dpl.*'):
  590. os.remove(f)
  591. # run each variant
  592. pool, sequential_pool = Pool(processcount), Pool(1)
  593. start_time = datetime.now()
  594. for variant in variants:
  595. variant.run(testLoader, pool, sequential_pool)
  596. elapsed_time = datetime.now() - start_time
  597. # print summary
  598. for variant in variants:
  599. variant.print_summary()
  600. print()
  601. failed = any(variant.test_result.fail_count > 0 for variant in variants)
  602. print('[{}] {}'.format(
  603. str(elapsed_time), 'Success!' if not failed else 'Failed!'))
  604. return 1 if failed else 0
if __name__ == '__main__':
    # Propagate the overall result as the process exit code (0 = success).
    sys.exit(main())