  1. #!/usr/bin/env python
  2. #-------------------------------------------------------------------------------------------------------
  3. # Copyright (C) Microsoft. All rights reserved.
  4. # Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
  5. #-------------------------------------------------------------------------------------------------------
  6. from __future__ import print_function
  7. from datetime import datetime
  8. from multiprocessing import Pool, Manager, cpu_count
  9. from threading import Timer
  10. import sys
  11. import os
  12. import glob
  13. import subprocess as SP
  14. import traceback
  15. import argparse
  16. import xml.etree.ElementTree as ET
  17. import re
  18. import time
  19. # handle command line args
  20. parser = argparse.ArgumentParser(
  21. description='ChakraCore *nix Test Script',
  22. formatter_class=argparse.RawDescriptionHelpFormatter,
  23. epilog='''\
  24. Samples:
  25. test all folders:
  26. runtests.py
  27. test only Array:
  28. runtests.py Array
  29. test a single file:
  30. runtests.py Basics/hello.js
  31. ''')
  32. DEFAULT_TIMEOUT = 60
  33. SLOW_TIMEOUT = 180
  34. parser.add_argument('folders', metavar='folder', nargs='*',
  35. help='folder subset to run tests')
  36. parser.add_argument('-b', '--binary', metavar='bin',
  37. help='ch full path')
  38. parser.add_argument('-v', '--verbose', action='store_true',
  39. help='increase verbosity of output')
  40. parser.add_argument('--sanitize', metavar='sanitizers',
  41. help='ignore tests known to be broken with these sanitizers')
  42. parser.add_argument('-d', '--debug', action='store_true',
  43. help='use debug build');
  44. parser.add_argument('-t', '--test', '--test-build', action='store_true',
  45. help='use test build')
  46. parser.add_argument('-f', '--full', '--chakrafull', action='store_true',
  47. help='test chakrafull instead of chakracore')
  48. parser.add_argument('--static', action='store_true',
  49. help='mark that we are testing a static build')
  50. parser.add_argument('--variants', metavar='variant', nargs='+',
  51. help='run specified test variants')
  52. parser.add_argument('--include-slow', action='store_true',
  53. help='include slow tests (timeout ' + str(SLOW_TIMEOUT) + ' seconds)')
  54. parser.add_argument('--only-slow', action='store_true',
  55. help='run only slow tests')
  56. parser.add_argument('--nightly', action='store_true',
  57. help='run as nightly tests')
  58. parser.add_argument('--tag', nargs='*',
  59. help='select tests with given tags')
  60. parser.add_argument('--not-tag', action='append',
  61. help='exclude tests with given tags')
  62. parser.add_argument('--flags', default='',
  63. help='global test flags to ch')
  64. parser.add_argument('--timeout', type=int, default=DEFAULT_TIMEOUT,
  65. help='test timeout (default ' + str(DEFAULT_TIMEOUT) + ' seconds)')
  66. parser.add_argument('--swb', action='store_true',
  67. help='use binary from VcBuild.SWB to run the test')
  68. parser.add_argument('--lldb', default=None,
  69. help='run test suit with lldb batch mode to get call stack for crashing processes (ignores baseline matching)', action='store_true')
  70. parser.add_argument('-l', '--logfile', metavar='logfile',
  71. help='file to log results to', default=None)
  72. parser.add_argument('--x86', action='store_true',
  73. help='use x86 build')
  74. parser.add_argument('--x64', action='store_true',
  75. help='use x64 build')
  76. parser.add_argument('--arm', action='store_true',
  77. help='use arm build')
  78. parser.add_argument('--arm64', action='store_true',
  79. help='use arm64 build')
  80. parser.add_argument('-j', '--processcount', metavar='processcount', type=int,
  81. help='number of parallel threads to use')
  82. parser.add_argument('--warn-on-timeout', action='store_true',
  83. help='warn when a test times out instead of labelling it as an error immediately')
  84. parser.add_argument('--override-test-root', type=str,
  85. help='change the base directory for the tests (where rlexedirs will be sought)')
  86. parser.add_argument('--extra-flags', type=str,
  87. help='add extra flags to all executed tests')
  88. args = parser.parse_args()
  89. test_root = os.path.dirname(os.path.realpath(__file__))
  90. repo_root = os.path.dirname(test_root)
  91. # new test root
  92. if args.override_test_root:
  93. test_root = os.path.realpath(args.override_test_root)
  94. # arch: x86, x64, arm, arm64
  95. arch = None
  96. if args.x86:
  97. arch = 'x86'
  98. elif args.x64:
  99. arch = 'x64'
  100. elif args.arm:
  101. arch = 'arm'
  102. elif args.arm64:
  103. arch = 'arm64'
  104. if arch == None:
  105. arch = os.environ.get('_BuildArch', 'x86')
  106. if sys.platform != 'win32':
  107. arch = 'x64' # xplat: hard code arch == x64
  108. arch_alias = 'amd64' if arch == 'x64' else None
  109. # flavor: debug, test, release
  110. type_flavor = {'chk':'Debug', 'test':'Test', 'fre':'Release'}
  111. flavor = 'Debug' if args.debug else ('Test' if args.test else None)
  112. if flavor == None:
  113. print("ERROR: Test build target wasn't defined.")
  114. print("Try '-t' (test build) or '-d' (debug build).")
  115. sys.exit(1)
  116. flavor_alias = 'chk' if flavor == 'Debug' else 'fre'
  117. # handling for extra flags
  118. extra_flags = []
  119. if args.extra_flags:
  120. extra_flags = args.extra_flags.split()
  121. # test variants
  122. if not args.variants:
  123. args.variants = ['interpreted', 'dynapogo']
  124. # target binary variants
  125. binary_name_noext = "ch"
  126. if args.full:
  127. binary_name_noext = "jshost"
  128. repo_root = os.path.dirname(repo_root)
  129. # we need this to have consistent error message formatting with ch
  130. extra_flags.append("-bvt")
  131. else:
  132. extra_flags.append('-WERExceptionSupport')
  133. # append exe to the binary name on windows
  134. binary_name = binary_name_noext
  135. if sys.platform == 'win32':
  136. binary_name = binary_name + ".exe"
  137. # binary: full ch path
  138. binary = args.binary
  139. if binary == None:
  140. if sys.platform == 'win32':
  141. build = "VcBuild.SWB" if args.swb else "VcBuild"
  142. binary = os.path.join(repo_root, 'Build', build, 'bin', '{}_{}'.format(arch, flavor), binary_name)
  143. else:
  144. binary = os.path.join(repo_root, 'out', flavor, binary_name)
  145. if not os.path.isfile(binary):
  146. print('{} not found. Did you run ./build.sh already?'.format(binary))
  147. sys.exit(1)
  148. # global tags/not_tags
  149. tags = set(args.tag or [])
  150. not_tags = set(args.not_tag or []).union(['fail', 'exclude_' + arch, 'exclude_' + flavor])
  151. if arch_alias:
  152. not_tags.add('exclude_' + arch_alias)
  153. if flavor_alias:
  154. not_tags.add('exclude_' + flavor_alias)
  155. if args.only_slow:
  156. tags.add('Slow')
  157. elif not args.include_slow:
  158. not_tags.add('Slow')
  159. elif args.include_slow and args.timeout == DEFAULT_TIMEOUT:
  160. args.timeout = SLOW_TIMEOUT
  161. not_tags.add('exclude_nightly' if args.nightly else 'nightly')
  162. # verbosity
  163. verbose = False
  164. if args.verbose:
  165. verbose = True
  166. print("Emitting verbose output...")
  167. # xplat: temp hard coded to exclude unsupported tests
  168. if sys.platform != 'win32':
  169. not_tags.add('exclude_xplat')
  170. not_tags.add('require_winglob')
  171. not_tags.add('require_simd')
  172. else:
  173. not_tags.add('exclude_windows')
  174. # exclude tests that depend on features not supported on a platform
  175. if arch == 'arm' or arch == 'arm64':
  176. not_tags.add('require_asmjs')
  177. # exclude tests that exclude the current binary
  178. not_tags.add('exclude_' + binary_name_noext)
  179. # exclude tests known to fail under certain sanitizers
  180. if args.sanitize != None:
  181. not_tags.add('exclude_sanitize_'+args.sanitize)
  182. if args.static != None:
  183. not_tags.add('exclude_static')
  184. if sys.platform == 'darwin':
  185. not_tags.add('exclude_mac')
  186. if 'require_icu' in not_tags or 'exclude_noicu' in not_tags:
  187. not_tags.add('Intl')
  188. not_compile_flags = None
  189. # use -j flag to specify number of parallel processes
  190. processcount = cpu_count()
  191. if args.processcount != None:
  192. processcount = int(args.processcount)
  193. # handle warn on timeout
  194. warn_on_timeout = False
  195. if args.warn_on_timeout == True:
  196. warn_on_timeout = True
  197. # use tags/not_tags/not_compile_flags as case-insensitive
  198. def lower_set(s):
  199. return set([x.lower() for x in s] if s else [])
  200. tags = lower_set(tags)
  201. not_tags = lower_set(not_tags)
  202. not_compile_flags = lower_set(not_compile_flags)
  203. # split tags text into tags set
  204. _empty_set = set()
  205. def split_tags(text):
  206. return set(x.strip() for x in text.lower().split(',')) if text \
  207. else _empty_set
  208. class LogFile(object):
  209. def __init__(self, log_file_path = None):
  210. self.file = None
  211. if log_file_path is None:
  212. # Set up the log file paths
  213. # Make sure the right directory exists and the log file doesn't
  214. log_file_name = "testrun.{0}{1}.log".format(arch, flavor)
  215. log_file_directory = os.path.join(test_root, "logs")
  216. if not os.path.exists(log_file_directory):
  217. os.mkdir(log_file_directory)
  218. self.log_file_path = os.path.join(log_file_directory, log_file_name)
  219. if os.path.exists(self.log_file_path):
  220. os.remove(self.log_file_path)
  221. else:
  222. self.log_file_path = log_file_path
  223. self.file = open(self.log_file_path, "w")
  224. def log(self, args):
  225. self.file.write(args)
  226. def __del__(self):
  227. if not (self.file is None):
  228. self.file.close()
# The log file is created only in the main process (__main__ guard) --
# presumably so that multiprocessing workers importing this module do not
# re-open/truncate the log; TODO confirm that workers never call
# log_message directly (it would raise NameError for log_file).
if __name__ == '__main__':
    log_file = LogFile(args.logfile)

# Append msg plus a newline to the global log file.
def log_message(msg = ""):
    log_file.log(msg + "\n")

# Print msg to stdout and also append it to the log file.
def print_and_log(msg = ""):
    print(msg)
    log_message(msg)
  236. # remove carriage returns at end of line to avoid platform difference
  237. def normalize_new_line(text):
  238. return re.sub(b'[\r]+\n', b'\n', text)
# A test simply contains a collection of test attributes.
# Misc attributes added by test run:
#   id            unique counter to identify a test
#   filename      full path of test file
#   elapsed_time  elapsed time when running the test
#
class Test(dict):
    """Attribute-style dict holding one test's metadata from rlexe.xml."""

    # attribute writes/deletes go straight to the underlying dict entries
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__

    # support dot syntax for normal attribute access
    def __getattr__(self, key):
        # dunder lookups are delegated upward (dict defines no __getattr__,
        # so this effectively raises AttributeError for them); all other
        # names fall back to dict.get, so missing keys read as None
        return super(Test, self).__getattr__(key) if key.startswith('__') \
            else self.get(key)

    # mark start of this test run, to compute elapsed_time
    def start(self):
        self.start_time = datetime.now()

    # mark end of this test run, compute elapsed_time
    def done(self):
        # keep the first recorded duration -- done() can run twice when
        # test_one()'s exception handler fires after _run_one_test already
        # called it
        if not self.elapsed_time:
            self.elapsed_time = (datetime.now() - self.start_time)\
                .total_seconds()
  260. # records pass_count/fail_count
  261. class PassFailCount(object):
  262. def __init__(self):
  263. self.pass_count = 0
  264. self.fail_count = 0
  265. def __str__(self):
  266. return 'passed {}, failed {}'.format(self.pass_count, self.fail_count)
  267. def total_count(self):
  268. return self.pass_count + self.fail_count
  269. # records total and individual folder's pass_count/fail_count
  270. class TestResult(PassFailCount):
  271. def __init__(self):
  272. super(self.__class__, self).__init__()
  273. self.folders = {}
  274. def _get_folder_result(self, folder):
  275. r = self.folders.get(folder)
  276. if not r:
  277. r = PassFailCount()
  278. self.folders[folder] = r
  279. return r
  280. def log(self, filename, fail=False):
  281. folder = os.path.basename(os.path.dirname(filename))
  282. r = self._get_folder_result(folder)
  283. if fail:
  284. r.fail_count += 1
  285. self.fail_count += 1
  286. else:
  287. r.pass_count += 1
  288. self.pass_count += 1
# test variants:
#   interpreted: -maxInterpretCount:1 -maxSimpleJitRunCount:1 -bgjit-
#   dynapogo: -forceNative -off:simpleJit -bgJitDelay:0
class TestVariant(object):
    """One engine configuration (flag set) under which tests are run.

    Worker processes run tests via test_one() and post results to
    msg_queue; the main process drains the queue (_process_one_msg) and
    keeps the tallies in test_result.
    """

    def __init__(self, name, compile_flags=[], variant_not_tags=[]):
        # NOTE(review): the mutable list defaults are never mutated by this
        # class, so the shared-default pitfall does not bite in practice.
        self.name = name
        self.compile_flags = \
            ['-ExtendedErrorStackForTestHost',
             '-BaselineMode'] + compile_flags
        # remember whether any flag carries a ${...} placeholder so the
        # per-test expansion can be skipped when there is nothing to expand
        self._compile_flags_has_expansion = self._has_expansion(compile_flags)
        self.tags = tags.copy()
        # tests tagged fails_<variant> or exclude_<variant> are skipped too
        self.not_tags = not_tags.union(variant_not_tags).union(
            ['{}_{}'.format(x, name) for x in ('fails','exclude')])
        self.msg_queue = Manager().Queue() # messages from multi processes
        self.test_result = TestResult()
        self.test_count = 0
        self._print_lines = [] # _print lines buffer
        self._last_len = 0
        if verbose:
            print("Added variant {0}:".format(name))
            print("Flags: " + ", ".join(self.compile_flags))
            print("Tags: " + ", ".join(self.tags))
            print("NotTags: " + ", ".join(self.not_tags))

    # true if any flag contains a ${...} placeholder
    @staticmethod
    def _has_expansion(flags):
        return any(re.match('.*\${.*}', f) for f in flags)

    # substitute ${id} in a flag with the test's unique id
    @staticmethod
    def _expand(flag, test):
        return re.sub('\${id}', str(test.id), flag)

    # expand placeholders in the compile flags for one test (no-op when
    # the flags carry no placeholders)
    def _expand_compile_flags(self, test):
        if self._compile_flags_has_expansion:
            return [self._expand(flag, test) for flag in self.compile_flags]
        return self.compile_flags

    # check if this test variant should run a given test
    def _should_test(self, test):
        tags = split_tags(test.get('tags'))
        if not tags.isdisjoint(self.not_tags):
            return False    # the test carries an excluded tag
        if self.tags and not self.tags.issubset(tags):
            return False    # the test misses a required tag
        if not_compile_flags: # exclude unsupported compile-flags if any
            flags = test.get('compile-flags')
            if flags and \
                    not not_compile_flags.isdisjoint(flags.lower().split()):
                return False
        return True

    # print output from multi-process run, to be sent with result message
    def _print(self, line):
        self._print_lines.append(line)

    # queue a test result from multi-process runs
    def _log_result(self, test, fail):
        output = u'\n'.join(self._print_lines).encode('utf-8') # collect buffered _print output
        self._print_lines = []
        self.msg_queue.put((test.filename, fail, test.elapsed_time, output))

    # (on main process) process one queued message
    def _process_msg(self, msg):
        filename, fail, elapsed_time, output = msg
        self.test_result.log(filename, fail=fail)
        line = '[{}/{} {:4.2f}] {} -> {}'.format(
            self.test_result.total_count(),
            self.test_count,
            elapsed_time,
            'Failed' if fail else 'Passed',
            self._short_name(filename))
        # pad with spaces to fully overwrite the previous \r-terminated
        # progress line; failures end with \n so they stay on screen
        padding = self._last_len - len(line)
        print(line + ' ' * padding, end='\n' if fail else '\r')
        log_message(line)
        self._last_len = len(line) if not fail else 0
        if len(output) > 0:
            print_and_log(output)

    # get a shorter test file path for display only
    def _short_name(self, filename):
        folder = os.path.basename(os.path.dirname(filename))
        return os.path.join(folder, os.path.basename(filename))

    # (on main process) wait and process one queued message
    def _process_one_msg(self):
        self._process_msg(self.msg_queue.get())

    # log a failed test with details
    def _show_failed(self, test, flags, exit_code, output,
                     expected_output=None, timedout=False):
        if timedout:
            if warn_on_timeout:
                self._print('WARNING: Test timed out!')
            else:
                self._print('ERROR: Test timed out!')
        # the exact command line, for easy manual reproduction
        self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
        if expected_output == None or timedout:
            self._print("\nOutput:")
            self._print("----------------------------")
            self._print(output.decode('utf-8'))
            self._print("----------------------------")
        else:
            # show only the first line that differs from the baseline
            lst_output = output.split(b'\n')
            lst_expected = expected_output.split(b'\n')
            ln = min(len(lst_output), len(lst_expected))
            for i in range(0, ln):
                if lst_output[i] != lst_expected[i]:
                    self._print("Output: (at line " + str(i+1) + ")")
                    self._print("----------------------------")
                    self._print(lst_output[i])
                    self._print("----------------------------")
                    self._print("Expected Output:")
                    self._print("----------------------------")
                    self._print(lst_expected[i])
                    self._print("----------------------------")
                    break
        self._print("exit code: {}".format(exit_code))
        # a timeout under --warn-on-timeout is reported but counted as a pass
        if warn_on_timeout and timedout:
            self._log_result(test, fail=False)
        else:
            self._log_result(test, fail=True)

    # temp: try find real file name on hard drive if case mismatch
    def _check_file(self, folder, filename):
        path = os.path.join(folder, filename)
        if os.path.isfile(path):
            return path # file exists on disk
        filename_lower = filename.lower()
        files = os.listdir(folder)
        for i in range(len(files)):
            if files[i].lower() == filename_lower:
                self._print('\nWARNING: {} should be {}\n'.format(
                    path, files[i]))
                return os.path.join(folder, files[i])
        # cann't find the file, just return the path and let it error out
        return path

    # run one test under this variant
    def test_one(self, test):
        try:
            test.start()
            self._run_one_test(test)
        except Exception:
            # mark completion (done() keeps the first recorded elapsed
            # time) and report the traceback as the test's failure output
            test.done()
            self._print(traceback.format_exc())
            self._log_result(test, fail=True)

    # internally perform one test run
    def _run_one_test(self, test):
        folder = test.folder
        js_file = test.filename = self._check_file(folder, test.files)
        js_output = b''
        working_path = os.path.dirname(js_file)

        # flags = variant compile flags + global --flags + per-test flags
        flags = test.get('compile-flags') or ''
        flags = self._expand_compile_flags(test) + \
            args.flags.split() + \
            flags.split()
        if test.get('custom-config-file') != None:
            # a custom config file replaces all other flags
            flags = ['-CustomConfigFile:' + test.get('custom-config-file')]
        if args.lldb == None:
            cmd = [binary] + flags + [os.path.basename(js_file)]
        else:
            # write the ch invocation into a .lldb.cmd file and run it
            # under lldb batch mode to capture a backtrace on crash
            lldb_file = open(working_path + '/' + os.path.basename(js_file) + '.lldb.cmd', 'w')
            lldb_command = ['run'] + flags + [os.path.basename(js_file)]
            lldb_file.write(' '.join([str(s) for s in lldb_command]));
            lldb_file.close()
            cmd = ['lldb'] + [binary] + ['-s'] + [os.path.basename(js_file) + '.lldb.cmd'] + ['-o bt'] + ['-b']

        test.start()
        proc = SP.Popen(cmd, stdout=SP.PIPE, stderr=SP.STDOUT, cwd=working_path)
        # timeout_data[1] is flipped by the timer thread so a timeout kill
        # can be told apart from a normal exit
        timeout_data = [proc, False]
        def timeout_func(timeout_data):
            timeout_data[0].kill()
            timeout_data[1] = True
        timeout = test.get('timeout', args.timeout) # test override or default
        timer = Timer(timeout, timeout_func, [timeout_data])
        skip_baseline_match = False
        try:
            timer.start()
            js_output = normalize_new_line(proc.communicate()[0])
            exit_code = proc.wait()
            # if -lldb was set; check if test was crashed before corrupting the output
            # NOTE(review): search_for is str while js_output is bytes;
            # js_output.index(search_for) raises TypeError on Python 3, so
            # this branch presumably only works under Python 2 -- confirm.
            search_for = " exited with status = 0 (0x00000000)"
            if args.lldb != None and exit_code == 0 and js_output.index(search_for) > 0:
                js_output = js_output[0:js_output.index(search_for)]
                exit_pos = js_output.rfind('\nProcess ')
                if exit_pos > len(js_output) - 20: # if [Process ????? <seach for>]
                    if 'baseline' not in test:
                        # NOTE(review): assigns str to an otherwise-bytes
                        # variable; matches the Python 2 assumption above.
                        js_output = "pass"
                    else:
                        skip_baseline_match = True
        finally:
            timer.cancel()
        test.done()

        # shared _show_failed args
        fail_args = { 'test': test, 'flags': flags,
                      'exit_code': exit_code, 'output': js_output }

        # check timed out
        if (timeout_data[1]):
            return self._show_failed(timedout=True, **fail_args)

        # check ch failed
        if exit_code != 0 and binary_name_noext == 'ch':
            return self._show_failed(**fail_args)

        # check output
        if 'baseline' not in test:
            # output lines must be 'pass' or 'passed' or empty
            lines = (line.lower() for line in js_output.split(b'\n'))
            if any(line != b'' and line != b'pass' and line != b'passed'
                   for line in lines):
                return self._show_failed(**fail_args)
        else:
            baseline = test.get('baseline')
            if not skip_baseline_match and baseline:
                # perform baseline comparison
                baseline = self._check_file(folder, baseline)
                with open(baseline, 'rb') as bs_file:
                    baseline_output = bs_file.read()
                # Cleanup carriage return
                # todo: remove carriage return at the end of the line
                #       or better fix ch to output same on all platforms
                expected_output = normalize_new_line(baseline_output)
                if expected_output != js_output:
                    return self._show_failed(
                        expected_output=expected_output, **fail_args)

        # passed
        if verbose:
            self._print('{} {} {}'.format(binary, ' '.join(flags), test.filename))
        self._log_result(test, fail=False)

    # run tests under this variant, using given multiprocessing Pool
    def _run(self, tests, pool):
        print_and_log('\n############# Starting {} variant #############'\
            .format(self.name))
        if self.tags:
            print_and_log(' tags: {}'.format(self.tags))
        for x in self.not_tags:
            print_and_log(' exclude: {}'.format(x))
        print_and_log()

        # filter tests to run
        tests = [x for x in tests if self._should_test(x)]
        self.test_count += len(tests)

        # run tests in parallel; results arrive via msg_queue, so the
        # map_async result object itself is never consumed
        result = pool.map_async(run_one, [(self,test) for test in tests])
        while self.test_result.total_count() != self.test_count:
            self._process_one_msg()

    # print test result summary
    def print_summary(self):
        print_and_log('\n######## Logs for {} variant ########'\
            .format(self.name))
        for folder, result in sorted(self.test_result.folders.items()):
            print_and_log('{}: {}'.format(folder, result))
        print_and_log("----------------------------")
        print_and_log('Total: {}'.format(self.test_result))

    # run all tests from testLoader
    def run(self, testLoader, pool, sequential_pool):
        tests, sequential_tests = [], []
        for folder in testLoader.folders():
            if folder.tags.isdisjoint(self.not_tags):
                # sequential folders go to the single-worker pool
                dest = tests if not folder.is_sequential else sequential_tests
                dest += folder.tests
        if tests:
            self._run(tests, pool)
        if sequential_tests:
            self._run(sequential_tests, sequential_pool)
  538. # global run one test function for multiprocessing, used by TestVariant
  539. def run_one(data):
  540. try:
  541. variant, test = data
  542. variant.test_one(test)
  543. except Exception:
  544. print('ERROR: Unhandled exception!!!')
  545. traceback.print_exc()
# A test folder contains a list of tests and maybe some tags.
class TestFolder(object):
    def __init__(self, tests, tags=_empty_set):
        # tests: list of Test objects loaded from this folder's rlexe.xml
        self.tests = tests
        # tags: folder-level tag set from rlexedirs.xml; the shared
        # _empty_set default is never mutated by this class
        self.tags = tags
        # folders tagged 'sequential' are routed to the single-worker pool
        self.is_sequential = 'sequential' in tags
# TestLoader loads all tests
class TestLoader(object):
    """Loads the folders listed in rlexedirs.xml and each folder's rlexe.xml tests."""

    def __init__(self, paths):
        # paths: folder paths, or single-file paths to run one test
        self._folder_tags = self._load_folder_tags()
        self._test_id = 0   # monotonically increasing unique test id
        self._folders = []
        for path in paths:
            if os.path.isfile(path):
                folder, file = os.path.dirname(path), os.path.basename(path)
            else:
                folder, file = path, None
            ftags = self._get_folder_tags(folder)
            if ftags != None: # Only honor entries listed in rlexedirs.xml
                tests = self._load_tests(folder, file)
                self._folders.append(TestFolder(tests, ftags))

    # all loaded TestFolder objects
    def folders(self):
        return self._folders

    # load folder/tags info from test_root/rlexedirs.xml
    @staticmethod
    def _load_folder_tags():
        xmlpath = os.path.join(test_root, 'rlexedirs.xml')
        try:
            xml = ET.parse(xmlpath).getroot()
        except IOError:
            # without the manifest there is nothing to run at all
            print_and_log('ERROR: failed to read {}'.format(xmlpath))
            exit(-1)
        folder_tags = {}
        for x in xml:
            d = x.find('default')
            key = d.find('files').text.lower() # avoid case mismatch
            tags = d.find('tags')
            folder_tags[key] = \
                split_tags(tags.text) if tags != None else _empty_set
        return folder_tags

    # get folder tags if any (None means the folder is not in rlexedirs.xml)
    def _get_folder_tags(self, folder):
        key = os.path.basename(os.path.normpath(folder)).lower()
        return self._folder_tags.get(key)

    # hand out the next unique test id
    def _next_test_id(self):
        self._test_id += 1
        return self._test_id

    # load all tests in folder using rlexe.xml file
    def _load_tests(self, folder, file):
        try:
            xmlpath = os.path.join(folder, 'rlexe.xml')
            xml = ET.parse(xmlpath).getroot()
        except IOError:
            return []   # a folder without rlexe.xml contributes no tests

        # apply <condition><override> values when <check_tag>'s text
        # equals check_value
        def test_override(condition, check_tag, check_value, test):
            target = condition.find(check_tag)
            if target != None and target.text == check_value:
                for override in condition.find('override'):
                    test[override.tag] = override.text

        # build one Test from a test element's <default> children
        def load_test(testXml):
            test = Test(folder=folder)
            for c in testXml.find('default'):
                if c.tag == 'timeout': # timeout seconds
                    test[c.tag] = int(c.text)
                elif c.tag == 'tags' and c.tag in test: # merge multiple <tags>
                    test[c.tag] = test[c.tag] + ',' + c.text
                else:
                    test[c.tag] = c.text
            condition = testXml.find('condition')
            if condition != None:
                test_override(condition, 'target', arch_alias, test)
            return test

        tests = [load_test(x) for x in xml]
        if file != None:
            # a single file was requested: keep only matching entries, or
            # synthesize a baseline-less test for an unlisted .js file
            tests = [x for x in tests if x.files == file]
            if len(tests) == 0 and self.is_jsfile(file):
                tests = [Test(folder=folder, files=file, baseline='')]
        for test in tests: # assign unique test.id
            test.id = self._next_test_id()
        return tests

    @staticmethod
    def is_jsfile(path):
        # extension check only; the file need not exist
        return os.path.splitext(path)[1] == '.js'
def main():
    """Load, run and summarize all tests; return the process exit code (0/1)."""
    # Set the right timezone, the tests need Pacific Standard Time
    # TODO: Windows. time.tzset only supports Unix
    if hasattr(time, 'tzset'):
        os.environ['TZ'] = 'US/Pacific'
        time.tzset()

    # By default run all tests (every directory directly under test_root)
    if len(args.folders) == 0:
        files = (os.path.join(test_root, x) for x in os.listdir(test_root))
        args.folders = [f for f in sorted(files) if not os.path.isfile(f)]

    # load all tests
    testLoader = TestLoader(args.folders)

    # test variants; the interpreted variant writes dynamic profile data
    # (-dynamicprofilecache profile.dpl.${id}) which the dynapogo variant
    # reads back via -dynamicprofileinput
    variants = [x for x in [
        TestVariant('interpreted', extra_flags + [
            '-maxInterpretCount:1', '-maxSimpleJitRunCount:1', '-bgjit-',
            '-dynamicprofilecache:profile.dpl.${id}'
        ], [
            'require_disable_jit'
        ]),
        TestVariant('dynapogo', extra_flags + [
            '-forceNative', '-off:simpleJit', '-bgJitDelay:0',
            '-dynamicprofileinput:profile.dpl.${id}'
        ], [
            'require_disable_jit'
        ]),
        TestVariant('disable_jit', extra_flags + [
            '-nonative'
        ], [
            'exclude_interpreted', 'fails_interpreted', 'require_backend'
        ])
    ] if x.name in args.variants]

    # rm profile.dpl.* left over from previous runs
    for f in glob.glob(test_root + '/*/profile.dpl.*'):
        os.remove(f)

    # run each variant; sequential folders share a dedicated 1-worker pool
    pool, sequential_pool = Pool(processcount), Pool(1)
    start_time = datetime.now()
    for variant in variants:
        variant.run(testLoader, pool, sequential_pool)
    elapsed_time = datetime.now() - start_time

    # print summary
    for variant in variants:
        variant.print_summary()

    print()
    failed = any(variant.test_result.fail_count > 0 for variant in variants)
    print('[{}] {}'.format(
        str(elapsed_time), 'Success!' if not failed else 'Failed!'))
    return 1 if failed else 0
# Script entry point: main() returns 0 on success and 1 on any failure,
# which sys.exit propagates as the process exit code.
if __name__ == '__main__':
    sys.exit(main())