#!/usr/bin/env python

"""
The JS Shell Test Harness.

See the adjacent README.txt for more details.
"""
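
# Illustrative invocations; the script name and shell path below are
# placeholders (see README.txt and the --help output for authoritative usage):
#
#   python jstests.py /path/to/js                  # run the whole suite
#   python jstests.py -j 4 /path/to/js ecma_5      # four workers, paths matching 'ecma_5'
#   python jstests.py --show-cmd /path/to/js       # also print each test's command line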
|
import os, sys, textwrap
from os.path import abspath, dirname, realpath
from copy import copy
from subprocess import list2cmdline, call

from lib.results import NullTestOutput, ResultsSink
from lib.tests import TestCase, TBPL_FLAGS
from lib.progressbar import ProgressBar
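
# Both task runners expose the same run_all_tests(tests, results, options)
# entry point; which implementation is imported depends on the host platform.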
if (sys.platform.startswith('linux') or
    sys.platform.startswith('darwin')):
    from lib.tasks_unix import run_all_tests
else:
    from lib.tasks_win import run_all_tests

def run_tests(options, tests, results):
    """Run the given tests, sending raw results to the given results accumulator."""
    try:
        completed = run_all_tests(tests, results, options)
    except KeyboardInterrupt:
        completed = False

    results.finish(completed)
|
def get_cpu_count():
    """
    Guess at a reasonable parallelism count to set as the default for the
    current machine and run.
    """
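    # Probe in order: the multiprocessing module (Python 2.6+), POSIX
    # sysconf, then the Windows environment; the first positive answer wins.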
    # Python 2.6+
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        pass

    # POSIX
    try:
        res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
        if res > 0:
            return res
    except (AttributeError, ValueError):
        pass

    # Windows
    try:
        res = int(os.environ['NUMBER_OF_PROCESSORS'])
        if res > 0:
            return res
    except (KeyError, ValueError):
        pass

    return 1
|
def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, requested_paths, excluded_paths)
        options :object: The raw OptionParser output. The absolute path of the
                         shell to test with is stored on it as options.js_shell.
        requested_paths :set<str>: Test paths specifically requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
|
    from optparse import OptionParser, OptionGroup
    op = OptionParser(usage=textwrap.dedent("""
        %prog [OPTIONS] JS_SHELL [TESTS]

        Shell output format: [ pass | fail | timeout | skip ] progress | time
        """).strip())
    op.add_option('--xul-info', dest='xul_info_src',
                  help='config data for xulRuntime (avoids search for config/autoconf.mk)')

    harness_og = OptionGroup(op, "Harness Controls", "Control how tests are run.")
    harness_og.add_option('-j', '--worker-count', type=int,
                          default=max(1, get_cpu_count()),
                          help='Number of tests to run in parallel (default %default).')
    harness_og.add_option('-t', '--timeout', type=float, default=150.0,
                          help='Set the maximum time a test is allowed to run (in seconds).')
    harness_og.add_option('-a', '--args', dest='shell_args', default='',
                          help='Extra args to pass to the JS shell.')
    harness_og.add_option('--jitflags', default='',
                          help="Obsolete. Does nothing.")
    harness_og.add_option('--tbpl', action='store_true',
                          help='Run each test in every flag configuration used by TBPL.')
    harness_og.add_option('-g', '--debug', action='store_true',
                          help='Run a single test in the debugger.')
    harness_og.add_option('--debugger', default='gdb -q --args',
                          help='Debugger command.')
    harness_og.add_option('-J', '--jorendb', action='store_true',
                          help='Run under the JS debugger.')
    harness_og.add_option('--passthrough', action='store_true',
                          help='Run tests with stdin/stdout attached to the caller.')
    harness_og.add_option('--valgrind', action='store_true',
                          help='Run tests in valgrind.')
    harness_og.add_option('--valgrind-args', default='',
                          help='Extra args to pass to valgrind.')
    op.add_option_group(harness_og)

    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
    input_og.add_option('-f', '--file', dest='test_file', action='append',
                        help='Get tests from the given file.')
    input_og.add_option('-x', '--exclude-file', action='append',
                        help='Exclude tests from the given file.')
    # default=True so that random tests run unless -d is given; without a
    # default, optparse leaves dest None and the flag would have no effect.
    input_og.add_option('-d', '--exclude-random', dest='random',
                        action='store_false', default=True,
                        help='Exclude tests marked as "random."')
    input_og.add_option('--run-skipped', action='store_true',
                        help='Run tests marked as "skip."')
    input_og.add_option('--run-only-skipped', action='store_true',
                        help='Run only tests marked as "skip."')
    input_og.add_option('--run-slow-tests', action='store_true',
                        help='Do not skip tests marked as "slow."')
    input_og.add_option('--no-extensions', action='store_true',
                        help='Run only tests conforming to the ECMAScript 5 standard.')
    op.add_option_group(input_og)

    output_og = OptionGroup(op, "Output", "Modify the harness and tests output.")
    output_og.add_option('-s', '--show-cmd', action='store_true',
                         help='Show the exact command line used to run each test.')
    output_og.add_option('-o', '--show-output', action='store_true',
                         help="Print each test's output to the file given by --output-file.")
    output_og.add_option('-F', '--failed-only', action='store_true',
                         help="If a --show-* option is given, only print output for failed tests.")
    output_og.add_option('-O', '--output-file',
                         help='Write all output to the given file (default: stdout).')
    output_og.add_option('--failure-file',
                         help='Write all not-passed tests to the given file.')
    output_og.add_option('--no-progress', dest='hide_progress', action='store_true',
                         help='Do not show the progress bar.')
    output_og.add_option('--tinderbox', action='store_true',
                         help='Use tinderbox-parseable output format.')
    op.add_option_group(output_og)

    special_og = OptionGroup(op, "Special", "Special modes that do not run tests.")
    special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                          help='Generate reftest manifest files.')
    op.add_option_group(special_og)
    options, args = op.parse_args()
|
    # Acquire the JS shell given on the command line.
    options.js_shell = None
    requested_paths = set()
    if len(args) > 0:
        options.js_shell = abspath(args[0])
        requested_paths |= set(args[1:])

    # If we do not have a shell, we must be in a special mode.
    if options.js_shell is None and not options.make_manifests:
        op.error('missing JS_SHELL argument')

    # Valgrind and gdb are mutually exclusive.
    if options.valgrind and options.debug:
        op.error("--valgrind and --debug are mutually exclusive.")

    # Build the debugger or valgrind command prefix, as needed.
    prefix = options.debugger.split() if options.debug else []
    if options.valgrind:
        prefix = ['valgrind'] + options.valgrind_args.split()
        if os.uname()[0] == 'Darwin':
            prefix.append('--dsymutil=yes')
        options.show_output = True

    js_cmd_args = options.shell_args.split()
    if options.jorendb:
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(os.path.join(
            abspath(dirname(abspath(__file__))),
            '..', '..', 'examples', 'jorendb.js'))
        js_cmd_args.extend(['-d', '-f', debugger_path, '--'])
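
    # Every test is launched with this prefix: the debugger or valgrind
    # wrapper (if any), then the shell path and any extra shell arguments;
    # per-test options and the test path are appended later (the assembly
    # lives in lib/tests.py).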
    TestCase.set_js_cmd_prefix(options.js_shell, js_cmd_args, prefix)

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            requested_paths |= set([line.strip() for line in open(test_file).readlines()])

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set. Blank lines and lines starting with '#' are ignored.
    excluded_paths = set()
    if options.exclude_file:
        for filename in options.exclude_file:
            # Open outside the try block so fp is always bound when the
            # finally clause closes it.
            fp = open(filename, 'r')
            try:
                for line in fp:
                    if line.startswith('#'):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)
            finally:
                fp.close()
|
    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, 'w')
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    options.show = options.show_cmd or options.show_output

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (options.tinderbox or
                             not ProgressBar.conservative_isatty() or
                             options.hide_progress)

    return (options, requested_paths, excluded_paths)
|
def load_tests(options, requested_paths, excluded_paths):
    """
    Returns a tuple: (skip_list, test_list)
        skip_list :iterable<Test>: Tests found but skipped.
        test_list :iterable<Test>: Tests found that should be run.
    """
    import lib.manifest as manifest

    if options.js_shell is None:
        xul_tester = manifest.NullXULInfoTester()
    else:
        if options.xul_info_src is None:
            xul_info = manifest.XULInfo.create(options.js_shell)
        else:
            xul_abi, xul_os, xul_debug = options.xul_info_src.split(':')
            # Compare with ==, not 'is': identity comparison of strings is
            # unreliable.
            xul_debug = xul_debug.lower() == 'true'
            xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
        xul_tester = manifest.XULInfoTester(xul_info, options.js_shell)

    test_dir = dirname(abspath(__file__))
    test_list = manifest.load(test_dir, xul_tester)
    skip_list = []

    if options.make_manifests:
        manifest.make_manifests(options.make_manifests, test_list)
        sys.exit()
|
    # Create a new test list. Apply each TBPL configuration to every test.
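    # (TBPL_FLAGS, imported from lib/tests.py, is a sequence of flag lists;
    # each flag list is appended to the options of a fresh copy of every
    # test, so one test fans out into one run per configuration.)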
    if options.tbpl:
        new_test_list = []
        flags_list = TBPL_FLAGS
        for test in test_list:
            for jitflags in flags_list:
                tmp_test = copy(test)
                tmp_test.options = copy(test.options)
                tmp_test.options.extend(jitflags)
                new_test_list.append(tmp_test)
        test_list = new_test_list

    if options.jitflags:
        print("Warning: the --jitflags option is obsolete and does nothing now.")

    if options.test_file:
        paths = set()
        for test_file in options.test_file:
            paths |= set([line.strip() for line in open(test_file).readlines()])
        test_list = [_ for _ in test_list if _.path in paths]
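
    # Positional path arguments select tests by substring match: any test
    # whose path contains one of the requested fragments is kept.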
    if requested_paths:
        def p(path):
            for arg in requested_paths:
                if arg in path:
                    return True
            return False
        test_list = [_ for _ in test_list if p(_.path)]

    if options.exclude_file:
        test_list = [_ for _ in test_list if _.path not in excluded_paths]

    if options.no_extensions:
        pattern = os.sep + 'extensions' + os.sep
        test_list = [_ for _ in test_list if pattern not in _.path]

    if not options.random:
        test_list = [_ for _ in test_list if not _.random]

    if options.run_only_skipped:
        options.run_skipped = True
        test_list = [_ for _ in test_list if not _.enable]

    if not options.run_slow_tests:
        test_list = [_ for _ in test_list if not _.slow]

    if not options.run_skipped:
        skip_list = [_ for _ in test_list if not _.enable]
        test_list = [_ for _ in test_list if _.enable]

    return skip_list, test_list
|
def main():
    options, requested_paths, excluded_paths = parse_args()
    skip_list, test_list = load_tests(options, requested_paths, excluded_paths)

    if not test_list:
        print('no tests selected')
        return 1

    test_dir = dirname(abspath(__file__))

    if options.debug:
        if len(test_list) > 1:
            print('Multiple tests match command line arguments; the debugger can only run one:')
            for tc in test_list:
                print('    %s' % tc.path)
            return 2

        cmd = test_list[0].get_command(TestCase.js_cmd_prefix)
        if options.show_cmd:
            print(list2cmdline(cmd))
        if test_dir not in ('', '.'):
            os.chdir(test_dir)
        call(cmd)
        return 0

    curdir = os.getcwd()
    if test_dir not in ('', '.'):
        os.chdir(test_dir)

    results = None
    try:
        results = ResultsSink(options, len(skip_list) + len(test_list))
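        # Skipped tests are reported to the sink as null results so the
        # progress total still accounts for every discovered test.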
        for t in skip_list:
            results.push(NullTestOutput(t))
        run_tests(options, test_list, results)
    finally:
        os.chdir(curdir)

    if results is None or not results.all_passed():
        return 1

    return 0

if __name__ == '__main__':
    sys.exit(main())