#!/usr/bin/env python

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

# run-tests.py -- Python harness for GDB SpiderMonkey support

import os, re, subprocess, sys, traceback
from threading import Thread

# From this directory:
import progressbar
from taskpool import TaskPool, get_cpu_count

# Backported from Python 3.1 posixpath.py
def _relpath(path, start=None):
    """Return a relative version of a path"""

    if not path:
        raise ValueError("no path specified")

    if start is None:
        start = os.curdir

    start_list = os.path.abspath(start).split(os.sep)
    path_list = os.path.abspath(path).split(os.sep)

    # Work out how much of the filepath is shared by start and path.
    i = len(os.path.commonprefix([start_list, path_list]))

    rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return os.curdir
    return os.path.join(*rel_list)

os.path.relpath = _relpath
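# For example (a sketch, not from the original source), with POSIX-style paths:
#   os.path.relpath('/a/b/c/d', '/a/b/x')  ==>  '../c/d'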
|

# Characters that need to be escaped when used in shell words.
shell_need_escapes = re.compile('[^\w\d%+,-./:=@\'"]', re.DOTALL)
# Characters that need to be escaped within double-quoted strings.
shell_dquote_escapes = re.compile('[^\w\d%+,-./:=@"]', re.DOTALL)
def make_shell_cmd(l):
    def quote(s):
        if shell_need_escapes.search(s):
            if s.find("'") < 0:
                return "'" + s + "'"
            # Prefix each character needing an escape with a backslash.
            return '"' + shell_dquote_escapes.sub(r'\\\g<0>', s) + '"'
        return s

    return ' '.join([quote(_) for _ in l])
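
# For example (a sketch, not from the original source):
#   make_shell_cmd(['gdb', '-ex', 'set var x = "hi"'])
# returns the single string:  gdb -ex 'set var x = "hi"'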
|

# An instance of this class collects the lists of passing, failing, and
# timing-out tests, runs the progress bar, and prints a summary at the end.
class Summary(object):

    class SummaryBar(progressbar.ProgressBar):
        def __init__(self, limit):
            super(Summary.SummaryBar, self).__init__('', limit, 24)
        def start(self):
            self.label = '[starting ]'
            self.update(0)
        def counts(self, run, failures, timeouts):
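            # Label fields: runs that did not fail, failures, timeouts, total runs.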
|
            self.label = '[%4d|%4d|%4d|%4d]' % (run - failures, failures, timeouts, run)
            self.update(run)

    def __init__(self, num_tests):
        self.run = 0
        self.failures = []  # kind of judgemental; "unexpecteds"?
        self.timeouts = []
        if not OPTIONS.hide_progress:
            self.bar = Summary.SummaryBar(num_tests)

    # Progress bar control.
    def start(self):
        if not OPTIONS.hide_progress:
            self.bar.start()
    def update(self):
        if not OPTIONS.hide_progress:
            self.bar.counts(self.run, len(self.failures), len(self.timeouts))
    # Call 'thunk' to show some output, while getting the progress bar out of the way.
    def interleave_output(self, thunk):
        if not OPTIONS.hide_progress:
            self.bar.clear()
        thunk()
        self.update()

    def passed(self, test):
        self.run += 1
        self.update()

    def failed(self, test):
        self.run += 1
        self.failures.append(test)
        self.update()

    def timeout(self, test):
        self.run += 1
        self.timeouts.append(test)
        self.update()

    def finish(self):
        if not OPTIONS.hide_progress:
            self.bar.finish()
|

        if self.failures:
            print "tests failed:"
            for test in self.failures:
                test.show(sys.stdout)

            if OPTIONS.worklist:
                try:
                    with open(OPTIONS.worklist, 'w') as out:
                        for test in self.failures:
                            out.write(test.name + '\n')
                except IOError as err:
                    sys.stderr.write("Error writing worklist file '%s': %s\n"
                                     % (OPTIONS.worklist, err))
                    sys.exit(1)
|

            if OPTIONS.write_failures:
                try:
                    with open(OPTIONS.write_failures, 'w') as out:
                        for test in self.failures:
                            test.show(out)
                except IOError as err:
                    sys.stderr.write("Error writing failure list file '%s': %s\n"
                                     % (OPTIONS.write_failures, err))
                    sys.exit(1)
|

        if self.timeouts:
            print "tests timed out:"
            for test in self.timeouts:
                test.show(sys.stdout)

        if self.failures or self.timeouts:
            sys.exit(2)

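# A TaskPool task that runs a single test script under GDB and reports the
# pass/fail/timeout result to the shared Summary.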
|
class Test(TaskPool.Task):
    def __init__(self, path, summary):
        super(Test, self).__init__()
        self.test_path = path  # path to .py test file
        self.summary = summary

        # test.name is the name of the test relative to the top of the test
        # directory. This is what we use to report failures and timeouts,
        # and when writing test lists.
        self.name = os.path.relpath(self.test_path, OPTIONS.testdir)

        self.stdout = ''
        self.stderr = ''
        self.returncode = None

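    # Build the GDB command line for this test. The final --eval-command runs
    # lib-for-tests/catcher.py inside GDB's embedded Python; presumably that
    # script runs the test file bound to 'testscript' (a guess from the names;
    # catcher.py is not shown here).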
|
    def cmd(self):
        testlibdir = os.path.normpath(os.path.join(OPTIONS.testdir, '..', 'lib-for-tests'))
        return [OPTIONS.gdb_executable,
                '-nw',          # Don't create a window (unnecessary?)
                '-nx',          # Don't read .gdbinit.
                '--ex', 'add-auto-load-safe-path %s' % (OPTIONS.builddir,),
                '--ex', 'set env LD_LIBRARY_PATH %s' % (OPTIONS.libdir,),
                '--ex', 'file %s' % (os.path.join(OPTIONS.builddir, 'gdb-tests'),),
                '--eval-command', 'python testlibdir=%r' % (testlibdir,),
                '--eval-command', 'python testscript=%r' % (self.test_path,),
                '--eval-command', 'python execfile(%r)' % os.path.join(testlibdir, 'catcher.py')]

    def start(self, pipe, deadline):
        super(Test, self).start(pipe, deadline)
        if OPTIONS.show_cmd:
            self.summary.interleave_output(lambda: self.show_cmd(sys.stdout))

    def onStdout(self, text):
        self.stdout += text

    def onStderr(self, text):
        self.stderr += text

    def onFinished(self, returncode):
        self.returncode = returncode
        if OPTIONS.show_output:
            self.summary.interleave_output(lambda: self.show_output(sys.stdout))
        if returncode != 0:
            self.summary.failed(self)
        else:
            self.summary.passed(self)

    def onTimeout(self):
        self.summary.timeout(self)

|
    def show_cmd(self, out):
        out.write('Command: %s\n' % (make_shell_cmd(self.cmd()),))

|
    def show_output(self, out):
        if self.stdout:
            out.write('Standard output:')
            out.write('\n' + self.stdout + '\n')
        if self.stderr:
            out.write('Standard error:')
            out.write('\n' + self.stderr + '\n')

    def show(self, out):
        out.write(self.name + '\n')
        if OPTIONS.write_failure_output:
            out.write('Command: %s\n' % (make_shell_cmd(self.cmd()),))
            self.show_output(out)
            out.write('GDB exit code: %r\n' % (self.returncode,))

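# Walk 'dir' and return the paths of all .py test files whose path relative
# to 'dir' contains 'substring' (all of them when 'substring' is None).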
|
def find_tests(dir, substring=None):
    ans = []
    for dirpath, dirnames, filenames in os.walk(dir):
        if dirpath == '.':
            continue
        for filename in filenames:
            if not filename.endswith('.py'):
                continue
            test = os.path.join(dirpath, filename)
            if substring is None or substring in os.path.relpath(test, dir):
                ans.append(test)
    return ans

def build_test_exec(builddir):
    subprocess.check_call(['make', 'gdb-tests'], cwd=builddir)

def run_tests(tests, summary):
    pool = TaskPool(tests, job_limit=OPTIONS.workercount, timeout=OPTIONS.timeout)
    pool.run_all()

|
OPTIONS = None
def main(argv):
    global OPTIONS
    script_path = os.path.abspath(__file__)
    script_dir = os.path.dirname(script_path)

    # LIBDIR is the directory in which we find the SpiderMonkey shared
    # library, to link against.
    #
    # The [TESTS] optional arguments are paths (or path substrings) of test
    # files, taken relative to the test directory (see --testdir).
|
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] LIBDIR [TESTS...]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show GDB shell command run')
    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                  help='show output from GDB')
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('-j', '--worker-count', dest='workercount', type=int,
                  help='Run [WORKERCOUNT] tests at a time')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--worklist', dest='worklist', metavar='FILE',
                  help='Read tests to run from [FILE] (or run all if [FILE] not found);\n'
                       'write failures back to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                  help='Write failing tests to [FILE]')
    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
                  help='With --write-failures=FILE, additionally write the output of failed '
                       'tests to [FILE]')
    op.add_option('--gdb', dest='gdb_executable', metavar='EXECUTABLE', default='gdb',
                  help="Run tests with [EXECUTABLE], rather than plain 'gdb'.")
    op.add_option('--srcdir', dest='srcdir',
                  default=os.path.abspath(os.path.join(script_dir, '..')),
                  help='Use SpiderMonkey sources in [SRCDIR].')
    op.add_option('--testdir', dest='testdir', default=os.path.join(script_dir, 'tests'),
                  help='Find tests in [TESTDIR].')
    op.add_option('--builddir', dest='builddir',
                  help='Build test executable in [BUILDDIR].')
    (OPTIONS, args) = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing LIBDIR argument')
    OPTIONS.libdir = os.path.abspath(args[0])
    test_args = args[1:]

    if not OPTIONS.workercount:
        OPTIONS.workercount = get_cpu_count()

    # Compute default for OPTIONS.builddir now, since we've computed OPTIONS.libdir.
    if not OPTIONS.builddir:
        OPTIONS.builddir = os.path.join(OPTIONS.libdir, 'gdb')

    test_set = set()

    # All the various sources of test names accumulate.
    if test_args:
        for arg in test_args:
            test_set.update(find_tests(OPTIONS.testdir, arg))
|
    if OPTIONS.worklist:
        try:
            with open(OPTIONS.worklist) as f:
                for line in f:
                    test_set.add(os.path.join(OPTIONS.testdir, line.strip('\n')))
        except IOError:
            # With worklist, a missing file means to start the process with
            # the complete list of tests.
            sys.stderr.write("Couldn't read worklist file '%s'; running all tests\n"
                             % (OPTIONS.worklist,))
            test_set = set(find_tests(OPTIONS.testdir))
    if OPTIONS.read_tests:
        try:
            with open(OPTIONS.read_tests) as f:
                for line in f:
                    test_set.add(os.path.join(OPTIONS.testdir, line.strip('\n')))
        except IOError as err:
            sys.stderr.write("Error trying to read test file '%s': %s\n"
                             % (OPTIONS.read_tests, err))
            sys.exit(1)
|

    # If none of the above options were passed, and no tests were listed
    # explicitly, use the complete set.
    if not test_args and not OPTIONS.worklist and not OPTIONS.read_tests:
        test_set = set(find_tests(OPTIONS.testdir))

|
    if OPTIONS.exclude:
        exclude_set = set()
        for exclude in OPTIONS.exclude:
            exclude_set.update(find_tests(OPTIONS.testdir, exclude))
        test_set -= exclude_set
|

    if not test_set:
        sys.stderr.write("No tests found matching command line arguments.\n")
        sys.exit(1)

    summary = Summary(len(test_set))
    test_list = [Test(_, summary) for _ in sorted(test_set)]

    # Build the test executable from all the .cpp files found in the test
    # directory tree.
    try:
        build_test_exec(OPTIONS.builddir)
    except subprocess.CalledProcessError as err:
        sys.stderr.write("Error building test executable: %s\n" % (err,))
        sys.exit(1)

    # Run the tests.
    try:
        summary.start()
        run_tests(test_list, summary)
        summary.finish()
    except OSError as err:
        sys.stderr.write("Error running tests: %s\n" % (err,))
        sys.exit(1)

    sys.exit(0)

if __name__ == '__main__':
    main(sys.argv[1:])