|
1 # Library for JSTest manifests. |
|
2 # |
|
3 # This includes classes for representing and parsing JS manifests. |
|
4 |
|
5 import os, os.path, re, sys |
|
6 from subprocess import Popen, PIPE |
|
7 |
|
8 from tests import TestCase |
|
9 |
|
10 |
|
def split_path_into_dirs(path):
    """Return |path| followed by every ancestor prefix of it, longest first.

    Mirrors repeated os.path.split(): for 'a/b/c' this yields
    ['a/b/c', 'a/b', 'a', ''] (the final '' for a relative path).
    """
    dirs = [path]
    head, tail = os.path.split(path)
    while tail:
        dirs.append(head)
        head, tail = os.path.split(head)
    return dirs
|
20 |
|
class XULInfo:
    """Characteristics of a XUL build (ABI, OS, debug-ness) used to evaluate
    manifest predicate expressions."""

    def __init__(self, abi, os, isdebug):
        # NOTE: the |os| parameter deliberately mirrors the OS_TARGET
        # autoconf variable and shadows the os module inside this method only.
        self.abi = abi
        self.os = os
        self.isdebug = isdebug
        # The shell harness never drives a remote browser process.
        self.browserIsRemote = False

    def as_js(self):
        """Return JS that when executed sets up variables so that JS expression
        predicates on XUL build info evaluate properly."""
        return ('var xulRuntime = { OS: "%s", XPCOMABI: "%s", shell: true };' +
                'var isDebugBuild=%s; var Android=%s; var browserIsRemote=%s') % (
            self.os,
            self.abi,
            str(self.isdebug).lower(),
            str(self.os == "Android").lower(),
            str(self.browserIsRemote).lower())

    @classmethod
    def create(cls, jsdir):
        """Create a XULInfo based on the current platform's characteristics."""
        # Our strategy is to find the autoconf.mk generated for the build and
        # read the values from there.

        # Find config/autoconf.mk, searching upward from both the current
        # directory and the directory containing the JS shell.
        dirs = split_path_into_dirs(os.getcwd()) + split_path_into_dirs(jsdir)

        path = None
        for d in dirs:
            candidate = os.path.join(d, 'config/autoconf.mk')
            if os.path.isfile(candidate):
                path = candidate
                break

        # Fix: idiomatic identity test (was: path == None), and print() call
        # form so the '%' formatting also works when run under Python 3
        # (the old print-statement form applied % after printing there).
        if path is None:
            print("Can't find config/autoconf.mk on a directory containing the JS shell"
                  " (searched from %s)" % jsdir)
            sys.exit(1)

        # Read the values.
        val_re = re.compile(r'(TARGET_XPCOM_ABI|OS_TARGET|MOZ_DEBUG)\s*=\s*(.*)')
        kw = {'isdebug': False}  # MOZ_DEBUG may be absent; default to opt build.
        # Fix: close the file deterministically (the bare open() leaked it).
        with open(path) as fp:
            for line in fp:
                m = val_re.match(line)
                if m:
                    key, val = m.groups()
                    val = val.rstrip()
                    if key == 'TARGET_XPCOM_ABI':
                        kw['abi'] = val
                    if key == 'OS_TARGET':
                        kw['os'] = val
                    if key == 'MOZ_DEBUG':
                        kw['isdebug'] = (val == '1')
        return cls(**kw)
|
77 |
|
class XULInfoTester:
    """Evaluates manifest predicate expressions by running them through a JS
    shell primed with this build's xulRuntime variables."""

    def __init__(self, xulinfo, js_bin):
        # JS prolog defining xulRuntime/isDebugBuild/etc. for predicates.
        self.js_prolog = xulinfo.as_js()
        self.js_bin = js_bin
        # Maps JS expr to evaluation result.
        self.cache = {}

    def test(self, cond):
        """Test a XUL predicate condition against this local info."""
        if cond in self.cache:
            return self.cache[cond]
        command = [self.js_bin, '-e', self.js_prolog,
                   '-e', 'print(!!(%s))' % cond]
        proc = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        if out in ('true\n', 'true\r\n'):
            verdict = True
        elif out in ('false\n', 'false\r\n'):
            verdict = False
        else:
            raise Exception("Failed to test XUL condition %r;"
                            " output was %r, stderr was %r"
                            % (cond, out, err))
        self.cache[cond] = verdict
        return verdict
|
102 |
|
class NullXULInfoTester:
    """Can be used to parse manifests without a JS shell: every predicate
    condition simply evaluates to False."""

    def test(self, cond):
        """Report the predicate |cond| as not holding, unconditionally."""
        return False
|
107 |
|
def _parse_one(testcase, xul_tester):
    """Interpret the manifest terms stored on |testcase| and update its
    expect/enable/random/slow/options flags accordingly.

    testcase   - object carrying a whitespace-separated |terms| string plus
                 the flags this function mutates.
    xul_tester - object with test(cond) used to evaluate the condition inside
                 fails-if()/skip-if()/random-if() terms.
    """
    pos = 0
    parts = testcase.terms.split()
    while pos < len(parts):
        if parts[pos] == 'fails':
            testcase.expect = False
            pos += 1
        elif parts[pos] == 'skip':
            testcase.expect = testcase.enable = False
            pos += 1
        elif parts[pos] == 'random':
            testcase.random = True
            pos += 1
        elif parts[pos].startswith('fails-if'):
            # Strip the 'fails-if(' prefix and the trailing ')'.
            cond = parts[pos][len('fails-if('):-1]
            if xul_tester.test(cond):
                testcase.expect = False
            pos += 1
        elif parts[pos].startswith('asserts-if'):
            # This directive means we may flunk some number of
            # NS_ASSERTIONs in the browser. For the shell, ignore it.
            pos += 1
        elif parts[pos].startswith('skip-if'):
            cond = parts[pos][len('skip-if('):-1]
            if xul_tester.test(cond):
                testcase.expect = testcase.enable = False
            pos += 1
        elif parts[pos].startswith('random-if'):
            cond = parts[pos][len('random-if('):-1]
            if xul_tester.test(cond):
                testcase.random = True
            pos += 1
        elif parts[pos].startswith('require-or'):
            # require-or(cond1&&cond2,fallback): if any precondition is not
            # satisfiable here, apply the fallback action instead.
            cond = parts[pos][len('require-or('):-1]
            (preconditions, fallback_action) = re.split(",", cond)
            for precondition in re.split("&&", preconditions):
                if precondition == 'debugMode':
                    testcase.options.append('-d')
                elif precondition == 'true':
                    pass
                else:
                    if fallback_action == "skip":
                        testcase.expect = testcase.enable = False
                    elif fallback_action == "fail":
                        testcase.expect = False
                    elif fallback_action == "random":
                        testcase.random = True
                    else:
                        raise Exception(("Invalid precondition '%s' or fallback " +
                                         " action '%s'") % (precondition, fallback_action))
                    break
            pos += 1
        elif parts[pos] == 'slow':
            testcase.slow = True
            pos += 1
        elif parts[pos] == 'silentfail':
            # silentfails use tons of memory, and Darwin doesn't support ulimit.
            if xul_tester.test("xulRuntime.OS == 'Darwin'"):
                testcase.expect = testcase.enable = False
            pos += 1
        else:
            # Fix: print() call form runs on both Python 2 and 3; the old
            # print-statement form was Python-2-only.
            print('warning: invalid manifest line element "%s"' % parts[pos])
            pos += 1
|
171 |
|
def _build_manifest_script_entry(script_name, test):
    """Format one manifest line for |test|.

    Produces '[terms] script <script_name> [# comment]', omitting the
    optional pieces when the test carries no terms/comment.
    """
    pieces = []
    if test.terms:
        pieces.append(test.terms)
    pieces += ["script", script_name]
    if test.comment:
        pieces += ["#", test.comment]
    return ' '.join(pieces)
|
182 |
|
def _map_prefixes_left(test_list):
    """
    Splits tests into a dictionary keyed on the first component of the test
    path, aggregating tests with a common base path into a list.
    """
    byprefix = {}
    for test in test_list:
        prefix, _, rest = test.path.partition(os.sep)
        # Tests in a subdirectory get that leading component stripped off
        # so recursion can process the remainder.
        if rest:
            test.path = rest
        byprefix.setdefault(prefix, []).append(test)
    return byprefix
|
197 |
|
def _emit_manifest_at(location, relative, test_list, depth):
    """
    Write a jstests.list manifest at |location| covering |test_list|, and
    recurse into subdirectories.

    location - str: absolute path where we want to write the manifest
    relative - str: relative path from topmost manifest directory to current
    test_list - [str]: list of all test paths and directorys
    depth - int: number of dirs we are below the topmost manifest dir
    """
    manifests = _map_prefixes_left(test_list)

    filename = os.path.join(location, 'jstests.list')
    manifest = []
    numTestFiles = 0
    # Fix: .items() works on Python 2 and 3 alike (iteritems() was 2-only);
    # also rename the loop variable, which shadowed the test_list parameter.
    for k, sub_tests in manifests.items():
        fullpath = os.path.join(location, k)
        if os.path.isdir(fullpath):
            manifest.append("include " + k + "/jstests.list")
            relpath = os.path.join(relative, k)
            _emit_manifest_at(fullpath, relpath, sub_tests, depth + 1)
        else:
            numTestFiles += 1
            # A leaf filename maps to exactly one test case.
            assert len(sub_tests) == 1
            line = _build_manifest_script_entry(k, sub_tests[0])
            manifest.append(line)

    # Always present our manifest in sorted order.
    manifest.sort()

    # If we have tests, we have to set the url-prefix so reftest can find them.
    if numTestFiles > 0:
        manifest = (["url-prefix %sjsreftest.html?test=%s/" % ('../' * depth, relative)]
                    + manifest)

    # 'with' closes the file even if the write raises, matching the style
    # used elsewhere in this file.
    with open(filename, 'w') as fp:
        fp.write('\n'.join(manifest) + '\n')
|
235 |
|
def make_manifests(location, test_list):
    """Write jstests.list manifests for |test_list|, rooted at |location|."""
    _emit_manifest_at(location, '', test_list, 0)
|
238 |
|
def _find_all_js_files(base, location):
    """Yield (relative-dir, filename) pairs for every .js file under
    |location|, with the directory reported relative to |base|."""
    for root, dirs, files in os.walk(location):
        # Drop the |base| prefix plus its trailing separator.
        rel = root[len(base) + 1:]
        for name in files:
            if name.endswith('.js'):
                yield rel, name
|
245 |
|
# Matches a test header written as a line comment, e.g.
# "// |reftest| skip-if(cond) -- explanation".  Group 1 is the tag between
# the pipes, group 2 the terms, group 4 the optional trailing comment.
TEST_HEADER_PATTERN_INLINE = re.compile(r'//\s*\|(.*?)\|\s*(.*?)\s*(--\s*(.*))?$')
# The same header written in block-comment form: "/* |reftest| ... */".
TEST_HEADER_PATTERN_MULTI = re.compile(r'/\*\s*\|(.*?)\|\s*(.*?)\s*(--\s*(.*))?\*/')
|
248 |
|
def _parse_test_header(fullpath, testcase, xul_tester):
    """
    Extract the |reftest| header (tag, terms, comment) from the first line of
    the test file at |fullpath| and apply the terms to |testcase|.

    This looks a bit weird. The reason is that it needs to be efficient, since
    it has to be done on every test
    """
    # Fix: 'with' guarantees the file is closed (was an explicit try/finally),
    # consistent with the other file handling in this module.  512 bytes is
    # plenty to cover a header line.
    with open(fullpath, 'r') as fp:
        buf = fp.read(512)

    # Bail early if we do not start with a single comment.
    if not buf.startswith("//"):
        return

    # Extract the token.
    buf, _, _ = buf.partition('\n')
    matches = TEST_HEADER_PATTERN_INLINE.match(buf)

    if not matches:
        matches = TEST_HEADER_PATTERN_MULTI.match(buf)
        if not matches:
            return

    testcase.tag = matches.group(1)
    testcase.terms = matches.group(2)
    testcase.comment = matches.group(4)

    _parse_one(testcase, xul_tester)
|
278 |
|
279 def _parse_external_manifest(filename, relpath): |
|
280 """ |
|
281 Reads an external manifest file for test suites whose individual test cases |
|
282 can't be decorated with reftest comments. |
|
283 filename - str: name of the manifest file |
|
284 relpath - str: relative path of the directory containing the manifest |
|
285 within the test suite |
|
286 """ |
|
287 entries = [] |
|
288 |
|
289 with open(filename, 'r') as fp: |
|
290 manifest_re = re.compile(r'^\s*(.*)\s+(include|script)\s+(\S+)$') |
|
291 for line in fp: |
|
292 line, _, comment = line.partition('#') |
|
293 line = line.strip() |
|
294 if not line: |
|
295 continue |
|
296 matches = manifest_re.match(line) |
|
297 if not matches: |
|
298 print('warning: unrecognized line in jstests.list: {0}'.format(line)) |
|
299 continue |
|
300 |
|
301 path = os.path.normpath(os.path.join(relpath, matches.group(3))) |
|
302 if matches.group(2) == 'include': |
|
303 # The manifest spec wants a reference to another manifest here, |
|
304 # but we need just the directory. We do need the trailing |
|
305 # separator so we don't accidentally match other paths of which |
|
306 # this one is a prefix. |
|
307 assert(path.endswith('jstests.list')) |
|
308 path = path[:-len('jstests.list')] |
|
309 |
|
310 entries.append({'path': path, 'terms': matches.group(1), 'comment': comment.strip()}) |
|
311 |
|
312 # if one directory name is a prefix of another, we want the shorter one first |
|
313 entries.sort(key=lambda x: x["path"]) |
|
314 return entries |
|
315 |
|
def _apply_external_manifests(filename, testcase, entries, xul_tester):
    """Apply each manifest entry whose path is a prefix of |filename| to
    |testcase|, re-parsing its terms every time a match is found."""
    for entry in entries:
        if not filename.startswith(entry["path"]):
            continue
        # The reftest spec would require combining the terms (failure types)
        # that may already be defined in the test case with the terms
        # specified in entry; for example, a skip overrides a random, which
        # overrides a fails. Since we don't necessarily know yet in which
        # environment the test cases will be run, we'd also have to
        # consider skip-if, random-if, and fails-if with as-yet unresolved
        # conditions.
        # At this point, we use external manifests only for test cases
        # that can't have their own failure type comments, so we simply
        # use the terms for the most specific path.
        testcase.terms = entry["terms"]
        testcase.comment = entry["comment"]
        _parse_one(testcase, xul_tester)
|
332 |
|
def load(location, xul_tester, reldir = ''):
    """
    Locates all tests by walking the filesystem starting at |location|.
    Uses xul_tester to evaluate any test conditions in the test header.
    Failure type and comment for a test case can come from
    - an external manifest entry for the test case,
    - an external manifest entry for a containing directory,
    - most commonly: the header of the test case itself.
    """
    # Any file whose basename matches something in this set is ignored:
    # these are harness support files, not test cases.
    EXCLUDED = set(('browser.js', 'shell.js', 'jsref.js', 'template.js',
                    'user.js', 'sta.js',
                    'test262-browser.js', 'test262-shell.js',
                    'test402-browser.js', 'test402-shell.js',
                    'testBuiltInObject.js', 'testIntl.js',
                    'js-test-driver-begin.js', 'js-test-driver-end.js'))

    manifestFile = os.path.join(location, 'jstests.list')
    externalManifestEntries = _parse_external_manifest(manifestFile, '')

    # The list of tests that we are collecting.
    tests = []
    for root, basename in _find_all_js_files(location, location):
        # Ignore js files in the root test directory and known non-tests.
        if not root or basename in EXCLUDED:
            continue

        # Compute the file's path relative to |location| and its full path.
        filename = os.path.join(root, basename)
        fullpath = os.path.join(location, filename)

        # Skip empty files.
        if os.stat(fullpath).st_size == 0:
            continue

        testcase = TestCase(os.path.join(reldir, filename))
        _apply_external_manifests(filename, testcase, externalManifestEntries,
                                  xul_tester)
        _parse_test_header(fullpath, testcase, xul_tester)
        tests.append(testcase)
    return tests