Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
michael@0 | 1 | #!/usr/bin/env python |
michael@0 | 2 | """ |
michael@0 | 3 | The JS Shell Test Harness. |
michael@0 | 4 | |
michael@0 | 5 | See the adjacent README.txt for more details. |
michael@0 | 6 | """ |
michael@0 | 7 | |
michael@0 | 8 | import os, sys, textwrap |
michael@0 | 9 | from os.path import abspath, dirname, realpath |
michael@0 | 10 | from copy import copy |
michael@0 | 11 | from subprocess import list2cmdline, call |
michael@0 | 12 | |
michael@0 | 13 | from lib.results import NullTestOutput |
michael@0 | 14 | from lib.tests import TestCase, TBPL_FLAGS |
michael@0 | 15 | from lib.results import ResultsSink |
michael@0 | 16 | from lib.progressbar import ProgressBar |
michael@0 | 17 | |
michael@0 | 18 | if (sys.platform.startswith('linux') or |
michael@0 | 19 | sys.platform.startswith('darwin') |
michael@0 | 20 | ): |
michael@0 | 21 | from lib.tasks_unix import run_all_tests |
michael@0 | 22 | else: |
michael@0 | 23 | from lib.tasks_win import run_all_tests |
michael@0 | 24 | |
def run_tests(options, tests, results):
    """Run every test in |tests|, feeding raw results into |results|."""
    completed = False
    try:
        completed = run_all_tests(tests, results, options)
    except KeyboardInterrupt:
        # Ctrl-C leaves the run marked as incomplete.
        pass
    results.finish(completed)
michael@0 | 33 | |
def get_cpu_count():
    """
    Guess at a reasonable parallelism count to set as the default for the
    current machine and run.
    """
    # Python 2.6+ ships multiprocessing, which knows the answer directly.
    try:
        import multiprocessing
        return multiprocessing.cpu_count()
    except (ImportError, NotImplementedError):
        pass

    # POSIX systems expose the online-processor count via sysconf.
    try:
        ncpus = int(os.sysconf('SC_NPROCESSORS_ONLN'))
    except (AttributeError, ValueError):
        pass
    else:
        if ncpus > 0:
            return ncpus

    # Windows publishes the count in the environment.
    try:
        ncpus = int(os.environ['NUMBER_OF_PROCESSORS'])
    except (KeyError, ValueError):
        pass
    else:
        if ncpus > 0:
            return ncpus

    # Give up and assume a single core.
    return 1
michael@0 | 63 | |
def parse_args():
    """
    Parse command line arguments.
    Returns a tuple of: (options, requested_paths, excluded_paths)
        options :object: The raw OptionParser output. Also carries
                         options.js_shell, the absolute path of the shell
                         under test (None in --make-manifests mode).
        requested_paths :set<str>: Test paths specially requested on the CLI.
        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
    """
    from optparse import OptionParser, OptionGroup
    op = OptionParser(usage=textwrap.dedent("""
        %prog [OPTIONS] JS_SHELL [TESTS]

        Shell output format: [ pass | fail | timeout | skip ] progress | time
        """).strip())
    op.add_option('--xul-info', dest='xul_info_src',
                  help='config data for xulRuntime (avoids search for config/autoconf.mk)')

    harness_og = OptionGroup(op, "Harness Controls", "Control how tests are run.")
    harness_og.add_option('-j', '--worker-count', type=int, default=max(1, get_cpu_count()),
                          help='Number of tests to run in parallel (default %default)')
    harness_og.add_option('-t', '--timeout', type=float, default=150.0,
                          help='Set maximum time a test is allowed to run (in seconds).')
    harness_og.add_option('-a', '--args', dest='shell_args', default='',
                          help='Extra args to pass to the JS shell.')
    harness_og.add_option('--jitflags', default='', help="Obsolete. Does nothing.")
    harness_og.add_option('--tbpl', action='store_true',
                          help='Runs each test in all configurations tbpl tests.')
    harness_og.add_option('-g', '--debug', action='store_true', help='Run a test in debugger.')
    harness_og.add_option('--debugger', default='gdb -q --args', help='Debugger command.')
    harness_og.add_option('-J', '--jorendb', action='store_true', help='Run under JS debugger.')
    harness_og.add_option('--passthrough', action='store_true', help='Run tests with stdin/stdout attached to caller.')
    harness_og.add_option('--valgrind', action='store_true', help='Run tests in valgrind.')
    harness_og.add_option('--valgrind-args', default='', help='Extra args to pass to valgrind.')
    op.add_option_group(harness_og)

    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
    input_og.add_option('-f', '--file', dest='test_file', action='append',
                        help='Get tests from the given file.')
    input_og.add_option('-x', '--exclude-file', action='append',
                        help='Exclude tests from the given file.')
    input_og.add_option('-d', '--exclude-random', dest='random', action='store_false',
                        help='Exclude tests marked as "random."')
    input_og.add_option('--run-skipped', action='store_true', help='Run tests marked as "skip."')
    input_og.add_option('--run-only-skipped', action='store_true', help='Run only tests marked as "skip."')
    input_og.add_option('--run-slow-tests', action='store_true',
                        help='Do not skip tests marked as "slow."')
    input_og.add_option('--no-extensions', action='store_true',
                        help='Run only tests conforming to the ECMAScript 5 standard.')
    op.add_option_group(input_og)

    output_og = OptionGroup(op, "Output", "Modify the harness and tests output.")
    output_og.add_option('-s', '--show-cmd', action='store_true',
                         help='Show exact commandline used to run each test.')
    output_og.add_option('-o', '--show-output', action='store_true',
                         help="Print each test's output to the file given by --output-file.")
    output_og.add_option('-F', '--failed-only', action='store_true',
                         help="If a --show-* option is given, only print output for failed tests.")
    output_og.add_option('-O', '--output-file',
                         help='Write all output to the given file (default: stdout).')
    output_og.add_option('--failure-file',
                         help='Write all not-passed tests to the given file.')
    output_og.add_option('--no-progress', dest='hide_progress', action='store_true',
                         help='Do not show the progress bar.')
    output_og.add_option('--tinderbox', action='store_true',
                         help='Use tinderbox-parseable output format.')
    op.add_option_group(output_og)

    special_og = OptionGroup(op, "Special", "Special modes that do not run tests.")
    special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
                          help='Generate reftest manifest files.')
    op.add_option_group(special_og)
    options, args = op.parse_args()

    # Acquire the JS shell given on the command line.
    options.js_shell = None
    requested_paths = set()
    if len(args) > 0:
        options.js_shell = abspath(args[0])
        requested_paths |= set(args[1:])

    # If we do not have a shell, we must be in a special mode.
    if options.js_shell is None and not options.make_manifests:
        op.error('missing JS_SHELL argument')

    # Valgrind and gdb are mutually exclusive.
    if options.valgrind and options.debug:
        op.error("--valgrind and --debug are mutually exclusive.")

    # Fill the debugger field, as needed.
    prefix = options.debugger.split() if options.debug else []
    if options.valgrind:
        prefix = ['valgrind'] + options.valgrind_args.split()
        if os.uname()[0] == 'Darwin':
            prefix.append('--dsymutil=yes')
        # Valgrind's findings are only useful if the output is shown.
        options.show_output = True

    js_cmd_args = options.shell_args.split()
    if options.jorendb:
        # The JS debugger needs exclusive, interactive control of one shell.
        options.passthrough = True
        options.hide_progress = True
        options.worker_count = 1
        debugger_path = realpath(os.path.join(abspath(dirname(abspath(__file__))), '..', '..', 'examples', 'jorendb.js'))
        js_cmd_args.extend([ '-d', '-f', debugger_path, '--' ])
    TestCase.set_js_cmd_prefix(options.js_shell, js_cmd_args, prefix)

    # If files with lists of tests to run were specified, add them to the
    # requested tests set.
    if options.test_file:
        for test_file in options.test_file:
            # 'with' closes the handle even on a read error (the old code
            # leaked the open file object).
            with open(test_file) as fp:
                requested_paths |= set(line.strip() for line in fp)

    # If files with lists of tests to exclude were specified, add them to the
    # excluded tests set.
    excluded_paths = set()
    if options.exclude_file:
        for filename in options.exclude_file:
            # BUG FIX: the old try/finally ran fp.close() even when open()
            # itself raised, so a missing file surfaced as a NameError on the
            # unbound 'fp' instead of the real IOError. 'with' only closes a
            # successfully opened file.
            with open(filename, 'r') as fp:
                for line in fp:
                    # '#' introduces a comment line; blank lines are ignored.
                    if line.startswith('#'):
                        continue
                    line = line.strip()
                    if not line:
                        continue
                    excluded_paths.add(line)

    # Handle output redirection, if requested and relevant.
    options.output_fp = sys.stdout
    if options.output_file:
        if not options.show_cmd:
            options.show_output = True
        try:
            options.output_fp = open(options.output_file, 'w')
        except IOError as ex:
            raise SystemExit("Failed to open output file: " + str(ex))

    options.show = options.show_cmd or options.show_output

    # Hide the progress bar if it will get in the way of other output.
    options.hide_progress = (options.tinderbox or
                             not ProgressBar.conservative_isatty() or
                             options.hide_progress)

    return (options, requested_paths, excluded_paths)
michael@0 | 209 | |
def load_tests(options, requested_paths, excluded_paths):
    """
    Discover the test suite and apply every CLI filter to it.

    Returns a tuple: (skip_list, test_list)
        skip_list: [iterable<Test>] Tests found but skipped.
        test_list: [iterable<Test>] Tests found that should be run.

    May call sys.exit() in --make-manifests mode, which does not run tests.
    """
    import lib.manifest as manifest

    # Build the xulRuntime condition evaluator: a null tester when there is
    # no shell, otherwise one seeded from --xul-info or probed from the shell.
    if options.js_shell is None:
        xul_tester = manifest.NullXULInfoTester()
    else:
        if options.xul_info_src is None:
            xul_info = manifest.XULInfo.create(options.js_shell)
        else:
            xul_abi, xul_os, xul_debug = options.xul_info_src.split(':')
            # BUG FIX: the original used "is 'true'", which compares object
            # identity rather than string equality, so the debug flag was
            # effectively never honored. Use '==' to compare values.
            xul_debug = xul_debug.lower() == 'true'
            xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
        xul_tester = manifest.XULInfoTester(xul_info, options.js_shell)

    test_dir = dirname(abspath(__file__))
    test_list = manifest.load(test_dir, xul_tester)
    skip_list = []

    if options.make_manifests:
        manifest.make_manifests(options.make_manifests, test_list)
        sys.exit()

    # Create a new test list. Apply each TBPL configuration to every test.
    if options.tbpl:
        new_test_list = []
        flags_list = TBPL_FLAGS
        for test in test_list:
            for jitflags in flags_list:
                # Shallow-copy the test but give it its own options list, so
                # extending it cannot leak flags into the other copies.
                tmp_test = copy(test)
                tmp_test.options = copy(test.options)
                tmp_test.options.extend(jitflags)
                new_test_list.append(tmp_test)
        test_list = new_test_list

    if options.jitflags:
        print("Warning: the --jitflags option is obsolete and does nothing now.")

    # Restrict to exactly the paths listed in any --file arguments.
    if options.test_file:
        paths = set()
        for test_file in options.test_file:
            # 'with' closes the handle even on a read error.
            with open(test_file) as fp:
                paths |= set(line.strip() for line in fp)
        test_list = [t for t in test_list if t.path in paths]

    # Restrict to tests whose path contains any CLI-requested substring.
    if requested_paths:
        def is_requested(path):
            for arg in requested_paths:
                if path.find(arg) != -1:
                    return True
            return False
        test_list = [t for t in test_list if is_requested(t.path)]

    # Drop tests excluded via --exclude-file.
    if options.exclude_file:
        test_list = [t for t in test_list if t.path not in excluded_paths]

    # --no-extensions: keep only standards-conforming tests.
    if options.no_extensions:
        pattern = os.sep + 'extensions' + os.sep
        test_list = [t for t in test_list if pattern not in t.path]

    # --exclude-random: drop tests with unpredictable outcomes.
    if not options.random:
        test_list = [t for t in test_list if not t.random]

    # --run-only-skipped implies --run-skipped, then inverts the selection.
    if options.run_only_skipped:
        options.run_skipped = True
        test_list = [t for t in test_list if not t.enable]

    if not options.run_slow_tests:
        test_list = [t for t in test_list if not t.slow]

    # Unless skipped tests were requested, move them to the skip list so they
    # are still reported (as null results) without being run.
    if not options.run_skipped:
        skip_list = [t for t in test_list if not t.enable]
        test_list = [t for t in test_list if t.enable]

    return skip_list, test_list
michael@0 | 288 | |
def main():
    """Harness entry point. Returns a process exit status (0 = all passed)."""
    options, requested_paths, excluded_paths = parse_args()
    skip_list, test_list = load_tests(options, requested_paths, excluded_paths)

    if not test_list:
        print 'no tests selected'
        return 1

    test_dir = dirname(abspath(__file__))

    # Debugger mode: run exactly one test under the configured debugger and
    # return immediately, without collecting results.
    if options.debug:
        if len(test_list) > 1:
            # The debugger can only attach to a single process; exit status 2
            # signals an ambiguous selection.
            print('Multiple tests match command line arguments, debugger can only run one')
            for tc in test_list:
                print(' %s'%tc.path)
            return 2

        cmd = test_list[0].get_command(TestCase.js_cmd_prefix)
        if options.show_cmd:
            print list2cmdline(cmd)
        # Tests expect to run from the harness directory.
        if test_dir not in ('', '.'):
            os.chdir(test_dir)
        call(cmd)
        return 0

    # Normal mode: run from the test directory, restoring the caller's
    # working directory afterwards even if the run is interrupted.
    curdir = os.getcwd()
    if test_dir not in ('', '.'):
        os.chdir(test_dir)

    results = None
    try:
        results = ResultsSink(options, len(skip_list) + len(test_list))
        # Report skipped tests up front as null (not-run) results.
        for t in skip_list:
            results.push(NullTestOutput(t))
        run_tests(options, test_list, results)
    finally:
        os.chdir(curdir)

    # Any failure -- or a run that never produced results -- is status 1.
    if results is None or not results.all_passed():
        return 1

    return 0
michael@0 | 331 | |
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())