js/src/jit-test/jit_test.py

changeset 0:6474c204b198
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/js/src/jit-test/jit_test.py	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,247 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import math, os, posixpath, shlex, shutil, subprocess, sys, traceback
+
+def add_libdir_to_path():
+    from os.path import dirname, exists, join, realpath
+    js_src_dir = dirname(dirname(realpath(sys.argv[0])))
+    assert exists(join(js_src_dir,'jsapi.h'))
+    sys.path.insert(0, join(js_src_dir, 'lib'))
+    sys.path.insert(0, join(js_src_dir, 'tests', 'lib'))
+
+add_libdir_to_path()
+
+import jittests
+from tests import TBPL_FLAGS
+
+def main(argv):
+
+    # If no multiprocessing is available, fall back to serial test execution.
+    max_jobs_default = 1
+    if jittests.HAVE_MULTIPROCESSING:
+        try:
+            max_jobs_default = jittests.cpu_count()
+        except NotImplementedError:
+            pass
+
+    # The [TESTS] optional arguments are paths of test files relative
+    # to the jit-test/tests directory.
+
+    from optparse import OptionParser
+    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')
+    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
+                  help='show js shell command run')
+    op.add_option('-f', '--show-failed-cmd', dest='show_failed',
+                  action='store_true', help='show command lines of failed tests')
+    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
+                  help='show output from js shell')
+    op.add_option('-x', '--exclude', dest='exclude', action='append',
+                  help='exclude given test dir or path')
+    op.add_option('--no-slow', dest='run_slow', action='store_false',
+                  help='do not run tests marked as slow')
+    op.add_option('-t', '--timeout', dest='timeout',  type=float, default=150.0,
+                  help='set test timeout in seconds')
+    op.add_option('--no-progress', dest='hide_progress', action='store_true',
+                  help='hide progress bar')
+    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
+                  help='Tinderbox-parseable output format')
+    op.add_option('--args', dest='shell_args', default='',
+                  help='extra args to pass to the JS shell')
+    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
+                  help='Write a list of failed tests to [FILE]')
+    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
+                  help='Run test files listed in [FILE]')
+    op.add_option('-R', '--retest', dest='retest', metavar='FILE',
+                  help='Retest using test list file [FILE]')
+    op.add_option('-g', '--debug', dest='debug', action='store_true',
+                  help='Run test in gdb')
+    op.add_option('--valgrind', dest='valgrind', action='store_true',
+                  help='Enable the |valgrind| flag, if valgrind is in $PATH.')
+    op.add_option('--valgrind-all', dest='valgrind_all', action='store_true',
+                  help='Run all tests with valgrind, if valgrind is in $PATH.')
+    op.add_option('--jitflags', dest='jitflags', default='',
+                  help='Example: --jitflags=m,mn to run each test with "-m" and "-m -n" [default="%default"]. ' +
+                       'Long flags, such as "--ion-eager", should be set using --args.')
+    op.add_option('--avoid-stdio', dest='avoid_stdio', action='store_true',
+                  help='Use js-shell file indirection instead of piping stdio.')
+    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
+                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
+    op.add_option('--ion', dest='ion', action='store_true',
+                  help='Run tests once with --ion-eager and once with --baseline-eager (ignores --jitflags)')
+    op.add_option('--tbpl', dest='tbpl', action='store_true',
+                  help='Run tests with all IonMonkey option combinations (ignores --jitflags)')
+    op.add_option('-j', '--worker-count', dest='max_jobs', type=int, default=max_jobs_default,
+                  help='Number of tests to run in parallel (default %default)')
+    op.add_option('--remote', action='store_true',
+                  help='Run tests on a remote device')
+    op.add_option('--deviceIP', action='store',
+                  type='string', dest='device_ip',
+                  help='IP address of remote device to test')
+    op.add_option('--devicePort', action='store',
+                  type=int, dest='device_port', default=20701,
+                  help='port of remote device to test')
+    op.add_option('--deviceSerial', action='store',
+                  type='string', dest='device_serial', default=None,
+                  help='ADB device serial number of remote device to test')
+    op.add_option('--deviceTransport', action='store',
+                  type='string', dest='device_transport', default='sut',
+                  help='The transport to use to communicate with device: [adb|sut]; default=sut')
+    op.add_option('--remoteTestRoot', dest='remote_test_root', action='store',
+                  type='string', default='/data/local/tests',
+                  help='The remote directory to use as test root (e.g. /data/local/tests)')
+    op.add_option('--localLib', dest='local_lib', action='store',
+                  type='string',
+                  help='The location of libraries to push -- preferably stripped')
+    op.add_option('--repeat', type=int, default=1,
+                  help='Repeat tests the given number of times.')
+    op.add_option('--this-chunk', type=int, default=1,
+                  help='The test chunk to run.')
+    op.add_option('--total-chunks', type=int, default=1,
+                  help='The total number of test chunks.')
+
+    options, args = op.parse_args(argv)
+    if len(args) < 1:
+        op.error('missing JS_SHELL argument')
+    # We need to make sure we are using backslashes on Windows.
+    test_args = args[1:]
+
+    if jittests.stdio_might_be_broken():
+        # Prefer erring on the side of caution and not using stdio if
+        # it might be broken on this platform.  The file-redirect
+        # fallback should work on any platform, so at worst by
+        # guessing wrong we might have slowed down the tests a bit.
+        #
+        # XXX technically we could check for broken stdio, but it
+        # really seems like overkill.
+        options.avoid_stdio = True
+
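+    # --retest points both the input test list and the failure output at the
+    # same file, so only the tests that failed last time are rerun and the
+    # file is rewritten with the new failures.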
+    if options.retest:
+        options.read_tests = options.retest
+        options.write_failures = options.retest
+
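+    # Collect the tests to run: explicit TESTS arguments, the --read-tests
+    # file, or (if neither is given) every test under jit-test/tests.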
+    test_list = []
+    read_all = True
+
+    if test_args:
+        read_all = False
+        for arg in test_args:
+            test_list += jittests.find_tests(arg)
+
+    if options.read_tests:
+        read_all = False
+        try:
+            f = open(options.read_tests)
+            for line in f:
+                test_list.append(os.path.join(jittests.TEST_DIR, line.strip('\n')))
+            f.close()
+        except IOError:
+            if options.retest:
+                read_all = True
+            else:
+                sys.stderr.write("Exception thrown trying to read test file '%s'\n"%
+                                 options.read_tests)
+                traceback.print_exc()
+                sys.stderr.write('---\n')
+
+    if read_all:
+        test_list = jittests.find_tests()
+
+    if options.exclude:
+        exclude_list = []
+        for exclude in options.exclude:
+            exclude_list += jittests.find_tests(exclude)
+        exclude_set = set(exclude_list)
+        test_list = [test for test in test_list if test not in exclude_set]
+
+    if not test_list:
+        print >> sys.stderr, "No tests found matching command line arguments."
+        sys.exit(0)
+
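+    # Parse each test file into a Test object so per-test metadata (such as
+    # the |slow| annotation checked below) is available.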
+    test_list = [jittests.Test.from_file(_, options) for _ in test_list]
+
+    if not options.run_slow:
+        test_list = [ _ for _ in test_list if not _.slow ]
+
+    # If chunking is enabled, determine which tests are part of this chunk.
+    # This code was adapted from testing/mochitest/runtestsremote.py.
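+    # For example, 500 tests with --total-chunks=3 give
+    # tests_per_chunk = ceil(500 / 3.0) = 167.0, so chunk 1 gets tests[0:167],
+    # chunk 2 gets tests[167:334] and chunk 3 gets the remaining tests[334:500].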
+    if options.total_chunks > 1:
+        total_tests = len(test_list)
+        tests_per_chunk = math.ceil(total_tests / float(options.total_chunks))
+        start = int(round((options.this_chunk - 1) * tests_per_chunk))
+        end = int(round(options.this_chunk * tests_per_chunk))
+        test_list = test_list[start:end]
+
+    # The full test list is ready. Now create copies for each JIT configuration.
+    job_list = []
+    if options.tbpl:
+        # Running all bits would take forever. Instead, we test a few interesting combinations.
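+        # Each entry of TBPL_FLAGS is one jit-flag combination; every test is
+        # copied once per combination.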
+        for test in test_list:
+            for variant in TBPL_FLAGS:
+                new_test = test.copy()
+                new_test.jitflags.extend(variant)
+                job_list.append(new_test)
+    elif options.ion:
+        flags = [['--baseline-eager'], ['--ion-eager', '--ion-parallel-compile=off']]
+        for test in test_list:
+            for variant in flags:
+                new_test = test.copy()
+                new_test.jitflags.extend(variant)
+                job_list.append(new_test)
+    else:
+        jitflags_list = jittests.parse_jitflags(options)
+        for test in test_list:
+            for jitflags in jitflags_list:
+                new_test = test.copy()
+                new_test.jitflags.extend(jitflags)
+                job_list.append(new_test)
+
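+    # Build the shell command prefix: the absolute path to the shell, any
+    # extra --args, and a -f flag that preloads the prolog.js test harness.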
+    prefix = [os.path.abspath(args[0])] + shlex.split(options.shell_args)
+    prolog = os.path.join(jittests.LIB_DIR, 'prolog.js')
+    if options.remote:
+        prolog = posixpath.join(options.remote_test_root, 'jit-tests', 'jit-tests', 'lib', 'prolog.js')
+
+    prefix += ['-f', prolog]
+
+    # Avoid racing on the cache by having the js shell create a new cache
+    # subdir for each process. The js shell takes care of deleting these
+    # subdirs when the process exits.
+    if options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
+        prefix += ['--js-cache-per-process']
+
+    # Clean up any remnants from previous crashes etc
+    shutil.rmtree(jittests.JS_CACHE_DIR, ignore_errors=True)
+    os.mkdir(jittests.JS_CACHE_DIR)
+
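+    # With --debug, exactly one test may match; run it under gdb instead of
+    # the normal harness and exit when gdb returns.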
+    if options.debug:
+        if len(job_list) > 1:
+            print 'Multiple tests match command line arguments, debugger can only run one'
+            for tc in job_list:
+                print '    %s' % tc.path
+            sys.exit(1)
+
+        tc = job_list[0]
+        cmd = ['gdb', '--args'] + tc.command(prefix, jittests.LIB_DIR)
+        subprocess.call(cmd)
+        sys.exit()
+
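+    # Dispatch to the remote, parallel or serial runner; exit with status 2
+    # if any test failed, and report a missing shell binary explicitly.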
+    try:
+        ok = None
+        if options.remote:
+            ok = jittests.run_tests_remote(job_list, prefix, options)
+        elif options.max_jobs > 1 and jittests.HAVE_MULTIPROCESSING:
+            ok = jittests.run_tests_parallel(job_list, prefix, options)
+        else:
+            ok = jittests.run_tests(job_list, prefix, options)
+        if not ok:
+            sys.exit(2)
+    except OSError:
+        if not os.path.exists(prefix[0]):
+            print >> sys.stderr, "JS shell argument: file does not exist: '%s'" % prefix[0]
+            sys.exit(1)
+        else:
+            raise
+
+if __name__ == '__main__':
+    main(sys.argv[1:])
