js/src/tests/parsemark.py

changeset 0:6474c204b198
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/js/src/tests/parsemark.py	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,154 @@
+#!/usr/bin/env python
+
+"""%prog [options] shellpath dirpath
+
+Pulls performance data on parsing via the js shell.
+Displays the average number of milliseconds it took to parse each file.
+
+For comparison, something apparently approximating a t-test is performed:
+"Faster" means that:
+
+    t_baseline_goodrun = (t_baseline_avg - t_baseline_stddev)
+    t_current_badrun = (t_current_avg + t_current_stddev)
+    t_current_badrun < t_baseline_goodrun
+
+Effectively, if a bad run from the current data is still better than a good
+run from the baseline data, we're probably faster. A similar computation is
+used to determine the "slower" designation.
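+
+For example, with an illustrative baseline average of 100 ms (stddev 5 ms)
+and a current average of 90 ms (stddev 4 ms):
+
+    t_baseline_goodrun = 100 - 5 = 95
+    t_current_badrun   = 90 + 4 = 94
+    94 < 95, so the current data is designated "faster"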
+
+Arguments:
+  shellpath             executable JavaScript shell
+  dirpath               directory filled with parsilicious js files
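+
+Example (paths are placeholders):
+    python parsemark.py path/to/js-shell path/to/parse-tests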
    1.25 +"""
    1.26 +
    1.27 +import math
    1.28 +import optparse
    1.29 +import os
    1.30 +import subprocess as subp
    1.31 +import sys
    1.32 +from string import Template
    1.33 +
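+# compare_bench is optional; it is only needed when --baseline is given.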
+try:
+    import compare_bench
+except ImportError:
+    compare_bench = None
+
+
+_DIR = os.path.dirname(__file__)
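+
+# Harness evaluated inside the JS shell: read the target file, do
+# $warmup_run_count untimed parses to warm the parser, then time
+# $real_run_count parse() calls and print the per-run times in milliseconds
+# as a comma-separated list.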
+JS_CODE_TEMPLATE = Template("""
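+// Some shells provide the file reader as snarf() rather than read();
+// alias it so the harness can call read() in either case.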
+if (typeof snarf !== 'undefined') read = snarf
+var contents = read("$filepath");
+for (var i = 0; i < $warmup_run_count; i++)
+    parse(contents);
+var results = [];
+for (var i = 0; i < $real_run_count; i++) {
+    var start = new Date();
+    parse(contents);
+    var end = new Date();
+    results.push(end - start);
+}
+print(results);
+""")
+
+
+def gen_filepaths(dirpath, target_ext='.js'):
+    for filename in os.listdir(dirpath):
+        if filename.endswith(target_ext):
+            yield os.path.join(dirpath, filename)
+
+
+def avg(seq):
+    return sum(seq) / len(seq)
+
+
+def stddev(seq, mean):
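+    # Population standard deviation (divides by N, not N - 1).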
+    diffs = ((float(item) - mean) ** 2 for item in seq)
+    return math.sqrt(sum(diffs) / len(seq))
+
+
+def bench(shellpath, filepath, warmup_runs, counted_runs, stfu=False):
    1.73 +    """Return a list of milliseconds for the counted runs."""
+    assert '"' not in filepath
+    code = JS_CODE_TEMPLATE.substitute(filepath=filepath,
+            warmup_run_count=warmup_runs, real_run_count=counted_runs)
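+    # Evaluate the harness via the shell's -e flag; it prints the
+    # comma-separated run times on stdout.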
+    proc = subp.Popen([shellpath, '-e', code], stdout=subp.PIPE)
+    stdout, _ = proc.communicate()
+    milliseconds = [float(val) for val in stdout.split(',')]
+    mean = avg(milliseconds)
+    sigma = stddev(milliseconds, mean)
+    if not stfu:
+        print 'Runs:', [int(ms) for ms in milliseconds]
+        print 'Mean:', mean
+        print 'Stddev: %.2f (%.2f%% of mean)' % (sigma, sigma / mean * 100)
+    return mean, sigma
+
+
+def parsemark(filepaths, fbench, stfu=False):
    1.90 +    """:param fbench: fbench(filename) -> float"""
+    bench_map = {}  # {filename: (avg, stddev)}
+    for filepath in filepaths:
+        filename = os.path.split(filepath)[-1]
+        if not stfu:
+            print 'Parsemarking %s...' % filename
+        bench_map[filename] = fbench(filepath)
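+    # Print a JSON-style report to stdout; the returned dict carries the same
+    # per-file numbers for programmatic use (e.g. the --baseline comparison).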
+    print '{'
+    for i, (filename, (avg, stddev)) in enumerate(bench_map.iteritems()):
+        assert '"' not in filename
+        fmt = '    %30s: {"average_ms": %6.2f, "stddev_ms": %6.2f}'
+        if i != len(bench_map) - 1:
+            fmt += ','
+        filename_str = '"%s"' % filename
+        print fmt % (filename_str, avg, stddev)
+    print '}'
+    return dict((filename, dict(average_ms=avg, stddev_ms=stddev))
+            for filename, (avg, stddev) in bench_map.iteritems())
+
+
+def main():
+    parser = optparse.OptionParser(usage=__doc__.strip())
+    parser.add_option('-w', '--warmup-runs', metavar='COUNT', type=int,
+            default=5, help='used to minimize test instability [%default]')
+    parser.add_option('-c', '--counted-runs', metavar='COUNT', type=int,
+            default=50, help='timed data runs that count towards the average [%default]')
+    parser.add_option('-s', '--shell', metavar='PATH', help='explicit shell '
+            'location; when omitted, will look in likely places')
+    parser.add_option('-b', '--baseline', metavar='JSON_PATH',
+            dest='baseline_path', help='json file with baseline values to '
+            'compare against')
+    parser.add_option('-q', '--quiet', dest='stfu', action='store_true',
+            default=False, help='only print JSON to stdout [%default]')
+    options, args = parser.parse_args()
+    try:
+        shellpath = args.pop(0)
+    except IndexError:
+        parser.print_help()
+        print
+        print >> sys.stderr, 'error: shellpath required'
+        return -1
+    try:
+        dirpath = args.pop(0)
+    except IndexError:
+        parser.print_help()
+        print
+        print >> sys.stderr, 'error: dirpath required'
+        return -1
+    if not shellpath or not os.path.exists(shellpath):
+        print >> sys.stderr, 'error: could not find shell:', shellpath
+        return -1
+    if options.baseline_path:
+        if not os.path.isfile(options.baseline_path):
+            print >> sys.stderr, 'error: baseline file does not exist'
+            return -1
+        if not compare_bench:
+            print >> sys.stderr, 'error: compare_bench module unavailable, cannot compare benchmarks'
+            return -1
+    benchfile = lambda filepath: bench(shellpath, filepath,
+            options.warmup_runs, options.counted_runs, stfu=options.stfu)
+    bench_map = parsemark(gen_filepaths(dirpath), benchfile, options.stfu)
+    if options.baseline_path:
+        compare_bench.compare_immediate(bench_map, options.baseline_path)
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(main())
