Thu, 22 Jan 2015 13:21:57 +0100
Incorporate requested changes from Mozilla in review:
https://bugzilla.mozilla.org/show_bug.cgi?id=1123480#c6
#!/usr/bin/env python

"""%prog [options] shellpath dirpath

Pulls performance data on parsing via the js shell.
Displays the average number of milliseconds it took to parse each file.

For comparison, something apparently approximating a t-test is performed:
"Faster" means that:

    t_baseline_goodrun = (t_baseline_avg - t_baseline_stddev)
    t_current_badrun = (t_current_avg + t_current_stddev)
    t_current_badrun < t_baseline_goodrun

Effectively, if a bad run from the current data is still better than a good
run from the baseline data, we're probably faster. A similar computation is
used for determining the "slower" designation.

Arguments:
    shellpath       executable JavaScript shell
    dirpath         directory filled with parsilicious js files
"""

import math
import optparse
import os
import subprocess as subp
import sys
from string import Template

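# compare_bench is optional; without it, the --baseline comparison below is
# unavailable.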
try:
    import compare_bench
except ImportError:
    compare_bench = None


_DIR = os.path.dirname(__file__)
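# Shell snippet: read the target file, warm the parser up, then time each
# counted parse and print the per-run times in milliseconds.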
JS_CODE_TEMPLATE = Template("""
if (typeof snarf !== 'undefined') read = snarf;
var contents = read("$filepath");
for (var i = 0; i < $warmup_run_count; i++)
    parse(contents);
var results = [];
for (var i = 0; i < $real_run_count; i++) {
    var start = new Date();
    parse(contents);
    var end = new Date();
    results.push(end - start);
}
print(results);
""")


def gen_filepaths(dirpath, target_ext='.js'):
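    """Yield the path of each file in dirpath whose name ends with target_ext."""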
    for filename in os.listdir(dirpath):
        if filename.endswith(target_ext):
            yield os.path.join(dirpath, filename)


def avg(seq):
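    """Return the arithmetic mean of seq."""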
    return sum(seq) / len(seq)


def stddev(seq, mean):
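    """Return the population standard deviation of seq about mean."""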
    diffs = ((float(item) - mean) ** 2 for item in seq)
    return math.sqrt(sum(diffs) / len(seq))


def bench(shellpath, filepath, warmup_runs, counted_runs, stfu=False):
    """Benchmark one file; return (mean_ms, stddev_ms) for the counted runs."""
    assert '"' not in filepath
    code = JS_CODE_TEMPLATE.substitute(filepath=filepath,
                                       warmup_run_count=warmup_runs,
                                       real_run_count=counted_runs)
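    # -e makes the shell execute the code string directly; the snippet prints
    # its results array, which stringifies as a comma-separated list.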
    proc = subp.Popen([shellpath, '-e', code], stdout=subp.PIPE)
    stdout, _ = proc.communicate()
    milliseconds = [float(val) for val in stdout.split(',')]
    mean = avg(milliseconds)
    sigma = stddev(milliseconds, mean)
    if not stfu:
        print 'Runs:', [int(ms) for ms in milliseconds]
        print 'Mean:', mean
        print 'Stddev: %.2f (%.2f%% of mean)' % (sigma, sigma / mean * 100)
    return mean, sigma


def parsemark(filepaths, fbench, stfu=False):
    """:param fbench: fbench(filepath) -> (average_ms, stddev_ms)"""
    bench_map = {}  # {filename: (avg, stddev)}
    for filepath in filepaths:
        filename = os.path.split(filepath)[-1]
        if not stfu:
            print 'Parsemarking %s...' % filename
        bench_map[filename] = fbench(filepath)
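    # Emit the results as a JSON object on stdout so a run's output can be
    # saved and fed back in later via --baseline.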
    print '{'
    for i, (filename, (avg, stddev)) in enumerate(bench_map.iteritems()):
        assert '"' not in filename
        fmt = '    %30s: {"average_ms": %6.2f, "stddev_ms": %6.2f}'
        if i != len(bench_map) - 1:
            fmt += ','
        filename_str = '"%s"' % filename
        print fmt % (filename_str, avg, stddev)
    print '}'
    return dict((filename, dict(average_ms=avg, stddev_ms=stddev))
                for filename, (avg, stddev) in bench_map.iteritems())


def main():
    parser = optparse.OptionParser(usage=__doc__.strip())
    parser.add_option('-w', '--warmup-runs', metavar='COUNT', type=int,
                      default=5,
                      help='used to minimize test instability [%default]')
    parser.add_option('-c', '--counted-runs', metavar='COUNT', type=int,
                      default=50,
                      help='timed data runs that count towards the average '
                           '[%default]')
    parser.add_option('-s', '--shell', metavar='PATH',
                      help='explicit shell location; when omitted, will look '
                           'in likely places')
    parser.add_option('-b', '--baseline', metavar='JSON_PATH',
                      dest='baseline_path',
                      help='json file with baseline values to compare against')
    parser.add_option('-q', '--quiet', dest='stfu', action='store_true',
                      default=False,
                      help='only print JSON to stdout [%default]')
    options, args = parser.parse_args()
    try:
        shellpath = args.pop(0)
    except IndexError:
        parser.print_help()
        print >> sys.stderr, 'error: shellpath required'
        return -1
    try:
        dirpath = args.pop(0)
    except IndexError:
        parser.print_help()
        print >> sys.stderr, 'error: dirpath required'
        return -1
    if not shellpath or not os.path.exists(shellpath):
        print >> sys.stderr, 'error: could not find shell:', shellpath
        return -1
    if options.baseline_path:
        if not os.path.isfile(options.baseline_path):
            print >> sys.stderr, 'error: baseline file does not exist'
            return -1
        if not compare_bench:
            print >> sys.stderr, 'error: compare_bench module missing, cannot compare benchmarks'
            return -1
    benchfile = lambda filepath: bench(shellpath, filepath,
                                       options.warmup_runs,
                                       options.counted_runs,
                                       stfu=options.stfu)
    bench_map = parsemark(gen_filepaths(dirpath), benchfile, options.stfu)
    if options.baseline_path:
        compare_bench.compare_immediate(bench_map, options.baseline_path)
    return 0


if __name__ == '__main__':
    sys.exit(main())
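
For reference, a sample invocation (the paths here are illustrative, not part
of the patch) would be:

    python parsemark.py -w 5 -c 50 -b baseline.json ./js parse_tests/

which does 5 warmup parses and 50 timed parses per file, prints the per-file
JSON timing map, and compares the results against baseline.json via
compare_bench.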