Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f for hacking purposes.
1 # This Source Code Form is subject to the terms of the Mozilla Public
2 # License, v. 2.0. If a copy of the MPL was not distributed with this
3 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
5 # Works with python2.6
7 import datetime, os, re, sys, traceback
8 import math, string, copy, json
9 import subprocess
10 from subprocess import *
11 from operator import itemgetter
class Test:
    """A single benchmark: the path to a .js file plus its display name."""

    def __init__(self, path, name):
        self.path = path  # path to the .js test file handed to the JS shell
        self.name = name  # short name used as the key in the results map

    @classmethod
    def from_file(cls, path, name, options):
        """Alternate constructor; *options* is accepted for interface parity
        but currently unused."""
        return cls(path, name)
def find_tests(dir, substring = None):
    """Recursively collect [path, filename] pairs for every .js file under *dir*.

    When *substring* is given, keep only tests whose path relative to *dir*
    contains it.
    """
    found = []
    for dirpath, dirnames, filenames in os.walk(dir):
        # Skip the walk root when it is the bare current directory.
        if dirpath == '.':
            continue
        for fn in filenames:
            if not fn.endswith('.js'):
                continue
            full_path = os.path.join(dirpath, fn)
            if substring is None or substring in os.path.relpath(full_path, dir):
                found.append([full_path, fn])
    return found
def get_test_cmd(path):
    """Build the argv list that runs one test file under the JS shell.

    JS is the shell binary path, bound as a module global in __main__.
    """
    return [JS, '-f', path]
def avg(seq):
    """Return the arithmetic mean of *seq* (raises ZeroDivisionError if empty)."""
    total = sum(seq)
    return total / len(seq)
def stddev(seq, mean):
    """Population standard deviation of *seq* around a precomputed *mean*."""
    variance = sum((float(x) - mean) ** 2 for x in seq) / len(seq)
    return math.sqrt(variance)
def run_test(test):
    """Run one test under the JS shell and parse GC timing from its stderr.

    With MOZ_GCTIMER=stderr the shell emits whitespace-separated floats in
    repeating triples: total, mark, sweep.  Returns the 6-tuple
    (TMax, TAvg, MMax, MAvg, SMax, SAvg).  Exits the process if no timing
    data was produced.
    """
    env = os.environ.copy()
    env['MOZ_GCTIMER'] = 'stderr'
    cmd = get_test_cmd(test.path)
    # close_fds is not supported together with redirected handles on Windows.
    close_fds = sys.platform != 'win32'
    proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE,
                 close_fds=close_fds, env=env)
    out, err = proc.communicate()
    out, err = out.decode(), err.decode()

    samples = [float(tok) for tok in err.split()]

    if len(samples) == 0:
        print('Error: No data from application. Configured with --enable-gctimer?')
        sys.exit(1)

    # The stream interleaves triples (total, mark, sweep); de-interleave by
    # slicing, which is equivalent to bucketing on index mod 3.
    total = samples[0::3]
    mark = samples[1::3]
    sweep = samples[2::3]

    return max(total), avg(total), max(mark), avg(mark), max(sweep), avg(sweep)
def run_tests(tests, test_dir):
    """Run every test in *tests*, printing one JSON-style result line each.

    Returns {test name: {"TMax": ..., "TAvg": ..., "MMax": ..., "MAvg": ...,
    "SMax": ..., "SAvg": ...}} for the tests that completed before any
    KeyboardInterrupt.  *test_dir* is unused but kept for interface
    compatibility.
    """
    bench_map = {}

    try:
        for i, test in enumerate(tests):
            filename_str = '"%s"' % test.name
            TMax, TAvg, MMax, MAvg, SMax, SAvg = run_test(test)
            bench_map[test.name] = [TMax, TAvg, MMax, MAvg, SMax, SAvg]
            fmt = '%20s: {"TMax": %4.1f, "TAvg": %4.1f, "MMax": %4.1f, "MAvg": %4.1f, "SMax": %4.1f, "SAvg": %4.1f}'
            # No trailing comma after the last entry, so the overall {...}
            # output printed by __main__ stays valid JSON.
            if (i != len(tests) - 1):
                fmt += ','
            # BUG FIX: the last value was previously MAvg, so the "SAvg"
            # column printed the mark average instead of the sweep average.
            print(fmt % (filename_str, TMax, TAvg, MMax, MAvg, SMax, SAvg))
    except KeyboardInterrupt:
        print('fail')

    # .items() (not the Python-2-only .iteritems()) keeps this portable.
    return dict((filename, dict(TMax=TMax, TAvg=TAvg, MMax=MMax, MAvg=MAvg, SMax=SMax, SAvg=SAvg))
                for filename, (TMax, TAvg, MMax, MAvg, SMax, SAvg) in bench_map.items())
def compare(current, baseline):
    """Print a per-test comparison of *current* against *baseline* results.

    Both arguments map test name -> dict with keys 'TMax', 'TAvg', 'MMax',
    'MAvg', 'SMax', 'SAvg'.  The comparison is on TAvg (lower is faster);
    tests missing from *baseline* are reported and skipped.  Prints the
    average speedup when at least one test matched.
    """
    percent_speedups = []
    # .items() (not the Python-2-only .iteritems()) keeps this portable.
    for key, current_result in current.items():
        try:
            baseline_result = baseline[key]
        except KeyError:
            # Single-argument print() is valid on both Python 2 and 3; the
            # original "print key, ..." statement was Python-2-only syntax.
            print('%s missing from baseline' % key)
            continue

        val_getter = itemgetter('TMax', 'TAvg', 'MMax', 'MAvg', 'SMax', 'SAvg')
        BTMax, BTAvg, BMMax, BMAvg, BSMax, BSAvg = val_getter(baseline_result)
        CTMax, CTAvg, CMMax, CMAvg, CSMax, CSAvg = val_getter(current_result)

        # Negative delta means the current run is faster than the baseline.
        delta = (CTAvg / BTAvg - 1) * 100
        if CTAvg <= BTAvg:
            result = 'faster: %6.2f < baseline %6.2f (%+6.2f%%)' % \
                (CTAvg, BTAvg, delta)
        else:
            result = 'SLOWER: %6.2f > baseline %6.2f (%+6.2f%%) ' % \
                (CTAvg, BTAvg, delta)
        percent_speedups.append(delta)
        print('%30s: %s' % (key, result))
    if percent_speedups:
        print('Average speedup: %.2f%%' % avg(percent_speedups))
if __name__ == '__main__':
    script_path = os.path.abspath(__file__)
    script_dir = os.path.dirname(script_path)
    test_dir = os.path.join(script_dir, 'tests')

    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] JS_SHELL [TESTS]')

    op.add_option('-b', '--baseline', metavar='JSON_PATH',
                  dest='baseline_path', help='json file with baseline values to '
                  'compare against')

    (OPTIONS, args) = op.parse_args()
    if len(args) < 1:
        op.error('missing JS_SHELL argument')
    # We need to make sure we are using backslashes on Windows.
    JS, test_args = os.path.normpath(args[0]), args[1:]

    test_list = find_tests(test_dir)

    if not test_list:
        # sys.stderr.write is portable; the original "print >> sys.stderr"
        # form is Python-2-only syntax.
        sys.stderr.write("No tests found matching command line arguments.\n")
        sys.exit(0)

    test_list = [Test.from_file(tst, name, OPTIONS) for tst, name in test_list]

    try:
        print("{")
        bench_map = run_tests(test_list, test_dir)
        print("}")
    except OSError:
        # An OSError from Popen most likely means the shell binary is bad.
        if not os.path.exists(JS):
            sys.stderr.write("JS shell argument: file does not exist: '%s'\n" % JS)
            sys.exit(1)
        else:
            raise

    if OPTIONS.baseline_path:
        # 'with' guarantees the baseline file is closed even if json.load
        # raises (the original leaked the handle on a parse error).
        with open(OPTIONS.baseline_path, 'r') as fh:
            baseline_map = json.load(fh)
        compare(current=bench_map, baseline=baseline_map)