#!/usr/bin/python

#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""
Runs the static rooting analysis: builds the tree under sixgill, computes the
callgraph, GC functions and GC types, and then analyzes the results for
rooting hazards.
"""

import argparse
import os
import re
import subprocess
import sys

from subprocess import Popen
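
# Environment for the subprocesses spawned below: extend PATH with the sixgill
# binaries, point XDB at sixgill's xdb.so plugin, and export the source and
# object directories as SOURCE and ANALYZED_OBJDIR.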
|
def env(config):
    e = dict(os.environ)
    e['PATH'] = '%s:%s' % (e['PATH'], config['sixgill_bin'])
    e['XDB'] = '%(sixgill_bin)s/xdb.so' % config
    e['SOURCE'] = config['source']
    e['ANALYZED_OBJDIR'] = config['objdir']
    return e
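
# Expand the %(key)s placeholders in a command tuple from the config
# dictionary, reporting every fragment that fails to substitute.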
|
def fill(command, config):
    try:
        return tuple(s % config for s in command)
    except Exception:
        # Re-run each substitution individually so the error names every
        # fragment that failed, not just the first one.
        problems = []
        for fragment in command:
            try:
                fragment % config
            except Exception:
                problems.append(fragment)
        raise Exception("\n".join(["Substitution failed:"] + [" %s" % s for s in problems]))
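
# Echo a command in a roughly shell-like form, including any stdout
# redirection and any environment variables that differ from the current
# environment.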
|
def print_command(command, outfile=None, env=None):
    output = ' '.join(command)
    if outfile:
        output += ' > ' + outfile
    if env:
        changed = {}
        e = os.environ
        for key, value in env.items():
            if (key not in e) or (e[key] != value):
                changed[key] = value
        if changed:
            outputs = []
            for key, value in changed.items():
                if key in e and e[key] in value:
                    start = value.index(e[key])
                    end = start + len(e[key])
                    outputs.append('%s="%s${%s}%s"' % (key,
                                                       value[:start],
                                                       key,
                                                       value[end:]))
                else:
                    outputs.append("%s='%s'" % (key, value))
            output = ' '.join(outputs) + " " + output

    print(output)
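
# Run config['jobs'] copies of analyzeRoots.js in parallel, each writing its
# slice of the results to rootingHazards.<n>, then concatenate the per-job
# output into a single file.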
|
def generate_hazards(config, outfilename):
    jobs = []
    for i in range(config['jobs']):
        command = fill(('%(js)s',
                        '%(analysis_scriptdir)s/analyzeRoots.js',
                        '%(gcFunctions_list)s',
                        '%(gcEdges)s',
                        '%(suppressedFunctions_list)s',
                        '%(gcTypes)s',
                        str(i+1), '%(jobs)s',
                        'tmp.%s' % (i+1,)),
                       config)
        outfile = 'rootingHazards.%s' % (i+1,)
        output = open(outfile, 'w')
        print_command(command, outfile=outfile, env=env(config))
        jobs.append((command, Popen(command, stdout=output, env=env(config))))

    final_status = 0
    while jobs:
        pid, status = os.wait()
        jobs = [job for job in jobs if job[1].pid != pid]
        final_status = final_status or status

    if final_status:
        raise subprocess.CalledProcessError(final_status, 'analyzeRoots.js')

    with open(outfilename, 'w') as output:
        command = ['cat'] + ['rootingHazards.%s' % (i+1,) for i in range(config['jobs'])]
        print_command(command, outfile=outfilename)
        subprocess.call(command, stdout=output)
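
# Each analysis step maps to either a callable or a command template, plus the
# output file(s) it produces. %(key)s fragments are filled in from the config
# dictionary; [name] fragments mark output filenames that the command writes
# itself and that later steps can reference as %(name)s.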
|
JOBS = { 'dbs':
             (('%(ANALYSIS_SCRIPTDIR)s/run_complete',
               '--foreground',
               '--no-logs',
               '--build-root=%(objdir)s',
               '--wrap-dir=%(sixgill)s/scripts/wrap_gcc',
               '--work-dir=work',
               '-b', '%(sixgill_bin)s',
               '--buildcommand=%(buildcommand)s',
               '.'),
              ()),

         'callgraph':
             (('%(js)s', '%(analysis_scriptdir)s/computeCallgraph.js'),
              'callgraph.txt'),

         'gcFunctions':
             (('%(js)s', '%(analysis_scriptdir)s/computeGCFunctions.js', '%(callgraph)s',
               '[gcFunctions]', '[gcFunctions_list]', '[gcEdges]', '[suppressedFunctions_list]'),
              ('gcFunctions.txt', 'gcFunctions.lst', 'gcEdges.txt', 'suppressedFunctions.lst')),

         'gcTypes':
             (('%(js)s', '%(analysis_scriptdir)s/computeGCTypes.js',),
              'gcTypes.txt'),

         'allFunctions':
             (('%(sixgill_bin)s/xdbkeys', 'src_body.xdb',),
              'allFunctions.txt'),

         'hazards':
             (generate_hazards, 'rootingHazards.txt'),

         'explain':
             (('python', '%(analysis_scriptdir)s/explain.py',
               '%(hazards)s', '%(gcFunctions)s',
               '[explained_hazards]', '[unnecessary]', '[refs]'),
              ('hazards.txt', 'unnecessary.txt', 'refs.txt'))
         }
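
# Yield (index, name) pairs for the command arguments of the form "[name]".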
|
def out_indexes(command):
    for i in range(len(command)):
        m = re.match(r'^\[(.*)\]$', command[i])
        if m:
            yield (i, m.group(1))
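
# Run a single step: callables are invoked directly; command templates are
# filled in from the config and executed with their outputs written to .tmp
# files, which are renamed into place only after the command succeeds.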
|
def run_job(name, config):
    cmdspec, outfiles = JOBS[name]
    print("Running " + name + " to generate " + str(outfiles))
    if hasattr(cmdspec, '__call__'):
        cmdspec(config, outfiles)
    else:
        temp_map = {}
        cmdspec = fill(cmdspec, config)
        if isinstance(outfiles, basestring):
            stdout_filename = '%s.tmp' % name
            temp_map[stdout_filename] = outfiles
            print_command(cmdspec, outfile=outfiles, env=env(config))
        else:
            stdout_filename = None
            pc = list(cmdspec)
            outfile = 0
            for (i, outparam) in out_indexes(cmdspec):
                pc[i] = outfiles[outfile]
                outfile += 1
            print_command(pc, env=env(config))

        # Replace each [name] placeholder with a temporary filename; this runs
        # for both branches above (there are simply no placeholders when the
        # single output comes from stdout).
        command = list(cmdspec)
        outfile = 0
        for (i, outparam) in out_indexes(cmdspec):
            command[i] = '%s.tmp' % outparam
            temp_map[command[i]] = outfiles[outfile]
            outfile += 1

        sys.stdout.flush()
        if stdout_filename is None:
            subprocess.check_call(command, env=env(config))
        else:
            with open(stdout_filename, 'w') as output:
                subprocess.check_call(command, stdout=output, env=env(config))
        for (temp, final) in temp_map.items():
            try:
                os.rename(temp, final)
            except OSError:
                print("Error renaming %s -> %s" % (temp, final))
                raise
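
# Top-level driver: configuration comes from optional defaults.py files, the
# command line, and the ANALYZED_OBJDIR / SOURCE / BUILD environment variables.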
|
config = { 'ANALYSIS_SCRIPTDIR': os.path.dirname(__file__) }

defaults = [ '%s/defaults.py' % config['ANALYSIS_SCRIPTDIR'],
             '%s/defaults.py' % os.getcwd() ]

for default in defaults:
    try:
        execfile(default, config)
        print("Loaded %s" % default)
    except Exception:
        # defaults.py is optional; skip it if it is missing or unreadable.
        pass

data = config.copy()
|
parser = argparse.ArgumentParser(description='Statically analyze build tree for rooting hazards.')
parser.add_argument('step', metavar='STEP', type=str, nargs='?',
                    help='run starting from this step')
parser.add_argument('--source', metavar='SOURCE', type=str, nargs='?',
                    help='source code to analyze')
parser.add_argument('--upto', metavar='UPTO', type=str, nargs='?',
                    help='last step to execute')
parser.add_argument('--jobs', '-j', default=None, metavar='JOBS', type=int,
                    help='number of simultaneous analyzeRoots.js jobs')
parser.add_argument('--list', const=True, nargs='?', type=bool,
                    help='display available steps')
parser.add_argument('--buildcommand', '--build', '-b', type=str, nargs='?',
                    help='command to build the tree being analyzed')
parser.add_argument('--tag', '-t', type=str, nargs='?',
                    help='name of job, also sets build command to "build.<tag>"')
parser.add_argument('--expect-file', type=str, nargs='?',
                    help='deprecated option, temporarily still present for backwards compatibility')
|
args = parser.parse_args()
for k, v in vars(args).items():
    if v is not None:
        data[k] = v

if args.tag and not args.buildcommand:
    args.buildcommand = "build.%s" % args.tag

if args.jobs is not None:
    data['jobs'] = args.jobs
if not data.get('jobs'):
    # nproc prints a string; convert it so the job count can be used with range().
    data['jobs'] = int(subprocess.check_output(['nproc', '--ignore=1']).strip())
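
# The build command comes from --buildcommand/-b, else the BUILD environment
# variable, else a plain make invocation; the analyzed objdir and source tree
# may likewise be supplied through ANALYZED_OBJDIR and SOURCE.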
|
if args.buildcommand:
    data['buildcommand'] = args.buildcommand
elif 'BUILD' in os.environ:
    data['buildcommand'] = os.environ['BUILD']
else:
    data['buildcommand'] = 'make -j4 -s'

if 'ANALYZED_OBJDIR' in os.environ:
    data['objdir'] = os.environ['ANALYZED_OBJDIR']

if 'SOURCE' in os.environ:
    data['source'] = os.environ['SOURCE']
if not data.get('source') and data.get('sixgill_bin'):
    # Infer the source directory from the location of jsapi.cpp recorded in
    # the sixgill database, trimming the trailing newline from the output.
    path = subprocess.check_output(['sh', '-c', data['sixgill_bin'] + '/xdbkeys file_source.xdb | grep jsapi.cpp'])
    data['source'] = path.strip().replace("/js/src/jsapi.cpp", "")
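
# Analysis steps in dependency order; a run can be restricted to a sub-range
# with the STEP positional argument and --upto.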
|
steps = [ 'dbs',
          'callgraph',
          'gcTypes',
          'gcFunctions',
          'allFunctions',
          'hazards',
          'explain' ]

if args.list:
    for step in steps:
        command, outfilename = JOBS[step]
        if outfilename:
            print("%s -> %s" % (step, outfilename))
        else:
            print(step)
    sys.exit(0)
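
# Record each step's output filename(s) in the config so that later steps can
# refer to them as %(callgraph)s, %(gcFunctions)s, and so on.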
|
for step in steps:
    command, outfiles = JOBS[step]
    if isinstance(outfiles, basestring):
        data[step] = outfiles
    else:
        outfile = 0
        for (i, name) in out_indexes(command):
            data[name] = outfiles[outfile]
            outfile += 1
        assert len(outfiles) == outfile, 'step \'%s\': mismatched number of output files and params' % step

if args.step:
    steps = steps[steps.index(args.step):]

if args.upto:
    steps = steps[:steps.index(args.upto)+1]

for step in steps:
    run_job(step, data)