/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * This is an implementation of stack unwinding according to a subset
 * of the ARM Exception Handling ABI, as described in:
 *   http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038a/IHI0038A_ehabi.pdf
 *
 * This handles only the ARM-defined "personality routines" (chapter
 * 9), and doesn't track the values of FP registers, because profiling
 * needs only the chain of PC/SP values.
 *
 * Because the exception handling info may not be accurate for all
 * possible places where an async signal could occur (e.g., in a
 * prologue or epilogue), this bounds-checks all stack accesses.
 *
 * This file uses "struct" for structures in the exception tables and
 * "class" otherwise. We should avoid violating the C++11
 * standard-layout rules in the former.
 */

#include "EHABIStackWalk.h"

#include "shared-libraries.h"
#include "platform.h"

#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Endian.h"

#include <algorithm>
#include <elf.h>
#include <stdint.h>
#include <vector>
#include <string>

#ifndef PT_ARM_EXIDX
#define PT_ARM_EXIDX 0x70000001
#endif


namespace mozilla {

struct EHEntry;

class EHState {
  // Note that any core register can be used as a "frame pointer" to
  // influence the unwinding process, so this must track all of them.
  uint32_t mRegs[16];
public:
  bool unwind(const EHEntry *aEntry, const void *stackBase);
  uint32_t &operator[](int i) { return mRegs[i]; }
  const uint32_t &operator[](int i) const { return mRegs[i]; }
  EHState(const mcontext_t &);
};

enum {
  R_SP = 13,
  R_LR = 14,
  R_PC = 15
};

class EHEntryHandle {
  const EHEntry *mValue;
public:
  EHEntryHandle(const EHEntry *aEntry) : mValue(aEntry) { }
  const EHEntry *value() const { return mValue; }
};

class EHTable {
  uint32_t mStartPC;
  uint32_t mEndPC;
  uint32_t mLoadOffset;
  // In principle we should be able to binary-search the index section in
  // place, but the ICS toolchain's linker is noncompliant and produces
  // indices that aren't entirely sorted (e.g., libc). So we have this:
  std::vector<EHEntryHandle> mEntries;
  std::string mName;
public:
  EHTable(const void *aELF, size_t aSize, const std::string &aName);
  const EHEntry *lookup(uint32_t aPC) const;
  bool isValid() const { return mEntries.size() > 0; }
  const std::string &name() const { return mName; }
  uint32_t startPC() const { return mStartPC; }
  uint32_t endPC() const { return mEndPC; }
  uint32_t loadOffset() const { return mLoadOffset; }
};

class EHAddrSpace {
  std::vector<uint32_t> mStarts;
  std::vector<EHTable> mTables;
  static mozilla::Atomic<const EHAddrSpace*> sCurrent;
public:
  explicit EHAddrSpace(const std::vector<EHTable>& aTables);
  const EHTable *lookup(uint32_t aPC) const;
  static void Update();
  static const EHAddrSpace *Get();
};


void EHABIStackWalkInit()
{
  EHAddrSpace::Update();
}

size_t EHABIStackWalk(const mcontext_t &aContext, void *stackBase,
                      void **aSPs, void **aPCs, const size_t aNumFrames)
{
  const EHAddrSpace *space = EHAddrSpace::Get();
  EHState state(aContext);
  size_t count = 0;

  while (count < aNumFrames) {
    uint32_t pc = state[R_PC], sp = state[R_SP];
    aPCs[count] = reinterpret_cast<void *>(pc);
    aSPs[count] = reinterpret_cast<void *>(sp);
    count++;

    if (!space)
      break;
    // TODO: cache these lookups. Binary-searching libxul is
    // expensive (possibly more expensive than doing the actual
    // unwind), and even a small cache should help.
    const EHTable *table = space->lookup(pc);
    if (!table)
      break;
    const EHEntry *entry = table->lookup(pc);
    if (!entry)
      break;
    if (!state.unwind(entry, stackBase))
      break;
  }

  return count;
}


struct PRel31 {
  uint32_t mBits;
  bool topBit() const { return mBits & 0x80000000; }
  uint32_t value() const { return mBits & 0x7fffffff; }
  int32_t offset() const { return (static_cast<int32_t>(mBits) << 1) >> 1; }
  const void *compute() const {
    return reinterpret_cast<const char *>(this) + offset();
  }
private:
  PRel31(const PRel31 &copied) MOZ_DELETE;
  PRel31() MOZ_DELETE;
};

struct EHEntry {
  PRel31 startPC;
  PRel31 exidx;
private:
  EHEntry(const EHEntry &copied) MOZ_DELETE;
  EHEntry() MOZ_DELETE;
};


class EHInterp {
public:
  // Note that stackLimit is exclusive and stackBase is inclusive
  // (i.e., stackLimit < SP <= stackBase), following the convention
  // set by the AAPCS spec.
  EHInterp(EHState &aState, const EHEntry *aEntry,
           uint32_t aStackLimit, uint32_t aStackBase)
    : mState(aState),
      mStackLimit(aStackLimit),
      mStackBase(aStackBase),
      mNextWord(0),
      mWordsLeft(0),
      mFailed(false)
  {
    const PRel31 &exidx = aEntry->exidx;
    uint32_t firstWord;

    if (exidx.mBits == 1) { // EXIDX_CANTUNWIND
      mFailed = true;
      return;
    }
    if (exidx.topBit()) {
      firstWord = exidx.mBits;
    } else {
      mNextWord = reinterpret_cast<const uint32_t *>(exidx.compute());
      firstWord = *mNextWord++;
    }

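    // The top byte of firstWord selects the ARM-defined compact
    // personality routine: 0x80 is the short form (three unwind bytes
    // follow in this word), while 0x81/0x82 are the long forms whose
    // second byte gives the count of additional instruction words.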
    switch (firstWord >> 24) {
    case 0x80: // short
      mWord = firstWord << 8;
      mBytesLeft = 3;
      break;
    case 0x81: case 0x82: // long; catch descriptor size ignored
      mWord = firstWord << 16;
      mBytesLeft = 2;
      mWordsLeft = (firstWord >> 16) & 0xff;
      break;
    default:
      // unknown personality
      mFailed = true;
    }
  }

  bool unwind();

private:
  // TODO: GCC has been observed not CSEing repeated reads of
  // mState[R_SP] with writes to mFailed between them, suggesting that
  // it hasn't determined that they can't alias and is thus missing
  // optimization opportunities. So, we may want to flatten EHState
  // into this class; this may also make the code simpler.
  EHState &mState;
  uint32_t mStackLimit;
  uint32_t mStackBase;
  const uint32_t *mNextWord;
  uint32_t mWord;
  uint8_t mWordsLeft;
  uint8_t mBytesLeft;
  bool mFailed;

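  // Compact unwinding instruction opcodes (I_*) and the masks (M_*)
  // used to match the variable low bits of each encoding.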
  enum {
    I_ADDSP = 0x00, // 0sxxxxxx (subtract if s)
    M_ADDSP = 0x80,
    I_POPMASK = 0x80, // 1000iiii iiiiiiii (if any i set)
    M_POPMASK = 0xf0,
    I_MOVSP = 0x90, // 1001nnnn
    M_MOVSP = 0xf0,
    I_POPN = 0xa0, // 1010lnnn
    M_POPN = 0xf0,
    I_FINISH = 0xb0, // 10110000
    I_POPLO = 0xb1, // 10110001 0000iiii (if any i set)
    I_ADDSPBIG = 0xb2, // 10110010 uleb128
    I_POPFDX = 0xb3, // 10110011 sssscccc
    I_POPFDX8 = 0xb8, // 10111nnn
    M_POPFDX8 = 0xf8,
    // "Intel Wireless MMX" extensions omitted.
    I_POPFDD = 0xc8, // 1100100h sssscccc
    M_POPFDD = 0xfe,
    I_POPFDD8 = 0xd0, // 11010nnn
    M_POPFDD8 = 0xf8
  };

  uint8_t next() {
    if (mBytesLeft == 0) {
      if (mWordsLeft == 0) {
        return I_FINISH;
      }
      mWordsLeft--;
      mWord = *mNextWord++;
      mBytesLeft = 4;
    }
    mBytesLeft--;
    mWord = (mWord << 8) | (mWord >> 24); // rotate
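    // The rotate has moved the next (most significant) unwind byte into
    // the low byte of mWord; the uint8_t return type truncates to just
    // that byte.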
    return mWord;
  }

  uint32_t &vSP() { return mState[R_SP]; }
  uint32_t *ptrSP() { return reinterpret_cast<uint32_t *>(vSP()); }

  void checkStackBase() { if (vSP() > mStackBase) mFailed = true; }
  void checkStackLimit() { if (vSP() <= mStackLimit) mFailed = true; }
  void checkStackAlign() { if ((vSP() & 3) != 0) mFailed = true; }
  void checkStack() {
    checkStackBase();
    checkStackLimit();
    checkStackAlign();
  }

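  // Pop the registers in [first, last] whose bits are set in mask
  // (bit 0 corresponds to `first`). A popped SP value is held back in
  // tmpSP until the rest of the range has been loaded, then installed
  // and re-checked against the stack bounds.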
  void popRange(uint8_t first, uint8_t last, uint16_t mask) {
    bool hasSP = false;
    uint32_t tmpSP;
    if (mask == 0)
      mFailed = true;
    for (uint8_t r = first; r <= last; ++r) {
      if (mask & 1) {
        if (r == R_SP) {
          hasSP = true;
          tmpSP = *ptrSP();
        } else
          mState[r] = *ptrSP();
        vSP() += 4;
        checkStackBase();
        if (mFailed)
          return;
      }
      mask >>= 1;
    }
    if (hasSP) {
      vSP() = tmpSP;
      checkStack();
    }
  }
};


bool EHState::unwind(const EHEntry *aEntry, const void *stackBasePtr) {
  // The unwinding program cannot set SP to less than the initial value.
  uint32_t stackLimit = mRegs[R_SP] - 4;
  uint32_t stackBase = reinterpret_cast<uint32_t>(stackBasePtr);
  EHInterp interp(*this, aEntry, stackLimit, stackBase);
  return interp.unwind();
}

bool EHInterp::unwind() {
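  // PC == 0 marks "not yet restored by the unwind program"; if we reach
  // Finish without an explicit pop into PC, the return address is in LR.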
  mState[R_PC] = 0;
  checkStack();
  while (!mFailed) {
    uint8_t insn = next();
#if DEBUG_EHABI_UNWIND
    LOGF("unwind insn = %02x", (unsigned)insn);
#endif
    // Try to put the common cases first.

    // 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4
    // 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4
    if ((insn & M_ADDSP) == I_ADDSP) {
      uint32_t offset = ((insn & 0x3f) << 2) + 4;
      if (insn & 0x40) {
        vSP() -= offset;
        checkStackLimit();
      } else {
        vSP() += offset;
        checkStackBase();
      }
      continue;
    }

    // 10100nnn: Pop r4-r[4+nnn]
    // 10101nnn: Pop r4-r[4+nnn], r14
    if ((insn & M_POPN) == I_POPN) {
      uint8_t n = (insn & 0x07) + 1;
      bool lr = insn & 0x08;
      uint32_t *ptr = ptrSP();
      vSP() += (n + (lr ? 1 : 0)) * 4;
      checkStackBase();
      for (uint8_t r = 4; r < 4 + n; ++r)
        mState[r] = *ptr++;
      if (lr)
        mState[R_LR] = *ptr++;
      continue;
    }

    // 10110000: Finish
    if (insn == I_FINISH) {
      if (mState[R_PC] == 0) {
        mState[R_PC] = mState[R_LR];
        // Non-standard change (bug 916106): Prevent the caller from
        // re-using LR. Since the caller is by definition not a leaf
        // routine, it will have to restore LR from somewhere to
        // return to its own caller, so we can safely zero it here.
        // This makes a difference only if an error in unwinding
        // (e.g., caused by starting from within a prologue/epilogue)
        // causes us to load a pointer to a leaf routine as LR; if we
        // don't do something, we'll go into an infinite loop of
        // "returning" to that same function.
        mState[R_LR] = 0;
      }
      return true;
    }

    // 1001nnnn: Set vsp = r[nnnn]
    if ((insn & M_MOVSP) == I_MOVSP) {
      vSP() = mState[insn & 0x0f];
      checkStack();
      continue;
    }

    // 11001000 sssscccc: Pop VFP regs D[16+ssss]-D[16+ssss+cccc] (as FLDMFDD)
    // 11001001 sssscccc: Pop VFP regs D[ssss]-D[ssss+cccc] (as FLDMFDD)
    if ((insn & M_POPFDD) == I_POPFDD) {
      uint8_t n = (next() & 0x0f) + 1;
      // Note: if 16+ssss+cccc > 31, the encoding is reserved.
      // As the space is currently unused, we don't try to check.
      vSP() += 8 * n;
      checkStackBase();
      continue;
    }

    // 11010nnn: Pop VFP regs D[8]-D[8+nnn] (as FLDMFDD)
    if ((insn & M_POPFDD8) == I_POPFDD8) {
      uint8_t n = (insn & 0x07) + 1;
      vSP() += 8 * n;
      checkStackBase();
      continue;
    }

    // 10110010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2)
    if (insn == I_ADDSPBIG) {
      uint32_t acc = 0;
      uint8_t shift = 0;
      uint8_t byte;
      do {
        if (shift >= 32)
          return false;
        byte = next();
        acc |= (byte & 0x7f) << shift;
        shift += 7;
      } while (byte & 0x80);
      uint32_t offset = 0x204 + (acc << 2);
      // The calculations above could have overflowed.
      // But the one we care about is this:
      if (vSP() + offset < vSP())
        mFailed = true;
      vSP() += offset;
      // ...so that this is the only other check needed:
      checkStackBase();
      continue;
    }

    // 1000iiii iiiiiiii (i not all 0): Pop under masks {r15-r12}, {r11-r4}
    if ((insn & M_POPMASK) == I_POPMASK) {
      popRange(4, 15, ((insn & 0x0f) << 8) | next());
      continue;
    }

    // 10110001 0000iiii (i not all 0): Pop under mask {r3-r0}
    if (insn == I_POPLO) {
      popRange(0, 3, next() & 0x0f);
      continue;
    }

    // 10110011 sssscccc: Pop VFP regs D[ssss]-D[ssss+cccc] (as FLDMFDX)
    if (insn == I_POPFDX) {
      uint8_t n = (next() & 0x0f) + 1;
      vSP() += 8 * n + 4;
      checkStackBase();
      continue;
    }

    // 10111nnn: Pop VFP regs D[8]-D[8+nnn] (as FLDMFDX)
    if ((insn & M_POPFDX8) == I_POPFDX8) {
      uint8_t n = (insn & 0x07) + 1;
      vSP() += 8 * n + 4;
      checkStackBase();
      continue;
    }

    // unhandled instruction
#ifdef DEBUG_EHABI_UNWIND
    LOGF("Unhandled EHABI instruction 0x%02x", insn);
#endif
    mFailed = true;
  }
  return false;
}


bool operator<(const EHTable &lhs, const EHTable &rhs) {
  // Compare start addresses so std::sort gets a strict weak ordering;
  // the tables' address ranges are disjoint, so this orders them fully.
  return lhs.startPC() < rhs.startPC();
}

// Async signal unsafe.
EHAddrSpace::EHAddrSpace(const std::vector<EHTable>& aTables)
  : mTables(aTables)
{
  std::sort(mTables.begin(), mTables.end());
  DebugOnly<uint32_t> lastEnd = 0;
  for (std::vector<EHTable>::iterator i = mTables.begin();
       i != mTables.end(); ++i) {
    MOZ_ASSERT(i->startPC() >= lastEnd);
    mStarts.push_back(i->startPC());
    lastEnd = i->endPC();
  }
}

const EHTable *EHAddrSpace::lookup(uint32_t aPC) const {
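  // Find the last table whose start address is <= aPC, then check that
  // aPC actually falls within that table's range.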
  ptrdiff_t i = (std::upper_bound(mStarts.begin(), mStarts.end(), aPC)
                 - mStarts.begin()) - 1;

  if (i < 0 || aPC >= mTables[i].endPC())
    return 0;
  return &mTables[i];
}


bool operator<(const EHEntryHandle &lhs, const EHEntryHandle &rhs) {
  return lhs.value()->startPC.compute() < rhs.value()->startPC.compute();
}

const EHEntry *EHTable::lookup(uint32_t aPC) const {
  MOZ_ASSERT(aPC >= mStartPC);
  if (aPC >= mEndPC)
    return nullptr;

  std::vector<EHEntryHandle>::const_iterator begin = mEntries.begin();
  std::vector<EHEntryHandle>::const_iterator end = mEntries.end();
  MOZ_ASSERT(begin < end);
  if (aPC < reinterpret_cast<uint32_t>(begin->value()->startPC.compute()))
    return nullptr;

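  // Binary search for the last entry whose start address is <= aPC;
  // the loop maintains begin->startPC <= aPC < end->startPC (or the end
  // of the table).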
  while (end - begin > 1) {
    std::vector<EHEntryHandle>::const_iterator mid = begin + (end - begin) / 2;
    if (aPC < reinterpret_cast<uint32_t>(mid->value()->startPC.compute()))
      end = mid;
    else
      begin = mid;
  }
  return begin->value();
}


#if MOZ_LITTLE_ENDIAN
static const unsigned char hostEndian = ELFDATA2LSB;
#elif MOZ_BIG_ENDIAN
static const unsigned char hostEndian = ELFDATA2MSB;
#else
#error "No endian?"
#endif

// Async signal unsafe. (Note use of std::vector::reserve.)
EHTable::EHTable(const void *aELF, size_t aSize, const std::string &aName)
  : mStartPC(~0), // largest uint32_t
    mEndPC(0),
    mName(aName)
{
  const uint32_t base = reinterpret_cast<uint32_t>(aELF);

  if (aSize < sizeof(Elf32_Ehdr))
    return;

  const Elf32_Ehdr &file = *(reinterpret_cast<Elf32_Ehdr *>(base));
  if (memcmp(&file.e_ident[EI_MAG0], ELFMAG, SELFMAG) != 0 ||
      file.e_ident[EI_CLASS] != ELFCLASS32 ||
      file.e_ident[EI_DATA] != hostEndian ||
      file.e_ident[EI_VERSION] != EV_CURRENT ||
      file.e_ident[EI_OSABI] != ELFOSABI_SYSV ||
#ifdef EI_ABIVERSION
      file.e_ident[EI_ABIVERSION] != 0 ||
#endif
      file.e_machine != EM_ARM ||
      file.e_version != EV_CURRENT)
    // e_flags?
    return;

  MOZ_ASSERT(file.e_phoff + file.e_phnum * file.e_phentsize <= aSize);
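  // Scan the program headers for the ARM exception index segment and
  // for the executable PT_LOAD segments that bound the code range.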
  const Elf32_Phdr *exidxHdr = 0, *zeroHdr = 0;
  for (unsigned i = 0; i < file.e_phnum; ++i) {
    const Elf32_Phdr &phdr =
      *(reinterpret_cast<Elf32_Phdr *>(base + file.e_phoff
                                       + i * file.e_phentsize));
    if (phdr.p_type == PT_ARM_EXIDX) {
      exidxHdr = &phdr;
    } else if (phdr.p_type == PT_LOAD) {
      if (phdr.p_offset == 0) {
        zeroHdr = &phdr;
      }
      if (phdr.p_flags & PF_X) {
        mStartPC = std::min(mStartPC, phdr.p_vaddr);
        mEndPC = std::max(mEndPC, phdr.p_vaddr + phdr.p_memsz);
      }
    }
  }
  if (!exidxHdr)
    return;
  if (!zeroHdr)
    return;
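  // The load bias: base is the mapped address of the ELF header, and
  // zeroHdr->p_vaddr is its link-time address (the PT_LOAD segment at
  // file offset 0 contains the header).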
  mLoadOffset = base - zeroHdr->p_vaddr;
  mStartPC += mLoadOffset;
  mEndPC += mLoadOffset;

  // Create a sorted index of the index to work around linker bugs.
  const EHEntry *startTable =
    reinterpret_cast<const EHEntry *>(mLoadOffset + exidxHdr->p_vaddr);
  const EHEntry *endTable =
    reinterpret_cast<const EHEntry *>(mLoadOffset + exidxHdr->p_vaddr
                                      + exidxHdr->p_memsz);
  mEntries.reserve(endTable - startTable);
  for (const EHEntry *i = startTable; i < endTable; ++i)
    mEntries.push_back(i);
  std::sort(mEntries.begin(), mEntries.end());
}


mozilla::Atomic<const EHAddrSpace*> EHAddrSpace::sCurrent(nullptr);

// Async signal safe; can fail if Update() hasn't returned yet.
const EHAddrSpace *EHAddrSpace::Get() {
  return sCurrent;
}

// Collect unwinding information from loaded objects. Calls after the
// first have no effect. Async signal unsafe.
void EHAddrSpace::Update() {
  const EHAddrSpace *space = sCurrent;
  if (space)
    return;

  SharedLibraryInfo info = SharedLibraryInfo::GetInfoForSelf();
  std::vector<EHTable> tables;

  for (size_t i = 0; i < info.GetSize(); ++i) {
    const SharedLibrary &lib = info.GetEntry(i);
    if (lib.GetOffset() != 0)
      // TODO: if it has a name, and we haven't seen a mapping of
      // offset 0 for that file, try opening it and reading the
      // headers instead. The only thing I've seen so far that's
      // linked so as to need that treatment is the dynamic linker
      // itself.
      continue;
    EHTable tab(reinterpret_cast<const void *>(lib.GetStart()),
                lib.GetEnd() - lib.GetStart(), lib.GetName());
    if (tab.isValid())
      tables.push_back(tab);
  }
  space = new EHAddrSpace(tables);

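  // Publish the address space atomically; if another thread won the
  // race, discard our copy and use the winner's.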
  if (!sCurrent.compareExchange(nullptr, space)) {
    delete space;
    space = sCurrent;
  }
}


EHState::EHState(const mcontext_t &context) {
#ifdef linux
  mRegs[0] = context.arm_r0;
  mRegs[1] = context.arm_r1;
  mRegs[2] = context.arm_r2;
  mRegs[3] = context.arm_r3;
  mRegs[4] = context.arm_r4;
  mRegs[5] = context.arm_r5;
  mRegs[6] = context.arm_r6;
  mRegs[7] = context.arm_r7;
  mRegs[8] = context.arm_r8;
  mRegs[9] = context.arm_r9;
  mRegs[10] = context.arm_r10;
  mRegs[11] = context.arm_fp;
  mRegs[12] = context.arm_ip;
  mRegs[13] = context.arm_sp;
  mRegs[14] = context.arm_lr;
  mRegs[15] = context.arm_pc;
#else
# error "Unhandled OS for ARM EHABI unwinding"
#endif
}

} // namespace mozilla