Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * This is an implementation of stack unwinding according to a subset
 * of the ARM Exception Handling ABI, as described in:
 *   http://infocenter.arm.com/help/topic/com.arm.doc.ihi0038a/IHI0038A_ehabi.pdf
 *
 * This handles only the ARM-defined "personality routines" (chapter
 * 9), and doesn't track the value of FP registers, because profiling
 * needs only a chain of PC/SP values.
 *
 * Because the exception handling info may not be accurate for all
 * possible places where an async signal could occur (e.g., in a
 * prologue or epilogue), this bounds-checks all stack accesses.
 *
 * This file uses "struct" for structures in the exception tables and
 * "class" otherwise. We should avoid violating the C++11
 * standard-layout rules in the former.
 */

#include "EHABIStackWalk.h"

#include "shared-libraries.h"
#include "platform.h"

#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/Endian.h"

#include <algorithm>
#include <elf.h>
#include <stdint.h>
#include <vector>
#include <string>

#ifndef PT_ARM_EXIDX
#define PT_ARM_EXIDX 0x70000001
#endif


namespace mozilla {

struct EHEntry;

class EHState {
  // Note that any core register can be used as a "frame pointer" to
  // influence the unwinding process, so this must track all of them.
  uint32_t mRegs[16];
public:
  bool unwind(const EHEntry *aEntry, const void *stackBase);
  uint32_t &operator[](int i) { return mRegs[i]; }
  const uint32_t &operator[](int i) const { return mRegs[i]; }
  EHState(const mcontext_t &);
};

enum {
  R_SP = 13,
  R_LR = 14,
  R_PC = 15
};

class EHEntryHandle {
  const EHEntry *mValue;
public:
  EHEntryHandle(const EHEntry *aEntry) : mValue(aEntry) { }
  const EHEntry *value() const { return mValue; }
};

class EHTable {
  uint32_t mStartPC;
  uint32_t mEndPC;
  uint32_t mLoadOffset;
  // In principle we should be able to binary-search the index section in
  // place, but the ICS toolchain's linker is noncompliant and produces
  // indices that aren't entirely sorted (e.g., libc). So we have this:
  std::vector<EHEntryHandle> mEntries;
  std::string mName;
public:
  EHTable(const void *aELF, size_t aSize, const std::string &aName);
  const EHEntry *lookup(uint32_t aPC) const;
  bool isValid() const { return mEntries.size() > 0; }
  const std::string &name() const { return mName; }
  uint32_t startPC() const { return mStartPC; }
  uint32_t endPC() const { return mEndPC; }
  uint32_t loadOffset() const { return mLoadOffset; }
};

class EHAddrSpace {
  std::vector<uint32_t> mStarts;
  std::vector<EHTable> mTables;
  static mozilla::Atomic<const EHAddrSpace*> sCurrent;
public:
  explicit EHAddrSpace(const std::vector<EHTable>& aTables);
  const EHTable *lookup(uint32_t aPC) const;
  static void Update();
  static const EHAddrSpace *Get();
};


void EHABIStackWalkInit()
{
  EHAddrSpace::Update();
}

size_t EHABIStackWalk(const mcontext_t &aContext, void *stackBase,
                      void **aSPs, void **aPCs, const size_t aNumFrames)
{
  const EHAddrSpace *space = EHAddrSpace::Get();
  EHState state(aContext);
  size_t count = 0;

  while (count < aNumFrames) {
    uint32_t pc = state[R_PC], sp = state[R_SP];
    aPCs[count] = reinterpret_cast<void *>(pc);
    aSPs[count] = reinterpret_cast<void *>(sp);
    count++;

    if (!space)
      break;
    // TODO: cache these lookups. Binary-searching libxul is
    // expensive (possibly more expensive than doing the actual
    // unwind), and even a small cache should help.
    const EHTable *table = space->lookup(pc);
    if (!table)
      break;
    const EHEntry *entry = table->lookup(pc);
    if (!entry)
      break;
    if (!state.unwind(entry, stackBase))
      break;
  }

  return count;
}
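
// Illustrative usage sketch (not part of the original file): a profiler's
// SIGPROF handler might drive the walker roughly like this, assuming the
// mcontext_t is taken from the ucontext_t the kernel passes to the handler.
// The buffer names and the way stackBase is obtained are hypothetical:
//
//   static void *samplePCs[64], *sampleSPs[64];
//
//   static void SigprofHandler(int sig, siginfo_t *info, void *ctx) {
//     const mcontext_t &mc = static_cast<ucontext_t *>(ctx)->uc_mcontext;
//     void *stackBase = ...;  // this thread's stack top, recorded earlier
//     size_t n = mozilla::EHABIStackWalk(mc, stackBase,
//                                        sampleSPs, samplePCs, 64);
//     // samplePCs[0..n) now holds the PC chain, innermost frame first.
//   }
//
// EHABIStackWalkInit() must have run (off the signal path) first, so that
// EHAddrSpace::Get() above can return a populated table.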


struct PRel31 {
  uint32_t mBits;
  bool topBit() const { return mBits & 0x80000000; }
  uint32_t value() const { return mBits & 0x7fffffff; }
  int32_t offset() const { return (static_cast<int32_t>(mBits) << 1) >> 1; }
  const void *compute() const {
    return reinterpret_cast<const char *>(this) + offset();
  }
private:
  PRel31(const PRel31 &copied) MOZ_DELETE;
  PRel31() MOZ_DELETE;
};
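
// Worked example (illustrative, not from the original source): PRel31 holds a
// place-relative, sign-extended 31-bit offset. If a field at address
// 0x40001000 contains mBits == 0x7ffffff8, bit 30 is set, so offset() yields
// -8 and compute() returns 0x40000ff8; with mBits == 0x00000010 it would
// return 0x40001010. topBit() is what later distinguishes an inline unwind
// word from a pointer in the exidx field of EHEntry below.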

struct EHEntry {
  PRel31 startPC;
  PRel31 exidx;
private:
  EHEntry(const EHEntry &copied) MOZ_DELETE;
  EHEntry() MOZ_DELETE;
};


class EHInterp {
public:
  // Note that stackLimit is exclusive and stackBase is inclusive
  // (i.e., stackLimit < SP <= stackBase), following the convention
  // set by the AAPCS spec.
  EHInterp(EHState &aState, const EHEntry *aEntry,
           uint32_t aStackLimit, uint32_t aStackBase)
    : mState(aState),
      mStackLimit(aStackLimit),
      mStackBase(aStackBase),
      mNextWord(0),
      mWordsLeft(0),
      mFailed(false)
  {
    const PRel31 &exidx = aEntry->exidx;
    uint32_t firstWord;

    if (exidx.mBits == 1) { // EXIDX_CANTUNWIND
      mFailed = true;
      return;
    }
    if (exidx.topBit()) {
      firstWord = exidx.mBits;
    } else {
      mNextWord = reinterpret_cast<const uint32_t *>(exidx.compute());
      firstWord = *mNextWord++;
    }

    switch (firstWord >> 24) {
    case 0x80: // short
      mWord = firstWord << 8;
      mBytesLeft = 3;
      break;
    case 0x81: case 0x82: // long; catch descriptor size ignored
      mWord = firstWord << 16;
      mBytesLeft = 2;
      mWordsLeft = (firstWord >> 16) & 0xff;
      break;
    default:
      // unknown personality
      mFailed = true;
    }
  }
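
  // Worked example (illustrative): a common inline ("short format",
  // personality 0) entry is exidx.mBits == 0x80a8b0b0. The constructor above
  // takes the topBit() branch, so firstWord == 0x80a8b0b0, mWord becomes
  // 0xa8b0b000 and mBytesLeft == 3. next() will then yield the bytecodes
  // 0xa8 (pop {r4, r14}), 0xb0 (finish), 0xb0 (padding), i.e. the undoing of
  // a plain "push {r4, lr}" prologue.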

  bool unwind();

private:
  // TODO: GCC has been observed not CSEing repeated reads of
  // mState[R_SP] with writes to mFailed between them, suggesting that
  // it hasn't determined that they can't alias and is thus missing
  // optimization opportunities. So, we may want to flatten EHState
  // into this class; this may also make the code simpler.
  EHState &mState;
  uint32_t mStackLimit;
  uint32_t mStackBase;
  const uint32_t *mNextWord;
  uint32_t mWord;
  uint8_t mWordsLeft;
  uint8_t mBytesLeft;
  bool mFailed;

  enum {
    I_ADDSP = 0x00, // 0sxxxxxx (subtract if s)
    M_ADDSP = 0x80,
    I_POPMASK = 0x80, // 1000iiii iiiiiiii (if any i set)
    M_POPMASK = 0xf0,
    I_MOVSP = 0x90, // 1001nnnn
    M_MOVSP = 0xf0,
    I_POPN = 0xa0, // 1010lnnn
    M_POPN = 0xf0,
    I_FINISH = 0xb0, // 10110000
    I_POPLO = 0xb1, // 10110001 0000iiii (if any i set)
    I_ADDSPBIG = 0xb2, // 10110010 uleb128
    I_POPFDX = 0xb3, // 10110011 sssscccc
    I_POPFDX8 = 0xb8, // 10111nnn
    M_POPFDX8 = 0xf8,
    // "Intel Wireless MMX" extensions omitted.
    I_POPFDD = 0xc8, // 1100100h sssscccc
    M_POPFDD = 0xfe,
    I_POPFDD8 = 0xd0, // 11010nnn
    M_POPFDD8 = 0xf8
  };

  uint8_t next() {
    if (mBytesLeft == 0) {
      if (mWordsLeft == 0) {
        return I_FINISH;
      }
      mWordsLeft--;
      mWord = *mNextWord++;
      mBytesLeft = 4;
    }
    mBytesLeft--;
    mWord = (mWord << 8) | (mWord >> 24); // rotate
    return mWord;
  }
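
  // Example of the bytecode stream (illustrative): with mWord == 0xb108b0b0
  // and mBytesLeft == 4, successive next() calls rotate the word left by one
  // byte each time and return 0xb1, 0x08, 0xb0, 0xb0; once both mBytesLeft
  // and mWordsLeft reach zero, next() keeps returning I_FINISH, so trailing
  // padding needs no special-casing in unwind().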

  uint32_t &vSP() { return mState[R_SP]; }
  uint32_t *ptrSP() { return reinterpret_cast<uint32_t *>(vSP()); }

  void checkStackBase() { if (vSP() > mStackBase) mFailed = true; }
  void checkStackLimit() { if (vSP() <= mStackLimit) mFailed = true; }
  void checkStackAlign() { if ((vSP() & 3) != 0) mFailed = true; }
  void checkStack() {
    checkStackBase();
    checkStackLimit();
    checkStackAlign();
  }

  void popRange(uint8_t first, uint8_t last, uint16_t mask) {
    bool hasSP = false;
    uint32_t tmpSP;
    if (mask == 0)
      mFailed = true;
    for (uint8_t r = first; r <= last; ++r) {
      if (mask & 1) {
        if (r == R_SP) {
          hasSP = true;
          tmpSP = *ptrSP();
        } else
          mState[r] = *ptrSP();
        vSP() += 4;
        checkStackBase();
        if (mFailed)
          return;
      }
      mask >>= 1;
    }
    if (hasSP) {
      vSP() = tmpSP;
      checkStack();
    }
  }
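
  // Worked example for popRange() (illustrative): the instruction bytes
  // 0x84 0x08 reach unwind() as popRange(4, 15, 0x0408); bits 3 and 10 of the
  // mask are set, so r7 and r14 are reloaded and vSP() advances by 4 for each.
  // If bit 9 (r13, i.e. SP itself) had been set, the popped value would
  // replace the incremented SP afterwards, which is what hasSP/tmpSP are for.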
};


bool EHState::unwind(const EHEntry *aEntry, const void *stackBasePtr) {
  // The unwinding program cannot set SP to less than the initial value.
  uint32_t stackLimit = mRegs[R_SP] - 4;
  uint32_t stackBase = reinterpret_cast<uint32_t>(stackBasePtr);
  EHInterp interp(*this, aEntry, stackLimit, stackBase);
  return interp.unwind();
}

bool EHInterp::unwind() {
  mState[R_PC] = 0;
  checkStack();
  while (!mFailed) {
    uint8_t insn = next();
#if DEBUG_EHABI_UNWIND
    LOGF("unwind insn = %02x", (unsigned)insn);
#endif
    // Try to put the common cases first.

    // 00xxxxxx: vsp = vsp + (xxxxxx << 2) + 4
    // 01xxxxxx: vsp = vsp - (xxxxxx << 2) - 4
    if ((insn & M_ADDSP) == I_ADDSP) {
      uint32_t offset = ((insn & 0x3f) << 2) + 4;
      if (insn & 0x40) {
        vSP() -= offset;
        checkStackLimit();
      } else {
        vSP() += offset;
        checkStackBase();
      }
      continue;
    }

    // 10100nnn: Pop r4-r[4+nnn]
    // 10101nnn: Pop r4-r[4+nnn], r14
    if ((insn & M_POPN) == I_POPN) {
      uint8_t n = (insn & 0x07) + 1;
      bool lr = insn & 0x08;
      uint32_t *ptr = ptrSP();
      vSP() += (n + (lr ? 1 : 0)) * 4;
      checkStackBase();
      for (uint8_t r = 4; r < 4 + n; ++r)
        mState[r] = *ptr++;
      if (lr)
        mState[R_LR] = *ptr++;
      continue;
    }

    // 10110000: Finish
    if (insn == I_FINISH) {
      if (mState[R_PC] == 0) {
        mState[R_PC] = mState[R_LR];
        // Non-standard change (bug 916106): Prevent the caller from
        // re-using LR. Since the caller is by definition not a leaf
        // routine, it will have to restore LR from somewhere to
        // return to its own caller, so we can safely zero it here.
        // This makes a difference only if an error in unwinding
        // (e.g., caused by starting from within a prologue/epilogue)
        // causes us to load a pointer to a leaf routine as LR; if we
        // don't do something, we'll go into an infinite loop of
        // "returning" to that same function.
        mState[R_LR] = 0;
      }
      return true;
    }

    // 1001nnnn: Set vsp = r[nnnn]
    if ((insn & M_MOVSP) == I_MOVSP) {
      vSP() = mState[insn & 0x0f];
      checkStack();
      continue;
    }

    // 11001000 sssscccc: Pop VFP regs D[16+ssss]-D[16+ssss+cccc] (as FLDMFDD)
    // 11001001 sssscccc: Pop VFP regs D[ssss]-D[ssss+cccc] (as FLDMFDD)
    if ((insn & M_POPFDD) == I_POPFDD) {
      uint8_t n = (next() & 0x0f) + 1;
      // Note: if 16+ssss+cccc > 31, the encoding is reserved.
      // As the space is currently unused, we don't try to check.
      vSP() += 8 * n;
      checkStackBase();
      continue;
    }

    // 11010nnn: Pop VFP regs D[8]-D[8+nnn] (as FLDMFDD)
    if ((insn & M_POPFDD8) == I_POPFDD8) {
      uint8_t n = (insn & 0x07) + 1;
      vSP() += 8 * n;
      checkStackBase();
      continue;
    }

    // 10110010 uleb128: vsp = vsp + 0x204 + (uleb128 << 2)
    if (insn == I_ADDSPBIG) {
      uint32_t acc = 0;
      uint8_t shift = 0;
      uint8_t byte;
      do {
        if (shift >= 32)
          return false;
        byte = next();
        acc |= (byte & 0x7f) << shift;
        shift += 7;
      } while (byte & 0x80);
      uint32_t offset = 0x204 + (acc << 2);
      // The calculations above could have overflowed.
      // But the one we care about is this:
      if (vSP() + offset < vSP())
        mFailed = true;
      vSP() += offset;
      // ...so that this is the only other check needed:
      checkStackBase();
      continue;
    }
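
    // Worked example for the uleb128 case above (illustrative): the byte
    // sequence 0xb2 0x81 0x01 decodes to acc == 0x81 (0x01 from the first
    // byte, plus 0x01 << 7 from the second), so vsp advances by
    // 0x204 + (0x81 << 2) == 0x408 bytes. The explicit wrap check matters
    // because a wrapped-around vSP() would otherwise pass checkStackBase()
    // while pointing at garbage.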

    // 1000iiii iiiiiiii (i not all 0): Pop under masks {r15-r12}, {r11-r4}
    if ((insn & M_POPMASK) == I_POPMASK) {
      popRange(4, 15, ((insn & 0x0f) << 8) | next());
      continue;
    }

    // 10110001 0000iiii (i not all 0): Pop under mask {r3-r0}
    if (insn == I_POPLO) {
      popRange(0, 3, next() & 0x0f);
      continue;
    }

    // 10110011 sssscccc: Pop VFP regs D[ssss]-D[ssss+cccc] (as FLDMFDX)
    if (insn == I_POPFDX) {
      uint8_t n = (next() & 0x0f) + 1;
      vSP() += 8 * n + 4;
      checkStackBase();
      continue;
    }

    // 10111nnn: Pop VFP regs D[8]-D[8+nnn] (as FLDMFDX)
    if ((insn & M_POPFDX8) == I_POPFDX8) {
      uint8_t n = (insn & 0x07) + 1;
      vSP() += 8 * n + 4;
      checkStackBase();
      continue;
    }

    // unhandled instruction
#ifdef DEBUG_EHABI_UNWIND
    LOGF("Unhandled EHABI instruction 0x%02x", insn);
#endif
    mFailed = true;
  }
  return false;
}


bool operator<(const EHTable &lhs, const EHTable &rhs) {
  return lhs.startPC() < rhs.startPC();
}

// Async signal unsafe.
EHAddrSpace::EHAddrSpace(const std::vector<EHTable>& aTables)
  : mTables(aTables)
{
  std::sort(mTables.begin(), mTables.end());
  DebugOnly<uint32_t> lastEnd = 0;
  for (std::vector<EHTable>::iterator i = mTables.begin();
       i != mTables.end(); ++i) {
    MOZ_ASSERT(i->startPC() >= lastEnd);
    mStarts.push_back(i->startPC());
    lastEnd = i->endPC();
  }
}

const EHTable *EHAddrSpace::lookup(uint32_t aPC) const {
  ptrdiff_t i = (std::upper_bound(mStarts.begin(), mStarts.end(), aPC)
                 - mStarts.begin()) - 1;

  if (i < 0 || aPC >= mTables[i].endPC())
    return 0;
  return &mTables[i];
}


bool operator<(const EHEntryHandle &lhs, const EHEntryHandle &rhs) {
  return lhs.value()->startPC.compute() < rhs.value()->startPC.compute();
}

const EHEntry *EHTable::lookup(uint32_t aPC) const {
  MOZ_ASSERT(aPC >= mStartPC);
  if (aPC >= mEndPC)
    return nullptr;

  std::vector<EHEntryHandle>::const_iterator begin = mEntries.begin();
  std::vector<EHEntryHandle>::const_iterator end = mEntries.end();
  MOZ_ASSERT(begin < end);
  if (aPC < reinterpret_cast<uint32_t>(begin->value()->startPC.compute()))
    return nullptr;

  while (end - begin > 1) {
    std::vector<EHEntryHandle>::const_iterator mid = begin + (end - begin) / 2;
    if (aPC < reinterpret_cast<uint32_t>(mid->value()->startPC.compute()))
      end = mid;
    else
      begin = mid;
  }
  return begin->value();
}


#if MOZ_LITTLE_ENDIAN
static const unsigned char hostEndian = ELFDATA2LSB;
#elif MOZ_BIG_ENDIAN
static const unsigned char hostEndian = ELFDATA2MSB;
#else
#error "No endian?"
#endif

// Async signal unsafe. (Note use of std::vector::reserve.)
EHTable::EHTable(const void *aELF, size_t aSize, const std::string &aName)
  : mStartPC(~0), // largest uint32_t
    mEndPC(0),
    mName(aName)
{
  const uint32_t base = reinterpret_cast<uint32_t>(aELF);

  if (aSize < sizeof(Elf32_Ehdr))
    return;

  const Elf32_Ehdr &file = *(reinterpret_cast<Elf32_Ehdr *>(base));
  if (memcmp(&file.e_ident[EI_MAG0], ELFMAG, SELFMAG) != 0 ||
      file.e_ident[EI_CLASS] != ELFCLASS32 ||
      file.e_ident[EI_DATA] != hostEndian ||
      file.e_ident[EI_VERSION] != EV_CURRENT ||
      file.e_ident[EI_OSABI] != ELFOSABI_SYSV ||
#ifdef EI_ABIVERSION
      file.e_ident[EI_ABIVERSION] != 0 ||
#endif
      file.e_machine != EM_ARM ||
      file.e_version != EV_CURRENT)
    // e_flags?
    return;

  MOZ_ASSERT(file.e_phoff + file.e_phnum * file.e_phentsize <= aSize);
  const Elf32_Phdr *exidxHdr = 0, *zeroHdr = 0;
  for (unsigned i = 0; i < file.e_phnum; ++i) {
    const Elf32_Phdr &phdr =
      *(reinterpret_cast<Elf32_Phdr *>(base + file.e_phoff
                                       + i * file.e_phentsize));
    if (phdr.p_type == PT_ARM_EXIDX) {
      exidxHdr = &phdr;
    } else if (phdr.p_type == PT_LOAD) {
      if (phdr.p_offset == 0) {
        zeroHdr = &phdr;
      }
      if (phdr.p_flags & PF_X) {
        mStartPC = std::min(mStartPC, phdr.p_vaddr);
        mEndPC = std::max(mEndPC, phdr.p_vaddr + phdr.p_memsz);
      }
    }
  }
  if (!exidxHdr)
    return;
  if (!zeroHdr)
    return;
  mLoadOffset = base - zeroHdr->p_vaddr;
  mStartPC += mLoadOffset;
  mEndPC += mLoadOffset;

  // Create a sorted index of the index to work around linker bugs.
  const EHEntry *startTable =
    reinterpret_cast<const EHEntry *>(mLoadOffset + exidxHdr->p_vaddr);
  const EHEntry *endTable =
    reinterpret_cast<const EHEntry *>(mLoadOffset + exidxHdr->p_vaddr
                                      + exidxHdr->p_memsz);
  mEntries.reserve(endTable - startTable);
  for (const EHEntry *i = startTable; i < endTable; ++i)
    mEntries.push_back(i);
  std::sort(mEntries.begin(), mEntries.end());
}
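
// Worked example (illustrative): if a library whose first PT_LOAD segment has
// p_vaddr == 0 is mapped at base == 0x40000000, then mLoadOffset ==
// 0x40000000, and an executable segment with p_vaddr == 0x1000 and
// p_memsz == 0x20000 yields mStartPC == 0x40001000 and mEndPC == 0x40021000.
// EHAddrSpace::lookup() can then match sampled PCs in that range directly,
// with no further relocation arithmetic.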


mozilla::Atomic<const EHAddrSpace*> EHAddrSpace::sCurrent(nullptr);

// Async signal safe; can fail if Update() hasn't returned yet.
const EHAddrSpace *EHAddrSpace::Get() {
  return sCurrent;
}

// Collect unwinding information from loaded objects. Calls after the
// first have no effect. Async signal unsafe.
void EHAddrSpace::Update() {
  const EHAddrSpace *space = sCurrent;
  if (space)
    return;

  SharedLibraryInfo info = SharedLibraryInfo::GetInfoForSelf();
  std::vector<EHTable> tables;

  for (size_t i = 0; i < info.GetSize(); ++i) {
    const SharedLibrary &lib = info.GetEntry(i);
    if (lib.GetOffset() != 0)
      // TODO: if it has a name, and we haven't seen a mapping of
      // offset 0 for that file, try opening it and reading the
      // headers instead. The only thing I've seen so far that's
      // linked so as to need that treatment is the dynamic linker
      // itself.
      continue;
    EHTable tab(reinterpret_cast<const void *>(lib.GetStart()),
                lib.GetEnd() - lib.GetStart(), lib.GetName());
    if (tab.isValid())
      tables.push_back(tab);
  }
  space = new EHAddrSpace(tables);

  if (!sCurrent.compareExchange(nullptr, space)) {
    delete space;
    space = sCurrent;
  }
}


EHState::EHState(const mcontext_t &context) {
#ifdef linux
  mRegs[0] = context.arm_r0;
  mRegs[1] = context.arm_r1;
  mRegs[2] = context.arm_r2;
  mRegs[3] = context.arm_r3;
  mRegs[4] = context.arm_r4;
  mRegs[5] = context.arm_r5;
  mRegs[6] = context.arm_r6;
  mRegs[7] = context.arm_r7;
  mRegs[8] = context.arm_r8;
  mRegs[9] = context.arm_r9;
  mRegs[10] = context.arm_r10;
  mRegs[11] = context.arm_fp;
  mRegs[12] = context.arm_ip;
  mRegs[13] = context.arm_sp;
  mRegs[14] = context.arm_lr;
  mRegs[15] = context.arm_pc;
#else
# error "Unhandled OS for ARM EHABI unwinding"
#endif
}

} // namespace mozilla