toolkit/crashreporter/google-breakpad/src/processor/stackwalker_amd64.cc

Wed, 31 Dec 2014 06:09:35 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Wed, 31 Dec 2014 06:09:35 +0100
changeset 0
6474c204b198
permissions
-rw-r--r--

Cloned from upstream origin tor-browser at tag tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

michael@0 1 // Copyright (c) 2010 Google Inc.
michael@0 2 // All rights reserved.
michael@0 3 //
michael@0 4 // Redistribution and use in source and binary forms, with or without
michael@0 5 // modification, are permitted provided that the following conditions are
michael@0 6 // met:
michael@0 7 //
michael@0 8 // * Redistributions of source code must retain the above copyright
michael@0 9 // notice, this list of conditions and the following disclaimer.
michael@0 10 // * Redistributions in binary form must reproduce the above
michael@0 11 // copyright notice, this list of conditions and the following disclaimer
michael@0 12 // in the documentation and/or other materials provided with the
michael@0 13 // distribution.
michael@0 14 // * Neither the name of Google Inc. nor the names of its
michael@0 15 // contributors may be used to endorse or promote products derived from
michael@0 16 // this software without specific prior written permission.
michael@0 17 //
michael@0 18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
michael@0 19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
michael@0 20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
michael@0 21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
michael@0 22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
michael@0 23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
michael@0 24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
michael@0 25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
michael@0 26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
michael@0 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
michael@0 28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
michael@0 29
michael@0 30 // stackwalker_amd64.cc: amd64-specific stackwalker.
michael@0 31 //
michael@0 32 // See stackwalker_amd64.h for documentation.
michael@0 33 //
michael@0 34 // Author: Mark Mentovai, Ted Mielczarek
michael@0 35
michael@0 36 #include <assert.h>
michael@0 37
michael@0 38 #include "common/scoped_ptr.h"
michael@0 39 #include "google_breakpad/processor/call_stack.h"
michael@0 40 #include "google_breakpad/processor/memory_region.h"
michael@0 41 #include "google_breakpad/processor/source_line_resolver_interface.h"
michael@0 42 #include "google_breakpad/processor/stack_frame_cpu.h"
michael@0 43 #include "processor/cfi_frame_info.h"
michael@0 44 #include "common/logging.h"
michael@0 45 #include "processor/stackwalker_amd64.h"
michael@0 46
michael@0 47 namespace google_breakpad {
michael@0 48
michael@0 49
// Maps DWARF CFI register names to their corresponding bits in
// StackFrameAMD64::context_validity and their storage slots in
// MDRawContextAMD64.  Each entry is (name, alternate CFI name or NULL,
// callee-saves flag, validity bit, member pointer); the table is handed
// to CFIWalker in the constructor below.
const StackwalkerAMD64::CFIWalker::RegisterSet
StackwalkerAMD64::cfi_register_map_[] = {
  // It may seem like $rip and $rsp are callee-saves, because the callee is
  // responsible for having them restored upon return. But the callee_saves
  // flags here really means that the walker should assume they're
  // unchanged if the CFI doesn't mention them --- clearly wrong for $rip
  // and $rsp.
  { ToUniqueString("$rax"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RAX, &MDRawContextAMD64::rax },
  { ToUniqueString("$rdx"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDX, &MDRawContextAMD64::rdx },
  { ToUniqueString("$rcx"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RCX, &MDRawContextAMD64::rcx },
  { ToUniqueString("$rbx"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBX, &MDRawContextAMD64::rbx },
  { ToUniqueString("$rsi"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RSI, &MDRawContextAMD64::rsi },
  { ToUniqueString("$rdi"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDI, &MDRawContextAMD64::rdi },
  { ToUniqueString("$rbp"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBP, &MDRawContextAMD64::rbp },
  // $rsp's alternate name is ".cfa" (canonical frame address): if the CFI
  // doesn't give a rule for $rsp, the caller's stack pointer is the CFA.
  { ToUniqueString("$rsp"), ToUniqueString(".cfa"), false,
    StackFrameAMD64::CONTEXT_VALID_RSP, &MDRawContextAMD64::rsp },
  { ToUniqueString("$r8"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R8, &MDRawContextAMD64::r8 },
  { ToUniqueString("$r9"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R9, &MDRawContextAMD64::r9 },
  { ToUniqueString("$r10"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R10, &MDRawContextAMD64::r10 },
  { ToUniqueString("$r11"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R11, &MDRawContextAMD64::r11 },
  { ToUniqueString("$r12"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R12, &MDRawContextAMD64::r12 },
  { ToUniqueString("$r13"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R13, &MDRawContextAMD64::r13 },
  { ToUniqueString("$r14"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R14, &MDRawContextAMD64::r14 },
  { ToUniqueString("$r15"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R15, &MDRawContextAMD64::r15 },
  // $rip's alternate name is ".ra" (return address).
  { ToUniqueString("$rip"), ToUniqueString(".ra"), false,
    StackFrameAMD64::CONTEXT_VALID_RIP, &MDRawContextAMD64::rip },
};
michael@0 92
// Constructs an amd64 stackwalker over the given crash context and stack
// memory.  All pointer arguments are borrowed, not owned; `context` is the
// CPU state at the point of the crash, and `cfi_walker_` is initialized
// with the register map above so CFI-driven unwinding knows how to recover
// each register.
StackwalkerAMD64::StackwalkerAMD64(const SystemInfo* system_info,
                                   const MDRawContextAMD64* context,
                                   MemoryRegion* memory,
                                   const CodeModules* modules,
                                   StackFrameSymbolizer* resolver_helper)
    : Stackwalker(system_info, memory, modules, resolver_helper),
      context_(context),
      cfi_walker_(cfi_register_map_,
                  (sizeof(cfi_register_map_) / sizeof(cfi_register_map_[0]))) {
}
michael@0 103
michael@0 104 uint64_t StackFrameAMD64::ReturnAddress() const
michael@0 105 {
michael@0 106 assert(context_validity & StackFrameAMD64::CONTEXT_VALID_RIP);
michael@0 107 return context.rip;
michael@0 108 }
michael@0 109
michael@0 110 StackFrame* StackwalkerAMD64::GetContextFrame() {
michael@0 111 if (!context_) {
michael@0 112 BPLOG(ERROR) << "Can't get context frame without context";
michael@0 113 return NULL;
michael@0 114 }
michael@0 115
michael@0 116 StackFrameAMD64* frame = new StackFrameAMD64();
michael@0 117
michael@0 118 // The instruction pointer is stored directly in a register, so pull it
michael@0 119 // straight out of the CPU context structure.
michael@0 120 frame->context = *context_;
michael@0 121 frame->context_validity = StackFrameAMD64::CONTEXT_VALID_ALL;
michael@0 122 frame->trust = StackFrame::FRAME_TRUST_CONTEXT;
michael@0 123 frame->instruction = frame->context.rip;
michael@0 124
michael@0 125 return frame;
michael@0 126 }
michael@0 127
michael@0 128 StackFrameAMD64* StackwalkerAMD64::GetCallerByCFIFrameInfo(
michael@0 129 const vector<StackFrame*> &frames,
michael@0 130 CFIFrameInfo* cfi_frame_info) {
michael@0 131 StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
michael@0 132
michael@0 133 scoped_ptr<StackFrameAMD64> frame(new StackFrameAMD64());
michael@0 134 if (!cfi_walker_
michael@0 135 .FindCallerRegisters(*memory_, *cfi_frame_info,
michael@0 136 last_frame->context, last_frame->context_validity,
michael@0 137 &frame->context, &frame->context_validity))
michael@0 138 return NULL;
michael@0 139
michael@0 140 // Make sure we recovered all the essentials.
michael@0 141 static const int essentials = (StackFrameAMD64::CONTEXT_VALID_RIP
michael@0 142 | StackFrameAMD64::CONTEXT_VALID_RSP);
michael@0 143 if ((frame->context_validity & essentials) != essentials)
michael@0 144 return NULL;
michael@0 145
michael@0 146 frame->trust = StackFrame::FRAME_TRUST_CFI;
michael@0 147 return frame.release();
michael@0 148 }
michael@0 149
michael@0 150 StackFrameAMD64* StackwalkerAMD64::GetCallerByStackScan(
michael@0 151 const vector<StackFrame*> &frames) {
michael@0 152 StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
michael@0 153 uint64_t last_rsp = last_frame->context.rsp;
michael@0 154 uint64_t caller_rip_address, caller_rip;
michael@0 155
michael@0 156 if (!ScanForReturnAddress(last_rsp, &caller_rip_address, &caller_rip)) {
michael@0 157 // No plausible return address was found.
michael@0 158 return NULL;
michael@0 159 }
michael@0 160
michael@0 161 // Create a new stack frame (ownership will be transferred to the caller)
michael@0 162 // and fill it in.
michael@0 163 StackFrameAMD64* frame = new StackFrameAMD64();
michael@0 164
michael@0 165 frame->trust = StackFrame::FRAME_TRUST_SCAN;
michael@0 166 frame->context = last_frame->context;
michael@0 167 frame->context.rip = caller_rip;
michael@0 168 // The caller's %rsp is directly underneath the return address pushed by
michael@0 169 // the call.
michael@0 170 frame->context.rsp = caller_rip_address + 8;
michael@0 171 frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP |
michael@0 172 StackFrameAMD64::CONTEXT_VALID_RSP;
michael@0 173
michael@0 174 // Other unwinders give up if they don't have an %rbp value, so see if we
michael@0 175 // can pass some plausible value on.
michael@0 176 if (last_frame->context_validity & StackFrameAMD64::CONTEXT_VALID_RBP) {
michael@0 177 // Functions typically push their caller's %rbp immediately upon entry,
michael@0 178 // and then set %rbp to point to that. So if the callee's %rbp is
michael@0 179 // pointing to the first word below the alleged return address, presume
michael@0 180 // that the caller's %rbp is saved there.
michael@0 181 if (caller_rip_address - 8 == last_frame->context.rbp) {
michael@0 182 uint64_t caller_rbp = 0;
michael@0 183 if (memory_->GetMemoryAtAddress(last_frame->context.rbp, &caller_rbp) &&
michael@0 184 caller_rbp > caller_rip_address) {
michael@0 185 frame->context.rbp = caller_rbp;
michael@0 186 frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
michael@0 187 }
michael@0 188 } else if (last_frame->context.rbp >= caller_rip_address + 8) {
michael@0 189 // If the callee's %rbp is plausible as a value for the caller's
michael@0 190 // %rbp, presume that the callee left it unchanged.
michael@0 191 frame->context.rbp = last_frame->context.rbp;
michael@0 192 frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
michael@0 193 }
michael@0 194 }
michael@0 195
michael@0 196 return frame;
michael@0 197 }
michael@0 198
michael@0 199 StackFrame* StackwalkerAMD64::GetCallerFrame(const CallStack* stack,
michael@0 200 bool stack_scan_allowed) {
michael@0 201 if (!memory_ || !stack) {
michael@0 202 BPLOG(ERROR) << "Can't get caller frame without memory or stack";
michael@0 203 return NULL;
michael@0 204 }
michael@0 205
michael@0 206 const vector<StackFrame*> &frames = *stack->frames();
michael@0 207 StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
michael@0 208 scoped_ptr<StackFrameAMD64> new_frame;
michael@0 209
michael@0 210 // If we have DWARF CFI information, use it.
michael@0 211 scoped_ptr<CFIFrameInfo> cfi_frame_info(
michael@0 212 frame_symbolizer_->FindCFIFrameInfo(last_frame));
michael@0 213 if (cfi_frame_info.get())
michael@0 214 new_frame.reset(GetCallerByCFIFrameInfo(frames, cfi_frame_info.get()));
michael@0 215
michael@0 216 // If CFI failed, or there wasn't CFI available, fall back
michael@0 217 // to stack scanning.
michael@0 218 if (stack_scan_allowed && !new_frame.get()) {
michael@0 219 new_frame.reset(GetCallerByStackScan(frames));
michael@0 220 }
michael@0 221
michael@0 222 // If nothing worked, tell the caller.
michael@0 223 if (!new_frame.get())
michael@0 224 return NULL;
michael@0 225
michael@0 226 // Treat an instruction address of 0 as end-of-stack.
michael@0 227 if (new_frame->context.rip == 0)
michael@0 228 return NULL;
michael@0 229
michael@0 230 // If the new stack pointer is at a lower address than the old, then
michael@0 231 // that's clearly incorrect. Treat this as end-of-stack to enforce
michael@0 232 // progress and avoid infinite loops.
michael@0 233 if (new_frame->context.rsp <= last_frame->context.rsp)
michael@0 234 return NULL;
michael@0 235
michael@0 236 // new_frame->context.rip is the return address, which is the instruction
michael@0 237 // after the CALL that caused us to arrive at the callee. Set
michael@0 238 // new_frame->instruction to one less than that, so it points within the
michael@0 239 // CALL instruction. See StackFrame::instruction for details, and
michael@0 240 // StackFrameAMD64::ReturnAddress.
michael@0 241 new_frame->instruction = new_frame->context.rip - 1;
michael@0 242
michael@0 243 return new_frame.release();
michael@0 244 }
michael@0 245
michael@0 246 } // namespace google_breakpad

mercurial