toolkit/crashreporter/google-breakpad/src/processor/stackwalker_amd64.cc

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/toolkit/crashreporter/google-breakpad/src/processor/stackwalker_amd64.cc	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,246 @@
     1.4 +// Copyright (c) 2010 Google Inc.
     1.5 +// All rights reserved.
     1.6 +//
     1.7 +// Redistribution and use in source and binary forms, with or without
     1.8 +// modification, are permitted provided that the following conditions are
     1.9 +// met:
    1.10 +//
    1.11 +//     * Redistributions of source code must retain the above copyright
    1.12 +// notice, this list of conditions and the following disclaimer.
    1.13 +//     * Redistributions in binary form must reproduce the above
    1.14 +// copyright notice, this list of conditions and the following disclaimer
    1.15 +// in the documentation and/or other materials provided with the
    1.16 +// distribution.
    1.17 +//     * Neither the name of Google Inc. nor the names of its
    1.18 +// contributors may be used to endorse or promote products derived from
    1.19 +// this software without specific prior written permission.
    1.20 +//
    1.21 +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    1.22 +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    1.23 +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    1.24 +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    1.25 +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    1.26 +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    1.27 +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    1.28 +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    1.29 +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    1.30 +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    1.31 +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    1.32 +
    1.33 +// stackwalker_amd64.cc: amd64-specific stackwalker.
    1.34 +//
    1.35 +// See stackwalker_amd64.h for documentation.
    1.36 +//
    1.37 +// Author: Mark Mentovai, Ted Mielczarek
    1.38 +
    1.39 +#include <assert.h>
    1.40 +
    1.41 +#include "common/scoped_ptr.h"
    1.42 +#include "google_breakpad/processor/call_stack.h"
    1.43 +#include "google_breakpad/processor/memory_region.h"
    1.44 +#include "google_breakpad/processor/source_line_resolver_interface.h"
    1.45 +#include "google_breakpad/processor/stack_frame_cpu.h"
    1.46 +#include "processor/cfi_frame_info.h"
    1.47 +#include "common/logging.h"
    1.48 +#include "processor/stackwalker_amd64.h"
    1.49 +
    1.50 +namespace google_breakpad {
    1.51 +
    1.52 +
// Maps DWARF CFI register names to fields of MDRawContextAMD64, along with
// the validity bit the walker sets when each register is recovered.  The
// third field (callee_saves) tells CFIWalker whether a register may be
// assumed unchanged across a call when the CFI rules don't mention it.
// The second field, when non-NULL, is the CFI pseudo-register the hardware
// register is identified with (".cfa" for %rsp, ".ra" for %rip).
const StackwalkerAMD64::CFIWalker::RegisterSet
StackwalkerAMD64::cfi_register_map_[] = {
  // It may seem like $rip and $rsp are callee-saves, because the callee is
  // responsible for having them restored upon return. But the callee_saves
  // flags here really means that the walker should assume they're
  // unchanged if the CFI doesn't mention them --- clearly wrong for $rip
  // and $rsp.
  { ToUniqueString("$rax"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RAX, &MDRawContextAMD64::rax },
  { ToUniqueString("$rdx"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDX, &MDRawContextAMD64::rdx },
  { ToUniqueString("$rcx"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RCX, &MDRawContextAMD64::rcx },
  { ToUniqueString("$rbx"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBX, &MDRawContextAMD64::rbx },
  { ToUniqueString("$rsi"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RSI, &MDRawContextAMD64::rsi },
  { ToUniqueString("$rdi"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDI, &MDRawContextAMD64::rdi },
  { ToUniqueString("$rbp"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBP, &MDRawContextAMD64::rbp },
  // %rsp is the canonical frame address by definition.
  { ToUniqueString("$rsp"), ToUniqueString(".cfa"), false,
    StackFrameAMD64::CONTEXT_VALID_RSP, &MDRawContextAMD64::rsp },
  { ToUniqueString("$r8"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R8,  &MDRawContextAMD64::r8 },
  { ToUniqueString("$r9"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R9,  &MDRawContextAMD64::r9 },
  { ToUniqueString("$r10"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R10, &MDRawContextAMD64::r10 },
  { ToUniqueString("$r11"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R11, &MDRawContextAMD64::r11 },
  { ToUniqueString("$r12"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R12, &MDRawContextAMD64::r12 },
  { ToUniqueString("$r13"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R13, &MDRawContextAMD64::r13 },
  { ToUniqueString("$r14"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R14, &MDRawContextAMD64::r14 },
  { ToUniqueString("$r15"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R15, &MDRawContextAMD64::r15 },
  // %rip is the return address by definition.
  { ToUniqueString("$rip"), ToUniqueString(".ra"), false,
    StackFrameAMD64::CONTEXT_VALID_RIP, &MDRawContextAMD64::rip },
};
    1.95 +
// Constructs an amd64 stackwalker over a minidump thread.
// context: the thread's CPU context from the minidump (borrowed, may be NULL;
//          GetContextFrame checks for that).
// memory/modules/resolver_helper: passed through to the base Stackwalker.
// cfi_walker_ is initialized with the full register map above; the element
// count is derived from the array itself so the two can't drift apart.
StackwalkerAMD64::StackwalkerAMD64(const SystemInfo* system_info,
                                   const MDRawContextAMD64* context,
                                   MemoryRegion* memory,
                                   const CodeModules* modules,
                                   StackFrameSymbolizer* resolver_helper)
    : Stackwalker(system_info, memory, modules, resolver_helper),
      context_(context),
      cfi_walker_(cfi_register_map_,
                  (sizeof(cfi_register_map_) / sizeof(cfi_register_map_[0]))) {
}
   1.106 +
   1.107 +uint64_t StackFrameAMD64::ReturnAddress() const
   1.108 +{
   1.109 +  assert(context_validity & StackFrameAMD64::CONTEXT_VALID_RIP);
   1.110 +  return context.rip;   
   1.111 +}
   1.112 +
   1.113 +StackFrame* StackwalkerAMD64::GetContextFrame() {
   1.114 +  if (!context_) {
   1.115 +    BPLOG(ERROR) << "Can't get context frame without context";
   1.116 +    return NULL;
   1.117 +  }
   1.118 +
   1.119 +  StackFrameAMD64* frame = new StackFrameAMD64();
   1.120 +
   1.121 +  // The instruction pointer is stored directly in a register, so pull it
   1.122 +  // straight out of the CPU context structure.
   1.123 +  frame->context = *context_;
   1.124 +  frame->context_validity = StackFrameAMD64::CONTEXT_VALID_ALL;
   1.125 +  frame->trust = StackFrame::FRAME_TRUST_CONTEXT;
   1.126 +  frame->instruction = frame->context.rip;
   1.127 +
   1.128 +  return frame;
   1.129 +}
   1.130 +
   1.131 +StackFrameAMD64* StackwalkerAMD64::GetCallerByCFIFrameInfo(
   1.132 +    const vector<StackFrame*> &frames,
   1.133 +    CFIFrameInfo* cfi_frame_info) {
   1.134 +  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
   1.135 +
   1.136 +  scoped_ptr<StackFrameAMD64> frame(new StackFrameAMD64());
   1.137 +  if (!cfi_walker_
   1.138 +      .FindCallerRegisters(*memory_, *cfi_frame_info,
   1.139 +                           last_frame->context, last_frame->context_validity,
   1.140 +                           &frame->context, &frame->context_validity))
   1.141 +    return NULL;
   1.142 +
   1.143 +  // Make sure we recovered all the essentials.
   1.144 +  static const int essentials = (StackFrameAMD64::CONTEXT_VALID_RIP
   1.145 +                                 | StackFrameAMD64::CONTEXT_VALID_RSP);
   1.146 +  if ((frame->context_validity & essentials) != essentials)
   1.147 +    return NULL;
   1.148 +
   1.149 +  frame->trust = StackFrame::FRAME_TRUST_CFI;
   1.150 +  return frame.release();
   1.151 +}
   1.152 +
// Last-resort unwinder: scans the stack above the callee's %rsp for the
// first value that looks like a return address (an address inside a code
// module), and builds a low-trust caller frame from it.  The %rbp recovery
// below is an order-dependent heuristic; the two branches are mutually
// exclusive and must be tried in this order.
// Returns a caller-owned frame, or NULL if no plausible return address is
// found.
StackFrameAMD64* StackwalkerAMD64::GetCallerByStackScan(
    const vector<StackFrame*> &frames) {
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
  uint64_t last_rsp = last_frame->context.rsp;
  uint64_t caller_rip_address, caller_rip;

  if (!ScanForReturnAddress(last_rsp, &caller_rip_address, &caller_rip)) {
    // No plausible return address was found.
    return NULL;
  }

  // Create a new stack frame (ownership will be transferred to the caller)
  // and fill it in.
  StackFrameAMD64* frame = new StackFrameAMD64();

  frame->trust = StackFrame::FRAME_TRUST_SCAN;
  // Start from the callee's register set; only %rip and %rsp (and possibly
  // %rbp below) are actually marked valid.
  frame->context = last_frame->context;
  frame->context.rip = caller_rip;
  // The caller's %rsp is directly underneath the return address pushed by
  // the call.
  frame->context.rsp = caller_rip_address + 8;
  frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP |
                            StackFrameAMD64::CONTEXT_VALID_RSP;

  // Other unwinders give up if they don't have an %rbp value, so see if we
  // can pass some plausible value on.
  if (last_frame->context_validity & StackFrameAMD64::CONTEXT_VALID_RBP) {
    // Functions typically push their caller's %rbp immediately upon entry,
    // and then set %rbp to point to that. So if the callee's %rbp is
    // pointing to the first word below the alleged return address, presume
    // that the caller's %rbp is saved there.
    if (caller_rip_address - 8 == last_frame->context.rbp) {
      uint64_t caller_rbp = 0;
      // Only trust the saved value if it points higher up the stack than
      // the return address -- a saved frame pointer should.
      if (memory_->GetMemoryAtAddress(last_frame->context.rbp, &caller_rbp) &&
          caller_rbp > caller_rip_address) {
        frame->context.rbp = caller_rbp;
        frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
      }
    } else if (last_frame->context.rbp >= caller_rip_address + 8) {
      // If the callee's %rbp is plausible as a value for the caller's
      // %rbp, presume that the callee left it unchanged.
      frame->context.rbp = last_frame->context.rbp;
      frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
    }
  }

  return frame;
}
   1.201 +
   1.202 +StackFrame* StackwalkerAMD64::GetCallerFrame(const CallStack* stack,
   1.203 +                                             bool stack_scan_allowed) {
   1.204 +  if (!memory_ || !stack) {
   1.205 +    BPLOG(ERROR) << "Can't get caller frame without memory or stack";
   1.206 +    return NULL;
   1.207 +  }
   1.208 +
   1.209 +  const vector<StackFrame*> &frames = *stack->frames();
   1.210 +  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
   1.211 +  scoped_ptr<StackFrameAMD64> new_frame;
   1.212 +
   1.213 +  // If we have DWARF CFI information, use it.
   1.214 +  scoped_ptr<CFIFrameInfo> cfi_frame_info(
   1.215 +      frame_symbolizer_->FindCFIFrameInfo(last_frame));
   1.216 +  if (cfi_frame_info.get())
   1.217 +    new_frame.reset(GetCallerByCFIFrameInfo(frames, cfi_frame_info.get()));
   1.218 +
   1.219 +  // If CFI failed, or there wasn't CFI available, fall back
   1.220 +  // to stack scanning.
   1.221 +  if (stack_scan_allowed && !new_frame.get()) {
   1.222 +    new_frame.reset(GetCallerByStackScan(frames));
   1.223 +  }
   1.224 +
   1.225 +  // If nothing worked, tell the caller.
   1.226 +  if (!new_frame.get())
   1.227 +    return NULL;
   1.228 +
   1.229 +  // Treat an instruction address of 0 as end-of-stack.
   1.230 +  if (new_frame->context.rip == 0)
   1.231 +    return NULL;
   1.232 +
   1.233 +  // If the new stack pointer is at a lower address than the old, then
   1.234 +  // that's clearly incorrect. Treat this as end-of-stack to enforce
   1.235 +  // progress and avoid infinite loops.
   1.236 +  if (new_frame->context.rsp <= last_frame->context.rsp)
   1.237 +    return NULL;
   1.238 +
   1.239 +  // new_frame->context.rip is the return address, which is the instruction
   1.240 +  // after the CALL that caused us to arrive at the callee. Set
   1.241 +  // new_frame->instruction to one less than that, so it points within the
   1.242 +  // CALL instruction. See StackFrame::instruction for details, and
   1.243 +  // StackFrameAMD64::ReturnAddress.
   1.244 +  new_frame->instruction = new_frame->context.rip - 1;
   1.245 +
   1.246 +  return new_frame.release();
   1.247 +}
   1.248 +
   1.249 +}  // namespace google_breakpad

mercurial