// Copyright (c) 2010 Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// stackwalker_amd64.cc: amd64-specific stackwalker.
michael@0: // michael@0: // See stackwalker_amd64.h for documentation. michael@0: // michael@0: // Author: Mark Mentovai, Ted Mielczarek michael@0: michael@0: #include michael@0: michael@0: #include "common/scoped_ptr.h" michael@0: #include "google_breakpad/processor/call_stack.h" michael@0: #include "google_breakpad/processor/memory_region.h" michael@0: #include "google_breakpad/processor/source_line_resolver_interface.h" michael@0: #include "google_breakpad/processor/stack_frame_cpu.h" michael@0: #include "processor/cfi_frame_info.h" michael@0: #include "common/logging.h" michael@0: #include "processor/stackwalker_amd64.h" michael@0: michael@0: namespace google_breakpad { michael@0: michael@0: michael@0: const StackwalkerAMD64::CFIWalker::RegisterSet michael@0: StackwalkerAMD64::cfi_register_map_[] = { michael@0: // It may seem like $rip and $rsp are callee-saves, because the callee is michael@0: // responsible for having them restored upon return. But the callee_saves michael@0: // flags here really means that the walker should assume they're michael@0: // unchanged if the CFI doesn't mention them --- clearly wrong for $rip michael@0: // and $rsp. 
michael@0: { ToUniqueString("$rax"), NULL, false, michael@0: StackFrameAMD64::CONTEXT_VALID_RAX, &MDRawContextAMD64::rax }, michael@0: { ToUniqueString("$rdx"), NULL, false, michael@0: StackFrameAMD64::CONTEXT_VALID_RDX, &MDRawContextAMD64::rdx }, michael@0: { ToUniqueString("$rcx"), NULL, false, michael@0: StackFrameAMD64::CONTEXT_VALID_RCX, &MDRawContextAMD64::rcx }, michael@0: { ToUniqueString("$rbx"), NULL, true, michael@0: StackFrameAMD64::CONTEXT_VALID_RBX, &MDRawContextAMD64::rbx }, michael@0: { ToUniqueString("$rsi"), NULL, false, michael@0: StackFrameAMD64::CONTEXT_VALID_RSI, &MDRawContextAMD64::rsi }, michael@0: { ToUniqueString("$rdi"), NULL, false, michael@0: StackFrameAMD64::CONTEXT_VALID_RDI, &MDRawContextAMD64::rdi }, michael@0: { ToUniqueString("$rbp"), NULL, true, michael@0: StackFrameAMD64::CONTEXT_VALID_RBP, &MDRawContextAMD64::rbp }, michael@0: { ToUniqueString("$rsp"), ToUniqueString(".cfa"), false, michael@0: StackFrameAMD64::CONTEXT_VALID_RSP, &MDRawContextAMD64::rsp }, michael@0: { ToUniqueString("$r8"), NULL, false, michael@0: StackFrameAMD64::CONTEXT_VALID_R8, &MDRawContextAMD64::r8 }, michael@0: { ToUniqueString("$r9"), NULL, false, michael@0: StackFrameAMD64::CONTEXT_VALID_R9, &MDRawContextAMD64::r9 }, michael@0: { ToUniqueString("$r10"), NULL, false, michael@0: StackFrameAMD64::CONTEXT_VALID_R10, &MDRawContextAMD64::r10 }, michael@0: { ToUniqueString("$r11"), NULL, false, michael@0: StackFrameAMD64::CONTEXT_VALID_R11, &MDRawContextAMD64::r11 }, michael@0: { ToUniqueString("$r12"), NULL, true, michael@0: StackFrameAMD64::CONTEXT_VALID_R12, &MDRawContextAMD64::r12 }, michael@0: { ToUniqueString("$r13"), NULL, true, michael@0: StackFrameAMD64::CONTEXT_VALID_R13, &MDRawContextAMD64::r13 }, michael@0: { ToUniqueString("$r14"), NULL, true, michael@0: StackFrameAMD64::CONTEXT_VALID_R14, &MDRawContextAMD64::r14 }, michael@0: { ToUniqueString("$r15"), NULL, true, michael@0: StackFrameAMD64::CONTEXT_VALID_R15, &MDRawContextAMD64::r15 }, 
michael@0: { ToUniqueString("$rip"), ToUniqueString(".ra"), false, michael@0: StackFrameAMD64::CONTEXT_VALID_RIP, &MDRawContextAMD64::rip }, michael@0: }; michael@0: michael@0: StackwalkerAMD64::StackwalkerAMD64(const SystemInfo* system_info, michael@0: const MDRawContextAMD64* context, michael@0: MemoryRegion* memory, michael@0: const CodeModules* modules, michael@0: StackFrameSymbolizer* resolver_helper) michael@0: : Stackwalker(system_info, memory, modules, resolver_helper), michael@0: context_(context), michael@0: cfi_walker_(cfi_register_map_, michael@0: (sizeof(cfi_register_map_) / sizeof(cfi_register_map_[0]))) { michael@0: } michael@0: michael@0: uint64_t StackFrameAMD64::ReturnAddress() const michael@0: { michael@0: assert(context_validity & StackFrameAMD64::CONTEXT_VALID_RIP); michael@0: return context.rip; michael@0: } michael@0: michael@0: StackFrame* StackwalkerAMD64::GetContextFrame() { michael@0: if (!context_) { michael@0: BPLOG(ERROR) << "Can't get context frame without context"; michael@0: return NULL; michael@0: } michael@0: michael@0: StackFrameAMD64* frame = new StackFrameAMD64(); michael@0: michael@0: // The instruction pointer is stored directly in a register, so pull it michael@0: // straight out of the CPU context structure. 
michael@0: frame->context = *context_; michael@0: frame->context_validity = StackFrameAMD64::CONTEXT_VALID_ALL; michael@0: frame->trust = StackFrame::FRAME_TRUST_CONTEXT; michael@0: frame->instruction = frame->context.rip; michael@0: michael@0: return frame; michael@0: } michael@0: michael@0: StackFrameAMD64* StackwalkerAMD64::GetCallerByCFIFrameInfo( michael@0: const vector &frames, michael@0: CFIFrameInfo* cfi_frame_info) { michael@0: StackFrameAMD64* last_frame = static_cast(frames.back()); michael@0: michael@0: scoped_ptr frame(new StackFrameAMD64()); michael@0: if (!cfi_walker_ michael@0: .FindCallerRegisters(*memory_, *cfi_frame_info, michael@0: last_frame->context, last_frame->context_validity, michael@0: &frame->context, &frame->context_validity)) michael@0: return NULL; michael@0: michael@0: // Make sure we recovered all the essentials. michael@0: static const int essentials = (StackFrameAMD64::CONTEXT_VALID_RIP michael@0: | StackFrameAMD64::CONTEXT_VALID_RSP); michael@0: if ((frame->context_validity & essentials) != essentials) michael@0: return NULL; michael@0: michael@0: frame->trust = StackFrame::FRAME_TRUST_CFI; michael@0: return frame.release(); michael@0: } michael@0: michael@0: StackFrameAMD64* StackwalkerAMD64::GetCallerByStackScan( michael@0: const vector &frames) { michael@0: StackFrameAMD64* last_frame = static_cast(frames.back()); michael@0: uint64_t last_rsp = last_frame->context.rsp; michael@0: uint64_t caller_rip_address, caller_rip; michael@0: michael@0: if (!ScanForReturnAddress(last_rsp, &caller_rip_address, &caller_rip)) { michael@0: // No plausible return address was found. michael@0: return NULL; michael@0: } michael@0: michael@0: // Create a new stack frame (ownership will be transferred to the caller) michael@0: // and fill it in. 
michael@0: StackFrameAMD64* frame = new StackFrameAMD64(); michael@0: michael@0: frame->trust = StackFrame::FRAME_TRUST_SCAN; michael@0: frame->context = last_frame->context; michael@0: frame->context.rip = caller_rip; michael@0: // The caller's %rsp is directly underneath the return address pushed by michael@0: // the call. michael@0: frame->context.rsp = caller_rip_address + 8; michael@0: frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP | michael@0: StackFrameAMD64::CONTEXT_VALID_RSP; michael@0: michael@0: // Other unwinders give up if they don't have an %rbp value, so see if we michael@0: // can pass some plausible value on. michael@0: if (last_frame->context_validity & StackFrameAMD64::CONTEXT_VALID_RBP) { michael@0: // Functions typically push their caller's %rbp immediately upon entry, michael@0: // and then set %rbp to point to that. So if the callee's %rbp is michael@0: // pointing to the first word below the alleged return address, presume michael@0: // that the caller's %rbp is saved there. michael@0: if (caller_rip_address - 8 == last_frame->context.rbp) { michael@0: uint64_t caller_rbp = 0; michael@0: if (memory_->GetMemoryAtAddress(last_frame->context.rbp, &caller_rbp) && michael@0: caller_rbp > caller_rip_address) { michael@0: frame->context.rbp = caller_rbp; michael@0: frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP; michael@0: } michael@0: } else if (last_frame->context.rbp >= caller_rip_address + 8) { michael@0: // If the callee's %rbp is plausible as a value for the caller's michael@0: // %rbp, presume that the callee left it unchanged. 
michael@0: frame->context.rbp = last_frame->context.rbp; michael@0: frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP; michael@0: } michael@0: } michael@0: michael@0: return frame; michael@0: } michael@0: michael@0: StackFrame* StackwalkerAMD64::GetCallerFrame(const CallStack* stack, michael@0: bool stack_scan_allowed) { michael@0: if (!memory_ || !stack) { michael@0: BPLOG(ERROR) << "Can't get caller frame without memory or stack"; michael@0: return NULL; michael@0: } michael@0: michael@0: const vector &frames = *stack->frames(); michael@0: StackFrameAMD64* last_frame = static_cast(frames.back()); michael@0: scoped_ptr new_frame; michael@0: michael@0: // If we have DWARF CFI information, use it. michael@0: scoped_ptr cfi_frame_info( michael@0: frame_symbolizer_->FindCFIFrameInfo(last_frame)); michael@0: if (cfi_frame_info.get()) michael@0: new_frame.reset(GetCallerByCFIFrameInfo(frames, cfi_frame_info.get())); michael@0: michael@0: // If CFI failed, or there wasn't CFI available, fall back michael@0: // to stack scanning. michael@0: if (stack_scan_allowed && !new_frame.get()) { michael@0: new_frame.reset(GetCallerByStackScan(frames)); michael@0: } michael@0: michael@0: // If nothing worked, tell the caller. michael@0: if (!new_frame.get()) michael@0: return NULL; michael@0: michael@0: // Treat an instruction address of 0 as end-of-stack. michael@0: if (new_frame->context.rip == 0) michael@0: return NULL; michael@0: michael@0: // If the new stack pointer is at a lower address than the old, then michael@0: // that's clearly incorrect. Treat this as end-of-stack to enforce michael@0: // progress and avoid infinite loops. michael@0: if (new_frame->context.rsp <= last_frame->context.rsp) michael@0: return NULL; michael@0: michael@0: // new_frame->context.rip is the return address, which is the instruction michael@0: // after the CALL that caused us to arrive at the callee. 
Set michael@0: // new_frame->instruction to one less than that, so it points within the michael@0: // CALL instruction. See StackFrame::instruction for details, and michael@0: // StackFrameAMD64::ReturnAddress. michael@0: new_frame->instruction = new_frame->context.rip - 1; michael@0: michael@0: return new_frame.release(); michael@0: } michael@0: michael@0: } // namespace google_breakpad