Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic depending on private browsing mode
or the privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test
the condition and pass a nullptr when double-key logic is disabled.
// Copyright (c) 2010 Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// stackwalker_amd64.cc: amd64-specific stackwalker.
//
// See stackwalker_amd64.h for documentation.
//
// Author: Mark Mentovai, Ted Mielczarek

#include <assert.h>

#include "common/scoped_ptr.h"
#include "google_breakpad/processor/call_stack.h"
#include "google_breakpad/processor/memory_region.h"
#include "google_breakpad/processor/source_line_resolver_interface.h"
#include "google_breakpad/processor/stack_frame_cpu.h"
#include "processor/cfi_frame_info.h"
#include "common/logging.h"
#include "processor/stackwalker_amd64.h"

namespace google_breakpad {

const StackwalkerAMD64::CFIWalker::RegisterSet
StackwalkerAMD64::cfi_register_map_[] = {
  // It may seem like $rip and $rsp are callee-saves, because the callee is
  // responsible for having them restored upon return. But the callee_saves
  // flags here really mean that the walker should assume they're
  // unchanged if the CFI doesn't mention them --- clearly wrong for $rip
  // and $rsp.
  { ToUniqueString("$rax"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RAX, &MDRawContextAMD64::rax },
  { ToUniqueString("$rdx"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDX, &MDRawContextAMD64::rdx },
  { ToUniqueString("$rcx"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RCX, &MDRawContextAMD64::rcx },
  { ToUniqueString("$rbx"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBX, &MDRawContextAMD64::rbx },
  { ToUniqueString("$rsi"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RSI, &MDRawContextAMD64::rsi },
  { ToUniqueString("$rdi"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDI, &MDRawContextAMD64::rdi },
  { ToUniqueString("$rbp"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBP, &MDRawContextAMD64::rbp },
  { ToUniqueString("$rsp"), ToUniqueString(".cfa"), false,
    StackFrameAMD64::CONTEXT_VALID_RSP, &MDRawContextAMD64::rsp },
  { ToUniqueString("$r8"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R8, &MDRawContextAMD64::r8 },
  { ToUniqueString("$r9"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R9, &MDRawContextAMD64::r9 },
  { ToUniqueString("$r10"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R10, &MDRawContextAMD64::r10 },
  { ToUniqueString("$r11"), NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R11, &MDRawContextAMD64::r11 },
  { ToUniqueString("$r12"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R12, &MDRawContextAMD64::r12 },
  { ToUniqueString("$r13"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R13, &MDRawContextAMD64::r13 },
  { ToUniqueString("$r14"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R14, &MDRawContextAMD64::r14 },
  { ToUniqueString("$r15"), NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R15, &MDRawContextAMD64::r15 },
  { ToUniqueString("$rip"), ToUniqueString(".ra"), false,
    StackFrameAMD64::CONTEXT_VALID_RIP, &MDRawContextAMD64::rip },
};

StackwalkerAMD64::StackwalkerAMD64(const SystemInfo* system_info,
                                   const MDRawContextAMD64* context,
                                   MemoryRegion* memory,
                                   const CodeModules* modules,
                                   StackFrameSymbolizer* resolver_helper)
    : Stackwalker(system_info, memory, modules, resolver_helper),
      context_(context),
      cfi_walker_(cfi_register_map_,
                  (sizeof(cfi_register_map_) / sizeof(cfi_register_map_[0]))) {
}
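
// The return address for an AMD64 frame is simply the %rip value recovered
// for it, which must have been marked valid by whoever built the frame.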
uint64_t StackFrameAMD64::ReturnAddress() const
{
  assert(context_validity & StackFrameAMD64::CONTEXT_VALID_RIP);
  return context.rip;
}

StackFrame* StackwalkerAMD64::GetContextFrame() {
  if (!context_) {
    BPLOG(ERROR) << "Can't get context frame without context";
    return NULL;
  }

  StackFrameAMD64* frame = new StackFrameAMD64();

  // The instruction pointer is stored directly in a register, so pull it
  // straight out of the CPU context structure.
  frame->context = *context_;
  frame->context_validity = StackFrameAMD64::CONTEXT_VALID_ALL;
  frame->trust = StackFrame::FRAME_TRUST_CONTEXT;
  frame->instruction = frame->context.rip;

  return frame;
}
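
// Attempt to recover the caller's registers by applying the given CFI rules
// to the newest frame's context. Returns NULL unless the recovered frame has
// at least a valid %rip and %rsp.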
StackFrameAMD64* StackwalkerAMD64::GetCallerByCFIFrameInfo(
    const vector<StackFrame*> &frames,
    CFIFrameInfo* cfi_frame_info) {
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());

  scoped_ptr<StackFrameAMD64> frame(new StackFrameAMD64());
  if (!cfi_walker_
      .FindCallerRegisters(*memory_, *cfi_frame_info,
                           last_frame->context, last_frame->context_validity,
                           &frame->context, &frame->context_validity))
    return NULL;

  // Make sure we recovered all the essentials.
  static const int essentials = (StackFrameAMD64::CONTEXT_VALID_RIP
                                 | StackFrameAMD64::CONTEXT_VALID_RSP);
  if ((frame->context_validity & essentials) != essentials)
    return NULL;

  frame->trust = StackFrame::FRAME_TRUST_CFI;
  return frame.release();
}
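
// Fallback unwinder: scan up the stack from the newest frame's %rsp for a
// value that looks like a return address, and build a caller frame from it.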
StackFrameAMD64* StackwalkerAMD64::GetCallerByStackScan(
    const vector<StackFrame*> &frames) {
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
  uint64_t last_rsp = last_frame->context.rsp;
  uint64_t caller_rip_address, caller_rip;

  if (!ScanForReturnAddress(last_rsp, &caller_rip_address, &caller_rip)) {
    // No plausible return address was found.
    return NULL;
  }

  // Create a new stack frame (ownership will be transferred to the caller)
  // and fill it in.
  StackFrameAMD64* frame = new StackFrameAMD64();

  frame->trust = StackFrame::FRAME_TRUST_SCAN;
  frame->context = last_frame->context;
  frame->context.rip = caller_rip;
  // The caller's %rsp is directly underneath the return address pushed by
  // the call.
  frame->context.rsp = caller_rip_address + 8;
  frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP |
                            StackFrameAMD64::CONTEXT_VALID_RSP;

  // Other unwinders give up if they don't have an %rbp value, so see if we
  // can pass some plausible value on.
  if (last_frame->context_validity & StackFrameAMD64::CONTEXT_VALID_RBP) {
    // Functions typically push their caller's %rbp immediately upon entry,
    // and then set %rbp to point to that. So if the callee's %rbp is
    // pointing to the first word below the alleged return address, presume
    // that the caller's %rbp is saved there.
    if (caller_rip_address - 8 == last_frame->context.rbp) {
      uint64_t caller_rbp = 0;
      if (memory_->GetMemoryAtAddress(last_frame->context.rbp, &caller_rbp) &&
          caller_rbp > caller_rip_address) {
        frame->context.rbp = caller_rbp;
        frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
      }
    } else if (last_frame->context.rbp >= caller_rip_address + 8) {
      // If the callee's %rbp is plausible as a value for the caller's
      // %rbp, presume that the callee left it unchanged.
      frame->context.rbp = last_frame->context.rbp;
      frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
    }
  }

  return frame;
}

StackFrame* StackwalkerAMD64::GetCallerFrame(const CallStack* stack,
                                             bool stack_scan_allowed) {
  if (!memory_ || !stack) {
    BPLOG(ERROR) << "Can't get caller frame without memory or stack";
    return NULL;
  }

  const vector<StackFrame*> &frames = *stack->frames();
  StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back());
  scoped_ptr<StackFrameAMD64> new_frame;

  // If we have DWARF CFI information, use it.
  scoped_ptr<CFIFrameInfo> cfi_frame_info(
      frame_symbolizer_->FindCFIFrameInfo(last_frame));
  if (cfi_frame_info.get())
    new_frame.reset(GetCallerByCFIFrameInfo(frames, cfi_frame_info.get()));

  // If CFI failed, or there wasn't CFI available, fall back
  // to stack scanning.
  if (stack_scan_allowed && !new_frame.get()) {
    new_frame.reset(GetCallerByStackScan(frames));
  }

  // If nothing worked, tell the caller.
  if (!new_frame.get())
    return NULL;

  // Treat an instruction address of 0 as end-of-stack.
  if (new_frame->context.rip == 0)
    return NULL;

  // If the new stack pointer is at a lower address than the old, then
  // that's clearly incorrect. Treat this as end-of-stack to enforce
  // progress and avoid infinite loops.
  if (new_frame->context.rsp <= last_frame->context.rsp)
    return NULL;

  // new_frame->context.rip is the return address, which is the instruction
  // after the CALL that caused us to arrive at the callee. Set
  // new_frame->instruction to one less than that, so it points within the
  // CALL instruction. See StackFrame::instruction for details, and
  // StackFrameAMD64::ReturnAddress.
  new_frame->instruction = new_frame->context.rip - 1;

  return new_frame.release();
}

}  // namespace google_breakpad