Tue, 06 Jan 2015 21:39:09 +0100
Conditionally force memory storage according to privacy.thirdparty.isolate.
This resolves Tor bug #9701, complying with the disk-avoidance requirement
documented at https://www.torproject.org/projects/torbrowser/design/#disk-avoidance.
1 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 // System
7 #include <string>
8 #include <stdio.h>
9 #include <errno.h>
10 #include <ostream>
11 #include <fstream>
12 #include <sstream>
14 // Profiler
15 #include "PlatformMacros.h"
16 #include "GeckoProfiler.h"
17 #include "platform.h"
18 #include "nsXULAppAPI.h"
19 #include "nsThreadUtils.h"
20 #include "prenv.h"
21 #include "shared-libraries.h"
22 #include "mozilla/StackWalk.h"
23 #include "ProfileEntry.h"
24 #include "SyncProfile.h"
25 #include "SaveProfileTask.h"
26 #include "UnwinderThread2.h"
27 #include "TableTicker.h"
29 // Meta
30 #include "nsXPCOM.h"
31 #include "nsXPCOMCID.h"
32 #include "nsIHttpProtocolHandler.h"
33 #include "nsServiceManagerUtils.h"
34 #include "nsIXULRuntime.h"
35 #include "nsIXULAppInfo.h"
36 #include "nsDirectoryServiceUtils.h"
37 #include "nsDirectoryServiceDefs.h"
38 #include "nsIObserverService.h"
39 #include "mozilla/Services.h"
41 // JS
42 #include "js/OldDebugAPI.h"
44 // This file's exports are listed in GeckoProfilerImpl.h.
/* Unwind configuration.  These will be set to something sensible
   before we take the first sample. */
UnwMode sUnwindMode = UnwINVALID;  // backtrace strategy: native, pseudo, or both (see populateBuffer)
int sUnwindInterval = 0;           // NOTE(review): units not visible here -- presumably ms; confirm at the setter
int sUnwindStackScan = 0;          // stack-scan depth for the native unwinder -- TODO confirm
int sProfileEntries = 0;           // requested profile buffer size -- TODO confirm

using std::string;
using namespace mozilla;

// MSVC spells C99 snprintf as _snprintf.
#if _MSC_VER
#define snprintf _snprintf
#endif
61 ////////////////////////////////////////////////////////////////////////
62 // BEGIN take samples.
63 // Everything in this section RUNS IN SIGHANDLER CONTEXT
65 // RUNS IN SIGHANDLER CONTEXT
66 static
67 void genProfileEntry(/*MODIFIED*/UnwinderThreadBuffer* utb,
68 volatile StackEntry &entry,
69 PseudoStack *stack, void *lastpc)
70 {
71 int lineno = -1;
73 // Add a pseudostack-entry start label
74 utb__addEntry( utb, ProfileEntry('h', 'P') );
75 // And the SP value, if it is non-zero
76 if (entry.stackAddress() != 0) {
77 utb__addEntry( utb, ProfileEntry('S', entry.stackAddress()) );
78 }
80 // First entry has tagName 's' (start)
81 // Check for magic pointer bit 1 to indicate copy
82 const char* sampleLabel = entry.label();
83 if (entry.isCopyLabel()) {
84 // Store the string using 1 or more 'd' (dynamic) tags
85 // that will happen to the preceding tag
87 utb__addEntry( utb, ProfileEntry('c', "") );
88 // Add one to store the null termination
89 size_t strLen = strlen(sampleLabel) + 1;
90 for (size_t j = 0; j < strLen;) {
91 // Store as many characters in the void* as the platform allows
92 char text[sizeof(void*)];
93 for (size_t pos = 0; pos < sizeof(void*) && j+pos < strLen; pos++) {
94 text[pos] = sampleLabel[j+pos];
95 }
96 j += sizeof(void*)/sizeof(char);
97 // Cast to *((void**) to pass the text data to a void*
98 utb__addEntry( utb, ProfileEntry('d', *((void**)(&text[0]))) );
99 }
100 if (entry.js()) {
101 if (!entry.pc()) {
102 // The JIT only allows the top-most entry to have a nullptr pc
103 MOZ_ASSERT(&entry == &stack->mStack[stack->stackSize() - 1]);
104 // If stack-walking was disabled, then that's just unfortunate
105 if (lastpc) {
106 jsbytecode *jspc = js::ProfilingGetPC(stack->mRuntime, entry.script(),
107 lastpc);
108 if (jspc) {
109 lineno = JS_PCToLineNumber(nullptr, entry.script(), jspc);
110 }
111 }
112 } else {
113 lineno = JS_PCToLineNumber(nullptr, entry.script(), entry.pc());
114 }
115 } else {
116 lineno = entry.line();
117 }
118 } else {
119 utb__addEntry( utb, ProfileEntry('c', sampleLabel) );
120 lineno = entry.line();
121 }
122 if (lineno != -1) {
123 utb__addEntry( utb, ProfileEntry('n', lineno) );
124 }
126 // Add a pseudostack-entry end label
127 utb__addEntry( utb, ProfileEntry('h', 'Q') );
128 }
130 // RUNS IN SIGHANDLER CONTEXT
131 // Generate pseudo-backtrace entries and put them in |utb|, with
132 // the order outermost frame first.
133 void genPseudoBacktraceEntries(/*MODIFIED*/UnwinderThreadBuffer* utb,
134 PseudoStack *aStack, TickSample *sample)
135 {
136 // Call genProfileEntry to generate tags for each profile
137 // entry. Each entry will be bounded by a 'h' 'P' tag to
138 // mark the start and a 'h' 'Q' tag to mark the end.
139 uint32_t nInStack = aStack->stackSize();
140 for (uint32_t i = 0; i < nInStack; i++) {
141 genProfileEntry(utb, aStack->mStack[i], aStack, nullptr);
142 }
143 # ifdef ENABLE_SPS_LEAF_DATA
144 if (sample) {
145 utb__addEntry( utb, ProfileEntry('l', (void*)sample->pc) );
146 # ifdef ENABLE_ARM_LR_SAVING
147 utb__addEntry( utb, ProfileEntry('L', (void*)sample->lr) );
148 # endif
149 }
150 # endif
151 }
// RUNS IN SIGHANDLER CONTEXT
//
// Fill |utb| with the complete set of ProfileEntries for one sample --
// queued sync buffers, pending markers, a backtrace request and/or
// pseudo-backtrace entries, and timing extras -- then hand it off via
// |releaseFunction| together with the register context needed for a
// native unwind, when that is enabled.  With |jankOnly| set, the
// sample is only flushed if no tracer event was seen for 100ms.
static
void populateBuffer(UnwinderThreadBuffer* utb, TickSample* sample,
                    UTB_RELEASE_FUNC releaseFunction, bool jankOnly)
{
  ThreadProfile& sampledThreadProfile = *sample->threadProfile;
  PseudoStack* stack = sampledThreadProfile.GetPseudoStack();

  /* Manufacture the ProfileEntries that we will give to the unwinder
     thread, and park them in |utb|. */

  bool recordSample = true;

  /* Don't process the PseudoStack's markers or honour jankOnly if we're
     immediately sampling the current thread. */
  if (!sample->isSamplingCurrentThread) {
    // LinkedUWTBuffers before markers
    UWTBufferLinkedList* syncBufs = stack->getLinkedUWTBuffers();
    while (syncBufs && syncBufs->peek()) {
      LinkedUWTBuffer* syncBuf = syncBufs->popHead();
      utb__addEntry(utb, ProfileEntry('B', syncBuf->GetBuffer()));
    }
    // Marker(s) come before the sample
    ProfilerMarkerLinkedList* pendingMarkersList = stack->getPendingMarkers();
    while (pendingMarkersList && pendingMarkersList->peek()) {
      ProfilerMarker* marker = pendingMarkersList->popHead();
      stack->addStoredMarker(marker);
      utb__addEntry( utb, ProfileEntry('m', marker) );
    }
    stack->updateGeneration(sampledThreadProfile.GetGenerationID());
    if (jankOnly) {
      // if we are on a different event we can discard any temporary samples
      // we've kept around
      if (sLastSampledEventGeneration != sCurrentEventGeneration) {
        // XXX: we also probably want to add an entry to the profile to help
        // distinguish which samples are part of the same event. That, or record
        // the event generation in each sample
        sampledThreadProfile.erase();
      }
      sLastSampledEventGeneration = sCurrentEventGeneration;

      recordSample = false;
      // only record the events when we haven't seen a tracer
      // event for 100ms
      if (!sLastTracerEvent.IsNull()) {
        TimeDuration delta = sample->timestamp - sLastTracerEvent;
        if (delta.ToMilliseconds() > 100.0) {
          recordSample = true;
        }
      }
    }
  }

  // JRS 2012-Sept-27: this logic used to involve mUseStackWalk.
  // That should be reinstated, but for the moment, use the
  // settings in sUnwindMode and sUnwindInterval.
  // Add a native-backtrace request, or add pseudo backtrace entries,
  // or both.
  switch (sUnwindMode) {
    case UnwNATIVE: /* Native only */
      // add a "do native stack trace now" hint. This will be actioned
      // by the unwinder thread as it processes the entries in this
      // sample.
      utb__addEntry( utb, ProfileEntry('h'/*hint*/, 'N'/*native-trace*/) );
      break;
    case UnwPSEUDO: /* Pseudo only */
      /* Add into |utb|, the pseudo backtrace entries */
      genPseudoBacktraceEntries(utb, stack, sample);
      break;
    case UnwCOMBINED: /* Both Native and Pseudo */
      utb__addEntry( utb, ProfileEntry('h'/*hint*/, 'N'/*native-trace*/) );
      genPseudoBacktraceEntries(utb, stack, sample);
      break;
    case UnwINVALID:
    default:
      MOZ_CRASH();
  }

  if (recordSample) {
    // add a "flush now" hint
    utb__addEntry( utb, ProfileEntry('h'/*hint*/, 'F'/*flush*/) );
  }

  // Add any extras
  // 'r': milliseconds since the last tracer (responsiveness) event
  if (!sLastTracerEvent.IsNull() && sample) {
    TimeDuration delta = sample->timestamp - sLastTracerEvent;
    utb__addEntry( utb, ProfileEntry('r', static_cast<float>(delta.ToMilliseconds())) );
  }

  // 't': sample timestamp, in milliseconds since profiler start
  if (sample) {
    TimeDuration delta = sample->timestamp - sStartTime;
    utb__addEntry( utb, ProfileEntry('t', static_cast<float>(delta.ToMilliseconds())) );
  }

  // 'f': frame number, emitted only when it has changed
  if (sLastFrameNumber != sFrameNumber) {
    utb__addEntry( utb, ProfileEntry('f', sFrameNumber) );
    sLastFrameNumber = sFrameNumber;
  }

  /* So now we have, in |utb|, the complete set of entries we want to
     push into the circular buffer. This may also include a 'h' 'F'
     entry, which is "flush now" hint, and/or a 'h' 'N' entry, which
     is a "generate a native backtrace and add it to the buffer right
     now" hint. Hand them off to the helper thread, together with
     stack and register context needed to do a native unwind, if that
     is currently enabled. */

  /* If a native unwind has been requested, we'll start it off using
     the context obtained from the signal handler, to avoid the
     problem of having to unwind through the signal frame itself. */

  /* On Linux and Android, the initial register state is in the
     supplied sample->context. But on MacOS it's not, so we have to
     fake it up here (sigh). */
  if (sUnwindMode == UnwNATIVE || sUnwindMode == UnwCOMBINED) {
# if defined(SPS_PLAT_amd64_linux) || defined(SPS_PLAT_arm_android) \
     || defined(SPS_PLAT_x86_linux) || defined(SPS_PLAT_x86_android)
    void* ucV = (void*)sample->context;
# elif defined(SPS_PLAT_amd64_darwin)
    // Build a fake 64-bit Darwin ucontext from the sampled pc/sp/fp.
    struct __darwin_mcontext64 mc;
    memset(&mc, 0, sizeof(mc));
    ucontext_t uc;
    memset(&uc, 0, sizeof(uc));
    uc.uc_mcontext = &mc;
    mc.__ss.__rip = (uint64_t)sample->pc;
    mc.__ss.__rsp = (uint64_t)sample->sp;
    mc.__ss.__rbp = (uint64_t)sample->fp;
    void* ucV = (void*)&uc;
# elif defined(SPS_PLAT_x86_darwin)
    // Same dance for 32-bit Darwin.
    struct __darwin_mcontext32 mc;
    memset(&mc, 0, sizeof(mc));
    ucontext_t uc;
    memset(&uc, 0, sizeof(uc));
    uc.uc_mcontext = &mc;
    mc.__ss.__eip = (uint32_t)sample->pc;
    mc.__ss.__esp = (uint32_t)sample->sp;
    mc.__ss.__ebp = (uint32_t)sample->fp;
    void* ucV = (void*)&uc;
# elif defined(SPS_OS_windows)
    /* Totally fake this up so it at least builds. No idea if we can
       even ever get here on Windows. */
    void* ucV = nullptr;
# else
#   error "Unsupported platform"
# endif
    releaseFunction(&sampledThreadProfile, utb, ucV);
  } else {
    releaseFunction(&sampledThreadProfile, utb, nullptr);
  }
}
303 static
304 void sampleCurrent(TickSample* sample)
305 {
306 // This variant requires sample->threadProfile to be set
307 MOZ_ASSERT(sample->threadProfile);
308 LinkedUWTBuffer* syncBuf = utb__acquire_sync_buffer(tlsStackTop.get());
309 if (!syncBuf) {
310 return;
311 }
312 SyncProfile* syncProfile = sample->threadProfile->AsSyncProfile();
313 MOZ_ASSERT(syncProfile);
314 if (!syncProfile->SetUWTBuffer(syncBuf)) {
315 utb__release_sync_buffer(syncBuf);
316 return;
317 }
318 UnwinderThreadBuffer* utb = syncBuf->GetBuffer();
319 populateBuffer(utb, sample, &utb__finish_sync_buffer, false);
320 }
322 // RUNS IN SIGHANDLER CONTEXT
323 void TableTicker::UnwinderTick(TickSample* sample)
324 {
325 if (sample->isSamplingCurrentThread) {
326 sampleCurrent(sample);
327 return;
328 }
330 if (!sample->threadProfile) {
331 // Platform doesn't support multithread, so use the main thread profile we created
332 sample->threadProfile = GetPrimaryThreadProfile();
333 }
335 /* Get hold of an empty inter-thread buffer into which to park
336 the ProfileEntries for this sample. */
337 UnwinderThreadBuffer* utb = uwt__acquire_empty_buffer();
339 /* This could fail, if no buffers are currently available, in which
340 case we must give up right away. We cannot wait for a buffer to
341 become available, as that risks deadlock. */
342 if (!utb)
343 return;
345 populateBuffer(utb, sample, &uwt__release_full_buffer, mJankOnly);
346 }
348 // END take samples
349 ////////////////////////////////////////////////////////////////////////