--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/gfx/skia/trunk/src/core/SkTraceEvent.h	Wed Dec 31 06:09:35 2014 +0100
@@ -0,0 +1,1245 @@
+// Copyright (c) 2014 Google Inc.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header file defines the set of trace_event macros without specifying
+// how the events actually get collected and stored. If you need to expose trace
+// events to some other universe, you can copy-and-paste this file as well as
+// trace_event.h, modifying the macros contained there as necessary for the
+// target platform. The end result is that multiple libraries can funnel events
+// through to a shared trace event collector.
+
+// Trace events are for tracking application performance and resource usage.
+// Macros are provided to track:
+//    Begin and end of function calls
+//    Counters
+//
+// Events are issued against categories. Whereas LOG's
+// categories are statically defined, TRACE categories are created
+// implicitly with a string. For example:
+//   TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
+//                        TRACE_EVENT_SCOPE_THREAD)
+//
+// It is often the case that one trace may belong in multiple categories at the
+// same time. The first argument to the trace can be a comma-separated list of
+// categories, forming a category group, like:
+//
+// TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
+//
+// We can enable/disable tracing of OnMouseOver by enabling/disabling either
+// category.
+//
+// Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
+//   TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
+//   doSomethingCostly()
+//   TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
+// Note: our tools can't always determine the correct BEGIN/END pairs unless
+// these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
+// need them to be in separate scopes.
+//
+// A common use case is to trace entire function scopes. This
+// issues a trace BEGIN and END automatically:
+//   void doSomethingCostly() {
+//     TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
+//     ...
+//   }
+//
+// Additional parameters can be associated with an event:
+//   void doSomethingCostly2(int howMuch) {
+//     TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
+//                  "howMuch", howMuch);
+//     ...
+//   }
+//
+// The trace system will automatically add to this information the
+// current process id, thread id, and a timestamp in microseconds.
+//
+// To trace an asynchronous procedure such as an IPC send/receive, use
+// ASYNC_BEGIN and ASYNC_END:
+//   [single threaded sender code]
+//   static int send_count = 0;
+//   ++send_count;
+//   TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
+//   Send(new MyMessage(send_count));
+//   [receive code]
+//   void OnMyMessage(send_count) {
+//     TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
+//   }
+// The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
+// ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
+// Pointers can be used for the ID parameter, and they will be mangled
+// internally so that the same pointer on two different processes will not
+// match. For example:
+//   class MyTracedClass {
+//    public:
+//     MyTracedClass() {
+//       TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
+//     }
+//     ~MyTracedClass() {
+//       TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
+//     }
+//   }
+//
+// Trace event also supports counters, which are a way to track a quantity
+// as it varies over time. Counters are created with the following macro:
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
+//
+// Counters are process-specific. The macro itself can be issued from any
+// thread, however.
+//
+// Sometimes, you want to track two counters at once. You can do this with two
+// counter macros:
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
+//   TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
+// Or you can do it with a combined macro:
+//   TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
+//                  "bytesPinned", g_myCounterValue[0],
+//                  "bytesAllocated", g_myCounterValue[1]);
+// This indicates to the tracing UI that these counters should be displayed
+// in a single graph, as a summed area chart.
+//
+// Since counters are in a global namespace, you may want to disambiguate with a
+// unique ID, by using the TRACE_COUNTER_ID* variations.
+//
+// By default, trace collection is compiled in, but turned off at runtime.
+// Collecting trace data is the responsibility of the embedding
+// application. In Chrome's case, navigating to about:tracing will turn on
+// tracing and display data collected across all active processes.
+//
+//
+// Memory scoping note:
+// Tracing copies the pointers, not the string content, of the strings passed
+// in for category_group, name, and arg_names. Thus, the following code will
+// cause problems:
+//     char* str = strdup("importantName");
+//     TRACE_EVENT_INSTANT0("SUBSYSTEM", str, TRACE_EVENT_SCOPE_THREAD); // BAD!
+//     free(str);  // Trace system now has dangling pointer
+//
+// To avoid this issue with the |name| and |arg_name| parameters, use the
+// TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
+// Notes: The category must always be in a long-lived char* (i.e. static const).
+//        The |arg_values|, when used, are always deep copied with the _COPY
+//        macros.
+//
+// When are string argument values copied:
+// const char* arg_values are only referenced by default:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", "literal string is only referenced");
+// Use TRACE_STR_COPY to force copying of a const char*:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", TRACE_STR_COPY("string will be copied"));
+// std::string arg_values are always copied:
+//     TRACE_EVENT1("category", "name",
+//                  "arg1", std::string("string will be copied"));
+//
+//
+// Thread Safety:
+// A thread safe singleton and mutex are used for thread safety. Category
+// enabled flags are used to limit the performance impact when the system
+// is not enabled.
+//
+// TRACE_EVENT macros first cache a pointer to a category. The categories are
+// statically allocated and safe at all times, even after exit. Fetching a
+// category is protected by the TraceLog::lock_. Multiple threads initializing
+// the static variable is safe, as they will be serialized by the lock and
+// multiple calls will return the same pointer to the category.
+//
+// Then the category_group_enabled flag is checked. This is an unsigned char,
+// and not intended to be multithread safe. It optimizes access to
+// AddTraceEvent, which is threadsafe internally via TraceLog::lock_. The
+// enabled flag may cause some threads to incorrectly call or skip calling
+// AddTraceEvent near the time of the system being enabled or disabled. This is
+// acceptable as we tolerate some data loss while the system is being
+// enabled/disabled and because AddTraceEvent is threadsafe internally and
+// checks the enabled state again under lock.
+//
+// Without the use of these static category pointers and enabled flags all
+// trace points would carry a significant performance cost of acquiring a lock
+// and resolving the category.
+
+#ifndef SkTraceEvent_DEFINED
+#define SkTraceEvent_DEFINED
+
+#include "SkEventTracer.h"
+
+// By default, const char* argument values are assumed to have long-lived scope
+// and will not be copied. Use this macro to force a const char* to be copied.
+#define TRACE_STR_COPY(str) \
+    skia::tracing_internals::TraceStringWithCopy(str)
+
+// By default, uint64 ID argument values are not mangled with the Process ID in
+// TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
+#define TRACE_ID_MANGLE(id) \
+    skia::tracing_internals::TraceID::ForceMangle(id)
+
+// By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
+// macros. Use this macro to prevent Process ID mangling.
+#define TRACE_ID_DONT_MANGLE(id) \
+    skia::tracing_internals::TraceID::DontMangle(id)
+
+// Records a pair of begin and end events called "name" for the current
+// scope, with 0, 1 or 2 associated arguments. If the category is not
+// enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT0(category_group, name) \
+    INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
+#define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
+#define TRACE_EVENT2( \
+    category_group, name, arg1_name, arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_SCOPED( \
+        category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records events like TRACE_EVENT2 but uses |memory_tag| for memory tracing.
+// Use this where |name| is too generic to accurately aggregate allocations.
+#define TRACE_EVENT_WITH_MEMORY_TAG2( \
+    category, name, memory_tag, arg1_name, arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_SCOPED( \
+        category, name, arg1_name, arg1_val, arg2_name, arg2_val)
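+
+// Illustrative sketch (not part of the original header): combining the scoped
+// macro above with TRACE_STR_COPY for a string that does not outlive the call.
+// The category "skia" and the function below are hypothetical.
+//   void drawOp(const SkString& desc) {
+//     TRACE_EVENT1("skia", "drawOp",
+//                  "desc", TRACE_STR_COPY(desc.c_str()));
+//     // ... the scoped event ends when this scope exits ...
+//   }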
+
+// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
+// included in official builds.
+
+#if OFFICIAL_BUILD
+#undef TRACING_IS_OFFICIAL_BUILD
+#define TRACING_IS_OFFICIAL_BUILD 1
+#elif !defined(TRACING_IS_OFFICIAL_BUILD)
+#define TRACING_IS_OFFICIAL_BUILD 0
+#endif
+
+#if TRACING_IS_OFFICIAL_BUILD
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+    (void)0
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+                               arg2_name, arg2_val) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, \
+                                       arg1_name, arg1_val) (void)0
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, \
+                                       arg1_name, arg1_val, \
+                                       arg2_name, arg2_val) (void)0
+#else
+#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
+    TRACE_EVENT0(category_group, name)
+#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
+    TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
+                               arg2_name, arg2_val) \
+    TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
+    TRACE_EVENT_INSTANT0(category_group, name, scope)
+#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, \
+                                       arg1_name, arg1_val) \
+    TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
+#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, \
+                                       arg1_name, arg1_val, \
+                                       arg2_name, arg2_val) \
+    TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+                         arg2_name, arg2_val)
+#endif
+
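+// Illustrative sketch (not part of the original header): UNSHIPPED_* events
+// compile to (void)0 when TRACING_IS_OFFICIAL_BUILD is 1, so they are safe for
+// verbose, developer-only annotations. The category and name are hypothetical.
+//   UNSHIPPED_TRACE_EVENT0("skia.debug", "GrContext::internalFlush");
+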
+// Records a single event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_INSTANT0(category_group, name, scope) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category_group, name, TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category_group, name, TRACE_EVENT_FLAG_NONE | scope, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
+                             arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category_group, name, TRACE_EVENT_FLAG_NONE | scope, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category_group, name, TRACE_EVENT_FLAG_COPY | scope)
+#define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, \
+                                  arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category_group, name, TRACE_EVENT_FLAG_COPY | scope, arg1_name, \
+        arg1_val)
+#define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, \
+                                  arg1_name, arg1_val, \
+                                  arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
+        category_group, name, TRACE_EVENT_FLAG_COPY | scope, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Sets the current sample state to the given category and name (both must be
+// constant strings). These states are intended for a sampling profiler.
+// Implementation note: we store category and name together because we don't
+// want the inconsistency/expense of storing two pointers.
+// |thread_bucket| is [0..2] and is used to statically isolate samples in one
+// thread from others.
+#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
+    bucket_number, category, name) \
+    skia::tracing_internals:: \
+        TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)
+
+// Returns a current sampling state of the given bucket.
+#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
+    skia::tracing_internals::TraceEventSamplingStateScope<bucket_number>::Current()
+
+// Creates a scope of a sampling state of the given bucket.
+//
+// {  // The sampling state is set within this scope.
+//    TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, "category", "name");
+//    ...;
+// }
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET( \
+    bucket_number, category, name) \
+    skia::tracing_internals::TraceEventSamplingStateScope<bucket_number> \
+        traceEventSamplingScope(category "\0" name);
+
+// Syntactic sugar for the sampling tracing in the main thread.
+#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
+    TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+#define TRACE_EVENT_GET_SAMPLING_STATE() \
+    TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
+#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
+    TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
+
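+// Illustrative sketch (not part of the original header): an instant event
+// scoped to the current thread. The category and argument are hypothetical.
+//   TRACE_EVENT_INSTANT1("skia.gpu", "CacheMiss",
+//                        TRACE_EVENT_SCOPE_THREAD,
+//                        "bytes", resourceBytes);
+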
+// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_BEGIN0(category_group, name) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category_group, name, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \
+                           arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category_group, name, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
+                                arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
+        category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_BEGINx but with a custom |timestamp| provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+//   Events are considered to match if their category_group, name and id values
+//   all match. |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, \
+        name, id, thread_id, timestamp) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+        TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+        timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \
+        category_group, name, id, thread_id, timestamp) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+        TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
+        timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records a single END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_EVENT_END0(category_group, name) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category_group, name, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, \
+                         arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_END0(category_group, name) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category_group, name, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
+                              arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
+        category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
+        arg2_name, arg2_val)
+
+// Similar to TRACE_EVENT_ENDx but with a custom |timestamp| provided.
+// - |id| is used to match the _BEGIN event with the _END event.
+//   Events are considered to match if their category_group, name and id values
+//   all match. |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+#define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, \
+        name, id, thread_id, timestamp) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+        TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+        timestamp, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \
+        category_group, name, id, thread_id, timestamp) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+        TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
+        timestamp, TRACE_EVENT_FLAG_COPY)
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_COUNTER1(category_group, name, value) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+        category_group, name, TRACE_EVENT_FLAG_NONE, \
+        "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER1(category_group, name, value) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+        category_group, name, TRACE_EVENT_FLAG_COPY, \
+        "value", static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+#define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
+                       value2_name, value2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+        category_group, name, TRACE_EVENT_FLAG_NONE, \
+        value1_name, static_cast<int>(value1_val), \
+        value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \
+                            value2_name, value2_val) \
+    INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
+        category_group, name, TRACE_EVENT_FLAG_COPY, \
+        value1_name, static_cast<int>(value1_val), \
+        value2_name, static_cast<int>(value2_val))
+
+// Records the value of a counter called "name" immediately. Value
+// must be representable as a 32 bit integer.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+//   be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+//   will be xored with a hash of the process ID so that the same pointer on
+//   two different processes will not collide.
+#define TRACE_COUNTER_ID1(category_group, name, id, value) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+        "value", static_cast<int>(value))
+#define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+        "value", static_cast<int>(value))
+
+// Records the values of a multi-parted counter called "name" immediately.
+// The UI will treat value1 and value2 as parts of a whole, displaying their
+// values as a stacked-bar chart.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to disambiguate counters with the same name. It must either
+//   be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
+//   will be xored with a hash of the process ID so that the same pointer on
+//   two different processes will not collide.
+#define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
+                          value2_name, value2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+        value1_name, static_cast<int>(value1_val), \
+        value2_name, static_cast<int>(value2_val))
+#define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
+                               value1_val, value2_name, value2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+        value1_name, static_cast<int>(value1_val), \
+        value2_name, static_cast<int>(value2_val))
+
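+// Illustrative sketch (not part of the original header): per-instance counters
+// disambiguated by |this| as the id; the cache class and member below are
+// hypothetical.
+//   void MyCache::didPurge() {
+//     TRACE_COUNTER_ID1("skia.cache", "cacheBytes", this, fBudgetedBytes);
+//   }
+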
+// Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
+//   events are considered to match if their category_group, name and id values
+//   all match. |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+//
+// An asynchronous operation can consist of multiple phases. The first phase is
+// defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
+// ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
+// annotate the block following the call. The ASYNC_STEP_PAST macro will
+// annotate the block prior to the call. Note that any particular event must
+// use only STEP_INTO or STEP_PAST macros; they cannot be mixed and matched.
+// When the operation completes, call ASYNC_END.
+//
+// An ASYNC trace typically occurs on a single thread (if not, events will only
+// be drawn on the thread defined in the ASYNC_BEGIN event), but all events in
+// that operation must use the same |name| and |id|. Each step can have its own
+// args.
+#define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                 arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                 arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
+                                      arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
+                                      arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_PAST events.
+#define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
+                                     arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
+        arg1_name, arg1_val)
+
+// Records a single ASYNC_STEP_PAST event for |step| immediately. If the
+// category is not enabled, then this does nothing. The |name| and |id| must
+// match the ASYNC_BEGIN event above. The |step| param identifies this step
+// within the async event. This should be called at the beginning of the next
+// phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
+// ASYNC_STEP_INTO events.
+#define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
+                                     arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
+        arg1_name, arg1_val)
+
+// Records a single ASYNC_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_ASYNC_END0(category_group, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
+                               arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
+                                    arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
+                                    arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+
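+// Illustrative sketch (not part of the original header): a multi-phase async
+// operation keyed by a request pointer; the names below are hypothetical.
+//   TRACE_EVENT_ASYNC_BEGIN0("skia.net", "LoadImage", request);
+//   ...
+//   TRACE_EVENT_ASYNC_STEP_INTO0("skia.net", "LoadImage", request, "Decode");
+//   ...
+//   TRACE_EVENT_ASYNC_END0("skia.net", "LoadImage", request);
+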
+// Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
+// associated arguments. If the category is not enabled, then this
+// does nothing.
+// - category and name strings must have application lifetime (statics or
+//   literals). They may not include " chars.
+// - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
+//   events are considered to match if their category_group, name and id values
+//   all match. |id| must either be a pointer or an integer value up to 64 bits.
+//   If it's a pointer, the bits will be xored with a hash of the process ID so
+//   that the same pointer on two different processes will not collide.
+// FLOW events are different from ASYNC events in how they are drawn by the
+// tracing UI. A FLOW defines asynchronous data flow, such as posting a task
+// (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
+// drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
+// to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
+// by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
+// macros. When the operation completes, call FLOW_END. A FLOW operation can
+// span threads and processes, but all events in that operation must use the
+// same |name| and |id|. Each event can have its own args.
+#define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
+                                arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
+                                     arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
+                                     arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+
+// Records a single FLOW_STEP event for |step| immediately. If the category
+// is not enabled, then this does nothing. The |name| and |id| must match the
+// FLOW_BEGIN event above. The |step| param identifies this step within the
+// flow event. This should be called at the beginning of the next phase of an
+// asynchronous operation.
+#define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
+#define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, \
+                               arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, "step", step)
+#define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, \
+                                    arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, "step", step, \
+        arg1_name, arg1_val)
+
+// Records a single FLOW_END event for "name" immediately. If the category
+// is not enabled, then this does nothing.
+#define TRACE_EVENT_FLOW_END0(category_group, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
+#define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
+                              arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_NONE, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
+                                   arg1_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val)
+#define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
+                                   arg1_val, arg2_name, arg2_val) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
+        category_group, name, id, TRACE_EVENT_FLAG_COPY, \
+        arg1_name, arg1_val, arg2_name, arg2_val)
+
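+// Illustrative sketch (not part of the original header): linking a posted task
+// to its later execution; the task pointer and names are hypothetical.
+//   TRACE_EVENT_FLOW_BEGIN0("skia.tasks", "DecodeTask", task);  // at post
+//   ...
+//   TRACE_EVENT_FLOW_END0("skia.tasks", "DecodeTask", task);    // at run
+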
+// Macros to track the life time and value of arbitrary client objects.
+// See also TraceTrackableObject.
+#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_CREATE_OBJECT, \
+        category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, snapshot) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, \
+        category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE, \
+        "snapshot", snapshot)
+
+#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
+    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_DELETE_OBJECT, \
+        category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
+
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
+    *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+        (SkEventTracer::kEnabledForRecording_CategoryGroupEnabledFlags | \
+         SkEventTracer::kEnabledForEventCallback_CategoryGroupEnabledFlags)
+
+// Macro to efficiently determine if a given category group is enabled.
+#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
+    do { \
+        INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+        if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+            *ret = true; \
+        } else { \
+            *ret = false; \
+        } \
+    } while (0)
+
+// Macro to efficiently determine, through polling, if a new trace has begun.
+#define TRACE_EVENT_IS_NEW_TRACE(ret) \
+    do { \
+        static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
+        int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
+        if (num_traces_recorded != -1 && \
+            num_traces_recorded != \
+                INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
+            INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = \
+                num_traces_recorded; \
+            *ret = true; \
+        } else { \
+            *ret = false; \
+        } \
+    } while (0)
+
+////////////////////////////////////////////////////////////////////////////////
+// Implementation specific tracing API definitions.
+
+// Get a pointer to the enabled state of the given trace category. Only
+// long-lived literal strings should be given as the category group. The
+// returned pointer can be held permanently in a local static for example. If
+// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
+// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
+// between the load of the tracing state and the call to
+// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
+// for best performance when tracing is disabled.
+// const uint8_t*
+//     TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
+#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
+    SkEventTracer::GetInstance()->getCategoryGroupEnabled
+
+// Get the number of times traces have been recorded. This is used to implement
+// the TRACE_EVENT_IS_NEW_TRACE facility.
+// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
+#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
+    SkEventTracer::GetInstance()->getNumTracesRecorded
+
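+// Illustrative sketch (not part of the original header): skipping expensive
+// argument computation unless the category is enabled; computeStats() is
+// hypothetical.
+//   bool enabled;
+//   TRACE_EVENT_CATEGORY_GROUP_ENABLED("skia.gpu", &enabled);
+//   if (enabled) {
+//     TRACE_EVENT_INSTANT1("skia.gpu", "Stats", TRACE_EVENT_SCOPE_THREAD,
+//                          "stats", TRACE_STR_COPY(computeStats().c_str()));
+//   }
+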
+// Add a trace event to the platform tracing system.
+// SkEventTracer::Handle TRACE_EVENT_API_ADD_TRACE_EVENT(
+//     char phase,
+//     const uint8_t* category_group_enabled,
+//     const char* name,
+//     uint64_t id,
+//     int num_args,
+//     const char** arg_names,
+//     const uint8_t* arg_types,
+//     const uint64_t* arg_values,
+//     unsigned char flags)
+#define TRACE_EVENT_API_ADD_TRACE_EVENT \
+    SkEventTracer::GetInstance()->addTraceEvent
+
+// Set the duration field of a COMPLETE trace event.
+// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
+//     const uint8_t* category_group_enabled,
+//     const char* name,
+//     SkEventTracer::Handle id)
+#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
+    SkEventTracer::GetInstance()->updateTraceEventDuration
+
+// These operations are atomic in the Chrome tracing implementation
+// to cater to ARM's weak memory consistency; we're just doing read/
+// write here because it's not strictly needed for correctness.
+// So says Nat.
+// FIXME
+
+#define TRACE_EVENT_API_ATOMIC_WORD intptr_t
+#define TRACE_EVENT_API_ATOMIC_LOAD(var) (*(&var))
+#define TRACE_EVENT_API_ATOMIC_STORE(var, value) (var = value)
+
+// Defines visibility for classes in trace_event.h
+#define TRACE_EVENT_API_CLASS_EXPORT SK_API
+
+// The thread buckets for the sampling profiler.
+TRACE_EVENT_API_CLASS_EXPORT extern \
+    TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
+
+#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
+    g_trace_state[thread_bucket]
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Implementation detail: trace event macros create temporary variables
+// to keep instrumentation overhead low. These macros give each temporary
+// variable a unique name based on the line number to prevent name collisions.
+#define INTERNAL_TRACE_EVENT_UID3(a, b) \
+    trace_event_unique_##a##b
+#define INTERNAL_TRACE_EVENT_UID2(a, b) \
+    INTERNAL_TRACE_EVENT_UID3(a, b)
+#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
+    INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
+
+// Implementation detail: internal macro to create static category.
+// No barriers are needed, because this code is designed to operate safely
+// even when the unsigned char* points to garbage data (which may be the case
+// on processors without cache coherency).
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
+    category_group, atomic, category_group_enabled) \
+    category_group_enabled = \
+        reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD( \
+            atomic)); \
+    if (!category_group_enabled) { \
+        category_group_enabled = \
+            TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
+        TRACE_EVENT_API_ATOMIC_STORE(atomic, \
+            reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
+                category_group_enabled)); \
+    }
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
+    static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
+    const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(category_group, \
+        INTERNAL_TRACE_EVENT_UID(atomic), \
+        INTERNAL_TRACE_EVENT_UID(category_group_enabled));
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+    do { \
+        INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+        if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+            skia::tracing_internals::AddTraceEvent( \
+                phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+                skia::tracing_internals::kNoEventId, flags, ##__VA_ARGS__); \
+        } \
+    } while (0)
+
+// Implementation detail: internal macro to create static category and add begin
+// event if the category is enabled. Also adds the end event when the scope
+// ends.
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+    skia::tracing_internals::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+        SkEventTracer::Handle h = skia::tracing_internals::AddTraceEvent( \
+            TRACE_EVENT_PHASE_COMPLETE, \
+            INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+            name, skia::tracing_internals::kNoEventId, \
+            TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+        INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+            INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+    }
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+                                         flags, ...) \
+    do { \
+        INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+        if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+            unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+            skia::tracing_internals::TraceID trace_event_trace_id( \
+                id, &trace_event_flags); \
+            skia::tracing_internals::AddTraceEvent( \
+                phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+                name, trace_event_trace_id.data(), trace_event_flags, \
+                ##__VA_ARGS__); \
+        } \
+    } while (0)
+
+// Implementation detail: internal macro to create static category and add
+// event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(phase, \
+        category_group, name, id, thread_id, timestamp, flags, ...) \
+    do { \
+        INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+        if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+            unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
+            skia::tracing_internals::TraceID trace_event_trace_id( \
+                id, &trace_event_flags); \
+            skia::tracing_internals::AddTraceEventWithThreadIdAndTimestamp( \
+                phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+                name, trace_event_trace_id.data(), \
+                thread_id, base::TimeTicks::FromInternalValue(timestamp), \
+                trace_event_flags, ##__VA_ARGS__); \
+        } \
+    } while (0)
+
+// Notes regarding the following definitions:
+// New values can be added and propagated to third party libraries, but existing
+// definitions must never be changed, because third party libraries may use old
+// definitions.
+
+// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
+#define TRACE_EVENT_PHASE_BEGIN    ('B')
+#define TRACE_EVENT_PHASE_END      ('E')
+#define TRACE_EVENT_PHASE_COMPLETE ('X')
+#define TRACE_EVENT_PHASE_INSTANT  ('i')
+#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO ('T')
+#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST ('p')
+#define TRACE_EVENT_PHASE_ASYNC_END ('F')
+#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
+#define TRACE_EVENT_PHASE_FLOW_STEP ('t')
+#define TRACE_EVENT_PHASE_FLOW_END ('f')
+#define TRACE_EVENT_PHASE_METADATA ('M')
+#define TRACE_EVENT_PHASE_COUNTER  ('C')
+#define TRACE_EVENT_PHASE_SAMPLE   ('P')
+#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
+#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
+#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')
+
+// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
+#define TRACE_EVENT_FLAG_NONE         (static_cast<unsigned char>(0))
+#define TRACE_EVENT_FLAG_COPY         (static_cast<unsigned char>(1 << 0))
+#define TRACE_EVENT_FLAG_HAS_ID       (static_cast<unsigned char>(1 << 1))
+#define TRACE_EVENT_FLAG_MANGLE_ID    (static_cast<unsigned char>(1 << 2))
+#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned char>(1 << 3))
+
+#define TRACE_EVENT_FLAG_SCOPE_MASK   (static_cast<unsigned char>( \
+    TRACE_EVENT_FLAG_SCOPE_OFFSET | (TRACE_EVENT_FLAG_SCOPE_OFFSET << 1)))
+
+// Type values for identifying types in the TraceValue union.
+#define TRACE_VALUE_TYPE_BOOL         (static_cast<unsigned char>(1))
+#define TRACE_VALUE_TYPE_UINT         (static_cast<unsigned char>(2))
+#define TRACE_VALUE_TYPE_INT          (static_cast<unsigned char>(3))
+#define TRACE_VALUE_TYPE_DOUBLE       (static_cast<unsigned char>(4))
+#define TRACE_VALUE_TYPE_POINTER      (static_cast<unsigned char>(5))
+#define TRACE_VALUE_TYPE_STRING       (static_cast<unsigned char>(6))
+#define TRACE_VALUE_TYPE_COPY_STRING  (static_cast<unsigned char>(7))
+#define TRACE_VALUE_TYPE_CONVERTABLE  (static_cast<unsigned char>(8))
+
+// Enum reflecting the scope of an INSTANT event. Must fit within
+// TRACE_EVENT_FLAG_SCOPE_MASK.
+#define TRACE_EVENT_SCOPE_GLOBAL  (static_cast<unsigned char>(0 << 3))
+#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
+#define TRACE_EVENT_SCOPE_THREAD  (static_cast<unsigned char>(2 << 3))
+
+#define TRACE_EVENT_SCOPE_NAME_GLOBAL  ('g')
+#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
+#define TRACE_EVENT_SCOPE_NAME_THREAD  ('t')
+
+namespace skia {
+namespace tracing_internals {
+
+// Specify these values when the corresponding argument of AddTraceEvent is not
+// used.
+const int kZeroNumArgs = 0;
+const uint64_t kNoEventId = 0;
+
+// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
+// are by default mangled with the Process ID so that they are unlikely to
+// collide when the same pointer is used on different processes.
+class TraceID {
+ public:
+    class DontMangle {
+     public:
+        explicit DontMangle(const void* id)
+            : data_(static_cast<uint64_t>(
+                        reinterpret_cast<unsigned long>(id))) {}
+        explicit DontMangle(uint64_t id) : data_(id) {}
+        explicit DontMangle(unsigned int id) : data_(id) {}
+        explicit DontMangle(unsigned short id) : data_(id) {}
+        explicit DontMangle(unsigned char id) : data_(id) {}
+        explicit DontMangle(long long id)
+            : data_(static_cast<uint64_t>(id)) {}
+        explicit DontMangle(long id)
+            : data_(static_cast<uint64_t>(id)) {}
+        explicit DontMangle(int id)
+            : data_(static_cast<uint64_t>(id)) {}
+        explicit DontMangle(short id)
+            : data_(static_cast<uint64_t>(id)) {}
+        explicit DontMangle(signed char id)
+            : data_(static_cast<uint64_t>(id)) {}
+        uint64_t data() const { return data_; }
+     private:
+        uint64_t data_;
+    };
+
+    class ForceMangle {
+     public:
+        explicit ForceMangle(uint64_t id) : data_(id) {}
+        explicit ForceMangle(unsigned int id) : data_(id) {}
+        explicit ForceMangle(unsigned short id) : data_(id) {}
+        explicit ForceMangle(unsigned char id) : data_(id) {}
+        explicit ForceMangle(long long id)
+            : data_(static_cast<uint64_t>(id)) {}
+        explicit ForceMangle(long id)
+            : data_(static_cast<uint64_t>(id)) {}
+        explicit ForceMangle(int id)
+            : data_(static_cast<uint64_t>(id)) {}
+        explicit ForceMangle(short id)
+            : data_(static_cast<uint64_t>(id)) {}
+        explicit ForceMangle(signed char id)
+            : data_(static_cast<uint64_t>(id)) {}
+        uint64_t data() const { return data_; }
+     private:
+        uint64_t data_;
+    };
+
+    TraceID(const void* id, unsigned char* flags)
+        : data_(static_cast<uint64_t>(
+                    reinterpret_cast<unsigned long>(id))) {
+        *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+    }
+    TraceID(ForceMangle id, unsigned char* flags) : data_(id.data()) {
+        *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
+    }
+    TraceID(DontMangle id, unsigned char* flags) : data_(id.data()) {
+    }
+    TraceID(uint64_t id, unsigned char* flags)
+        : data_(id) { (void)flags; }
+    TraceID(unsigned int id, unsigned char* flags)
+        : data_(id) { (void)flags; }
+    TraceID(unsigned short id, unsigned char* flags)
+        : data_(id) { (void)flags; }
+    TraceID(unsigned char id, unsigned char* flags)
+        : data_(id) { (void)flags; }
+    TraceID(long long id, unsigned char* flags)
+        : data_(static_cast<uint64_t>(id)) { (void)flags; }
+    TraceID(long id, unsigned char* flags)
+        : data_(static_cast<uint64_t>(id)) { (void)flags; }
+    TraceID(int id, unsigned char* flags)
+        : data_(static_cast<uint64_t>(id)) { (void)flags; }
+    TraceID(short id, unsigned char* flags)
+        : data_(static_cast<uint64_t>(id)) { (void)flags; }
+    TraceID(signed char id, unsigned char* flags)
+        : data_(static_cast<uint64_t>(id)) { (void)flags; }
+
+    uint64_t data() const { return data_; }
+
+ private:
+    uint64_t data_;
+};
+
+// Simple union to store various types as uint64_t.
+union TraceValueUnion {
+    bool as_bool;
+    uint64_t as_uint;
+    long long as_int;
+    double as_double;
+    const void* as_pointer;
+    const char* as_string;
+};
+
+// Simple container for const char* that should be copied instead of retained.
+class TraceStringWithCopy {
+ public:
+    explicit TraceStringWithCopy(const char* str) : str_(str) {}
+    operator const char* () const { return str_; }
+ private:
+    const char* str_;
+};
+
+// Define SetTraceValue for each allowed type. It stores the type and
+// value in the return arguments. This allows this API to avoid declaring any
+// structures so that it is portable to third_party libraries.
+#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
+                                         union_member, \
+                                         value_type_id) \
+    static inline void SetTraceValue( \
+        actual_type arg, \
+        unsigned char* type, \
+        uint64_t* value) { \
+        TraceValueUnion type_value; \
+        type_value.union_member = arg; \
+        *type = value_type_id; \
+        *value = type_value.as_uint; \
+    }
+// Simpler form for int types that can be safely cast.
// Simpler form for int types that can be safely cast.
#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
                                             value_type_id) \
    static inline void SetTraceValue( \
        actual_type arg, \
        unsigned char* type, \
        uint64_t* value) { \
      *type = value_type_id; \
      *value = static_cast<uint64_t>(arg); \
    }

INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
                                 TRACE_VALUE_TYPE_POINTER)
INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
                                 TRACE_VALUE_TYPE_STRING)
INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
                                 TRACE_VALUE_TYPE_COPY_STRING)

#undef INTERNAL_DECLARE_SET_TRACE_VALUE
#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
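// For example, this is how the AddTraceEvent helpers below pack each
// argument into a (type, value) pair (a sketch):
//   unsigned char type;
//   uint64_t value;
//   SetTraceValue(42, &type, &value);        // type == TRACE_VALUE_TYPE_INT
//   SetTraceValue(2.5, &type, &value);       // type == TRACE_VALUE_TYPE_DOUBLE
//   SetTraceValue("literal", &type, &value); // TRACE_VALUE_TYPE_STRING;
//                                            // only the pointer is stored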
// The AddTraceEvent function and its template overloads are defined here
// instead of in the macros because the arg_values could be temporary objects,
// such as std::string. In order to store pointers to the internal c_str() and
// pass them through to the tracing API, the arg_values must remain alive
// throughout these procedures.

static inline SkEventTracer::Handle
AddTraceEvent(
    char phase,
    const uint8_t* category_group_enabled,
    const char* name,
    uint64_t id,
    unsigned char flags) {
  return TRACE_EVENT_API_ADD_TRACE_EVENT(
      phase, category_group_enabled, name, id,
      kZeroNumArgs, NULL, NULL, NULL, flags);
}

template<class ARG1_TYPE>
static inline SkEventTracer::Handle
AddTraceEvent(
    char phase,
    const uint8_t* category_group_enabled,
    const char* name,
    uint64_t id,
    unsigned char flags,
    const char* arg1_name,
    const ARG1_TYPE& arg1_val) {
  const int num_args = 1;
  unsigned char arg_types[1];
  uint64_t arg_values[1];
  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
  return TRACE_EVENT_API_ADD_TRACE_EVENT(
      phase, category_group_enabled, name, id,
      num_args, &arg1_name, arg_types, arg_values, flags);
}

template<class ARG1_TYPE, class ARG2_TYPE>
static inline SkEventTracer::Handle
AddTraceEvent(
    char phase,
    const uint8_t* category_group_enabled,
    const char* name,
    uint64_t id,
    unsigned char flags,
    const char* arg1_name,
    const ARG1_TYPE& arg1_val,
    const char* arg2_name,
    const ARG2_TYPE& arg2_val) {
  const int num_args = 2;
  const char* arg_names[2] = { arg1_name, arg2_name };
  unsigned char arg_types[2];
  uint64_t arg_values[2];
  SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
  SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
  return TRACE_EVENT_API_ADD_TRACE_EVENT(
      phase, category_group_enabled, name, id,
      num_args, arg_names, arg_types, arg_values, flags);
}

// Used by TRACE_EVENTx macros. Do not use directly.
class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
 public:
  // Note: members of data_ intentionally left uninitialized. See Initialize.
  ScopedTracer() : p_data_(NULL) {}

  ~ScopedTracer() {
    if (p_data_ && *data_.category_group_enabled)
      TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
          data_.category_group_enabled, data_.name, data_.event_handle);
  }

  void Initialize(const uint8_t* category_group_enabled,
                  const char* name,
                  SkEventTracer::Handle event_handle) {
    data_.category_group_enabled = category_group_enabled;
    data_.name = name;
    data_.event_handle = event_handle;
    p_data_ = &data_;
  }

 private:
  // This Data struct workaround is to avoid initializing all the members
  // in Data during construction of this object, since this object is always
  // constructed, even when tracing is disabled. If the members of Data were
  // members of this class instead, compiler warnings occur about potential
  // uninitialized accesses.
  struct Data {
    const uint8_t* category_group_enabled;
    const char* name;
    SkEventTracer::Handle event_handle;
  };
  Data* p_data_;
  Data data_;
};
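// A rough sketch of how the scoped TRACE_EVENTx macro expansion drives this
// class (simplified; |category_group_enabled| stands for the cached
// enabled-flag pointer the expansion fetches first, and "name" is a
// placeholder):
//   skia::tracing_internals::ScopedTracer tracer;
//   if (*category_group_enabled) {
//     SkEventTracer::Handle h = skia::tracing_internals::AddTraceEvent(
//         TRACE_EVENT_PHASE_COMPLETE, category_group_enabled, "name",
//         skia::tracing_internals::kNoEventId, TRACE_EVENT_FLAG_NONE);
//     tracer.Initialize(category_group_enabled, "name", h);
//   }
// The destructor then emits the matching duration update only if tracing was
// actually on when the scope was entered.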
// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
 public:
  ScopedTraceBinaryEfficient(const char* category_group, const char* name);
  ~ScopedTraceBinaryEfficient();

 private:
  const uint8_t* category_group_enabled_;
  const char* name_;
  SkEventTracer::Handle event_handle_;
};

// This macro generates less code than TRACE_EVENT0 but is also
// slower to execute when tracing is off. It should generally only be
// used with code that is seldom executed or conditionally executed
// when debugging.
// For now the category_group must be "gpu".
#define TRACE_EVENT_BINARY_EFFICIENT0(category_group, name) \
    skia::tracing_internals::ScopedTraceBinaryEfficient \
        INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);

// TraceEventSamplingStateScope records the current sampling state
// and sets a new sampling state. When the scope exits, it restores
// the sampling state it recorded on entry.
template<size_t BucketNumber>
class TraceEventSamplingStateScope {
 public:
  TraceEventSamplingStateScope(const char* category_and_name) {
    previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
    TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
  }

  ~TraceEventSamplingStateScope() {
    TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
  }

  static inline const char* Current() {
    return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
        g_trace_state[BucketNumber]));
  }

  static inline void Set(const char* category_and_name) {
    TRACE_EVENT_API_ATOMIC_STORE(
        g_trace_state[BucketNumber],
        reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
            const_cast<char*>(category_and_name)));
  }

 private:
  const char* previous_state_;
};

}  // namespace tracing_internals
}  // namespace skia

#endif