gfx/skia/trunk/src/core/SkTraceEvent.h

Sat, 03 Jan 2015 20:18:00 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Sat, 03 Jan 2015 20:18:00 +0100
branch
TOR_BUG_3246
changeset 7
129ffea94266
permissions
-rw-r--r--

Conditionally enable double key logic according to:
private browsing mode or privacy.thirdparty.isolate preference and
implement in GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie users to test
the condition and pass a nullptr when disabling double-key logic.

michael@0 1 // Copyright (c) 2014 Google Inc.
michael@0 2 //
michael@0 3 // Use of this source code is governed by a BSD-style license that can be
michael@0 4 // found in the LICENSE file.
michael@0 5
michael@0 6 // This header file defines the set of trace_event macros without specifying
michael@0 7 // how the events actually get collected and stored. If you need to expose trace
michael@0 8 // events to some other universe, you can copy-and-paste this file as well as
michael@0 9 // trace_event.h, modifying the macros contained there as necessary for the
michael@0 10 // target platform. The end result is that multiple libraries can funnel events
michael@0 11 // through to a shared trace event collector.
michael@0 12
michael@0 13 // Trace events are for tracking application performance and resource usage.
michael@0 14 // Macros are provided to track:
michael@0 15 // Begin and end of function calls
michael@0 16 // Counters
michael@0 17 //
michael@0 18 // Events are issued against categories. Whereas LOG's
michael@0 19 // categories are statically defined, TRACE categories are created
michael@0 20 // implicitly with a string. For example:
michael@0 21 // TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent",
michael@0 22 // TRACE_EVENT_SCOPE_THREAD)
michael@0 23 //
michael@0 24 // It is often the case that one trace may belong in multiple categories at the
michael@0 25 // same time. The first argument to the trace can be a comma-separated list of
michael@0 26 // categories, forming a category group, like:
michael@0 27 //
michael@0 28 // TRACE_EVENT_INSTANT0("input,views", "OnMouseOver", TRACE_EVENT_SCOPE_THREAD)
michael@0 29 //
michael@0 30 // We can enable/disable tracing of OnMouseOver by enabling/disabling either
michael@0 31 // category.
michael@0 32 //
michael@0 33 // Events can be INSTANT, or can be pairs of BEGIN and END in the same scope:
michael@0 34 // TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
michael@0 35 // doSomethingCostly()
michael@0 36 // TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
michael@0 37 // Note: our tools can't always determine the correct BEGIN/END pairs unless
michael@0 38 // these are used in the same scope. Use ASYNC_BEGIN/ASYNC_END macros if you
michael@0 39 // need them to be in separate scopes.
michael@0 40 //
michael@0 41 // A common use case is to trace entire function scopes. This
michael@0 42 // issues a trace BEGIN and END automatically:
michael@0 43 // void doSomethingCostly() {
michael@0 44 // TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
michael@0 45 // ...
michael@0 46 // }
michael@0 47 //
michael@0 48 // Additional parameters can be associated with an event:
michael@0 49 // void doSomethingCostly2(int howMuch) {
michael@0 50 // TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
michael@0 51 // "howMuch", howMuch);
michael@0 52 // ...
michael@0 53 // }
michael@0 54 //
michael@0 55 // The trace system will automatically add to this information the
michael@0 56 // current process id, thread id, and a timestamp in microseconds.
michael@0 57 //
michael@0 58 // To trace an asynchronous procedure such as an IPC send/receive, use
michael@0 59 // ASYNC_BEGIN and ASYNC_END:
michael@0 60 // [single threaded sender code]
michael@0 61 // static int send_count = 0;
michael@0 62 // ++send_count;
michael@0 63 // TRACE_EVENT_ASYNC_BEGIN0("ipc", "message", send_count);
michael@0 64 // Send(new MyMessage(send_count));
michael@0 65 // [receive code]
michael@0 66 // void OnMyMessage(send_count) {
michael@0 67 // TRACE_EVENT_ASYNC_END0("ipc", "message", send_count);
michael@0 68 // }
michael@0 69 // The third parameter is a unique ID to match ASYNC_BEGIN/ASYNC_END pairs.
michael@0 70 // ASYNC_BEGIN and ASYNC_END can occur on any thread of any traced process.
michael@0 71 // Pointers can be used for the ID parameter, and they will be mangled
michael@0 72 // internally so that the same pointer on two different processes will not
michael@0 73 // match. For example:
michael@0 74 // class MyTracedClass {
michael@0 75 // public:
michael@0 76 // MyTracedClass() {
michael@0 77 // TRACE_EVENT_ASYNC_BEGIN0("category", "MyTracedClass", this);
michael@0 78 // }
michael@0 79 // ~MyTracedClass() {
michael@0 80 // TRACE_EVENT_ASYNC_END0("category", "MyTracedClass", this);
michael@0 81 // }
michael@0 82 // }
michael@0 83 //
michael@0 84 // Trace event also supports counters, which is a way to track a quantity
michael@0 85 // as it varies over time. Counters are created with the following macro:
michael@0 86 // TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter", g_myCounterValue);
michael@0 87 //
michael@0 88 // Counters are process-specific. The macro itself can be issued from any
michael@0 89 // thread, however.
michael@0 90 //
michael@0 91 // Sometimes, you want to track two counters at once. You can do this with two
michael@0 92 // counter macros:
michael@0 93 // TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter0", g_myCounterValue[0]);
michael@0 94 // TRACE_COUNTER1("MY_SUBSYSTEM", "myCounter1", g_myCounterValue[1]);
michael@0 95 // Or you can do it with a combined macro:
michael@0 96 // TRACE_COUNTER2("MY_SUBSYSTEM", "myCounter",
michael@0 97 // "bytesPinned", g_myCounterValue[0],
michael@0 98 // "bytesAllocated", g_myCounterValue[1]);
michael@0 99 // This indicates to the tracing UI that these counters should be displayed
michael@0 100 // in a single graph, as a summed area chart.
michael@0 101 //
michael@0 102 // Since counters are in a global namespace, you may want to disambiguate with a
michael@0 103 // unique ID, by using the TRACE_COUNTER_ID* variations.
michael@0 104 //
michael@0 105 // By default, trace collection is compiled in, but turned off at runtime.
michael@0 106 // Collecting trace data is the responsibility of the embedding
michael@0 107 // application. In Chrome's case, navigating to about:tracing will turn on
michael@0 108 // tracing and display data collected across all active processes.
michael@0 109 //
michael@0 110 //
michael@0 111 // Memory scoping note:
michael@0 112 // Tracing copies the pointers, not the string content, of the strings passed
michael@0 113 // in for category_group, name, and arg_names. Thus, the following code will
michael@0 114 // cause problems:
michael@0 115 // char* str = strdup("importantName");
michael@0 116 // TRACE_EVENT_INSTANT0("SUBSYSTEM", str); // BAD!
michael@0 117 // free(str); // Trace system now has dangling pointer
michael@0 118 //
michael@0 119 // To avoid this issue with the |name| and |arg_name| parameters, use the
michael@0 120 // TRACE_EVENT_COPY_XXX overloads of the macros at additional runtime overhead.
michael@0 121 // Notes: The category must always be in a long-lived char* (i.e. static const).
michael@0 122 // The |arg_values|, when used, are always deep copied with the _COPY
michael@0 123 // macros.
michael@0 124 //
michael@0 125 // When are string argument values copied:
michael@0 126 // const char* arg_values are only referenced by default:
michael@0 127 // TRACE_EVENT1("category", "name",
michael@0 128 // "arg1", "literal string is only referenced");
michael@0 129 // Use TRACE_STR_COPY to force copying of a const char*:
michael@0 130 // TRACE_EVENT1("category", "name",
michael@0 131 // "arg1", TRACE_STR_COPY("string will be copied"));
michael@0 132 // std::string arg_values are always copied:
michael@0 133 // TRACE_EVENT1("category", "name",
michael@0 134 // "arg1", std::string("string will be copied"));
michael@0 135 //
michael@0 136 //
michael@0 137 // Thread Safety:
michael@0 138 // A thread safe singleton and mutex are used for thread safety. Category
michael@0 139 // enabled flags are used to limit the performance impact when the system
michael@0 140 // is not enabled.
michael@0 141 //
michael@0 142 // TRACE_EVENT macros first cache a pointer to a category. The categories are
michael@0 143 // statically allocated and safe at all times, even after exit. Fetching a
michael@0 144 // category is protected by the TraceLog::lock_. Multiple threads initializing
michael@0 145 // the static variable is safe, as they will be serialized by the lock and
michael@0 146 // multiple calls will return the same pointer to the category.
michael@0 147 //
michael@0 148 // Then the category_group_enabled flag is checked. This is a unsigned char, and
michael@0 149 // not intended to be multithread safe. It optimizes access to AddTraceEvent
michael@0 150 // which is threadsafe internally via TraceLog::lock_. The enabled flag may
michael@0 151 // cause some threads to incorrectly call or skip calling AddTraceEvent near
michael@0 152 // the time of the system being enabled or disabled. This is acceptable as
michael@0 153 // we tolerate some data loss while the system is being enabled/disabled and
michael@0 154 // because AddTraceEvent is threadsafe internally and checks the enabled state
michael@0 155 // again under lock.
michael@0 156 //
michael@0 157 // Without the use of these static category pointers and enabled flags all
michael@0 158 // trace points would carry a significant performance cost of acquiring a lock
michael@0 159 // and resolving the category.
michael@0 160
michael@0 161 #ifndef SkTraceEvent_DEFINED
michael@0 162 #define SkTraceEvent_DEFINED
michael@0 163
michael@0 164 #include "SkEventTracer.h"
michael@0 165
michael@0 166 // By default, const char* argument values are assumed to have long-lived scope
michael@0 167 // and will not be copied. Use this macro to force a const char* to be copied.
// NOTE(review): TraceStringWithCopy is declared elsewhere in this file; the
// deep copy presumably happens when the event is recorded -- confirm against
// the TraceStringWithCopy definition.
michael@0 168 #define TRACE_STR_COPY(str) \
michael@0 169 skia::tracing_internals::TraceStringWithCopy(str)
michael@0 170
michael@0 171 // By default, uint64 ID argument values are not mangled with the Process ID in
michael@0 172 // TRACE_EVENT_ASYNC macros. Use this macro to force Process ID mangling.
michael@0 173 #define TRACE_ID_MANGLE(id) \
michael@0 174 skia::tracing_internals::TraceID::ForceMangle(id)
michael@0 175
michael@0 176 // By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC
michael@0 177 // macros. Use this macro to prevent Process ID mangling.
michael@0 178 #define TRACE_ID_DONT_MANGLE(id) \
michael@0 179 skia::tracing_internals::TraceID::DontMangle(id)
michael@0 180
michael@0 181 // Records a pair of begin and end events called "name" for the current
michael@0 182 // scope, with 0, 1 or 2 associated arguments. If the category is not
michael@0 183 // enabled, then this does nothing.
michael@0 184 // - category and name strings must have application lifetime (statics or
michael@0 185 // literals). They may not include " chars.
michael@0 186 #define TRACE_EVENT0(category_group, name) \
michael@0 187 INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name)
michael@0 188 #define TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
michael@0 189 INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, arg1_name, arg1_val)
michael@0 190 #define TRACE_EVENT2( \
michael@0 191 category_group, name, arg1_name, arg1_val, arg2_name, arg2_val) \
michael@0 192 INTERNAL_TRACE_EVENT_ADD_SCOPED( \
michael@0 193 category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 194
michael@0 195 // Records events like TRACE_EVENT2 but uses |memory_tag| for memory tracing.
michael@0 196 // Use this where |name| is too generic to accurately aggregate allocations.
// Note: in this implementation |memory_tag| is accepted for source
// compatibility but ignored -- the expansion below does not forward it.
michael@0 197 #define TRACE_EVENT_WITH_MEMORY_TAG2( \
michael@0 198 category, name, memory_tag, arg1_name, arg1_val, arg2_name, arg2_val) \
michael@0 199 INTERNAL_TRACE_EVENT_ADD_SCOPED( \
michael@0 200 category, name, arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 201
michael@0 202 // UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
michael@0 203 // included in official builds.
michael@0 204
// NOTE(review): OFFICIAL_BUILD is tested with #if, so when it is undefined it
// evaluates as 0 and TRACING_IS_OFFICIAL_BUILD defaults to 0 below -- confirm
// that is the intended default for this port.
michael@0 205 #if OFFICIAL_BUILD
michael@0 206 #undef TRACING_IS_OFFICIAL_BUILD
michael@0 207 #define TRACING_IS_OFFICIAL_BUILD 1
michael@0 208 #elif !defined(TRACING_IS_OFFICIAL_BUILD)
michael@0 209 #define TRACING_IS_OFFICIAL_BUILD 0
michael@0 210 #endif
michael@0 211
// In official builds every UNSHIPPED_* macro compiles away to the no-op
// expression (void)0, which still parses wherever a statement is expected.
michael@0 212 #if TRACING_IS_OFFICIAL_BUILD
michael@0 213 #define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
michael@0 214 #define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
michael@0 215 (void)0
michael@0 216 #define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
michael@0 217 arg2_name, arg2_val) (void)0
michael@0 218 #define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
michael@0 219 #define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, \
michael@0 220 arg1_name, arg1_val) (void)0
michael@0 221 #define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, \
michael@0 222 arg1_name, arg1_val, \
michael@0 223 arg2_name, arg2_val) (void)0
michael@0 224 #else
michael@0 225 #define UNSHIPPED_TRACE_EVENT0(category_group, name) \
michael@0 226 TRACE_EVENT0(category_group, name)
michael@0 227 #define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
michael@0 228 TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
michael@0 229 #define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
michael@0 230 arg2_name, arg2_val) \
michael@0 231 TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 232 #define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
michael@0 233 TRACE_EVENT_INSTANT0(category_group, name, scope)
michael@0 234 #define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, \
michael@0 235 arg1_name, arg1_val) \
michael@0 236 TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
michael@0 237 #define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, \
michael@0 238 arg1_name, arg1_val, \
michael@0 239 arg2_name, arg2_val) \
michael@0 240 TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
michael@0 241 arg2_name, arg2_val)
michael@0 242 #endif
michael@0 243
michael@0 244 // Records a single event called "name" immediately, with 0, 1 or 2
michael@0 245 // associated arguments. If the category is not enabled, then this
michael@0 246 // does nothing.
michael@0 247 // - category and name strings must have application lifetime (statics or
michael@0 248 // literals). They may not include " chars.
// - |scope| (e.g. TRACE_EVENT_SCOPE_THREAD) is OR-ed into the event flags
// word together with the NONE/COPY flag in the expansions below.
michael@0 249 #define TRACE_EVENT_INSTANT0(category_group, name, scope) \
michael@0 250 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
michael@0 251 category_group, name, TRACE_EVENT_FLAG_NONE | scope)
michael@0 252 #define TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val) \
michael@0 253 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
michael@0 254 category_group, name, TRACE_EVENT_FLAG_NONE | scope, \
michael@0 255 arg1_name, arg1_val)
michael@0 256 #define TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
michael@0 257 arg2_name, arg2_val) \
michael@0 258 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
michael@0 259 category_group, name, TRACE_EVENT_FLAG_NONE | scope, \
michael@0 260 arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 261 #define TRACE_EVENT_COPY_INSTANT0(category_group, name, scope) \
michael@0 262 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
michael@0 263 category_group, name, TRACE_EVENT_FLAG_COPY | scope)
michael@0 264 #define TRACE_EVENT_COPY_INSTANT1(category_group, name, scope, \
michael@0 265 arg1_name, arg1_val) \
michael@0 266 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
michael@0 267 category_group, name, TRACE_EVENT_FLAG_COPY | scope, arg1_name, \
michael@0 268 arg1_val)
michael@0 269 #define TRACE_EVENT_COPY_INSTANT2(category_group, name, scope, \
michael@0 270 arg1_name, arg1_val, \
michael@0 271 arg2_name, arg2_val) \
michael@0 272 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, \
michael@0 273 category_group, name, TRACE_EVENT_FLAG_COPY | scope, \
michael@0 274 arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 275
michael@0 276 // Sets the current sample state to the given category and name (both must be
michael@0 277 // constant strings). These states are intended for a sampling profiler.
michael@0 278 // Implementation note: we store category and name together because we don't
michael@0 279 // want the inconsistency/expense of storing two pointers.
michael@0 280 // |thread_bucket| is [0..2] and is used to statically isolate samples in one
michael@0 281 // thread from others.
// Note: the two string literals are concatenated with an embedded '\0'
// separator (category "\0" name), yielding a single stored pointer.
michael@0 282 #define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
michael@0 283 bucket_number, category, name) \
michael@0 284 skia::tracing_internals:: \
michael@0 285 TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)
michael@0 286
michael@0 287 // Returns a current sampling state of the given bucket.
michael@0 288 #define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
michael@0 289 skia::tracing_internals::TraceEventSamplingStateScope<bucket_number>::Current()
michael@0 290
michael@0 291 // Creates a scope of a sampling state of the given bucket.
michael@0 292 //
michael@0 293 // { // The sampling state is set within this scope.
michael@0 294 // TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, "category", "name");
michael@0 295 // ...;
michael@0 296 // }
// Note: the expansion declares a local RAII object and already ends with a
// semicolon, so the macro forms a complete declaration at the call site.
michael@0 297 #define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET( \
michael@0 298 bucket_number, category, name) \
michael@0 299 skia::tracing_internals::TraceEventSamplingStateScope<bucket_number> \
michael@0 300 traceEventSamplingScope(category "\0" name);
michael@0 301
michael@0 302 // Syntactic sugars for the sampling tracing in the main thread.
michael@0 303 #define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
michael@0 304 TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
michael@0 305 #define TRACE_EVENT_GET_SAMPLING_STATE() \
michael@0 306 TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
michael@0 307 #define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
michael@0 308 TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
michael@0 309
michael@0 310
michael@0 311 // Records a single BEGIN event called "name" immediately, with 0, 1 or 2
michael@0 312 // associated arguments. If the category is not enabled, then this
michael@0 313 // does nothing.
michael@0 314 // - category and name strings must have application lifetime (statics or
michael@0 315 // literals). They may not include " chars.
// - the _COPY_ variants pass TRACE_EVENT_FLAG_COPY so |name| and the argument
// names/values are deep copied (see the "Memory scoping note" above).
michael@0 316 #define TRACE_EVENT_BEGIN0(category_group, name) \
michael@0 317 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
michael@0 318 category_group, name, TRACE_EVENT_FLAG_NONE)
michael@0 319 #define TRACE_EVENT_BEGIN1(category_group, name, arg1_name, arg1_val) \
michael@0 320 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
michael@0 321 category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
michael@0 322 #define TRACE_EVENT_BEGIN2(category_group, name, arg1_name, arg1_val, \
michael@0 323 arg2_name, arg2_val) \
michael@0 324 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
michael@0 325 category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
michael@0 326 arg2_name, arg2_val)
michael@0 327 #define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
michael@0 328 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
michael@0 329 category_group, name, TRACE_EVENT_FLAG_COPY)
michael@0 330 #define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
michael@0 331 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
michael@0 332 category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
michael@0 333 #define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
michael@0 334 arg2_name, arg2_val) \
michael@0 335 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, \
michael@0 336 category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
michael@0 337 arg2_name, arg2_val)
michael@0 338
michael@0 339 // Similar to TRACE_EVENT_BEGINx but with a custom |timestamp| provided.
michael@0 340 // - |id| is used to match the _BEGIN event with the _END event.
michael@0 341 // Events are considered to match if their category_group, name and id values
michael@0 342 // all match. |id| must either be a pointer or an integer value up to 64 bits.
michael@0 343 // If it's a pointer, the bits will be xored with a hash of the process ID so
michael@0 344 // that the same pointer on two different processes will not collide.
// FIX: |timestamp| was referenced in the expansions below but missing from
// both parameter lists, so every use site expanded to an undeclared
// identifier. The parameter is restored, matching the upstream signature.
// NOTE(review): the phase used here is TRACE_EVENT_PHASE_ASYNC_BEGIN even
// though the macros are named ..._BEGIN_... ; this mirrors the matching
// ..._END_... macros below and is left unchanged -- confirm against upstream.
michael@0 345 #define TRACE_EVENT_BEGIN_WITH_ID_TID_AND_TIMESTAMP0(category_group, \
michael@0 346 name, id, thread_id, timestamp) \
michael@0 347 INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
michael@0 348 TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
michael@0 349 timestamp, TRACE_EVENT_FLAG_NONE)
michael@0 350 #define TRACE_EVENT_COPY_BEGIN_WITH_ID_TID_AND_TIMESTAMP0( \
michael@0 351 category_group, name, id, thread_id, timestamp) \
michael@0 352 INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
michael@0 353 TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, thread_id, \
michael@0 354 timestamp, TRACE_EVENT_FLAG_COPY)
michael@0 355
michael@0 356 // Records a single END event for "name" immediately. If the category
michael@0 357 // is not enabled, then this does nothing.
michael@0 358 // - category and name strings must have application lifetime (statics or
michael@0 359 // literals). They may not include " chars.
// - pair each call with a TRACE_EVENT_BEGINx in the same scope; see the
// BEGIN/END pairing note in the file header above.
michael@0 360 #define TRACE_EVENT_END0(category_group, name) \
michael@0 361 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
michael@0 362 category_group, name, TRACE_EVENT_FLAG_NONE)
michael@0 363 #define TRACE_EVENT_END1(category_group, name, arg1_name, arg1_val) \
michael@0 364 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
michael@0 365 category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
michael@0 366 #define TRACE_EVENT_END2(category_group, name, arg1_name, arg1_val, \
michael@0 367 arg2_name, arg2_val) \
michael@0 368 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
michael@0 369 category_group, name, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
michael@0 370 arg2_name, arg2_val)
michael@0 371 #define TRACE_EVENT_COPY_END0(category_group, name) \
michael@0 372 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
michael@0 373 category_group, name, TRACE_EVENT_FLAG_COPY)
michael@0 374 #define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
michael@0 375 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
michael@0 376 category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
michael@0 377 #define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
michael@0 378 arg2_name, arg2_val) \
michael@0 379 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, \
michael@0 380 category_group, name, TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, \
michael@0 381 arg2_name, arg2_val)
michael@0 382
michael@0 383 // Similar to TRACE_EVENT_ENDx but with a custom |timestamp| provided.
michael@0 384 // - |id| is used to match the _BEGIN event with the _END event.
michael@0 385 // Events are considered to match if their category_group, name and id values
michael@0 386 // all match. |id| must either be a pointer or an integer value up to 64 bits.
michael@0 387 // If it's a pointer, the bits will be xored with a hash of the process ID so
michael@0 388 // that the same pointer on two different processes will not collide.
// FIX: |timestamp| was referenced in the expansions below but missing from
// both parameter lists, so every use site expanded to an undeclared
// identifier. The parameter is restored, matching the upstream signature.
michael@0 389 #define TRACE_EVENT_END_WITH_ID_TID_AND_TIMESTAMP0(category_group, \
michael@0 390 name, id, thread_id, timestamp) \
michael@0 391 INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
michael@0 392 TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
michael@0 393 timestamp, TRACE_EVENT_FLAG_NONE)
michael@0 394 #define TRACE_EVENT_COPY_END_WITH_ID_TID_AND_TIMESTAMP0( \
michael@0 395 category_group, name, id, thread_id, timestamp) \
michael@0 396 INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
michael@0 397 TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, thread_id, \
michael@0 398 timestamp, TRACE_EVENT_FLAG_COPY)
michael@0 399
michael@0 400 // Records the value of a counter called "name" immediately. Value
michael@0 401 // must be representable as a 32 bit integer.
// (each value is narrowed via static_cast<int> below, so wider values
// silently truncate).
michael@0 402 // - category and name strings must have application lifetime (statics or
michael@0 403 // literals). They may not include " chars.
michael@0 404 #define TRACE_COUNTER1(category_group, name, value) \
michael@0 405 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
michael@0 406 category_group, name, TRACE_EVENT_FLAG_NONE, \
michael@0 407 "value", static_cast<int>(value))
michael@0 408 #define TRACE_COPY_COUNTER1(category_group, name, value) \
michael@0 409 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
michael@0 410 category_group, name, TRACE_EVENT_FLAG_COPY, \
michael@0 411 "value", static_cast<int>(value))
michael@0 412
michael@0 413 // Records the values of a multi-parted counter called "name" immediately.
michael@0 414 // The UI will treat value1 and value2 as parts of a whole, displaying their
michael@0 415 // values as a stacked-bar chart.
michael@0 416 // - category and name strings must have application lifetime (statics or
michael@0 417 // literals). They may not include " chars.
michael@0 418 #define TRACE_COUNTER2(category_group, name, value1_name, value1_val, \
michael@0 419 value2_name, value2_val) \
michael@0 420 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
michael@0 421 category_group, name, TRACE_EVENT_FLAG_NONE, \
michael@0 422 value1_name, static_cast<int>(value1_val), \
michael@0 423 value2_name, static_cast<int>(value2_val))
michael@0 424 #define TRACE_COPY_COUNTER2(category_group, name, value1_name, value1_val, \
michael@0 425 value2_name, value2_val) \
michael@0 426 INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_COUNTER, \
michael@0 427 category_group, name, TRACE_EVENT_FLAG_COPY, \
michael@0 428 value1_name, static_cast<int>(value1_val), \
michael@0 429 value2_name, static_cast<int>(value2_val))
michael@0 430
michael@0 431 // Records the value of a counter called "name" immediately. Value
michael@0 432 // must be representable as a 32 bit integer.
// (narrowed via static_cast<int> below, like the non-ID counter macros).
michael@0 433 // - category and name strings must have application lifetime (statics or
michael@0 434 // literals). They may not include " chars.
michael@0 435 // - |id| is used to disambiguate counters with the same name. It must either
michael@0 436 // be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
michael@0 437 // will be xored with a hash of the process ID so that the same pointer on
michael@0 438 // two different processes will not collide.
michael@0 439 #define TRACE_COUNTER_ID1(category_group, name, id, value) \
michael@0 440 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
michael@0 441 category_group, name, id, TRACE_EVENT_FLAG_NONE, \
michael@0 442 "value", static_cast<int>(value))
michael@0 443 #define TRACE_COPY_COUNTER_ID1(category_group, name, id, value) \
michael@0 444 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
michael@0 445 category_group, name, id, TRACE_EVENT_FLAG_COPY, \
michael@0 446 "value", static_cast<int>(value))
michael@0 447
michael@0 448 // Records the values of a multi-parted counter called "name" immediately.
michael@0 449 // The UI will treat value1 and value2 as parts of a whole, displaying their
michael@0 450 // values as a stacked-bar chart.
michael@0 451 // - category and name strings must have application lifetime (statics or
michael@0 452 // literals). They may not include " chars.
michael@0 453 // - |id| is used to disambiguate counters with the same name. It must either
michael@0 454 // be a pointer or an integer value up to 64 bits. If it's a pointer, the bits
michael@0 455 // will be xored with a hash of the process ID so that the same pointer on
michael@0 456 // two different processes will not collide.
michael@0 457 #define TRACE_COUNTER_ID2(category_group, name, id, value1_name, value1_val, \
michael@0 458 value2_name, value2_val) \
michael@0 459 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
michael@0 460 category_group, name, id, TRACE_EVENT_FLAG_NONE, \
michael@0 461 value1_name, static_cast<int>(value1_val), \
michael@0 462 value2_name, static_cast<int>(value2_val))
michael@0 463 #define TRACE_COPY_COUNTER_ID2(category_group, name, id, value1_name, \
michael@0 464 value1_val, value2_name, value2_val) \
michael@0 465 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_COUNTER, \
michael@0 466 category_group, name, id, TRACE_EVENT_FLAG_COPY, \
michael@0 467 value1_name, static_cast<int>(value1_val), \
michael@0 468 value2_name, static_cast<int>(value2_val))
michael@0 469
michael@0 470
michael@0 471 // Records a single ASYNC_BEGIN event called "name" immediately, with 0, 1 or 2
michael@0 472 // associated arguments. If the category is not enabled, then this
michael@0 473 // does nothing.
michael@0 474 // - category and name strings must have application lifetime (statics or
michael@0 475 // literals). They may not include " chars.
michael@0 476 // - |id| is used to match the ASYNC_BEGIN event with the ASYNC_END event. ASYNC
michael@0 477 // events are considered to match if their category_group, name and id values
michael@0 478 // all match. |id| must either be a pointer or an integer value up to 64 bits.
michael@0 479 // If it's a pointer, the bits will be xored with a hash of the process ID so
michael@0 480 // that the same pointer on two different processes will not collide.
// (Mangling can be forced or suppressed per-call with TRACE_ID_MANGLE /
// TRACE_ID_DONT_MANGLE, defined near the top of this file.)
michael@0 481 //
michael@0 482 // An asynchronous operation can consist of multiple phases. The first phase is
michael@0 483 // defined by the ASYNC_BEGIN calls. Additional phases can be defined using the
michael@0 484 // ASYNC_STEP_INTO or ASYNC_STEP_PAST macros. The ASYNC_STEP_INTO macro will
michael@0 485 // annotate the block following the call. The ASYNC_STEP_PAST macro will
michael@0 486 // annotate the block prior to the call. Note that any particular event must use
michael@0 487 // only STEP_INTO or STEP_PAST macros; they can not mix and match. When the
michael@0 488 // operation completes, call ASYNC_END.
michael@0 489 //
michael@0 490 // An ASYNC trace typically occurs on a single thread (if not, they will only be
michael@0 491 // drawn on the thread defined in the ASYNC_BEGIN event), but all events in that
michael@0 492 // operation must use the same |name| and |id|. Each step can have its own
michael@0 493 // args.
michael@0 494 #define TRACE_EVENT_ASYNC_BEGIN0(category_group, name, id) \
michael@0 495 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
michael@0 496 category_group, name, id, TRACE_EVENT_FLAG_NONE)
michael@0 497 #define TRACE_EVENT_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
michael@0 498 arg1_val) \
michael@0 499 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
michael@0 500 category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
michael@0 501 #define TRACE_EVENT_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
michael@0 502 arg1_val, arg2_name, arg2_val) \
michael@0 503 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
michael@0 504 category_group, name, id, TRACE_EVENT_FLAG_NONE, \
michael@0 505 arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 506 #define TRACE_EVENT_COPY_ASYNC_BEGIN0(category_group, name, id) \
michael@0 507 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
michael@0 508 category_group, name, id, TRACE_EVENT_FLAG_COPY)
michael@0 509 #define TRACE_EVENT_COPY_ASYNC_BEGIN1(category_group, name, id, arg1_name, \
michael@0 510 arg1_val) \
michael@0 511 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
michael@0 512 category_group, name, id, TRACE_EVENT_FLAG_COPY, \
michael@0 513 arg1_name, arg1_val)
michael@0 514 #define TRACE_EVENT_COPY_ASYNC_BEGIN2(category_group, name, id, arg1_name, \
michael@0 515 arg1_val, arg2_name, arg2_val) \
michael@0 516 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
michael@0 517 category_group, name, id, TRACE_EVENT_FLAG_COPY, \
michael@0 518 arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 519
michael@0 520 // Records a single ASYNC_STEP_INTO event for |step| immediately. If the
michael@0 521 // category is not enabled, then this does nothing. The |name| and |id| must
michael@0 522 // match the ASYNC_BEGIN event above. The |step| param identifies this step
michael@0 523 // within the async event. This should be called at the beginning of the next
michael@0 524 // phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
michael@0 525 // ASYNC_STEP_PAST events.
michael@0 526 #define TRACE_EVENT_ASYNC_STEP_INTO0(category_group, name, id, step) \
michael@0 527 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
michael@0 528 category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
michael@0 529 #define TRACE_EVENT_ASYNC_STEP_INTO1(category_group, name, id, step, \
michael@0 530 arg1_name, arg1_val) \
michael@0 531 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_INTO, \
michael@0 532 category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
michael@0 533 arg1_name, arg1_val)
michael@0 534
michael@0 535 // Records a single ASYNC_STEP_PAST event for |step| immediately. If the
michael@0 536 // category is not enabled, then this does nothing. The |name| and |id| must
michael@0 537 // match the ASYNC_BEGIN event above. The |step| param identifies this step
michael@0 538 // within the async event. This should be called at the beginning of the next
michael@0 539 // phase of an asynchronous operation. The ASYNC_BEGIN event must not have any
michael@0 540 // ASYNC_STEP_INTO events.
michael@0 541 #define TRACE_EVENT_ASYNC_STEP_PAST0(category_group, name, id, step) \
michael@0 542 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
michael@0 543 category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
michael@0 544 #define TRACE_EVENT_ASYNC_STEP_PAST1(category_group, name, id, step, \
michael@0 545 arg1_name, arg1_val) \
michael@0 546 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_STEP_PAST, \
michael@0 547 category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
michael@0 548 arg1_name, arg1_val)
michael@0 549
michael@0 550 // Records a single ASYNC_END event for "name" immediately. If the category
michael@0 551 // is not enabled, then this does nothing.
michael@0 552 #define TRACE_EVENT_ASYNC_END0(category_group, name, id) \
michael@0 553 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
michael@0 554 category_group, name, id, TRACE_EVENT_FLAG_NONE)
michael@0 555 #define TRACE_EVENT_ASYNC_END1(category_group, name, id, arg1_name, arg1_val) \
michael@0 556 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
michael@0 557 category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
michael@0 558 #define TRACE_EVENT_ASYNC_END2(category_group, name, id, arg1_name, arg1_val, \
michael@0 559 arg2_name, arg2_val) \
michael@0 560 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
michael@0 561 category_group, name, id, TRACE_EVENT_FLAG_NONE, \
michael@0 562 arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 563 #define TRACE_EVENT_COPY_ASYNC_END0(category_group, name, id) \
michael@0 564 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
michael@0 565 category_group, name, id, TRACE_EVENT_FLAG_COPY)
michael@0 566 #define TRACE_EVENT_COPY_ASYNC_END1(category_group, name, id, arg1_name, \
michael@0 567 arg1_val) \
michael@0 568 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
michael@0 569 category_group, name, id, TRACE_EVENT_FLAG_COPY, \
michael@0 570 arg1_name, arg1_val)
michael@0 571 #define TRACE_EVENT_COPY_ASYNC_END2(category_group, name, id, arg1_name, \
michael@0 572 arg1_val, arg2_name, arg2_val) \
michael@0 573 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
michael@0 574 category_group, name, id, TRACE_EVENT_FLAG_COPY, \
michael@0 575 arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 576
michael@0 577
michael@0 578 // Records a single FLOW_BEGIN event called "name" immediately, with 0, 1 or 2
michael@0 579 // associated arguments. If the category is not enabled, then this
michael@0 580 // does nothing.
michael@0 581 // - category and name strings must have application lifetime (statics or
michael@0 582 // literals). They may not include " chars.
michael@0 583 // - |id| is used to match the FLOW_BEGIN event with the FLOW_END event. FLOW
michael@0 584 // events are considered to match if their category_group, name and id values
michael@0 585 // all match. |id| must either be a pointer or an integer value up to 64 bits.
michael@0 586 // If it's a pointer, the bits will be xored with a hash of the process ID so
michael@0 587 // that the same pointer on two different processes will not collide.
michael@0 588 // FLOW events are different from ASYNC events in how they are drawn by the
michael@0 589 // tracing UI. A FLOW defines asynchronous data flow, such as posting a task
michael@0 590 // (FLOW_BEGIN) and later executing that task (FLOW_END). Expect FLOWs to be
michael@0 591 // drawn as lines or arrows from FLOW_BEGIN scopes to FLOW_END scopes. Similar
michael@0 592 // to ASYNC, a FLOW can consist of multiple phases. The first phase is defined
michael@0 593 // by the FLOW_BEGIN calls. Additional phases can be defined using the FLOW_STEP
michael@0 594 // macros. When the operation completes, call FLOW_END. An async operation can
michael@0 595 // span threads and processes, but all events in that operation must use the
michael@0 596 // same |name| and |id|. Each event can have its own args.
michael@0 597 #define TRACE_EVENT_FLOW_BEGIN0(category_group, name, id) \
michael@0 598 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
michael@0 599 category_group, name, id, TRACE_EVENT_FLAG_NONE)
michael@0 600 #define TRACE_EVENT_FLOW_BEGIN1(category_group, name, id, arg1_name, arg1_val) \
michael@0 601 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
michael@0 602 category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
michael@0 603 #define TRACE_EVENT_FLOW_BEGIN2(category_group, name, id, arg1_name, arg1_val, \
michael@0 604 arg2_name, arg2_val) \
michael@0 605 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
michael@0 606 category_group, name, id, TRACE_EVENT_FLAG_NONE, \
michael@0 607 arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 608 #define TRACE_EVENT_COPY_FLOW_BEGIN0(category_group, name, id) \
michael@0 609 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
michael@0 610 category_group, name, id, TRACE_EVENT_FLAG_COPY)
michael@0 611 #define TRACE_EVENT_COPY_FLOW_BEGIN1(category_group, name, id, arg1_name, \
michael@0 612 arg1_val) \
michael@0 613 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
michael@0 614 category_group, name, id, TRACE_EVENT_FLAG_COPY, \
michael@0 615 arg1_name, arg1_val)
michael@0 616 #define TRACE_EVENT_COPY_FLOW_BEGIN2(category_group, name, id, arg1_name, \
michael@0 617 arg1_val, arg2_name, arg2_val) \
michael@0 618 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_BEGIN, \
michael@0 619 category_group, name, id, TRACE_EVENT_FLAG_COPY, \
michael@0 620 arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 621
michael@0 622 // Records a single FLOW_STEP event for |step| immediately. If the category
michael@0 623 // is not enabled, then this does nothing. The |name| and |id| must match the
michael@0 624 // FLOW_BEGIN event above. The |step| param identifies this step within the
michael@0 625 // async event. This should be called at the beginning of the next phase of an
michael@0 626 // asynchronous operation.
michael@0 627 #define TRACE_EVENT_FLOW_STEP0(category_group, name, id, step) \
michael@0 628 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
michael@0 629 category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step)
michael@0 630 #define TRACE_EVENT_FLOW_STEP1(category_group, name, id, step, \
michael@0 631 arg1_name, arg1_val) \
michael@0 632 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
michael@0 633 category_group, name, id, TRACE_EVENT_FLAG_NONE, "step", step, \
michael@0 634 arg1_name, arg1_val)
michael@0 635 #define TRACE_EVENT_COPY_FLOW_STEP0(category_group, name, id, step) \
michael@0 636 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
michael@0 637 category_group, name, id, TRACE_EVENT_FLAG_COPY, "step", step)
michael@0 638 #define TRACE_EVENT_COPY_FLOW_STEP1(category_group, name, id, step, \
michael@0 639 arg1_name, arg1_val) \
michael@0 640 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_STEP, \
michael@0 641 category_group, name, id, TRACE_EVENT_FLAG_COPY, "step", step, \
michael@0 642 arg1_name, arg1_val)
michael@0 643
michael@0 644 // Records a single FLOW_END event for "name" immediately. If the category
michael@0 645 // is not enabled, then this does nothing.
michael@0 646 #define TRACE_EVENT_FLOW_END0(category_group, name, id) \
michael@0 647 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
michael@0 648 category_group, name, id, TRACE_EVENT_FLAG_NONE)
michael@0 649 #define TRACE_EVENT_FLOW_END1(category_group, name, id, arg1_name, arg1_val) \
michael@0 650 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
michael@0 651 category_group, name, id, TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
michael@0 652 #define TRACE_EVENT_FLOW_END2(category_group, name, id, arg1_name, arg1_val, \
michael@0 653 arg2_name, arg2_val) \
michael@0 654 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
michael@0 655 category_group, name, id, TRACE_EVENT_FLAG_NONE, \
michael@0 656 arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 657 #define TRACE_EVENT_COPY_FLOW_END0(category_group, name, id) \
michael@0 658 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
michael@0 659 category_group, name, id, TRACE_EVENT_FLAG_COPY)
michael@0 660 #define TRACE_EVENT_COPY_FLOW_END1(category_group, name, id, arg1_name, \
michael@0 661 arg1_val) \
michael@0 662 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
michael@0 663 category_group, name, id, TRACE_EVENT_FLAG_COPY, \
michael@0 664 arg1_name, arg1_val)
michael@0 665 #define TRACE_EVENT_COPY_FLOW_END2(category_group, name, id, arg1_name, \
michael@0 666 arg1_val, arg2_name, arg2_val) \
michael@0 667 INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_FLOW_END, \
michael@0 668 category_group, name, id, TRACE_EVENT_FLAG_COPY, \
michael@0 669 arg1_name, arg1_val, arg2_name, arg2_val)
michael@0 670
// Macros to track the life time and value of arbitrary client objects.
// See also TraceTrackableObject.
// The |id| is wrapped in TRACE_ID_DONT_MANGLE, so it is recorded verbatim
// (no process-ID mangling) and must therefore be unique across processes if
// traces are merged.
#define TRACE_EVENT_OBJECT_CREATED_WITH_ID(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_CREATE_OBJECT, \
        category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)

// Records the current state of the object as a "snapshot" argument.
#define TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category_group, name, id, snapshot) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_SNAPSHOT_OBJECT, \
        category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE,\
        "snapshot", snapshot)

#define TRACE_EVENT_OBJECT_DELETED_WITH_ID(category_group, name, id) \
    INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_DELETE_OBJECT, \
        category_group, name, TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
michael@0 685
// Expands to a boolean expression that is true when the category-enabled
// byte (looked up by INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO, which must have
// run earlier in the same scope) has the recording or event-callback bit set.
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
    *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
        (SkEventTracer::kEnabledForRecording_CategoryGroupEnabledFlags | \
         SkEventTracer::kEnabledForEventCallback_CategoryGroupEnabledFlags)

// Macro to efficiently determine if a given category group is enabled.
// |ret| must be a pointer to a bool; it receives the enabled state.
#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        *ret = true; \
      } else { \
        *ret = false; \
      } \
    } while (0)

// Macro to efficiently determine, through polling, if a new trace has begun.
// Keeps a per-call-site static of the last observed recording count; |*ret|
// is set to true exactly once per newly started trace (a count of -1 from the
// backend means "unknown" and is ignored).
#define TRACE_EVENT_IS_NEW_TRACE(ret) \
    do { \
      static int INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = 0; \
      int num_traces_recorded = TRACE_EVENT_API_GET_NUM_TRACES_RECORDED(); \
      if (num_traces_recorded != -1 && \
          num_traces_recorded != \
          INTERNAL_TRACE_EVENT_UID(lastRecordingNumber)) { \
        INTERNAL_TRACE_EVENT_UID(lastRecordingNumber) = \
            num_traces_recorded; \
        *ret = true; \
      } else { \
        *ret = false; \
      } \
    } while (0)
michael@0 717
////////////////////////////////////////////////////////////////////////////////
// Implementation specific tracing API definitions.
// In this Skia copy, every API macro forwards to the process-wide
// SkEventTracer singleton.

// Get a pointer to the enabled state of the given trace category. Only
// long-lived literal strings should be given as the category group. The
// returned pointer can be held permanently in a local static for example. If
// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
// between the load of the tracing state and the call to
// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
// for best performance when tracing is disabled.
// const uint8_t*
//     TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
    SkEventTracer::GetInstance()->getCategoryGroupEnabled

// Get the number of times traces have been recorded. This is used to implement
// the TRACE_EVENT_IS_NEW_TRACE facility.
// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
    SkEventTracer::GetInstance()->getNumTracesRecorded

// Add a trace event to the platform tracing system.
// SkEventTracer::Handle TRACE_EVENT_API_ADD_TRACE_EVENT(
//                    char phase,
//                    const uint8_t* category_group_enabled,
//                    const char* name,
//                    uint64_t id,
//                    int num_args,
//                    const char** arg_names,
//                    const uint8_t* arg_types,
//                    const uint64_t* arg_values,
//                    unsigned char flags)
#define TRACE_EVENT_API_ADD_TRACE_EVENT \
    SkEventTracer::GetInstance()->addTraceEvent

// Set the duration field of a COMPLETE trace event.
// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
//     const uint8_t* category_group_enabled,
//     const char* name,
//     SkEventTracer::Handle id)
#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
    SkEventTracer::GetInstance()->updateTraceEventDuration

// These operations are atomic in the Chrome tracing implementation
// to cater to ARM's weak memory consistency; we're just doing read/
// write here because it's not strictly needed for correctness.
// So says Nat.
// FIXME
// NOTE(review): plain load/store gives no ordering guarantees; per the note
// above this is tolerated because stale/garbage values only cost a redundant
// category lookup — revisit if that assumption changes.
#define TRACE_EVENT_API_ATOMIC_WORD intptr_t
#define TRACE_EVENT_API_ATOMIC_LOAD(var) (*(&var))
#define TRACE_EVENT_API_ATOMIC_STORE(var, value) (var=value)

// Defines visibility for classes in trace_event.h
#define TRACE_EVENT_API_CLASS_EXPORT SK_API

// The thread buckets for the sampling profiler.
TRACE_EVENT_API_CLASS_EXPORT extern \
    TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
    g_trace_state[thread_bucket]
michael@0 781
////////////////////////////////////////////////////////////////////////////////

// Implementation detail: trace event macros create temporary variables
// to keep instrumentation overhead low. These macros give each temporary
// variable a unique name based on the line number to prevent name collisions.
// (UID2 is the extra expansion step needed so __LINE__ is substituted before
// token pasting in UID3.)
#define INTERNAL_TRACE_EVENT_UID3(a,b) \
    trace_event_unique_##a##b
#define INTERNAL_TRACE_EVENT_UID2(a,b) \
    INTERNAL_TRACE_EVENT_UID3(a,b)
#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
    INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)

// Implementation detail: internal macro to create static category.
// No barriers are needed, because this code is designed to operate safely
// even when the unsigned char* points to garbage data (which may be the case
// on processors without cache coherency).
// The per-call-site |atomic| caches the enabled pointer; on the first hit
// (cache is 0) it is fetched from the tracer and stored for later calls.
#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
    category_group, atomic, category_group_enabled) \
    category_group_enabled = \
        reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD( \
            atomic)); \
    if (!category_group_enabled) { \
      category_group_enabled = \
          TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
      TRACE_EVENT_API_ATOMIC_STORE(atomic, \
          reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
              category_group_enabled)); \
    }

// Declares the per-call-site cache variables and populates them. Must expand
// in a scope where later INTERNAL_TRACE_EVENT_UID(category_group_enabled)
// references resolve (same enclosing line for the UID pasting to match).
#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
    static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
    const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(category_group, \
        INTERNAL_TRACE_EVENT_UID(atomic), \
        INTERNAL_TRACE_EVENT_UID(category_group_enabled));
michael@0 817
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        skia::tracing_internals::AddTraceEvent( \
            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
            skia::tracing_internals::kNoEventId, flags, ##__VA_ARGS__); \
      } \
    } while (0)

// Implementation detail: internal macro to create static category and add begin
// event if the category is enabled. Also adds the end event when the scope
// ends.
// Deliberately NOT wrapped in do { } while (0): the ScopedTracer must live
// until the end of the caller's enclosing scope so its destructor can close
// the COMPLETE event.
#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
    skia::tracing_internals::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
      SkEventTracer::Handle h = skia::tracing_internals::AddTraceEvent( \
          TRACE_EVENT_PHASE_COMPLETE, \
          INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
          name, skia::tracing_internals::kNoEventId, \
          TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
      INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
    }
michael@0 845
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
// TraceID's constructor may OR TRACE_EVENT_FLAG_MANGLE_ID into
// |trace_event_flags| (pointer IDs), so the flags byte is finalized only
// after constructing it.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
        flags, ...) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
        skia::tracing_internals::TraceID trace_event_trace_id( \
            id, &trace_event_flags); \
        skia::tracing_internals::AddTraceEvent( \
            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
            name, trace_event_trace_id.data(), trace_event_flags, \
            ##__VA_ARGS__); \
      } \
    } while (0)
michael@0 862
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
// FIX: the previous definition's expansion referenced |timestamp| without
// declaring it as a macro parameter, so any use of this macro failed to
// compile. The parameter list now matches the upstream Chromium definition
// (thread_id, timestamp, flags); no working caller could have existed, so
// this is not an interface break in practice.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(phase, \
        category_group, name, id, thread_id, timestamp, flags, ...) \
    do { \
      INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
      if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
        unsigned char trace_event_flags = flags | TRACE_EVENT_FLAG_HAS_ID; \
        skia::tracing_internals::TraceID trace_event_trace_id( \
            id, &trace_event_flags); \
        skia::tracing_internals::AddTraceEventWithThreadIdAndTimestamp( \
            phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
            name, trace_event_trace_id.data(), \
            thread_id, base::TimeTicks::FromInternalValue(timestamp), \
            trace_event_flags, ##__VA_ARGS__); \
      } \
    } while (0)
michael@0 880
// Notes regarding the following definitions:
// New values can be added and propagated to third party libraries, but existing
// definitions must never be changed, because third party libraries may use old
// definitions.

// Phase indicates the nature of an event entry. E.g. part of a begin/end pair.
#define TRACE_EVENT_PHASE_BEGIN    ('B')
#define TRACE_EVENT_PHASE_END      ('E')
#define TRACE_EVENT_PHASE_COMPLETE ('X')
#define TRACE_EVENT_PHASE_INSTANT  ('i')
#define TRACE_EVENT_PHASE_ASYNC_BEGIN ('S')
#define TRACE_EVENT_PHASE_ASYNC_STEP_INTO  ('T')
#define TRACE_EVENT_PHASE_ASYNC_STEP_PAST  ('p')
#define TRACE_EVENT_PHASE_ASYNC_END   ('F')
#define TRACE_EVENT_PHASE_FLOW_BEGIN ('s')
#define TRACE_EVENT_PHASE_FLOW_STEP  ('t')
#define TRACE_EVENT_PHASE_FLOW_END   ('f')
#define TRACE_EVENT_PHASE_METADATA ('M')
#define TRACE_EVENT_PHASE_COUNTER  ('C')
#define TRACE_EVENT_PHASE_SAMPLE  ('P')
#define TRACE_EVENT_PHASE_CREATE_OBJECT ('N')
#define TRACE_EVENT_PHASE_SNAPSHOT_OBJECT ('O')
#define TRACE_EVENT_PHASE_DELETE_OBJECT ('D')

// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
#define TRACE_EVENT_FLAG_NONE         (static_cast<unsigned char>(0))
#define TRACE_EVENT_FLAG_COPY         (static_cast<unsigned char>(1 << 0))
#define TRACE_EVENT_FLAG_HAS_ID       (static_cast<unsigned char>(1 << 1))
#define TRACE_EVENT_FLAG_MANGLE_ID    (static_cast<unsigned char>(1 << 2))
#define TRACE_EVENT_FLAG_SCOPE_OFFSET (static_cast<unsigned char>(1 << 3))

// Two-bit field (bits 3-4) holding the TRACE_EVENT_SCOPE_* value.
#define TRACE_EVENT_FLAG_SCOPE_MASK   (static_cast<unsigned char>( \
    TRACE_EVENT_FLAG_SCOPE_OFFSET | (TRACE_EVENT_FLAG_SCOPE_OFFSET << 1)))

// Type values for identifying types in the TraceValue union.
#define TRACE_VALUE_TYPE_BOOL         (static_cast<unsigned char>(1))
#define TRACE_VALUE_TYPE_UINT         (static_cast<unsigned char>(2))
#define TRACE_VALUE_TYPE_INT          (static_cast<unsigned char>(3))
#define TRACE_VALUE_TYPE_DOUBLE       (static_cast<unsigned char>(4))
#define TRACE_VALUE_TYPE_POINTER      (static_cast<unsigned char>(5))
#define TRACE_VALUE_TYPE_STRING       (static_cast<unsigned char>(6))
#define TRACE_VALUE_TYPE_COPY_STRING  (static_cast<unsigned char>(7))
#define TRACE_VALUE_TYPE_CONVERTABLE  (static_cast<unsigned char>(8))

// Enum reflecting the scope of an INSTANT event. Must fit within
// TRACE_EVENT_FLAG_SCOPE_MASK.
#define TRACE_EVENT_SCOPE_GLOBAL  (static_cast<unsigned char>(0 << 3))
#define TRACE_EVENT_SCOPE_PROCESS (static_cast<unsigned char>(1 << 3))
#define TRACE_EVENT_SCOPE_THREAD  (static_cast<unsigned char>(2 << 3))

// Single-character names used when serializing the scope of an INSTANT event.
#define TRACE_EVENT_SCOPE_NAME_GLOBAL  ('g')
#define TRACE_EVENT_SCOPE_NAME_PROCESS ('p')
#define TRACE_EVENT_SCOPE_NAME_THREAD  ('t')
michael@0 934
namespace skia {
namespace tracing_internals {

// Specify these values when the corresponding argument of AddTraceEvent is not
// used.
const int kZeroNumArgs = 0;
const uint64_t kNoEventId = 0;
michael@0 942
michael@0 943 // TraceID encapsulates an ID that can either be an integer or pointer. Pointers
michael@0 944 // are by default mangled with the Process ID so that they are unlikely to
michael@0 945 // collide when the same pointer is used on different processes.
michael@0 946 class TraceID {
michael@0 947 public:
michael@0 948 class DontMangle {
michael@0 949 public:
michael@0 950 explicit DontMangle(const void* id)
michael@0 951 : data_(static_cast<uint64_t>(
michael@0 952 reinterpret_cast<unsigned long>(id))) {}
michael@0 953 explicit DontMangle(uint64_t id) : data_(id) {}
michael@0 954 explicit DontMangle(unsigned int id) : data_(id) {}
michael@0 955 explicit DontMangle(unsigned short id) : data_(id) {}
michael@0 956 explicit DontMangle(unsigned char id) : data_(id) {}
michael@0 957 explicit DontMangle(long long id)
michael@0 958 : data_(static_cast<uint64_t>(id)) {}
michael@0 959 explicit DontMangle(long id)
michael@0 960 : data_(static_cast<uint64_t>(id)) {}
michael@0 961 explicit DontMangle(int id)
michael@0 962 : data_(static_cast<uint64_t>(id)) {}
michael@0 963 explicit DontMangle(short id)
michael@0 964 : data_(static_cast<uint64_t>(id)) {}
michael@0 965 explicit DontMangle(signed char id)
michael@0 966 : data_(static_cast<uint64_t>(id)) {}
michael@0 967 uint64_t data() const { return data_; }
michael@0 968 private:
michael@0 969 uint64_t data_;
michael@0 970 };
michael@0 971
michael@0 972 class ForceMangle {
michael@0 973 public:
michael@0 974 explicit ForceMangle(uint64_t id) : data_(id) {}
michael@0 975 explicit ForceMangle(unsigned int id) : data_(id) {}
michael@0 976 explicit ForceMangle(unsigned short id) : data_(id) {}
michael@0 977 explicit ForceMangle(unsigned char id) : data_(id) {}
michael@0 978 explicit ForceMangle(long long id)
michael@0 979 : data_(static_cast<uint64_t>(id)) {}
michael@0 980 explicit ForceMangle(long id)
michael@0 981 : data_(static_cast<uint64_t>(id)) {}
michael@0 982 explicit ForceMangle(int id)
michael@0 983 : data_(static_cast<uint64_t>(id)) {}
michael@0 984 explicit ForceMangle(short id)
michael@0 985 : data_(static_cast<uint64_t>(id)) {}
michael@0 986 explicit ForceMangle(signed char id)
michael@0 987 : data_(static_cast<uint64_t>(id)) {}
michael@0 988 uint64_t data() const { return data_; }
michael@0 989 private:
michael@0 990 uint64_t data_;
michael@0 991 };
michael@0 992
michael@0 993 TraceID(const void* id, unsigned char* flags)
michael@0 994 : data_(static_cast<uint64_t>(
michael@0 995 reinterpret_cast<unsigned long>(id))) {
michael@0 996 *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
michael@0 997 }
michael@0 998 TraceID(ForceMangle id, unsigned char* flags) : data_(id.data()) {
michael@0 999 *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
michael@0 1000 }
michael@0 1001 TraceID(DontMangle id, unsigned char* flags) : data_(id.data()) {
michael@0 1002 }
michael@0 1003 TraceID(uint64_t id, unsigned char* flags)
michael@0 1004 : data_(id) { (void)flags; }
michael@0 1005 TraceID(unsigned int id, unsigned char* flags)
michael@0 1006 : data_(id) { (void)flags; }
michael@0 1007 TraceID(unsigned short id, unsigned char* flags)
michael@0 1008 : data_(id) { (void)flags; }
michael@0 1009 TraceID(unsigned char id, unsigned char* flags)
michael@0 1010 : data_(id) { (void)flags; }
michael@0 1011 TraceID(long long id, unsigned char* flags)
michael@0 1012 : data_(static_cast<uint64_t>(id)) { (void)flags; }
michael@0 1013 TraceID(long id, unsigned char* flags)
michael@0 1014 : data_(static_cast<uint64_t>(id)) { (void)flags; }
michael@0 1015 TraceID(int id, unsigned char* flags)
michael@0 1016 : data_(static_cast<uint64_t>(id)) { (void)flags; }
michael@0 1017 TraceID(short id, unsigned char* flags)
michael@0 1018 : data_(static_cast<uint64_t>(id)) { (void)flags; }
michael@0 1019 TraceID(signed char id, unsigned char* flags)
michael@0 1020 : data_(static_cast<uint64_t>(id)) { (void)flags; }
michael@0 1021
michael@0 1022 uint64_t data() const { return data_; }
michael@0 1023
michael@0 1024 private:
michael@0 1025 uint64_t data_;
michael@0 1026 };
michael@0 1027
// Simple union to store various types as uint64_t.
// Each member corresponds to one TRACE_VALUE_TYPE_* code; SetTraceValue()
// writes through the typed member and reads the bits back via |as_uint| so
// every value travels through the uint64_t-based AddTraceEvent interface.
union TraceValueUnion {
  bool as_bool;
  uint64_t as_uint;
  long long as_int;
  double as_double;
  const void* as_pointer;
  const char* as_string;
};
michael@0 1037
// Simple container for const char* that should be copied instead of retained.
// Wrapping a string in this type routes it to the SetTraceValue overload that
// tags it TRACE_VALUE_TYPE_COPY_STRING; the wrapper itself just borrows the
// pointer and hands it back unchanged.
class TraceStringWithCopy {
 public:
  explicit TraceStringWithCopy(const char* str)
      : str_(str) {
  }

  // Implicit conversion back to the wrapped pointer.
  operator const char* () const {
    return str_;
  }

 private:
  const char* str_;  // Borrowed, never owned or duplicated here.
};
michael@0 1046
// Define SetTraceValue for each allowed type. It stores the type and
// value in the return arguments. This allows this API to avoid declaring any
// structures so that it is portable to third_party libraries.
#define INTERNAL_DECLARE_SET_TRACE_VALUE(actual_type, \
                                         union_member, \
                                         value_type_id) \
    static inline void SetTraceValue( \
        actual_type arg, \
        unsigned char* type, \
        uint64_t* value) { \
      TraceValueUnion type_value; \
      type_value.union_member = arg; \
      *type = value_type_id; \
      *value = type_value.as_uint; \
    }
// Simpler form for int types that can be safely casted.
#define INTERNAL_DECLARE_SET_TRACE_VALUE_INT(actual_type, \
                                             value_type_id) \
    static inline void SetTraceValue( \
        actual_type arg, \
        unsigned char* type, \
        uint64_t* value) { \
      *type = value_type_id; \
      *value = static_cast<uint64_t>(arg); \
    }

// One overload per supported argument type; overload resolution at the
// TRACE_EVENT* call site picks the matching TRACE_VALUE_TYPE_* tag.
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(uint64_t, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned int, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned short, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(unsigned char, TRACE_VALUE_TYPE_UINT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long long, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(long, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(int, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(short, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE_INT(signed char, TRACE_VALUE_TYPE_INT)
INTERNAL_DECLARE_SET_TRACE_VALUE(bool, as_bool, TRACE_VALUE_TYPE_BOOL)
INTERNAL_DECLARE_SET_TRACE_VALUE(double, as_double, TRACE_VALUE_TYPE_DOUBLE)
INTERNAL_DECLARE_SET_TRACE_VALUE(const void*, as_pointer,
                                 TRACE_VALUE_TYPE_POINTER)
INTERNAL_DECLARE_SET_TRACE_VALUE(const char*, as_string,
                                 TRACE_VALUE_TYPE_STRING)
INTERNAL_DECLARE_SET_TRACE_VALUE(const TraceStringWithCopy&, as_string,
                                 TRACE_VALUE_TYPE_COPY_STRING)

// Generator macros are local to this header; drop them after instantiation.
#undef INTERNAL_DECLARE_SET_TRACE_VALUE
#undef INTERNAL_DECLARE_SET_TRACE_VALUE_INT
michael@0 1093
// The AddTraceEvent function and its template overloads below are defined
// here instead of in the macros, because the arg_values could be temporary
// objects, such as std::string. In order to store pointers to the internal
// c_str and pass through to the tracing API, the arg_values must live
// throughout these procedures.
michael@0 1099
michael@0 1100 static inline SkEventTracer::Handle
michael@0 1101 AddTraceEvent(
michael@0 1102 char phase,
michael@0 1103 const uint8_t* category_group_enabled,
michael@0 1104 const char* name,
michael@0 1105 uint64_t id,
michael@0 1106 unsigned char flags) {
michael@0 1107 return TRACE_EVENT_API_ADD_TRACE_EVENT(
michael@0 1108 phase, category_group_enabled, name, id,
michael@0 1109 kZeroNumArgs, NULL, NULL, NULL, flags);
michael@0 1110 }
michael@0 1111
michael@0 1112 template<class ARG1_TYPE>
michael@0 1113 static inline SkEventTracer::Handle
michael@0 1114 AddTraceEvent(
michael@0 1115 char phase,
michael@0 1116 const uint8_t* category_group_enabled,
michael@0 1117 const char* name,
michael@0 1118 uint64_t id,
michael@0 1119 unsigned char flags,
michael@0 1120 const char* arg1_name,
michael@0 1121 const ARG1_TYPE& arg1_val) {
michael@0 1122 const int num_args = 1;
michael@0 1123 uint8_t arg_types[1];
michael@0 1124 uint64_t arg_values[1];
michael@0 1125 SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
michael@0 1126 return TRACE_EVENT_API_ADD_TRACE_EVENT(
michael@0 1127 phase, category_group_enabled, name, id,
michael@0 1128 num_args, &arg1_name, arg_types, arg_values, flags);
michael@0 1129 }
michael@0 1130
michael@0 1131 template<class ARG1_TYPE, class ARG2_TYPE>
michael@0 1132 static inline SkEventTracer::Handle
michael@0 1133 AddTraceEvent(
michael@0 1134 char phase,
michael@0 1135 const uint8_t* category_group_enabled,
michael@0 1136 const char* name,
michael@0 1137 uint64_t id,
michael@0 1138 unsigned char flags,
michael@0 1139 const char* arg1_name,
michael@0 1140 const ARG1_TYPE& arg1_val,
michael@0 1141 const char* arg2_name,
michael@0 1142 const ARG2_TYPE& arg2_val) {
michael@0 1143 const int num_args = 2;
michael@0 1144 const char* arg_names[2] = { arg1_name, arg2_name };
michael@0 1145 unsigned char arg_types[2];
michael@0 1146 uint64_t arg_values[2];
michael@0 1147 SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
michael@0 1148 SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
michael@0 1149 return TRACE_EVENT_API_ADD_TRACE_EVENT(
michael@0 1150 phase, category_group_enabled, name, id,
michael@0 1151 num_args, arg_names, arg_types, arg_values, flags);
michael@0 1152 }
michael@0 1153
michael@0 1154 // Used by TRACE_EVENTx macros. Do not use directly.
michael@0 1155 class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
michael@0 1156 public:
michael@0 1157 // Note: members of data_ intentionally left uninitialized. See Initialize.
michael@0 1158 ScopedTracer() : p_data_(NULL) {}
michael@0 1159
michael@0 1160 ~ScopedTracer() {
michael@0 1161 if (p_data_ && *data_.category_group_enabled)
michael@0 1162 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
michael@0 1163 data_.category_group_enabled, data_.name, data_.event_handle);
michael@0 1164 }
michael@0 1165
michael@0 1166 void Initialize(const uint8_t* category_group_enabled,
michael@0 1167 const char* name,
michael@0 1168 SkEventTracer::Handle event_handle) {
michael@0 1169 data_.category_group_enabled = category_group_enabled;
michael@0 1170 data_.name = name;
michael@0 1171 data_.event_handle = event_handle;
michael@0 1172 p_data_ = &data_;
michael@0 1173 }
michael@0 1174
michael@0 1175 private:
michael@0 1176 // This Data struct workaround is to avoid initializing all the members
michael@0 1177 // in Data during construction of this object, since this object is always
michael@0 1178 // constructed, even when tracing is disabled. If the members of Data were
michael@0 1179 // members of this class instead, compiler warnings occur about potential
michael@0 1180 // uninitialized accesses.
michael@0 1181 struct Data {
michael@0 1182 const uint8_t* category_group_enabled;
michael@0 1183 const char* name;
michael@0 1184 SkEventTracer::Handle event_handle;
michael@0 1185 };
michael@0 1186 Data* p_data_;
michael@0 1187 Data data_;
michael@0 1188 };
michael@0 1189
// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
//
// RAII helper whose constructor and destructor are defined out of line
// (not in this header); presumably they emit the begin/end events for
// (category_group, name) — confirm against the implementation file.
class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
 public:
  ScopedTraceBinaryEfficient(const char* category_group, const char* name);
  ~ScopedTraceBinaryEfficient();

 private:
  // State captured for the out-of-line constructor/destructor pair.
  const uint8_t* category_group_enabled_;
  const char* name_;
  SkEventTracer::Handle event_handle_;
};
michael@0 1201
// This macro generates less code than TRACE_EVENT0 but is also
// slower to execute when tracing is off. It should generally only be
// used with code that is seldom executed or conditionally executed
// when debugging.
// For now the category_group must be "gpu".
// Expands to a scoped RAII object with a unique per-line identifier.
#define TRACE_EVENT_BINARY_EFFICIENT0(category_group, name) \
    skia::tracing_internals::ScopedTraceBinaryEfficient \
        INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
michael@0 1210
// TraceEventSamplingStateScope records the current sampling state
// and sets a new sampling state. When the scope exits, it restores
// the previously recorded sampling state.
michael@0 1214 template<size_t BucketNumber>
michael@0 1215 class TraceEventSamplingStateScope {
michael@0 1216 public:
michael@0 1217 TraceEventSamplingStateScope(const char* category_and_name) {
michael@0 1218 previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
michael@0 1219 TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
michael@0 1220 }
michael@0 1221
michael@0 1222 ~TraceEventSamplingStateScope() {
michael@0 1223 TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
michael@0 1224 }
michael@0 1225
michael@0 1226 static inline const char* Current() {
michael@0 1227 return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
michael@0 1228 g_trace_state[BucketNumber]));
michael@0 1229 }
michael@0 1230
michael@0 1231 static inline void Set(const char* category_and_name) {
michael@0 1232 TRACE_EVENT_API_ATOMIC_STORE(
michael@0 1233 g_trace_state[BucketNumber],
michael@0 1234 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
michael@0 1235 const_cast<char*>(category_and_name)));
michael@0 1236 }
michael@0 1237
michael@0 1238 private:
michael@0 1239 const char* previous_state_;
michael@0 1240 };
michael@0 1241
michael@0 1242 } // namespace tracing_internals
michael@0 1243 } // namespace skia
michael@0 1244
michael@0 1245 #endif

mercurial