/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkOnce_DEFINED
#define SkOnce_DEFINED

// SkOnce.h defines SK_DECLARE_STATIC_ONCE and SkOnce(), which you can use
// together to create a threadsafe way to call a function just once. This
// is particularly useful for lazy singleton initialization. E.g.
//
//   static void set_up_my_singleton(Singleton** singleton) {
//       *singleton = new Singleton(...);
//   }
//   ...
//   const Singleton& GetSingleton() {
//       static Singleton* singleton = NULL;
//       SK_DECLARE_STATIC_ONCE(once);
//       SkOnce(&once, set_up_my_singleton, &singleton);
//       SkASSERT(NULL != singleton);
//       return *singleton;
//   }
//
// OnceTest.cpp should also serve as a few other simple examples.
//
// You may optionally pass SkOnce a second function to be called at exit for cleanup.
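//
// For example (clean_up here is a hypothetical sketch, not part of this
// header), the optional cleanup function is registered with atexit() the
// first time the once-block actually runs:
//
//   static void clean_up() { /* tear down whatever set_up_my_singleton created */ }
//   ...
//   SkOnce(&once, set_up_my_singleton, &singleton, clean_up);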

#include "SkDynamicAnnotations.h"
#include "SkThread.h"
#include "SkTypes.h"

#define SK_ONCE_INIT { false, { 0, SkDEBUGCODE(0) } }
#define SK_DECLARE_STATIC_ONCE(name) static SkOnceFlag name = SK_ONCE_INIT

struct SkOnceFlag;  // If manually created, initialize with SkOnceFlag once = SK_ONCE_INIT

template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)() = NULL);

// If you've already got a lock and a flag to use, this variant lets you avoid an extra SkOnceFlag.
template <typename Lock, typename Func, typename Arg>
inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)() = NULL);
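
// For example (a sketch; gInitDone and gInitMutex are illustrative names, not
// part of this header), any lock type with acquire()/release() works here,
// such as an SkMutex from SkThread.h:
//
//   SK_DECLARE_STATIC_MUTEX(gInitMutex);
//   static bool gInitDone = false;  // Must start false.
//   ...
//   SkOnce(&gInitDone, &gInitMutex, set_up_my_singleton, &singleton);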

// ---------------------- Implementation details below here. -----------------------------

// This is POD and must be zero-initialized.
struct SkSpinlock {
    void acquire() {
        SkASSERT(shouldBeZero == 0);
        // No memory barrier needed, but sk_atomic_cas gives us at least release anyway.
        while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
            // spin
        }
    }

    void release() {
        SkASSERT(shouldBeZero == 0);
        // This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
        SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
    }

    int32_t thisIsPrivate;
    SkDEBUGCODE(int32_t shouldBeZero;)
};
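
// A sketch of how the zero-initialization requirement is met within this file
// (gLock is an illustrative name): giving the struct static storage duration
// zero-fills it before main(), so no explicit initializer is needed.
//
//   static SkSpinlock gLock;  // Zero-initialized, ready to use.
//   gLock.acquire();
//   // ... critical section ...
//   gLock.release();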

struct SkOnceFlag {
    bool done;
    SkSpinlock lock;
};

// TODO(bungeman, mtklein): move all these *barrier* functions to SkThread when refactoring lands.

#ifdef SK_BUILD_FOR_WIN
#  include <intrin.h>
inline static void compiler_barrier() {
    _ReadWriteBarrier();
}
#else
inline static void compiler_barrier() {
    asm volatile("" : : : "memory");
}
#endif

inline static void full_barrier_on_arm() {
#ifdef SK_CPU_ARM
#  if SK_ARM_ARCH >= 7
    asm volatile("dmb" : : : "memory");
#  else
    asm volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
#  endif
#endif
}

// On every platform, we issue a compiler barrier to prevent it from reordering
// code. That's enough for platforms like x86 where release and acquire
// barriers are no-ops. On other platforms we may need to be more careful;
// ARM, in particular, needs real code for both acquire and release. We use a
// full barrier, which acts as both, because that's the finest precision ARM
// provides.

inline static void release_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}

inline static void acquire_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}

// Works with SkSpinlock or SkMutex.
template <typename Lock>
class SkAutoLockAcquire {
public:
    explicit SkAutoLockAcquire(Lock* lock) : fLock(lock) { fLock->acquire(); }
    ~SkAutoLockAcquire() { fLock->release(); }
private:
    Lock* fLock;
};

// We've pulled a pretty standard double-checked locking implementation apart
// into its main fast path and a slow path that's called when we suspect the
// one-time code hasn't run yet.
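//
// In outline (a sketch of the pattern, not additional API), the two pieces
// combine like this:
//
//   if (!*done) {              // Fast path: cheap, unsynchronized read.
//       lock->acquire();
//       if (!*done) {          // Slow path: re-check under the lock.
//           f(arg);
//           *done = true;
//       }
//       lock->release();
//   }
//
// The barriers used below are what make the unsynchronized read safe.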

// This is the guts of the code, called when we suspect the one-time code hasn't been run yet.
// This should rarely be called, so we separate it from SkOnce and don't mark it as inline.
// (We don't mind if this is an actual function call, but odds are it'll be inlined anyway.)
template <typename Lock, typename Func, typename Arg>
static void sk_once_slow(bool* done, Lock* lock, Func f, Arg arg, void (*atExit)()) {
    const SkAutoLockAcquire<Lock> locked(lock);
    if (!*done) {
        f(arg);
        if (atExit != NULL) {
            atexit(atExit);
        }
        // Also known as a store-store/load-store barrier, this makes sure that the writes
        // done before here---in particular, those done by calling f(arg)---are observable
        // before the writes after the line, *done = true.
        //
        // In version control terms this is like saying, "check in the work up
        // to and including f(arg), then check in *done=true as a subsequent change".
        //
        // We'll use this in the fast path to make sure f(arg)'s effects are
        // observable whenever we observe *done == true.
        release_barrier();
        *done = true;
    }
}

// This is our fast path, called all the time. We really do want it to be inlined.
template <typename Lock, typename Func, typename Arg>
inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)()) {
    if (!SK_ANNOTATE_UNPROTECTED_READ(*done)) {
        sk_once_slow(done, lock, f, arg, atExit);
    }
    // Also known as a load-load/load-store barrier, this acquire barrier makes
    // sure that anything we read from memory---in particular, memory written by
    // calling f(arg)---is at least as current as the value we read from once->done.
    //
    // In version control terms, this is a lot like saying "sync up to the
    // commit where we wrote once->done = true".
    //
    // The release barrier in sk_once_slow guaranteed that once->done = true
    // happens after f(arg), so by syncing to once->done = true here we're
    // forcing ourselves to also wait until the effects of f(arg) are readable.
    acquire_barrier();
}

template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) {
    return SkOnce(&once->done, &once->lock, f, arg, atExit);
}

#undef SK_ANNOTATE_BENIGN_RACE

#endif  // SkOnce_DEFINED