gfx/skia/trunk/include/core/SkOnce.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double-key logic according to private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in GetCookieStringCommon
and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.

/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkOnce_DEFINED
#define SkOnce_DEFINED

// SkOnce.h defines SK_DECLARE_STATIC_ONCE and SkOnce(), which you can use
// together to create a threadsafe way to call a function just once.  This
// is particularly useful for lazy singleton initialization. E.g.
//
// static void set_up_my_singleton(Singleton** singleton) {
//     *singleton = new Singleton(...);
// }
// ...
// const Singleton& GetSingleton() {
//     static Singleton* singleton = NULL;
//     SK_DECLARE_STATIC_ONCE(once);
//     SkOnce(&once, set_up_my_singleton, &singleton);
//     SkASSERT(NULL != singleton);
//     return *singleton;
// }
//
// OnceTest.cpp should also serve as a few other simple examples.
//
// You may optionally pass SkOnce a second function to be called at exit for cleanup.
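//
// For example, a cleanup hook could be registered alongside set_up_my_singleton
// above (a hypothetical sketch, not part of this header; tear_down_my_singleton
// is an assumed name):
//
// static void tear_down_my_singleton() {
//     // Runs via atexit(), after main() returns.
// }
// ...
// SkOnce(&once, set_up_my_singleton, &singleton, tear_down_my_singleton);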

#include "SkDynamicAnnotations.h"
#include "SkThread.h"
#include "SkTypes.h"

#define SK_ONCE_INIT { false, { 0, SkDEBUGCODE(0) } }
#define SK_DECLARE_STATIC_ONCE(name) static SkOnceFlag name = SK_ONCE_INIT

struct SkOnceFlag;  // If manually created, initialize with SkOnceFlag once = SK_ONCE_INIT
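// E.g. a manually created flag (a sketch; gMyFlag, some_function, and some_arg
// are hypothetical names):
//
//   SkOnceFlag gMyFlag = SK_ONCE_INIT;
//   ...
//   SkOnce(&gMyFlag, some_function, some_arg);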

template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)() = NULL);

// If you've already got a lock and a flag to use, this variant lets you avoid an extra SkOnceFlag.
template <typename Lock, typename Func, typename Arg>
inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)() = NULL);

//  ----------------------  Implementation details below here. -----------------------------

// This is POD and must be zero-initialized.
struct SkSpinlock {
    void acquire() {
        SkASSERT(shouldBeZero == 0);
        // No memory barrier needed, but sk_atomic_cas gives us at least release anyway.
        while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
            // spin
        }
    }

    void release() {
        SkASSERT(shouldBeZero == 0);
        // This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
        SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
    }

    int32_t thisIsPrivate;
    SkDEBUGCODE(int32_t shouldBeZero;)
};
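// Usage sketch (an illustration based on the struct above, not code from this file):
// since SkSpinlock is POD with no constructor, it should live in zero-initialized
// storage such as a static:
//
//   static SkSpinlock gSpinlock;   // static storage is zero-initialized
//   gSpinlock.acquire();
//   /* ...critical section... */
//   gSpinlock.release();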

struct SkOnceFlag {
    bool done;
    SkSpinlock lock;
};

// TODO(bungeman, mtklein): move all these *barrier* functions to SkThread when refactoring lands.

#ifdef SK_BUILD_FOR_WIN
#  include <intrin.h>
inline static void compiler_barrier() {
    _ReadWriteBarrier();
}
#else
inline static void compiler_barrier() {
    asm volatile("" : : : "memory");
}
#endif

inline static void full_barrier_on_arm() {
#ifdef SK_CPU_ARM
#  if SK_ARM_ARCH >= 7
    asm volatile("dmb" : : : "memory");
#  else
    asm volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
#  endif
#endif
}

// On every platform, we issue a compiler barrier to prevent it from reordering
// code.  That's enough for platforms like x86 where release and acquire
// barriers are no-ops.  On other platforms we may need to be more careful;
// ARM, in particular, needs real code for both acquire and release.  We use a
// full barrier, which acts as both, because that's the finest precision ARM
// provides.

inline static void release_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}

inline static void acquire_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}

// Works with SkSpinlock or SkMutex.
template <typename Lock>
class SkAutoLockAcquire {
public:
    explicit SkAutoLockAcquire(Lock* lock) : fLock(lock) { fLock->acquire(); }
    ~SkAutoLockAcquire() { fLock->release(); }
private:
    Lock* fLock;
};
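// Usage sketch (hypothetical function, not part of this file): the destructor
// releases the lock automatically, even on an early return:
//
//   void update_shared_state(SkSpinlock* lock) {
//       SkAutoLockAcquire<SkSpinlock> guard(lock);
//       /* ...touch shared state... */
//   }   // guard goes out of scope here and calls lock->release()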

// We've pulled a pretty standard double-checked locking implementation apart
// into its main fast path and a slow path that's called when we suspect the
// one-time code hasn't run yet.

// This is the guts of the code, called when we suspect the one-time code hasn't been run yet.
// This should be rarely called, so we separate it from SkOnce and don't mark it as inline.
// (We don't mind if this is an actual function call, but odds are it'll be inlined anyway.)
template <typename Lock, typename Func, typename Arg>
static void sk_once_slow(bool* done, Lock* lock, Func f, Arg arg, void (*atExit)()) {
    const SkAutoLockAcquire<Lock> locked(lock);
    if (!*done) {
        f(arg);
        if (atExit != NULL) {
            atexit(atExit);
        }
        // Also known as a store-store/load-store barrier, this makes sure that the writes
        // done before here---in particular, those done by calling f(arg)---are observable
        // before the writes after the line, *done = true.
        //
        // In version control terms this is like saying, "check in the work up
        // to and including f(arg), then check in *done=true as a subsequent change".
        //
        // We'll use this in the fast path to make sure f(arg)'s effects are
        // observable whenever we observe *done == true.
        release_barrier();
        *done = true;
    }
}

// This is our fast path, called all the time.  We do really want it to be inlined.
template <typename Lock, typename Func, typename Arg>
inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)()) {
    if (!SK_ANNOTATE_UNPROTECTED_READ(*done)) {
        sk_once_slow(done, lock, f, arg, atExit);
    }
    // Also known as a load-load/load-store barrier, this acquire barrier makes
    // sure that anything we read from memory---in particular, memory written by
    // calling f(arg)---is at least as current as the value we read from once->done.
    //
    // In version control terms, this is a lot like saying "sync up to the
    // commit where we wrote once->done = true".
    //
    // The release barrier in sk_once_slow guaranteed that once->done = true
    // happens after f(arg), so by syncing to once->done = true here we're
    // forcing ourselves to also wait until the effects of f(arg) are readable.
    acquire_barrier();
}

template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) {
    return SkOnce(&once->done, &once->lock, f, arg, atExit);
}

#undef SK_ANNOTATE_BENIGN_RACE

#endif  // SkOnce_DEFINED
