/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkOnce_DEFINED
#define SkOnce_DEFINED

// SkOnce.h defines SK_DECLARE_STATIC_ONCE and SkOnce(), which you can use
// together to create a threadsafe way to call a function just once.  This
// is particularly useful for lazy singleton initialization.  E.g.
//
//     static void set_up_my_singleton(Singleton** singleton) {
//         *singleton = new Singleton(...);
//     }
//     ...
//     const Singleton& GetSingleton() {
//         static Singleton* singleton = NULL;
//         SK_DECLARE_STATIC_ONCE(once);
//         SkOnce(&once, set_up_my_singleton, &singleton);
//         SkASSERT(NULL != singleton);
//         return *singleton;
//     }
//
// OnceTest.cpp should also serve as a few other simple examples.
//
// You may optionally pass SkOnce a second function to be called at exit for cleanup.

#include "SkDynamicAnnotations.h"
#include "SkThread.h"
#include "SkTypes.h"

#define SK_ONCE_INIT { false, { 0, SkDEBUGCODE(0) } }
#define SK_DECLARE_STATIC_ONCE(name) static SkOnceFlag name = SK_ONCE_INIT

struct SkOnceFlag;  // If manually created, initialize with SkOnceFlag once = SK_ONCE_INIT.

template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void (*atExit)() = NULL);

// If you've already got a lock and a flag to use, this variant lets you avoid an extra SkOnceFlag.
template <typename Lock, typename Func, typename Arg>
inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void (*atExit)() = NULL);
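// As a sketch of the flag-and-lock variant and the optional atExit hook (the
// names Cache, init_cache, and cleanup_cache here are hypothetical, not part
// of this API), a class that already owns a lock might do:
//
//     class Cache {
//     public:
//         void lazyInit() {
//             SkOnce(&fInitialized, &fMu, init_cache, this, cleanup_cache);
//         }
//     private:
//         static void init_cache(Cache* self) { /* one-time setup */ }
//         static void cleanup_cache() { /* registered with atexit() */ }
//         bool    fInitialized;  // must start out false
//         SkMutex fMu;           // anything with acquire()/release(), e.g. SkSpinlock
//     };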
//  ----------------------  Implementation details below here. -----------------------------

// This is POD and must be zero-initialized.
struct SkSpinlock {
    void acquire() {
        SkASSERT(shouldBeZero == 0);
        // No memory barrier needed, but sk_atomic_cas gives us at least release anyway.
        while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
            // spin
        }
    }

    void release() {
        SkASSERT(shouldBeZero == 0);
        // This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
        SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
    }

    int32_t thisIsPrivate;
    SkDEBUGCODE(int32_t shouldBeZero;)
};

struct SkOnceFlag {
    bool done;
    SkSpinlock lock;
};

// TODO(bungeman, mtklein): move all these *barrier* functions to SkThread when refactoring lands.

#ifdef SK_BUILD_FOR_WIN
#  include <intrin.h>
inline static void compiler_barrier() {
    _ReadWriteBarrier();
}
#else
inline static void compiler_barrier() {
    asm volatile("" : : : "memory");
}
#endif

inline static void full_barrier_on_arm() {
#ifdef SK_CPU_ARM
#  if SK_ARM_ARCH >= 7
    asm volatile("dmb" : : : "memory");
#  else
    asm volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory");
#  endif
#endif
}

// On every platform, we issue a compiler barrier to prevent it from reordering
// code.  That's enough for platforms like x86 where release and acquire
// barriers are no-ops.  On other platforms we may need to be more careful;
// ARM, in particular, needs real code for both acquire and release.  We use a
// full barrier, which acts as both, because that's the finest precision ARM
// provides.

inline static void release_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}

inline static void acquire_barrier() {
    compiler_barrier();
    full_barrier_on_arm();
}

// Works with SkSpinlock or SkMutex.
template <typename Lock>
class SkAutoLockAcquire {
public:
    explicit SkAutoLockAcquire(Lock* lock) : fLock(lock) { fLock->acquire(); }
    ~SkAutoLockAcquire() { fLock->release(); }
private:
    Lock* fLock;
};

// We've pulled a pretty standard double-checked locking implementation apart
// into its main fast path and a slow path that's called when we suspect the
// one-time code hasn't run yet.

// This is the guts of the code, called when we suspect the one-time code hasn't been run yet.
// It should be called rarely, so we separate it from SkOnce and don't mark it as inline.
// (We don't mind if this is an actual function call, but odds are it'll be inlined anyway.)
template <typename Lock, typename Func, typename Arg>
static void sk_once_slow(bool* done, Lock* lock, Func f, Arg arg, void (*atExit)()) {
    const SkAutoLockAcquire<Lock> locked(lock);
    if (!*done) {
        f(arg);
        if (atExit != NULL) {
            atexit(atExit);
        }
        // Also known as a store-store/load-store barrier, this makes sure that the writes
        // done before here---in particular, those done by calling f(arg)---are observable
        // before the write that follows, *done = true.
        //
        // In version control terms this is like saying, "check in the work up
        // to and including f(arg), then check in *done = true as a subsequent change".
        //
        // We'll use this in the fast path to make sure f(arg)'s effects are
        // observable whenever we observe *done == true.
        release_barrier();
        *done = true;
    }
}
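// To sketch how the release barrier above pairs with the acquire barrier in
// the fast path below (an illustration of the scheme, with hypothetical
// threads A and B, not additional API):
//
//     Thread A (slow path)           Thread B (fast path)
//     f(arg);            // writes   bool d = *done;    // reads true
//     release_barrier();             acquire_barrier();
//     *done = true;                  // later reads see f(arg)'s writes
//
// The release barrier keeps f(arg)'s writes before the write of *done, and the
// acquire barrier keeps B's later reads after its read of *done, so whenever B
// observes *done == true it also observes everything f(arg) wrote.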
// This is our fast path, called all the time.  We really do want it to be inlined.
template <typename Lock, typename Func, typename Arg>
inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void (*atExit)()) {
    if (!SK_ANNOTATE_UNPROTECTED_READ(*done)) {
        sk_once_slow(done, lock, f, arg, atExit);
    }
    // Also known as a load-load/load-store barrier, this acquire barrier makes
    // sure that anything we read from memory---in particular, memory written by
    // calling f(arg)---is at least as current as the value we read from *done.
    //
    // In version control terms, this is a lot like saying "sync up to the
    // commit where we wrote *done = true".
    //
    // The release barrier in sk_once_slow guaranteed that *done = true
    // happens after f(arg), so by syncing to *done = true here we're
    // forcing ourselves to also wait until the effects of f(arg) are readable.
    acquire_barrier();
}

template <typename Func, typename Arg>
inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void (*atExit)()) {
    return SkOnce(&once->done, &once->lock, f, arg, atExit);
}

#undef SK_ANNOTATE_BENIGN_RACE

#endif  // SkOnce_DEFINED