security/sandbox/chromium/base/atomicops_internals_x86_gcc.h

author      Michael Schloh von Bennewitz <michael@schloh.com>
date        Wed, 31 Dec 2014 06:09:35 +0100
changeset   0:6474c204b198
permissions -rw-r--r--

Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

#include "base/base_export.h"

// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of this x86. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

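// ATOMICOPS_COMPILER_BARRIER() emits no machine instruction; the empty asm
// statement with a "memory" clobber only prevents the compiler from reordering
// memory accesses across it. Hardware-level ordering comes from the
// lock-prefixed instructions and the lfence/mfence barriers used below.
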
namespace base {
namespace subtle {

// 32-bit low-level operations on any platform.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

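// Illustrative sketch only: Barrier_AtomicIncrement is the variant a
// reference-counting release path would use, so the decrement is fully
// ordered before any teardown runs. The names below are hypothetical.
//
//   if (Barrier_AtomicIncrement(&object->ref_count, -1) == 0)
//     delete object;  // no other thread still holds a reference
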
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

#if defined(__x86_64__)

// 64-bit implementations of memory barrier can be simpler, because
// "mfence" is guaranteed to exist.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

#else

inline void MemoryBarrier() {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    __asm__ __volatile__("mfence" : : : "memory");
  } else {  // mfence is faster but not present on PIII
    Atomic32 x = 0;
    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
  }
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    *ptr = value;
    __asm__ __volatile__("mfence" : : : "memory");
  } else {
    NoBarrier_AtomicExchange(ptr, value);  // acts as a barrier on PIII
  }
}
#endif

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

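// Illustrative sketch only: Release_Store and Acquire_Load pair up to publish
// data from one thread to another. The names below are hypothetical.
//
//   // Writer thread:
//   g_payload = ComputePayload();  // plain store of the payload
//   Release_Store(&g_ready, 1);    // payload becomes visible before the flag
//
//   // Reader thread:
//   if (Acquire_Load(&g_ready))    // flag is read before the payload
//     UsePayload(g_payload);
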
#if defined(__x86_64__)

// 64-bit low-level operations on 64-bit platform.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();

  *ptr = value;  // An x86 store acts as a release barrier
                 // for current AMD/Intel chips as of Jan 2008.
                 // See also Acquire_Load(), below.

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare.  Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
                          // for current AMD/Intel chips as of Jan 2008.
                          // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(__x86_64__)

}  // namespace base::subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
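These primitives are meant to be consumed through base/atomicops.h rather than
by including this header directly. As a rough usage sketch only, assuming the
Atomic32 typedef and the base::subtle namespace provided by that header, a
minimal spinlock built on Acquire_CompareAndSwap and Release_Store might look
like this (g_lock, Lock, and Unlock are hypothetical names):

// Minimal sketch of client code; not part of the header above.
#include "base/atomicops.h"

namespace {

base::subtle::Atomic32 g_lock = 0;  // 0 = unlocked, 1 = locked

void Lock() {
  // Spin until the compare-and-swap observes 0 and installs 1. The acquire
  // variant keeps the critical section from being reordered above the lock.
  while (base::subtle::Acquire_CompareAndSwap(&g_lock, 0, 1) != 0) {
    // Busy-wait; a real lock would yield or back off here.
  }
}

void Unlock() {
  // The release store makes all writes in the critical section visible
  // before the lock is observed as free.
  base::subtle::Release_Store(&g_lock, 0);
}

}  // namespace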
