ipc/chromium/src/base/atomicops_internals_mips_gcc.h

changeset 0
6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/ipc/chromium/src/base/atomicops_internals_mips_gcc.h	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,182 @@
     1.4 +// Copyright 2010 the V8 project authors. All rights reserved.
     1.5 +// Redistribution and use in source and binary forms, with or without
     1.6 +// modification, are permitted provided that the following conditions are
     1.7 +// met:
     1.8 +//
     1.9 +//     * Redistributions of source code must retain the above copyright
    1.10 +//       notice, this list of conditions and the following disclaimer.
    1.11 +//     * Redistributions in binary form must reproduce the above
    1.12 +//       copyright notice, this list of conditions and the following
    1.13 +//       disclaimer in the documentation and/or other materials provided
    1.14 +//       with the distribution.
    1.15 +//     * Neither the name of Google Inc. nor the names of its
    1.16 +//       contributors may be used to endorse or promote products derived
    1.17 +//       from this software without specific prior written permission.
    1.18 +//
    1.19 +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    1.20 +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    1.21 +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    1.22 +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    1.23 +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    1.24 +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    1.25 +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    1.26 +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    1.27 +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    1.28 +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    1.29 +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    1.30 +
    1.31 +// This file is an internal atomic implementation, use atomicops.h instead.
    1.32 +
    1.33 +#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
    1.34 +#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
    1.35 +
    1.36 +#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
    1.37 +
    1.38 +namespace base {
    1.39 +namespace subtle {
    1.40 +
// Atomically execute:
//      result = *ptr;
//      if (*ptr == old_value)
//        *ptr = new_value;
//      return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr", whether or not the swap happened.
//
// Implemented as an LL/SC (load-linked / store-conditional) retry loop.
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"  // delay slots are scheduled by hand
                       "1:\n"
                       "ll %0, %5\n"  // prev = *ptr (load-linked)
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // delay slot: tmp = new_value
                       "sc %2, %1\n"  // *ptr = tmp; %2 = 1 on success, 0 if lost
                       "beqz %2, 1b\n"  // retry if the store-conditional failed
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}
    1.71 +
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  Implemented as an LL/SC retry loop.  This routine implies no memory
// barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"  // delay slots are scheduled by hand
                       "1:\n"
                       "ll %1, %2\n"  // old = *ptr (load-linked)
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"  // *ptr = temp; %0 = 1 on success, 0 if lost
                       "beqz %0, 1b\n"  // retry if the store-conditional failed
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}
    1.92 +
// Atomically increment *ptr by "increment".  Returns the new value of
// *ptr with the increment applied.  This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"  // delay slots are scheduled by hand
                       "1:\n"
                       "ll %0, %2\n"  // temp = *ptr (load-linked)
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       "sc %1, %2\n"  // *ptr = temp2; %1 = 1 on success, 0 if lost
                       "beqz %1, 1b\n"  // retry if the store-conditional failed
                       // Delay slot: executes whether or not the branch is
                       // taken.  On success it recomputes the new value (the
                       // sc clobbered %1 with its success flag); on a retry the
                       // recomputed value is simply overwritten next iteration.
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}
   1.114 +
   1.115 +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
   1.116 +                                        Atomic32 increment) {
   1.117 +  ATOMICOPS_COMPILER_BARRIER();
   1.118 +  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
   1.119 +  ATOMICOPS_COMPILER_BARRIER();
   1.120 +  return res;
   1.121 +}
   1.122 +
   1.123 +// "Acquire" operations
   1.124 +// ensure that no later memory access can be reordered ahead of the operation.
   1.125 +// "Release" operations ensure that no previous memory access can be reordered
   1.126 +// after the operation.  "Barrier" operations have both "Acquire" and "Release"
   1.127 +// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
   1.128 +// access.
   1.129 +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
   1.130 +                                       Atomic32 old_value,
   1.131 +                                       Atomic32 new_value) {
   1.132 +  ATOMICOPS_COMPILER_BARRIER();
   1.133 +  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   1.134 +  ATOMICOPS_COMPILER_BARRIER();
   1.135 +  return res;
   1.136 +}
   1.137 +
   1.138 +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
   1.139 +                                       Atomic32 old_value,
   1.140 +                                       Atomic32 new_value) {
   1.141 +  ATOMICOPS_COMPILER_BARRIER();
   1.142 +  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   1.143 +  ATOMICOPS_COMPILER_BARRIER();
   1.144 +  return res;
   1.145 +}
   1.146 +
   1.147 +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   1.148 +  *ptr = value;
   1.149 +}
   1.150 +
// Full hardware memory barrier: the MIPS "sync" instruction orders all prior
// loads/stores before all subsequent ones.  The "memory" clobber additionally
// makes it a compiler barrier.
inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}
   1.154 +
// Store, then barrier: later memory accesses cannot be reordered before the
// store.  The statement order is the entire contract here — do not reorder.
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}
   1.159 +
// Barrier, then store: all earlier memory accesses complete before the store
// becomes visible.  The statement order is the entire contract — do not
// reorder.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}
   1.164 +
   1.165 +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   1.166 +  return *ptr;
   1.167 +}
   1.168 +
// Load, then barrier: later memory accesses cannot be reordered before this
// load.  The statement order is the entire contract — do not reorder.
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}
   1.174 +
// Barrier, then load: all earlier memory accesses complete before this load
// executes.  The statement order is the entire contract — do not reorder.
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
   1.179 +
   1.180 +}  // namespace subtle
   1.181 +}  // namespace base
   1.182 +
   1.183 +#undef ATOMICOPS_COMPILER_BARRIER
   1.184 +
   1.185 +#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_

mercurial