ipc/chromium/src/base/atomicops_internals_mips_gcc.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Wed, 31 Dec 2014 06:09:35 +0100
changeset    0:6474c204b198
permissions  -rw-r--r--

Cloned from upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1,
revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f, for hacking purposes.

// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation; use atomicops.h instead.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace base {
namespace subtle {

// Atomically execute:
//      result = *ptr;
//      if (*ptr == old_value)
//        *ptr = new_value;
//      return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always returns the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %5\n"  // prev = *ptr
                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
                       "move %2, %4\n"  // delay slot: tmp = new_value
                       "sc %2, %1\n"  // *ptr = tmp (atomic store, may fail)
                       "beqz %2, 1b\n"  // retry if the store-conditional failed
                       "nop\n"  // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}
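
// Illustrative sketch (hypothetical helper, not part of the upstream
// header): the typical caller-side retry loop built on the CAS above,
// here computing an atomic maximum.
inline Atomic32 ExampleAtomicStoreMax(volatile Atomic32* ptr,
                                      Atomic32 candidate) {
  for (;;) {
    Atomic32 observed = *ptr;  // snapshot the current value
    if (observed >= candidate)
      return observed;  // already at least as large; nothing to do
    // Install candidate only if no other thread changed *ptr in the
    // meantime; otherwise loop and re-read.
    if (NoBarrier_CompareAndSwap(ptr, observed, candidate) == observed)
      return candidate;
  }
}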

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %1, %2\n"  // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"  // *ptr = temp (atomic store, may fail)
                       "beqz %0, 1b\n"  // retry if the store-conditional failed
                       "nop\n"  // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");
  return old;
}
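
// Illustrative sketch (hypothetical helper, not part of the upstream
// header): test-and-set built on the exchange above.  Returns true when
// this caller flipped the flag from 0 to 1; a real lock would also need
// acquire/release barriers.
inline bool ExampleTestAndSet(volatile Atomic32* flag) {
  return NoBarrier_AtomicExchange(flag, 1) == 0;
}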

// Atomically increment *ptr by "increment".  Returns the new value of
// *ptr with the increment applied.  This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %2\n"  // temp = *ptr
                       "addu %1, %0, %3\n"  // temp2 = temp + increment
                       "sc %1, %2\n"  // *ptr = temp2 (atomic store, may fail)
                       "beqz %1, 1b\n"  // retry if the store-conditional failed
                       "addu %1, %0, %3\n"  // delay slot: recompute temp2,
                                            // since sc clobbered %1 with the
                                            // success flag
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}
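
// Illustrative sketch (hypothetical helper, not part of the upstream
// header): a plain event counter, where only the eventual total matters,
// is the classic client of the no-barrier increment.
inline Atomic32 ExampleCountEvent(volatile Atomic32* counter) {
  return NoBarrier_AtomicIncrement(counter, 1);
}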

// Full hardware barrier ("sync"); defined here, before its first use below.
inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return res;
}
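
// Illustrative sketch (hypothetical helper, not part of the upstream
// header): a reference-count drop, the classic client of the barrier
// variant.  The full barrier makes this thread's prior writes visible
// before any thread can observe the count reaching zero.
inline bool ExampleDecRefAndTestZero(volatile Atomic32* refcount) {
  return Barrier_AtomicIncrement(refcount, -1) == 0;
}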
   120 // "Acquire" operations
   121 // ensure that no later memory access can be reordered ahead of the operation.
   122 // "Release" operations ensure that no previous memory access can be reordered
   123 // after the operation.  "Barrier" operations have both "Acquire" and "Release"
   124 // semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
   125 // access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();  // nothing later may be reordered ahead of the swap
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  MemoryBarrier();  // nothing earlier may be reordered after the swap
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
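
// Illustrative sketch (hypothetical helpers, not part of the upstream
// header): a minimal spinlock built from the acquire/release CAS pair
// above, with 0 meaning unlocked and 1 meaning locked.
inline void ExampleSpinLock(volatile Atomic32* lock_word) {
  // Acquire semantics: critical-section accesses cannot float above this.
  while (Acquire_CompareAndSwap(lock_word, 0, 1) != 0) {
    // Busy-wait until we observe 0 and manage to swap in 1.
  }
}

inline void ExampleSpinUnlock(volatile Atomic32* lock_word) {
  // Release semantics: critical-section writes become visible before the
  // word returns to 0.
  Release_CompareAndSwap(lock_word, 1, 0);
}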

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
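
// Illustrative sketch (hypothetical helpers, not part of the upstream
// header): the flag/payload hand-off that the acquire/release load/store
// pairs are designed for.
inline void ExamplePublish(volatile Atomic32* payload,
                           volatile Atomic32* ready,
                           Atomic32 value) {
  NoBarrier_Store(payload, value);  // write the data first
  Release_Store(ready, 1);          // barrier, then set the flag
}

inline bool ExampleTryConsume(volatile Atomic32* payload,
                              volatile Atomic32* ready,
                              Atomic32* out) {
  if (Acquire_Load(ready) == 0)  // read the flag, then barrier
    return false;
  *out = NoBarrier_Load(payload);  // ordered after the flag check
  return true;
}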

}  // namespace subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
