Wed, 31 Dec 2014 06:09:35 +0100
Cloned upstream origin tor-browser at tor-browser-31.3.0esr-4.5-1-build1
(revision ID fc1c9ff7c1b2defdbc039f12214767608f46423f) for hacking purposes.

// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace base {
namespace subtle {

// Atomically execute:
//   result = *ptr;
//   if (*ptr == old_value)
//     *ptr = new_value;
//   return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr".
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev, tmp;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %5\n"        // prev = *ptr
                       "bne %0, %3, 2f\n"   // if (prev != old_value) goto 2
                       "move %2, %4\n"      // tmp = new_value
                       "sc %2, %1\n"        // *ptr = tmp (with atomic check)
                       "beqz %2, 1b\n"      // start again on atomic error
                       "nop\n"              // delay slot nop
                       "2:\n"
                       ".set pop\n"
                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                       : "memory");
  return prev;
}
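
// Illustrative usage sketch (added for exposition, not part of the upstream
// header): the canonical compare-and-swap retry loop. "ExampleAtomicMax" is a
// hypothetical helper that atomically replaces *ptr with the larger of its
// current value and "candidate", returning the value finally stored.
inline Atomic32 ExampleAtomicMax(volatile Atomic32* ptr, Atomic32 candidate) {
  Atomic32 observed = *ptr;
  while (observed < candidate) {
    Atomic32 prev = NoBarrier_CompareAndSwap(ptr, observed, candidate);
    if (prev == observed)
      return candidate;  // our candidate was installed
    observed = prev;     // lost a race; retry against the freshly read value
  }
  return observed;       // stored value was already >= candidate
}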

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %1, %2\n"    // old = *ptr
                       "move %0, %3\n"  // temp = new_value
                       "sc %0, %2\n"    // *ptr = temp (with atomic check)
                       "beqz %0, 1b\n"  // start again on atomic error
                       "nop\n"          // delay slot nop
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}
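
// Illustrative usage sketch (added for exposition, not part of the upstream
// header): NoBarrier_AtomicExchange used as a test-and-set primitive.
// "ExampleTrySetFlag" is a hypothetical helper; it returns true only for the
// caller that flipped the flag from 0 to 1. Being a NoBarrier operation, it
// does not by itself order surrounding memory accesses.
inline bool ExampleTrySetFlag(volatile Atomic32* flag) {
  return NoBarrier_AtomicExchange(flag, 1) == 0;
}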

// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp, temp2;

  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "ll %0, %2\n"         // temp = *ptr
                       "addu %1, %0, %3\n"   // temp2 = temp + increment
                       "sc %1, %2\n"         // *ptr = temp2 (with atomic check)
                       "beqz %1, 1b\n"       // start again on atomic error
                       "addu %1, %0, %3\n"   // temp2 = temp + increment
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  // temp2 now holds the final value.
  return temp2;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  ATOMICOPS_COMPILER_BARRIER();
  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
  ATOMICOPS_COMPILER_BARRIER();
  return res;
}
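
// Illustrative usage sketch (added for exposition, not part of the upstream
// header): dropping a reference count with Barrier_AtomicIncrement and a
// negative increment. "ExampleUnref" is a hypothetical helper and assumes
// Atomic32 is a signed 32-bit type; it returns true when the last reference
// was released.
inline bool ExampleUnref(volatile Atomic32* refcount) {
  return Barrier_AtomicIncrement(refcount, -1) == 0;
}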

// "Acquire" operations ensure that no later memory access can be reordered
// ahead of the operation. "Release" operations ensure that no previous memory
// access can be reordered after the operation. "Barrier" operations have both
// "Acquire" and "Release" semantics. A MemoryBarrier() has "Barrier"
// semantics, but does no memory access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ATOMICOPS_COMPILER_BARRIER();
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  ATOMICOPS_COMPILER_BARRIER();
  return res;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ATOMICOPS_COMPILER_BARRIER();
  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  ATOMICOPS_COMPILER_BARRIER();
  return res;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
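
// Illustrative usage sketch (added for exposition, not part of the upstream
// header): a minimal publish/consume pattern. The producer stores the payload
// and then publishes it with Release_Store; the consumer polls the flag with
// Acquire_Load, so once it observes the flag set, the payload store is
// guaranteed to be visible. "ExamplePublish" and "ExampleConsume" are
// hypothetical helper names.
inline void ExamplePublish(volatile Atomic32* payload,
                           volatile Atomic32* ready,
                           Atomic32 value) {
  NoBarrier_Store(payload, value);
  Release_Store(ready, 1);  // barrier first, then set the flag
}

inline Atomic32 ExampleConsume(volatile Atomic32* payload,
                               volatile Atomic32* ready) {
  while (Acquire_Load(ready) == 0) {
    // spin until the producer publishes
  }
  return NoBarrier_Load(payload);
}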

}  // namespace subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_