// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.
michael@0: michael@0: #ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_ michael@0: #define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_ michael@0: michael@0: #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") michael@0: michael@0: namespace base { michael@0: namespace subtle { michael@0: michael@0: // Atomically execute: michael@0: // result = *ptr; michael@0: // if (*ptr == old_value) michael@0: // *ptr = new_value; michael@0: // return result; michael@0: // michael@0: // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". michael@0: // Always return the old value of "*ptr" michael@0: // michael@0: // This routine implies no memory barriers. michael@0: inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, michael@0: Atomic32 old_value, michael@0: Atomic32 new_value) { michael@0: Atomic32 prev, tmp; michael@0: __asm__ __volatile__(".set push\n" michael@0: ".set noreorder\n" michael@0: "1:\n" michael@0: "ll %0, %5\n" // prev = *ptr michael@0: "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 michael@0: "move %2, %4\n" // tmp = new_value michael@0: "sc %2, %1\n" // *ptr = tmp (with atomic check) michael@0: "beqz %2, 1b\n" // start again on atomic error michael@0: "nop\n" // delay slot nop michael@0: "2:\n" michael@0: ".set pop\n" michael@0: : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) michael@0: : "Ir" (old_value), "r" (new_value), "m" (*ptr) michael@0: : "memory"); michael@0: return prev; michael@0: } michael@0: michael@0: // Atomically store new_value into *ptr, returning the previous value held in michael@0: // *ptr. This routine implies no memory barriers. 
michael@0: inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, michael@0: Atomic32 new_value) { michael@0: Atomic32 temp, old; michael@0: __asm__ __volatile__(".set push\n" michael@0: ".set noreorder\n" michael@0: "1:\n" michael@0: "ll %1, %2\n" // old = *ptr michael@0: "move %0, %3\n" // temp = new_value michael@0: "sc %0, %2\n" // *ptr = temp (with atomic check) michael@0: "beqz %0, 1b\n" // start again on atomic error michael@0: "nop\n" // delay slot nop michael@0: ".set pop\n" michael@0: : "=&r" (temp), "=&r" (old), "=m" (*ptr) michael@0: : "r" (new_value), "m" (*ptr) michael@0: : "memory"); michael@0: michael@0: return old; michael@0: } michael@0: michael@0: // Atomically increment *ptr by "increment". Returns the new value of michael@0: // *ptr with the increment applied. This routine implies no memory barriers. michael@0: inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, michael@0: Atomic32 increment) { michael@0: Atomic32 temp, temp2; michael@0: michael@0: __asm__ __volatile__(".set push\n" michael@0: ".set noreorder\n" michael@0: "1:\n" michael@0: "ll %0, %2\n" // temp = *ptr michael@0: "addu %1, %0, %3\n" // temp2 = temp + increment michael@0: "sc %1, %2\n" // *ptr = temp2 (with atomic check) michael@0: "beqz %1, 1b\n" // start again on atomic error michael@0: "addu %1, %0, %3\n" // temp2 = temp + increment michael@0: ".set pop\n" michael@0: : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) michael@0: : "Ir" (increment), "m" (*ptr) michael@0: : "memory"); michael@0: // temp2 now holds the final value. 
michael@0: return temp2; michael@0: } michael@0: michael@0: inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, michael@0: Atomic32 increment) { michael@0: ATOMICOPS_COMPILER_BARRIER(); michael@0: Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); michael@0: ATOMICOPS_COMPILER_BARRIER(); michael@0: return res; michael@0: } michael@0: michael@0: // "Acquire" operations michael@0: // ensure that no later memory access can be reordered ahead of the operation. michael@0: // "Release" operations ensure that no previous memory access can be reordered michael@0: // after the operation. "Barrier" operations have both "Acquire" and "Release" michael@0: // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory michael@0: // access. michael@0: inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, michael@0: Atomic32 old_value, michael@0: Atomic32 new_value) { michael@0: ATOMICOPS_COMPILER_BARRIER(); michael@0: Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); michael@0: ATOMICOPS_COMPILER_BARRIER(); michael@0: return res; michael@0: } michael@0: michael@0: inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, michael@0: Atomic32 old_value, michael@0: Atomic32 new_value) { michael@0: ATOMICOPS_COMPILER_BARRIER(); michael@0: Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); michael@0: ATOMICOPS_COMPILER_BARRIER(); michael@0: return res; michael@0: } michael@0: michael@0: inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { michael@0: *ptr = value; michael@0: } michael@0: michael@0: inline void MemoryBarrier() { michael@0: __asm__ __volatile__("sync" : : : "memory"); michael@0: } michael@0: michael@0: inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { michael@0: *ptr = value; michael@0: MemoryBarrier(); michael@0: } michael@0: michael@0: inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { michael@0: MemoryBarrier(); michael@0: *ptr = value; 
michael@0: } michael@0: michael@0: inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { michael@0: return *ptr; michael@0: } michael@0: michael@0: inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { michael@0: Atomic32 value = *ptr; michael@0: MemoryBarrier(); michael@0: return value; michael@0: } michael@0: michael@0: inline Atomic32 Release_Load(volatile const Atomic32* ptr) { michael@0: MemoryBarrier(); michael@0: return *ptr; michael@0: } michael@0: michael@0: } // namespace subtle michael@0: } // namespace base michael@0: michael@0: #undef ATOMICOPS_COMPILER_BARRIER michael@0: michael@0: #endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_