diff --git a/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp b/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp
--- a/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp
+++ b/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.inline.hpp
@@ -292,7 +292,7 @@
 }
 
 #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
-inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, bool fence, bool acquire) {
 
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
@@ -312,9 +312,13 @@
 
   unsigned int old_value, value32;
 
+  if (fence)
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+
   __asm__ __volatile__ (
-    /* fence */
-    strasm_sync
     /* simple guard */
     "   lbz     %[old_value], 0(%[dest])                  \n"
     "   cmpw    %[masked_compare_val], %[old_value]       \n"
@@ -331,8 +335,6 @@
     "   xor     %[value32], %[xor_value], %[value32]      \n"
     "   stwcx.  %[value32], 0, %[dest_base]               \n"
     "   bne-    1b                                        \n"
-    /* acquire */
-    strasm_sync
     /* exit */
     "2:                                                   \n"
     /* out */
@@ -353,10 +355,16 @@
       "memory"
     );
 
+  if (acquire)
+    __asm__ __volatile__ (
+      /* acquire */
+      strasm_sync
+      );
+
   return (jbyte)(unsigned char)old_value;
 }
 
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
+inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, bool fence, bool acquire) {
 
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
@@ -365,9 +373,13 @@
   unsigned int old_value;
   const uint64_t zero = 0;
 
+  if (fence)
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+
   __asm__ __volatile__ (
-    /* fence */
-    strasm_sync
     /* simple guard */
     "   lwz     %[old_value], 0(%[dest])                \n"
     "   cmpw    %[compare_value], %[old_value]          \n"
@@ -379,8 +391,6 @@
     "   bne-    2f                                      \n"
     "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
     "   bne-    1b                                      \n"
-    /* acquire */
-    strasm_sync
     /* exit */
     "2:                                                 \n"
     /* out */
@@ -397,10 +407,16 @@
       "memory"
     );
 
+  if (acquire)
+    __asm__ __volatile__ (
+      /* acquire */
+      strasm_sync
+      );
+
   return (jint) old_value;
 }
 
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, bool fence, bool acquire) {
 
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a 'fence_cmpxchg_acquire'
@@ -409,9 +425,13 @@
   long old_value;
   const uint64_t zero = 0;
 
+  if (fence)
+    __asm__ __volatile__ (
+      /* fence */
+      strasm_sync
+      );
+
   __asm__ __volatile__ (
-    /* fence */
-    strasm_sync
     /* simple guard */
     "   ld      %[old_value], 0(%[dest])                \n"
     "   cmpd    %[compare_value], %[old_value]          \n"
@@ -423,8 +443,6 @@
     "   bne-    2f                                      \n"
     "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
     "   bne-    1b                                      \n"
-    /* acquire */
-    strasm_sync
     /* exit */
     "2:                                                 \n"
     /* out */
@@ -441,15 +459,21 @@
       "memory"
     );
 
+  if (acquire)
+    __asm__ __volatile__ (
+      /* acquire */
+      strasm_sync
+      );
+
   return (jlong) old_value;
 }
 
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, bool fence, bool acquire) {
+  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, fence, acquire);
 }
 
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
+inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, bool fence, bool acquire) {
+  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value, fence, acquire);
 }
 
 #undef strasm_sync
diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
--- a/src/share/vm/oops/oop.inline.hpp
+++ b/src/share/vm/oops/oop.inline.hpp
@@ -93,7 +93,11 @@
 }
 
 markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
+#ifndef PPC64
   return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
+#else
+  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark, false, false);
+#endif
 }
 
 void oopDesc::init_mark() {
diff --git a/src/share/vm/runtime/atomic.cpp b/src/share/vm/runtime/atomic.cpp
--- a/src/share/vm/runtime/atomic.cpp
+++ b/src/share/vm/runtime/atomic.cpp
@@ -56,12 +56,22 @@
   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
 }
 
+#ifndef PPC64
 unsigned Atomic::cmpxchg(unsigned int exchange_value,
                          volatile unsigned int* dest, unsigned int compare_value) {
   assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
   return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
                                        (jint)compare_value);
 }
+#else
+unsigned Atomic::cmpxchg(unsigned int exchange_value,
+                         volatile unsigned int* dest, unsigned int compare_value,
+                         bool fence, bool acquire) {
+  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
+  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
+                                       (jint)compare_value, fence, acquire);
+}
+#endif
 
 jlong Atomic::add(jlong add_value, volatile jlong* dest) {
   jlong old = load(dest);
diff --git a/src/share/vm/runtime/atomic.hpp b/src/share/vm/runtime/atomic.hpp
--- a/src/share/vm/runtime/atomic.hpp
+++ b/src/share/vm/runtime/atomic.hpp
@@ -107,6 +107,7 @@
   // *dest with exchange_value if the comparison succeeded. Returns prior
   // value of *dest. cmpxchg*() provide:
   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
+#ifndef PPC64
   inline static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value);
   inline static jint     cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value);
   // See comment above about using jlong atomics on 32-bit platforms
@@ -114,6 +115,15 @@
   static unsigned int cmpxchg (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value);
   inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value);
   inline static void*    cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value);
+#else
+  inline static jbyte    cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value, bool fence = true, bool acquire = true);
+  inline static jint     cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value, bool fence = true, bool acquire = true);
+  // See comment above about using jlong atomics on 32-bit platforms
+  inline static jlong    cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value, bool fence = true, bool acquire = true);
+  static unsigned int cmpxchg (unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, bool fence = true, bool acquire = true);
+  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, bool fence = true, bool acquire = true);
+  inline static void*    cmpxchg_ptr(void*    exchange_value, volatile void*     dest, void*    compare_value, bool fence = true, bool acquire = true);
+#endif
 };
 
 // To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially
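
For reference, the semantics the two new flags select can be modeled portably.
The sketch below is illustrative only: it is not part of the patch, the name
cmpxchg_sketch is invented for the example, and GCC __atomic builtins stand in
for the lwarx/stwcx. inline assembly above.

#include <stdint.h>

typedef int32_t jint;  // HotSpot's jint: a 32-bit signed integer

inline jint cmpxchg_sketch(jint exchange_value, volatile jint* dest,
                           jint compare_value,
                           bool fence = true, bool acquire = true) {
  if (fence)
    __atomic_thread_fence(__ATOMIC_SEQ_CST);  // models the leading 'sync'

  jint old_value = compare_value;
  // On success, old_value keeps compare_value (which equaled *dest); on
  // failure, it is overwritten with the current *dest. Either way it ends up
  // holding the prior value, matching Atomic::cmpxchg's return contract.
  __atomic_compare_exchange_n((jint*)dest, &old_value, exchange_value,
                              /* weak = */ false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);

  if (acquire)
    __atomic_thread_fence(__ATOMIC_SEQ_CST);  // models the trailing 'sync'

  return old_value;
}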
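Both flags default to true, so existing call sites keep the historical
fence_cmpxchg_acquire behavior and relaxed ordering stays strictly opt-in; the
only opt-in in this patch is the PPC64 branch of oopDesc::cas_set_mark, which
passes false for both flags. The #ifndef PPC64 / #else split leaves the
declarations seen by the other platforms untouched, at the cost of duplicating
the overload list. Since sync is a full barrier on POWER and can easily cost
more than the lwarx/stwcx. retry loop it brackets, letting callers elide both
syncs is what the new flags buy.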