@@ -12,11 +12,11 @@
  * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
  * file.
  */
 
 static OF_INLINE int
-of_atomic_int_add(volatile int *_Nonnull p, int i)
+OFAtomicIntAdd(volatile int *_Nonnull p, int i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "add %0, %0, %1\n\t"
@@ -29,11 +29,11 @@
 
 	return i;
 }
 
 static OF_INLINE int32_t
-of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i)
+OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "add %0, %0, %1\n\t"
@@ -46,11 +46,11 @@
 
 	return i;
 }
 
 static OF_INLINE void *_Nullable
-of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
+OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "add %0, %0, %1\n\t"
@@ -63,11 +63,11 @@
 
 	return (void *)i;
 }
 
 static OF_INLINE int
-of_atomic_int_sub(volatile int *_Nonnull p, int i)
+OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "sub %0, %0, %1\n\t"
@@ -80,11 +80,11 @@
 
 	return i;
 }
 
 static OF_INLINE int32_t
-of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i)
+OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "sub %0, %0, %1\n\t"
@@ -97,11 +97,11 @@
 
 	return i;
 }
 
 static OF_INLINE void *_Nullable
-of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
+OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "sub %0, %0, %1\n\t"
@@ -114,11 +114,11 @@
 
 	return (void *)i;
 }
 
 static OF_INLINE int
-of_atomic_int_inc(volatile int *_Nonnull p)
+OFAtomicIntIncrease(volatile int *_Nonnull p)
 {
 	int i;
 
 	__asm__ __volatile__ (
 	    "0:\n\t"
@@ -133,11 +133,11 @@
 
 	return i;
 }
 
 static OF_INLINE int32_t
-of_atomic_int32_inc(volatile int32_t *_Nonnull p)
+OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
 {
 	int32_t i;
 
 	__asm__ __volatile__ (
 	    "0:\n\t"
@@ -152,11 +152,11 @@
 
 	return i;
 }
 
 static OF_INLINE int
-of_atomic_int_dec(volatile int *_Nonnull p)
+OFAtomicIntDecrease(volatile int *_Nonnull p)
 {
 	int i;
 
 	__asm__ __volatile__ (
 	    "0:\n\t"
@@ -171,11 +171,11 @@
 
 	return i;
 }
 
 static OF_INLINE int32_t
-of_atomic_int32_dec(volatile int32_t *_Nonnull p)
+OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
 {
 	int32_t i;
 
 	__asm__ __volatile__ (
 	    "0:\n\t"
@@ -190,11 +190,11 @@
 
 	return i;
 }
 
 static OF_INLINE unsigned int
-of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i)
+OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "or %0, %0, %1\n\t"
@@ -207,11 +207,11 @@
 
 	return i;
 }
 
 static OF_INLINE uint32_t
-of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i)
+OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "or %0, %0, %1\n\t"
@@ -224,11 +224,11 @@
 
 	return i;
 }
 
 static OF_INLINE unsigned int
-of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i)
+OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "and %0, %0, %1\n\t"
@@ -241,11 +241,11 @@
 
 	return i;
 }
 
 static OF_INLINE uint32_t
-of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i)
+OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "and %0, %0, %1\n\t"
@@ -258,11 +258,11 @@
 
 	return i;
 }
 
 static OF_INLINE unsigned int
-of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i)
+OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "xor %0, %0, %1\n\t"
@@ -275,11 +275,11 @@
 
 	return i;
 }
 
 static OF_INLINE uint32_t
-of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i)
+OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "lwarx %0, 0, %2\n\t"
 	    "xor %0, %0, %1\n\t"
@@ -292,11 +292,37 @@
 
 	return i;
 }
 
 static OF_INLINE bool
-of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n)
+OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
+{
+	int r;
+
+	__asm__ __volatile__ (
+	    "0:\n\t"
+	    "lwarx %0, 0, %3\n\t"
+	    "cmpw %0, %1\n\t"
+	    "bne 1f\n\t"
+	    "stwcx. %2, 0, %3\n\t"
+	    "bne- 0b\n\t"
+	    "li %0, 1\n\t"
+	    "b 2f\n\t"
+	    "1:\n\t"
+	    "stwcx. %0, 0, %3\n\t"
+	    "li %0, 0\n\t"
+	    "2:"
+	    : "=&r"(r)
+	    : "r"(o), "r"(n), "r"(p)
+	    : "cc", "memory"
+	);
+
+	return r;
+}
+
+static OF_INLINE bool
+OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
 {
 	int r;
 
 	__asm__ __volatile__ (
 	    "0:\n\t"
@@ -318,37 +344,11 @@
 
 	return r;
 }
 
 static OF_INLINE bool
-of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
-{
-	int r;
-
-	__asm__ __volatile__ (
-	    "0:\n\t"
-	    "lwarx %0, 0, %3\n\t"
-	    "cmpw %0, %1\n\t"
-	    "bne 1f\n\t"
-	    "stwcx. %2, 0, %3\n\t"
-	    "bne- 0b\n\t"
-	    "li %0, 1\n\t"
-	    "b 2f\n\t"
-	    "1:\n\t"
-	    "stwcx. %0, 0, %3\n\t"
-	    "li %0, 0\n\t"
-	    "2:"
-	    : "=&r"(r)
-	    : "r"(o), "r"(n), "r"(p)
-	    : "cc", "memory"
-	);
-
-	return r;
-}
-
-static OF_INLINE bool
-of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p,
+OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p,
     void *_Nullable o, void *_Nullable n)
 {
 	int r;
 
 	__asm__ __volatile__ (
@@ -371,27 +371,27 @@
 
 	return r;
 }
 
 static OF_INLINE void
-of_memory_barrier(void)
+OFMemoryBarrier(void)
 {
 	__asm__ __volatile__ (
 	    ".long 0x7C2004AC /* lwsync */" ::: "memory"
 	);
 }
 
 static OF_INLINE void
-of_memory_barrier_acquire(void)
+OFAcquireMemoryBarrier(void)
 {
 	__asm__ __volatile__ (
 	    ".long 0x7C2004AC /* lwsync */" ::: "memory"
 	);
 }
 
 static OF_INLINE void
-of_memory_barrier_release(void)
+OFReleaseMemoryBarrier(void)
 {
 	__asm__ __volatile__ (
 	    ".long 0x7C2004AC /* lwsync */" ::: "memory"
 	);
 }
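
Usage sketch (not part of the diff above): a minimal C example of how the renamed atomics might be called after this change. The header name "OFAtomic.h", the variable names, and the use of main() are illustrative assumptions only; the functions themselves are the ones renamed above (the compare-and-swap returns true when the swap succeeded, per the asm shown).

#include <stdbool.h>
#include <stdio.h>

#include "OFAtomic.h"	/* assumed name of the header providing these inlines */

static volatile int counter = 0;	/* hypothetical shared counter */
static volatile int state = 0;		/* hypothetical state word: 0 = idle, 1 = busy */

int
main(void)
{
	/* Atomically add 1 to counter and get the updated value back. */
	int newValue = OFAtomicIntIncrease(&counter);

	/* Atomically change state from 0 to 1; true means we won the race. */
	bool acquired = OFAtomicIntCompareAndSwap(&state, 0, 1);

	/* Release-style barrier: orders earlier accesses before later writes (lwsync). */
	OFReleaseMemoryBarrier();

	printf("counter = %d, acquired = %d\n", newValue, acquired);

	return 0;
}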