Index: src/OFBlock.m ================================================================== --- src/OFBlock.m +++ src/OFBlock.m @@ -198,11 +198,11 @@ return copy; } if ([(id)block isMemberOfClass: (Class)&_NSConcreteMallocBlock]) { #ifdef OF_HAVE_ATOMIC_OPS - of_atomic_int_inc(&block->flags); + OFAtomicIntIncrease(&block->flags); #else unsigned hash = SPINLOCK_HASH(block); OF_ENSURE(OFSpinlockLock(&blockSpinlocks[hash]) == 0); block->flags++; @@ -220,11 +220,12 @@ if (object_getClass((id)block) != (Class)&_NSConcreteMallocBlock) return; #ifdef OF_HAVE_ATOMIC_OPS - if ((of_atomic_int_dec(&block->flags) & OF_BLOCK_REFCOUNT_MASK) == 0) { + if ((OFAtomicIntDecrease(&block->flags) & + OF_BLOCK_REFCOUNT_MASK) == 0) { if (block->flags & OF_BLOCK_HAS_COPY_DISPOSE) block->descriptor->dispose_helper(block); free(block); } @@ -284,12 +285,12 @@ if (src->flags & OF_BLOCK_HAS_COPY_DISPOSE) src->byref_keep(*dst, src); #ifdef OF_HAVE_ATOMIC_OPS - if (!of_atomic_ptr_cmpswap((void **)&src->forwarding, - src, *dst)) { + if (!OFAtomicPointerCompareAndSwap( + (void **)&src->forwarding, src, *dst)) { src->byref_dispose(*dst); free(*dst); *dst = src->forwarding; } @@ -310,11 +311,11 @@ #endif } else *dst = src; #ifdef OF_HAVE_ATOMIC_OPS - of_atomic_int_inc(&(*dst)->flags); + OFAtomicIntIncrease(&(*dst)->flags); #else unsigned hash = SPINLOCK_HASH(*dst); OF_ENSURE(OFSpinlockLock(&byrefSpinlocks[hash]) == 0); (*dst)->flags++; @@ -345,11 +346,11 @@ struct byref *object = (struct byref *)object_; object = object->forwarding; #ifdef OF_HAVE_ATOMIC_OPS - if ((of_atomic_int_dec(&object->flags) & + if ((OFAtomicIntDecrease(&object->flags) & OF_BLOCK_REFCOUNT_MASK) == 0) { if (object->flags & OF_BLOCK_HAS_COPY_DISPOSE) object->byref_dispose(object); free(object); Index: src/OFObject.m ================================================================== --- src/OFObject.m +++ src/OFObject.m @@ -1097,11 +1097,11 @@ } - (instancetype)retain { #if defined(OF_HAVE_ATOMIC_OPS) - of_atomic_int_inc(&PRE_IVARS->retainCount); + OFAtomicIntIncrease(&PRE_IVARS->retainCount); #elif defined(OF_AMIGAOS) /* * On AmigaOS, we can only have one CPU. As increasing a variable is a * single instruction on M68K, we don't need Forbid() / Permit() on * M68K. @@ -1131,11 +1131,11 @@ - (void)release { #if defined(OF_HAVE_ATOMIC_OPS) of_memory_barrier_release(); - if (of_atomic_int_dec(&PRE_IVARS->retainCount) <= 0) { + if (OFAtomicIntDecrease(&PRE_IVARS->retainCount) <= 0) { of_memory_barrier_acquire(); [self dealloc]; } #elif defined(OF_AMIGAOS) Index: src/OFThread.m ================================================================== --- src/OFThread.m +++ src/OFThread.m @@ -466,11 +466,12 @@ { # if defined(OF_HAVE_ATOMIC_OPS) && !defined(__clang_analyzer__) if (_runLoop == nil) { OFRunLoop *tmp = [[OFRunLoop alloc] init]; - if (!of_atomic_ptr_cmpswap((void **)&_runLoop, nil, tmp)) + if (!OFAtomicPointerCompareAndSwap( + (void **)&_runLoop, nil, tmp)) [tmp release]; } # else @synchronized (self) { if (_runLoop == nil) Index: src/atomic_builtins.h ================================================================== --- src/atomic_builtins.h +++ src/atomic_builtins.h @@ -12,121 +12,121 @@ * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this * file. 
*/ static OF_INLINE int -of_atomic_int_add(volatile int *_Nonnull p, int i) -{ - return __atomic_add_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE int32_t -of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i) -{ - return __atomic_add_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE void *_Nullable -of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i) -{ - return __atomic_add_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE int -of_atomic_int_sub(volatile int *_Nonnull p, int i) -{ - return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE int32_t -of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i) -{ - return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE void *_Nullable -of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i) -{ - return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE int -of_atomic_int_inc(volatile int *_Nonnull p) -{ - return __atomic_add_fetch(p, 1, __ATOMIC_RELAXED); -} - -static OF_INLINE int32_t -of_atomic_int32_inc(volatile int32_t *_Nonnull p) -{ - return __atomic_add_fetch(p, 1, __ATOMIC_RELAXED); -} - -static OF_INLINE int -of_atomic_int_dec(volatile int *_Nonnull p) -{ - return __atomic_sub_fetch(p, 1, __ATOMIC_RELAXED); -} - -static OF_INLINE int32_t -of_atomic_int32_dec(volatile int32_t *_Nonnull p) -{ - return __atomic_sub_fetch(p, 1, __ATOMIC_RELAXED); -} - -static OF_INLINE unsigned int -of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return __atomic_or_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE uint32_t -of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return __atomic_or_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE unsigned int -of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return __atomic_and_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE uint32_t -of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return __atomic_and_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE unsigned int -of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return __atomic_xor_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE uint32_t -of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return __atomic_xor_fetch(p, i, __ATOMIC_RELAXED); -} - -static OF_INLINE bool -of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n) +OFAtomicIntAdd(volatile int *_Nonnull p, int i) +{ + return __atomic_add_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE int32_t +OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i) +{ + return __atomic_add_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE void *_Nullable +OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i) +{ + return __atomic_add_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE int +OFAtomicIntSubtract(volatile int *_Nonnull p, int i) +{ + return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE int32_t +OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i) +{ + return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE void *_Nullable +OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i) +{ + return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE int +OFAtomicIntIncrease(volatile int *_Nonnull p) +{ + return __atomic_add_fetch(p, 1, __ATOMIC_RELAXED); +} + +static OF_INLINE int32_t +OFAtomicInt32Increase(volatile int32_t *_Nonnull p) +{ + return 
__atomic_add_fetch(p, 1, __ATOMIC_RELAXED); +} + +static OF_INLINE int +OFAtomicIntDecrease(volatile int *_Nonnull p) +{ + return __atomic_sub_fetch(p, 1, __ATOMIC_RELAXED); +} + +static OF_INLINE int32_t +OFAtomicInt32Decrease(volatile int32_t *_Nonnull p) +{ + return __atomic_sub_fetch(p, 1, __ATOMIC_RELAXED); +} + +static OF_INLINE unsigned int +OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return __atomic_or_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE uint32_t +OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return __atomic_or_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE unsigned int +OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return __atomic_and_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE uint32_t +OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return __atomic_and_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE unsigned int +OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return __atomic_xor_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE uint32_t +OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return __atomic_xor_fetch(p, i, __ATOMIC_RELAXED); +} + +static OF_INLINE bool +OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n) +{ + return __atomic_compare_exchange(p, &o, &n, false, + __ATOMIC_RELAXED, __ATOMIC_RELAXED); +} + +static OF_INLINE bool +OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) { return __atomic_compare_exchange(p, &o, &n, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED); } static OF_INLINE bool -of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) -{ - return __atomic_compare_exchange(p, &o, &n, false, - __ATOMIC_RELAXED, __ATOMIC_RELAXED); -} - -static OF_INLINE bool -of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p, +OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p, void *_Nullable o, void *_Nullable n) { return __atomic_compare_exchange(p, &o, &n, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED); } Index: src/atomic_no_threads.h ================================================================== --- src/atomic_no_threads.h +++ src/atomic_no_threads.h @@ -12,107 +12,118 @@ * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this * file. 
*/ static OF_INLINE int -of_atomic_int_add(volatile int *_Nonnull p, int i) -{ - return (*p += i); -} - -static OF_INLINE int32_t -of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i) -{ - return (*p += i); -} - -static OF_INLINE void *_Nullable -of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i) -{ - return (*(char *volatile *)p += i); -} - -static OF_INLINE int -of_atomic_int_sub(volatile int *_Nonnull p, int i) -{ - return (*p -= i); -} - -static OF_INLINE int32_t -of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i) -{ - return (*p -= i); -} - -static OF_INLINE void *_Nullable -of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i) -{ - return (*(char *volatile *)p -= i); -} - -static OF_INLINE int -of_atomic_int_inc(volatile int *_Nonnull p) -{ - return ++*p; -} - -static OF_INLINE int32_t -of_atomic_int32_inc(volatile int32_t *_Nonnull p) -{ - return ++*p; -} - -static OF_INLINE int -of_atomic_int_dec(volatile int *_Nonnull p) -{ - return --*p; -} - -static OF_INLINE int32_t -of_atomic_int32_dec(volatile int32_t *_Nonnull p) -{ - return --*p; -} - -static OF_INLINE unsigned int -of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return (*p |= i); -} - -static OF_INLINE uint32_t -of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return (*p |= i); -} - -static OF_INLINE unsigned int -of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return (*p &= i); -} - -static OF_INLINE uint32_t -of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return (*p &= i); -} - -static OF_INLINE unsigned int -of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return (*p ^= i); -} - -static OF_INLINE uint32_t -of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return (*p ^= i); -} - -static OF_INLINE bool -of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n) +OFAtomicIntAdd(volatile int *_Nonnull p, int i) +{ + return (*p += i); +} + +static OF_INLINE int32_t +OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i) +{ + return (*p += i); +} + +static OF_INLINE void *_Nullable +OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i) +{ + return (*(char *volatile *)p += i); +} + +static OF_INLINE int +OFAtomicIntSubtract(volatile int *_Nonnull p, int i) +{ + return (*p -= i); +} + +static OF_INLINE int32_t +OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i) +{ + return (*p -= i); +} + +static OF_INLINE void *_Nullable +OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i) +{ + return (*(char *volatile *)p -= i); +} + +static OF_INLINE int +OFAtomicIntIncrease(volatile int *_Nonnull p) +{ + return ++*p; +} + +static OF_INLINE int32_t +OFAtomicInt32Increase(volatile int32_t *_Nonnull p) +{ + return ++*p; +} + +static OF_INLINE int +OFAtomicIntDecrease(volatile int *_Nonnull p) +{ + return --*p; +} + +static OF_INLINE int32_t +OFAtomicInt32Decrease(volatile int32_t *_Nonnull p) +{ + return --*p; +} + +static OF_INLINE unsigned int +OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return (*p |= i); +} + +static OF_INLINE uint32_t +OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return (*p |= i); +} + +static OF_INLINE unsigned int +OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return (*p &= i); +} + +static OF_INLINE uint32_t +OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return (*p &= i); +} + +static OF_INLINE 
unsigned int +OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return (*p ^= i); +} + +static OF_INLINE uint32_t +OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return (*p ^= i); +} + +static OF_INLINE bool +OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n) +{ + if (*p == o) { + *p = n; + return true; + } + + return false; +} + +static OF_INLINE bool +OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) { if (*p == o) { *p = n; return true; } @@ -119,22 +130,11 @@ return false; } static OF_INLINE bool -of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) -{ - if (*p == o) { - *p = n; - return true; - } - - return false; -} - -static OF_INLINE bool -of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p, +OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p, void *_Nullable o, void *_Nullable n) { if (*p == o) { *p = n; return true; Index: src/atomic_osatomic.h ================================================================== --- src/atomic_osatomic.h +++ src/atomic_osatomic.h @@ -14,127 +14,127 @@ */ #include <libkern/OSAtomic.h> static OF_INLINE int -of_atomic_int_add(volatile int *_Nonnull p, int i) +OFAtomicIntAdd(volatile int *_Nonnull p, int i) { return OSAtomicAdd32(i, p); } static OF_INLINE int32_t -of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i) +OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i) { return OSAtomicAdd32(i, p); } static OF_INLINE void *_Nullable -of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i) +OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i) { #ifdef __LP64__ return (void *)OSAtomicAdd64(i, (int64_t *)p); #else return (void *)OSAtomicAdd32(i, (int32_t *)p); #endif } static OF_INLINE int -of_atomic_int_sub(volatile int *_Nonnull p, int i) +OFAtomicIntSubtract(volatile int *_Nonnull p, int i) { return OSAtomicAdd32(-i, p); } static OF_INLINE int32_t -of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i) +OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i) { return OSAtomicAdd32(-i, p); } static OF_INLINE void *_Nullable -of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i) +OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i) { #ifdef __LP64__ return (void *)OSAtomicAdd64(-i, (int64_t *)p); #else return (void *)OSAtomicAdd32(-i, (int32_t *)p); #endif } static OF_INLINE int -of_atomic_int_inc(volatile int *_Nonnull p) -{ - return OSAtomicIncrement32(p); -} - -static OF_INLINE int32_t -of_atomic_int32_inc(volatile int32_t *_Nonnull p) -{ - return OSAtomicIncrement32(p); -} - -static OF_INLINE int -of_atomic_int_dec(volatile int *_Nonnull p) -{ - return OSAtomicDecrement32(p); -} - -static OF_INLINE int32_t -of_atomic_int32_dec(volatile int32_t *_Nonnull p) -{ - return OSAtomicDecrement32(p); -} - -static OF_INLINE unsigned int -of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return OSAtomicOr32(i, p); -} - -static OF_INLINE uint32_t -of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return OSAtomicOr32(i, p); -} - -static OF_INLINE unsigned int -of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return OSAtomicAnd32(i, p); -} - -static OF_INLINE uint32_t -of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return OSAtomicAnd32(i, p); -} - -static OF_INLINE unsigned int -of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return OSAtomicXor32(i, p); -} -
-static OF_INLINE uint32_t -of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return OSAtomicXor32(i, p); -} - -static OF_INLINE bool -of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n) -{ - return OSAtomicCompareAndSwapInt(o, n, p); -} - -static OF_INLINE bool -of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) -{ - return OSAtomicCompareAndSwap32(o, n, p); -} - -static OF_INLINE bool -of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p, +OFAtomicIntIncrease(volatile int *_Nonnull p) +{ + return OSAtomicIncrement32(p); +} + +static OF_INLINE int32_t +OFAtomicInt32Increase(volatile int32_t *_Nonnull p) +{ + return OSAtomicIncrement32(p); +} + +static OF_INLINE int +OFAtomicIntDecrease(volatile int *_Nonnull p) +{ + return OSAtomicDecrement32(p); +} + +static OF_INLINE int32_t +OFAtomicInt32Decrease(volatile int32_t *_Nonnull p) +{ + return OSAtomicDecrement32(p); +} + +static OF_INLINE unsigned int +OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return OSAtomicOr32(i, p); +} + +static OF_INLINE uint32_t +OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return OSAtomicOr32(i, p); +} + +static OF_INLINE unsigned int +OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return OSAtomicAnd32(i, p); +} + +static OF_INLINE uint32_t +OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return OSAtomicAnd32(i, p); +} + +static OF_INLINE unsigned int +OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return OSAtomicXor32(i, p); +} + +static OF_INLINE uint32_t +OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return OSAtomicXor32(i, p); +} + +static OF_INLINE bool +OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n) +{ + return OSAtomicCompareAndSwapInt(o, n, p); +} + +static OF_INLINE bool +OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) +{ + return OSAtomicCompareAndSwap32(o, n, p); +} + +static OF_INLINE bool +OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p, void *_Nullable o, void *_Nullable n) { return OSAtomicCompareAndSwapPtr(o, n, p); } Index: src/atomic_powerpc.h ================================================================== --- src/atomic_powerpc.h +++ src/atomic_powerpc.h @@ -12,11 +12,11 @@ * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this * file. 
*/ static OF_INLINE int -of_atomic_int_add(volatile int *_Nonnull p, int i) +OFAtomicIntAdd(volatile int *_Nonnull p, int i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "add %0, %0, %1\n\t" @@ -29,11 +29,11 @@ return i; } static OF_INLINE int32_t -of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i) +OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "add %0, %0, %1\n\t" @@ -46,11 +46,11 @@ return i; } static OF_INLINE void *_Nullable -of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i) +OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "add %0, %0, %1\n\t" @@ -63,11 +63,11 @@ return (void *)i; } static OF_INLINE int -of_atomic_int_sub(volatile int *_Nonnull p, int i) +OFAtomicIntSubtract(volatile int *_Nonnull p, int i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "sub %0, %0, %1\n\t" @@ -80,11 +80,11 @@ return i; } static OF_INLINE int32_t -of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i) +OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "sub %0, %0, %1\n\t" @@ -97,11 +97,11 @@ return i; } static OF_INLINE void *_Nullable -of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i) +OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "sub %0, %0, %1\n\t" @@ -114,11 +114,11 @@ return (void *)i; } static OF_INLINE int -of_atomic_int_inc(volatile int *_Nonnull p) +OFAtomicIntIncrease(volatile int *_Nonnull p) { int i; __asm__ __volatile__ ( "0:\n\t" @@ -133,11 +133,11 @@ return i; } static OF_INLINE int32_t -of_atomic_int32_inc(volatile int32_t *_Nonnull p) +OFAtomicInt32Increase(volatile int32_t *_Nonnull p) { int32_t i; __asm__ __volatile__ ( "0:\n\t" @@ -152,11 +152,11 @@ return i; } static OF_INLINE int -of_atomic_int_dec(volatile int *_Nonnull p) +OFAtomicIntDecrease(volatile int *_Nonnull p) { int i; __asm__ __volatile__ ( "0:\n\t" @@ -171,11 +171,11 @@ return i; } static OF_INLINE int32_t -of_atomic_int32_dec(volatile int32_t *_Nonnull p) +OFAtomicInt32Decrease(volatile int32_t *_Nonnull p) { int32_t i; __asm__ __volatile__ ( "0:\n\t" @@ -190,11 +190,11 @@ return i; } static OF_INLINE unsigned int -of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i) +OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "or %0, %0, %1\n\t" @@ -207,11 +207,11 @@ return i; } static OF_INLINE uint32_t -of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i) +OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "or %0, %0, %1\n\t" @@ -224,11 +224,11 @@ return i; } static OF_INLINE unsigned int -of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i) +OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "and %0, %0, %1\n\t" @@ -241,11 +241,11 @@ return i; } static OF_INLINE uint32_t -of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i) +OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "and %0, %0, %1\n\t" @@ -258,11 +258,11 @@ return i; } static OF_INLINE unsigned int -of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i) 
+OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "xor %0, %0, %1\n\t" @@ -275,11 +275,11 @@ return i; } static OF_INLINE uint32_t -of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i) +OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i) { __asm__ __volatile__ ( "0:\n\t" "lwarx %0, 0, %2\n\t" "xor %0, %0, %1\n\t" @@ -292,11 +292,37 @@ return i; } static OF_INLINE bool -of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n) +OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n) +{ + int r; + + __asm__ __volatile__ ( + "0:\n\t" + "lwarx %0, 0, %3\n\t" + "cmpw %0, %1\n\t" + "bne 1f\n\t" + "stwcx. %2, 0, %3\n\t" + "bne- 0b\n\t" + "li %0, 1\n\t" + "b 2f\n\t" + "1:\n\t" + "stwcx. %0, 0, %3\n\t" + "li %0, 0\n\t" + "2:" + : "=&r"(r) + : "r"(o), "r"(n), "r"(p) + : "cc", "memory" + ); + + return r; +} + +static OF_INLINE bool +OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) { int r; __asm__ __volatile__ ( "0:\n\t" @@ -318,37 +344,11 @@ return r; } static OF_INLINE bool -of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) -{ - int r; - - __asm__ __volatile__ ( - "0:\n\t" - "lwarx %0, 0, %3\n\t" - "cmpw %0, %1\n\t" - "bne 1f\n\t" - "stwcx. %2, 0, %3\n\t" - "bne- 0b\n\t" - "li %0, 1\n\t" - "b 2f\n\t" - "1:\n\t" - "stwcx. %0, 0, %3\n\t" - "li %0, 0\n\t" - "2:" - : "=&r"(r) - : "r"(o), "r"(n), "r"(p) - : "cc", "memory" - ); - - return r; -} - -static OF_INLINE bool -of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p, +OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p, void *_Nullable o, void *_Nullable n) { int r; __asm__ __volatile__ ( Index: src/atomic_sync_builtins.h ================================================================== --- src/atomic_sync_builtins.h +++ src/atomic_sync_builtins.h @@ -12,119 +12,119 @@ * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this * file. 
*/ static OF_INLINE int -of_atomic_int_add(volatile int *_Nonnull p, int i) -{ - return __sync_add_and_fetch(p, i); -} - -static OF_INLINE int32_t -of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i) -{ - return __sync_add_and_fetch(p, i); -} - -static OF_INLINE void *_Nullable -of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i) -{ - return __sync_add_and_fetch(p, (void *)i); -} - -static OF_INLINE int -of_atomic_int_sub(volatile int *_Nonnull p, int i) -{ - return __sync_sub_and_fetch(p, i); -} - -static OF_INLINE int32_t -of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i) -{ - return __sync_sub_and_fetch(p, i); -} - -static OF_INLINE void *_Nullable -of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i) -{ - return __sync_sub_and_fetch(p, (void *)i); -} - -static OF_INLINE int -of_atomic_int_inc(volatile int *_Nonnull p) -{ - return __sync_add_and_fetch(p, 1); -} - -static OF_INLINE int32_t -of_atomic_int32_inc(volatile int32_t *_Nonnull p) -{ - return __sync_add_and_fetch(p, 1); -} - -static OF_INLINE int -of_atomic_int_dec(volatile int *_Nonnull p) -{ - return __sync_sub_and_fetch(p, 1); -} - -static OF_INLINE int32_t -of_atomic_int32_dec(volatile int32_t *_Nonnull p) -{ - return __sync_sub_and_fetch(p, 1); -} - -static OF_INLINE unsigned int -of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return __sync_or_and_fetch(p, i); -} - -static OF_INLINE uint32_t -of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return __sync_or_and_fetch(p, i); -} - -static OF_INLINE unsigned int -of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return __sync_and_and_fetch(p, i); -} - -static OF_INLINE uint32_t -of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return __sync_and_and_fetch(p, i); -} - -static OF_INLINE unsigned int -of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i) -{ - return __sync_xor_and_fetch(p, i); -} - -static OF_INLINE uint32_t -of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i) -{ - return __sync_xor_and_fetch(p, i); -} - -static OF_INLINE bool -of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n) -{ - return __sync_bool_compare_and_swap(p, o, n); -} - -static OF_INLINE bool -of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) -{ - return __sync_bool_compare_and_swap(p, o, n); -} - -static OF_INLINE bool -of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p, +OFAtomicIntAdd(volatile int *_Nonnull p, int i) +{ + return __sync_add_and_fetch(p, i); +} + +static OF_INLINE int32_t +OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i) +{ + return __sync_add_and_fetch(p, i); +} + +static OF_INLINE void *_Nullable +OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i) +{ + return __sync_add_and_fetch(p, (void *)i); +} + +static OF_INLINE int +OFAtomicIntSubtract(volatile int *_Nonnull p, int i) +{ + return __sync_sub_and_fetch(p, i); +} + +static OF_INLINE int32_t +OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i) +{ + return __sync_sub_and_fetch(p, i); +} + +static OF_INLINE void *_Nullable +OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i) +{ + return __sync_sub_and_fetch(p, (void *)i); +} + +static OF_INLINE int +OFAtomicIntIncrease(volatile int *_Nonnull p) +{ + return __sync_add_and_fetch(p, 1); +} + +static OF_INLINE int32_t +OFAtomicInt32Increase(volatile int32_t *_Nonnull p) +{ + return __sync_add_and_fetch(p, 1); +} + +static 
OF_INLINE int +OFAtomicIntDecrease(volatile int *_Nonnull p) +{ + return __sync_sub_and_fetch(p, 1); +} + +static OF_INLINE int32_t +OFAtomicInt32Decrease(volatile int32_t *_Nonnull p) +{ + return __sync_sub_and_fetch(p, 1); +} + +static OF_INLINE unsigned int +OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return __sync_or_and_fetch(p, i); +} + +static OF_INLINE uint32_t +OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return __sync_or_and_fetch(p, i); +} + +static OF_INLINE unsigned int +OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return __sync_and_and_fetch(p, i); +} + +static OF_INLINE uint32_t +OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return __sync_and_and_fetch(p, i); +} + +static OF_INLINE unsigned int +OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i) +{ + return __sync_xor_and_fetch(p, i); +} + +static OF_INLINE uint32_t +OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i) +{ + return __sync_xor_and_fetch(p, i); +} + +static OF_INLINE bool +OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n) +{ + return __sync_bool_compare_and_swap(p, o, n); +} + +static OF_INLINE bool +OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) +{ + return __sync_bool_compare_and_swap(p, o, n); +} + +static OF_INLINE bool +OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p, void *_Nullable o, void *_Nullable n) { return __sync_bool_compare_and_swap(p, o, n); } Index: src/atomic_x86.h ================================================================== --- src/atomic_x86.h +++ src/atomic_x86.h @@ -14,11 +14,11 @@ */ OF_ASSUME_NONNULL_BEGIN static OF_INLINE int -of_atomic_int_add(volatile int *_Nonnull p, int i) +OFAtomicIntAdd(volatile int *_Nonnull p, int i) { if (sizeof(int) == 4) __asm__ __volatile__ ( "lock\n\t" "xaddl %0, %2\n\t" @@ -41,11 +41,11 @@ return i; } static OF_INLINE int32_t -of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i) +OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i) { __asm__ __volatile__ ( "lock\n\t" "xaddl %0, %2\n\t" "addl %1, %0" @@ -55,11 +55,11 @@ return i; } static OF_INLINE void *_Nullable -of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i) +OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i) { #if defined(OF_X86_64) __asm__ __volatile__ ( "lock\n\t" "xaddq %0, %2\n\t" @@ -81,11 +81,11 @@ return (void *)i; #endif } static OF_INLINE int -of_atomic_int_sub(volatile int *_Nonnull p, int i) +OFAtomicIntSubtract(volatile int *_Nonnull p, int i) { if (sizeof(int) == 4) __asm__ __volatile__ ( "negl %0\n\t" "lock\n\t" @@ -110,11 +110,11 @@ return i; } static OF_INLINE int32_t -of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i) +OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i) { __asm__ __volatile__ ( "negl %0\n\t" "lock\n\t" "xaddl %0, %2\n\t" @@ -125,11 +125,11 @@ return i; } static OF_INLINE void *_Nullable -of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i) +OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i) { #if defined(OF_X86_64) __asm__ __volatile__ ( "negq %0\n\t" "lock\n\t" @@ -153,11 +153,11 @@ return (void *)i; #endif } static OF_INLINE int -of_atomic_int_inc(volatile int *_Nonnull p) +OFAtomicIntIncrease(volatile int *_Nonnull p) { int i; if (sizeof(int) == 4) __asm__ __volatile__ ( @@ -186,11 +186,11 @@ return i; } static OF_INLINE int32_t -of_atomic_int32_inc(volatile int32_t *_Nonnull p) 
+OFAtomicInt32Increase(volatile int32_t *_Nonnull p) { int32_t i; __asm__ __volatile__ ( "xorl %0, %0\n\t" @@ -204,11 +204,11 @@ return i; } static OF_INLINE int -of_atomic_int_dec(volatile int *_Nonnull p) +OFAtomicIntDecrease(volatile int *_Nonnull p) { int i; if (sizeof(int) == 4) __asm__ __volatile__ ( @@ -237,11 +237,11 @@ return i; } static OF_INLINE int32_t -of_atomic_int32_dec(volatile int32_t *_Nonnull p) +OFAtomicInt32Decrease(volatile int32_t *_Nonnull p) { int32_t i; __asm__ __volatile__ ( "xorl %0, %0\n\t" @@ -255,11 +255,11 @@ return i; } static OF_INLINE unsigned int -of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i) +OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i) { if (sizeof(int) == 4) __asm__ __volatile__ ( "0:\n\t" "movl %2, %0\n\t" @@ -292,11 +292,11 @@ return i; } static OF_INLINE uint32_t -of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i) +OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i) { __asm__ __volatile__ ( "0:\n\t" "movl %2, %0\n\t" "movl %0, %%eax\n\t" @@ -311,11 +311,11 @@ return i; } static OF_INLINE unsigned int -of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i) +OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i) { if (sizeof(int) == 4) __asm__ __volatile__ ( "0:\n\t" "movl %2, %0\n\t" @@ -348,11 +348,11 @@ return i; } static OF_INLINE uint32_t -of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i) +OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i) { __asm__ __volatile__ ( "0:\n\t" "movl %2, %0\n\t" "movl %0, %%eax\n\t" @@ -367,11 +367,11 @@ return i; } static OF_INLINE unsigned int -of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i) +OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i) { if (sizeof(int) == 4) __asm__ __volatile__ ( "0:\n\t" "movl %2, %0\n\t" @@ -404,11 +404,11 @@ return i; } static OF_INLINE uint32_t -of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i) +OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i) { __asm__ __volatile__ ( "0:\n\t" "movl %2, %0\n\t" "movl %0, %%eax\n\t" @@ -423,11 +423,11 @@ return i; } static OF_INLINE bool -of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n) +OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n) { int r; __asm__ __volatile__ ( "lock\n\t" @@ -441,11 +441,11 @@ return r; } static OF_INLINE bool -of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) +OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n) { int r; __asm__ __volatile__ ( "lock\n\t" @@ -459,11 +459,11 @@ return r; } static OF_INLINE bool -of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p, +OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p, void *_Nullable o, void *_Nullable n) { int r; __asm__ __volatile__ ( Index: src/mutex.h ================================================================== --- src/mutex.h +++ src/mutex.h @@ -106,11 +106,11 @@ static OF_INLINE int OFSpinlockTryLock(OFSpinlock *spinlock) { #if defined(OF_HAVE_ATOMIC_OPS) - if (of_atomic_int_cmpswap(spinlock, 0, 1)) { + if (OFAtomicIntCompareAndSwap(spinlock, 0, 1)) { of_memory_barrier_acquire(); return 0; } return EBUSY; @@ -144,11 +144,11 @@ static OF_INLINE int OFSpinlockUnlock(OFSpinlock *spinlock) { #if defined(OF_HAVE_ATOMIC_OPS) - bool ret = of_atomic_int_cmpswap(spinlock, 1, 0); + bool ret = OFAtomicIntCompareAndSwap(spinlock, 1, 0); of_memory_barrier_release(); return (ret ? 
0 : EINVAL); #elif defined(OF_HAVE_PTHREAD_SPINLOCKS) Index: src/once.m ================================================================== --- src/once.m +++ src/once.m @@ -41,16 +41,16 @@ #elif defined(OF_HAVE_ATOMIC_OPS) /* Avoid atomic operations in case it's already done. */ if (*control == 2) return; - if (of_atomic_int_cmpswap(control, 0, 1)) { + if (OFAtomicIntCompareAndSwap(control, 0, 1)) { func(); of_memory_barrier(); - of_atomic_int_inc(control); + OFAtomicIntIncrease(control); } else while (*control == 1) OFYieldThread(); #elif defined(OF_AMIGAOS) bool run = false; Index: src/platform/windows/condition.m ================================================================== --- src/platform/windows/condition.m +++ src/platform/windows/condition.m @@ -73,13 +73,13 @@ DWORD status; if ((error = OFPlainMutexUnlock(mutex)) != 0) return error; - of_atomic_int_inc(&condition->count); + OFAtomicIntIncrease(&condition->count); status = WaitForSingleObject(condition->event, INFINITE); - of_atomic_int_dec(&condition->count); + OFAtomicIntDecrease(&condition->count); switch (status) { case WAIT_OBJECT_0: return OFPlainMutexLock(mutex); case WAIT_FAILED: @@ -102,13 +102,13 @@ DWORD status; if ((error = OFPlainMutexUnlock(mutex)) != 0) return error; - of_atomic_int_inc(&condition->count); + OFAtomicIntIncrease(&condition->count); status = WaitForSingleObject(condition->event, timeout * 1000); - of_atomic_int_dec(&condition->count); + OFAtomicIntDecrease(&condition->count); switch (status) { case WAIT_OBJECT_0: return OFPlainMutexLock(mutex); case WAIT_TIMEOUT:
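
Usage sketch (reviewer addition, not part of the patch): the following minimal C example shows the renamed primitives from the caller side, mirroring the OFBlock.m and mutex.h call sites above. It assumes the dispatch header src/atomic.h (untouched by this diff) selects one of the backend headers; struct Refcounted and the helper names are hypothetical.

#include <stdbool.h>
#include <stdlib.h>

#include "atomic.h"	/* assumption: dispatch header picking a backend above */

struct Refcounted {
	volatile int refCount;	/* starts at 1 on creation */
};

static void
retain(struct Refcounted *object)
{
	/* Formerly of_atomic_int_inc(); returns the increased value. */
	OFAtomicIntIncrease(&object->refCount);
}

static void
release(struct Refcounted *object)
{
	/* Formerly of_atomic_int_dec(); the last reference frees. */
	if (OFAtomicIntDecrease(&object->refCount) == 0)
		free(object);
}

static bool
tryLock(volatile int *spinlock)
{
	/* Formerly of_atomic_int_cmpswap(): swaps 0 for 1 iff unlocked. */
	return OFAtomicIntCompareAndSwap(spinlock, 0, 1);
}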