Index: src/OFObject.m
==================================================================
--- src/OFObject.m
+++ src/OFObject.m
@@ -500,11 +500,11 @@
 }
 
 - retain
 {
 #ifdef OF_ATOMIC_OPS
-	of_atomic_inc32(&PRE_IVAR->retain_count);
+	of_atomic_inc_32(&PRE_IVAR->retain_count);
 #else
 	assert(of_spinlock_lock(&PRE_IVAR->retain_spinlock));
 	PRE_IVAR->retain_count++;
 	assert(of_spinlock_unlock(&PRE_IVAR->retain_spinlock));
 #endif
@@ -519,11 +519,11 @@
 }
 
 - (void)release
 {
 #ifdef OF_ATOMIC_OPS
-	if (of_atomic_dec32(&PRE_IVAR->retain_count) <= 0)
+	if (of_atomic_dec_32(&PRE_IVAR->retain_count) <= 0)
 		[self dealloc];
 #else
 	size_t c;
 
 	assert(of_spinlock_lock(&PRE_IVAR->retain_spinlock));

Index: src/atomic.h
==================================================================
--- src/atomic.h
+++ src/atomic.h
@@ -19,11 +19,11 @@
 #ifdef OF_HAVE_LIBKERN_OSATOMIC_H
 # include <libkern/OSAtomic.h>
 #endif
 
 static OF_INLINE int32_t
-of_atomic_add32(volatile int32_t *p, int32_t i)
+of_atomic_add_32(volatile int32_t *p, int32_t i)
 {
 #if !defined(OF_THREADS)
 	return (*p += i);
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_add_and_fetch(p, i);
@@ -31,11 +31,11 @@
 	return OSAtomicAdd32Barrier(i, p);
 #endif
 }
 
 static OF_INLINE int32_t
-of_atomic_sub32(volatile int32_t *p, int32_t i)
+of_atomic_sub_32(volatile int32_t *p, int32_t i)
 {
 #if !defined(OF_THREADS)
 	return (*p -= i);
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_sub_and_fetch(p, i);
@@ -43,11 +43,11 @@
 	return OSAtomicAdd32Barrier(-i, p);
 #endif
 }
 
 static OF_INLINE int32_t
-of_atomic_inc32(volatile int32_t *p)
+of_atomic_inc_32(volatile int32_t *p)
 {
 #if !defined(OF_THREADS)
 	return ++*p;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_add_and_fetch(p, 1);
@@ -55,11 +55,11 @@
 	return OSAtomicIncrement32Barrier(p);
 #endif
 }
 
 static OF_INLINE int32_t
-of_atomic_dec32(volatile int32_t *p)
+of_atomic_dec_32(volatile int32_t *p)
 {
 #if !defined(OF_THREADS)
 	return --*p;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_sub_and_fetch(p, 1);
@@ -67,11 +67,11 @@
 	return OSAtomicDecrement32Barrier(p);
 #endif
 }
 
 static OF_INLINE uint32_t
-of_atomic_or32(volatile uint32_t *p, uint32_t i)
+of_atomic_or_32(volatile uint32_t *p, uint32_t i)
 {
 #if !defined(OF_THREADS)
 	return (*p |= i);
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_or_and_fetch(p, i);
@@ -79,11 +79,11 @@
 	return OSAtomicOr32Barrier(i, p);
 #endif
 }
 
 static OF_INLINE uint32_t
-of_atomic_and32(volatile uint32_t *p, uint32_t i)
+of_atomic_and_32(volatile uint32_t *p, uint32_t i)
 {
 #if !defined(OF_THREADS)
 	return (*p &= i);
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_and_and_fetch(p, i);
@@ -91,11 +91,11 @@
 	return OSAtomicAnd32Barrier(i, p);
 #endif
 }
 
 static OF_INLINE uint32_t
-of_atomic_xor32(volatile uint32_t *p, uint32_t i)
+of_atomic_xor_32(volatile uint32_t *p, uint32_t i)
 {
 #if !defined(OF_THREADS)
 	return (*p ^= i);
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_xor_and_fetch(p, i);
@@ -103,11 +103,11 @@
 	return OSAtomicXor32Barrier(i, p);
 #endif
 }
 
 static OF_INLINE BOOL
-of_atomic_cmpswap32(volatile int32_t *p, int32_t o, int32_t n)
+of_atomic_cmpswap_32(volatile int32_t *p, int32_t o, int32_t n)
 {
 #if !defined(OF_THREADS)
 	if (*p == o) {
 		*p = n;
 		return YES;
@@ -118,5 +118,22 @@
 	return __sync_bool_compare_and_swap(p, o, n);
 #elif defined(OF_HAVE_LIBKERN_OSATOMIC_H)
 	return OSAtomicCompareAndSwap32Barrier(o, n, p);
 #endif
 }
+
+static OF_INLINE BOOL
+of_atomic_cmpswap_ptr(void* volatile *p, void *o, void *n)
+{
+#if !defined(OF_THREADS)
+	if (*p == o) {
+		*p = n;
+		return YES;
+	}
+
+	return NO;
+#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
+	return __sync_bool_compare_and_swap(p, o, n);
+#elif defined(OF_HAVE_LIBKERN_OSATOMIC_H)
+	return OSAtomicCompareAndSwapPtrBarrier(o, n, p);
+#endif
+}

Index: src/threading.h
==================================================================
--- src/threading.h
+++ src/threading.h
@@ -210,11 +210,11 @@
 
 static OF_INLINE BOOL
 of_spinlock_trylock(of_spinlock_t *s)
 {
 #if defined(OF_ATOMIC_OPS)
-	return (of_atomic_cmpswap32(s, 0, 1) ? YES : NO);
+	return (of_atomic_cmpswap_32(s, 0, 1) ? YES : NO);
 #elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
 	return (pthread_spin_trylock(s) ? NO : YES);
 #else
 	return of_mutex_trylock(s);
 #endif
@@ -240,11 +240,11 @@
 
 static OF_INLINE BOOL
 of_spinlock_unlock(of_spinlock_t *s)
 {
 #if defined(OF_ATOMIC_OPS)
-	of_atomic_and32((uint32_t*)s, 0);
+	of_atomic_and_32((uint32_t*)s, 0);
 	return YES;
 #elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
 	return (pthread_spin_unlock(s) ? NO : YES);
 #else
 	return of_mutex_unlock(s);