Index: src/OFBlock.m ================================================================== --- src/OFBlock.m +++ src/OFBlock.m @@ -188,11 +188,11 @@ return copy; } if (object_getClass((id)block) == (Class)&_NSConcreteMallocBlock) { #ifdef OF_HAVE_ATOMIC_OPS - of_atomic_inc_int(&block->flags); + of_atomic_int_inc(&block->flags); #else unsigned hash = SPINLOCK_HASH(block); OF_ENSURE(of_spinlock_lock(&spinlocks[hash])); block->flags++; @@ -210,11 +210,11 @@ if (object_getClass((id)block) != (Class)&_NSConcreteMallocBlock) return; #ifdef OF_HAVE_ATOMIC_OPS - if ((of_atomic_dec_int(&block->flags) & OF_BLOCK_REFCOUNT_MASK) == 0) { + if ((of_atomic_int_dec(&block->flags) & OF_BLOCK_REFCOUNT_MASK) == 0) { if (block->flags & OF_BLOCK_HAS_COPY_DISPOSE) block->descriptor->dispose_helper(block); free(block); } Index: src/OFObject.m ================================================================== --- src/OFObject.m +++ src/OFObject.m @@ -70,15 +70,15 @@ # define of_forward of_method_not_found # define of_forward_stret of_method_not_found_stret #endif struct pre_ivar { - int32_t retainCount; - struct pre_mem *firstMem, *lastMem; + int retainCount; #if !defined(OF_HAVE_ATOMIC_OPS) && defined(OF_HAVE_THREADS) of_spinlock_t retainCountSpinlock; #endif + struct pre_mem *firstMem, *lastMem; }; struct pre_mem { struct pre_mem *prev, *next; id owner; @@ -958,11 +958,11 @@ } - retain { #if defined(OF_HAVE_ATOMIC_OPS) - of_atomic_inc_32(&PRE_IVARS->retainCount); + of_atomic_int_inc(&PRE_IVARS->retainCount); #else OF_ENSURE(of_spinlock_lock(&PRE_IVARS->retainCountSpinlock)); PRE_IVARS->retainCount++; OF_ENSURE(of_spinlock_unlock(&PRE_IVARS->retainCountSpinlock)); #endif @@ -977,11 +977,11 @@ } - (void)release { #if defined(OF_HAVE_ATOMIC_OPS) - if (of_atomic_dec_32(&PRE_IVARS->retainCount) <= 0) + if (of_atomic_int_dec(&PRE_IVARS->retainCount) <= 0) [self dealloc]; #else size_t c; OF_ENSURE(of_spinlock_lock(&PRE_IVARS->retainCountSpinlock)); Index: src/OFThread.m 
================================================================== --- src/OFThread.m +++ src/OFThread.m @@ -345,11 +345,11 @@ { # ifdef OF_HAVE_ATOMIC_OPS if (_runLoop == nil) { OFRunLoop *tmp = [[OFRunLoop alloc] init]; - if (!of_atomic_cmpswap_ptr((void**)&_runLoop, nil, tmp)) + if (!of_atomic_ptr_cmpswap((void**)&_runLoop, nil, tmp)) [tmp release]; } # else @synchronized (self) { if (_runLoop == nil) Index: src/atomic.h ================================================================== --- src/atomic.h +++ src/atomic.h @@ -27,11 +27,11 @@ #ifdef OF_HAVE_OSATOMIC # include <libkern/OSAtomic.h> #endif static OF_INLINE int -of_atomic_add_int(volatile int *p, int i) +of_atomic_int_add(volatile int *p, int i) { #if !defined(OF_HAVE_THREADS) return (*p += i); #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) if (sizeof(int) == 4) @@ -59,16 +59,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_add_and_fetch(p, i); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicAdd32Barrier(i, p); #else -# error of_atomic_add_int not implemented! +# error of_atomic_int_add not implemented! #endif } static OF_INLINE int32_t -of_atomic_add_32(volatile int32_t *p, int32_t i) +of_atomic_int32_add(volatile int32_t *p, int32_t i) { #if !defined(OF_HAVE_THREADS) return (*p += i); #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) __asm__ __volatile__ ( @@ -83,16 +83,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_add_and_fetch(p, i); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicAdd32Barrier(i, p); #else -# error of_atomic_add_32 not implemented! +# error of_atomic_int32_add not implemented!
#endif } static OF_INLINE void* -of_atomic_add_ptr(void* volatile *p, intptr_t i) +of_atomic_ptr_add(void* volatile *p, intptr_t i) { #if !defined(OF_HAVE_THREADS) return (*(char* volatile*)p += i); #elif defined(OF_X86_64_ASM) __asm__ __volatile__ ( @@ -121,16 +121,16 @@ return (void*)OSAtomicAdd64Barrier(i, (int64_t*)p); # else return (void*)OSAtomicAdd32Barrier(i, (int32_t*)p); # endif #else -# error of_atomic_add_ptr not implemented! +# error of_atomic_ptr_add not implemented! #endif } static OF_INLINE int -of_atomic_sub_int(volatile int *p, int i) +of_atomic_int_sub(volatile int *p, int i) { #if !defined(OF_HAVE_THREADS) return (*p -= i); #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) if (sizeof(int) == 4) @@ -160,16 +160,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_sub_and_fetch(p, i); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicAdd32Barrier(-i, p); #else -# error of_atomic_sub_int not implemented! +# error of_atomic_int_sub not implemented! #endif } static OF_INLINE int32_t -of_atomic_sub_32(volatile int32_t *p, int32_t i) +of_atomic_int32_sub(volatile int32_t *p, int32_t i) { #if !defined(OF_HAVE_THREADS) return (*p -= i); #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) __asm__ __volatile__ ( @@ -185,16 +185,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_sub_and_fetch(p, i); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicAdd32Barrier(-i, p); #else -# error of_atomic_sub_32 not implemented! +# error of_atomic_int32_sub not implemented! #endif } static OF_INLINE void* -of_atomic_sub_ptr(void* volatile *p, intptr_t i) +of_atomic_ptr_sub(void* volatile *p, intptr_t i) { #if !defined(OF_HAVE_THREADS) return (*(char* volatile*)p -= i); #elif defined(OF_X86_64_ASM) __asm__ __volatile__ ( @@ -225,16 +225,16 @@ return (void*)OSAtomicAdd64Barrier(-i, (int64_t*)p); # else return (void*)OSAtomicAdd32Barrier(-i, (int32_t*)p); # endif #else -# error of_atomic_sub_ptr not implemented! +# error of_atomic_ptr_sub not implemented! 
#endif } static OF_INLINE int -of_atomic_inc_int(volatile int *p) +of_atomic_int_inc(volatile int *p) { #if !defined(OF_HAVE_THREADS) return ++*p; #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) int i; @@ -268,16 +268,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_add_and_fetch(p, 1); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicIncrement32Barrier(p); #else -# error of_atomic_inc_int not implemented! +# error of_atomic_int_inc not implemented! #endif } static OF_INLINE int32_t -of_atomic_inc_32(volatile int32_t *p) +of_atomic_int32_inc(volatile int32_t *p) { #if !defined(OF_HAVE_THREADS) return ++*p; #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) uint32_t i; @@ -296,16 +296,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_add_and_fetch(p, 1); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicIncrement32Barrier(p); #else -# error of_atomic_inc_32 not implemented! +# error of_atomic_int32_inc not implemented! #endif } static OF_INLINE int -of_atomic_dec_int(volatile int *p) +of_atomic_int_dec(volatile int *p) { #if !defined(OF_HAVE_THREADS) return --*p; #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) int i; @@ -339,16 +339,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_sub_and_fetch(p, 1); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicDecrement32Barrier(p); #else -# error of_atomic_dec_int not implemented! +# error of_atomic_int_dec not implemented! #endif } static OF_INLINE int32_t -of_atomic_dec_32(volatile int32_t *p) +of_atomic_int32_dec(volatile int32_t *p) { #if !defined(OF_HAVE_THREADS) return --*p; #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) uint32_t i; @@ -367,16 +367,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_sub_and_fetch(p, 1); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicDecrement32Barrier(p); #else -# error of_atomic_dec_32 not implemented! +# error of_atomic_int32_dec not implemented! 
#endif } static OF_INLINE unsigned int -of_atomic_or_int(volatile unsigned int *p, unsigned int i) +of_atomic_int_or(volatile unsigned int *p, unsigned int i) { #if !defined(OF_HAVE_THREADS) return (*p |= i); #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) if (sizeof(int) == 4) @@ -414,16 +414,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_or_and_fetch(p, i); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicOr32Barrier(i, p); #else -# error of_atomic_or_int not implemented! +# error of_atomic_int_or not implemented! #endif } static OF_INLINE uint32_t -of_atomic_or_32(volatile uint32_t *p, uint32_t i) +of_atomic_int32_or(volatile uint32_t *p, uint32_t i) { #if !defined(OF_HAVE_THREADS) return (*p |= i); #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) __asm__ __volatile__ ( @@ -443,16 +443,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_or_and_fetch(p, i); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicOr32Barrier(i, p); #else -# error of_atomic_or_32 not implemented! +# error of_atomic_int32_or not implemented! #endif } static OF_INLINE unsigned int -of_atomic_and_int(volatile unsigned int *p, unsigned int i) +of_atomic_int_and(volatile unsigned int *p, unsigned int i) { #if !defined(OF_HAVE_THREADS) return (*p &= i); #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) if (sizeof(int) == 4) @@ -490,16 +490,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_and_and_fetch(p, i); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicAnd32Barrier(i, p); #else -# error of_atomic_and_int not implemented! +# error of_atomic_int_and not implemented! 
#endif } static OF_INLINE uint32_t -of_atomic_and_32(volatile uint32_t *p, uint32_t i) +of_atomic_int32_and(volatile uint32_t *p, uint32_t i) { #if !defined(OF_HAVE_THREADS) return (*p &= i); #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) __asm__ __volatile__ ( @@ -519,16 +519,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_and_and_fetch(p, i); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicAnd32Barrier(i, p); #else -# error of_atomic_and_32 not implemented! +# error of_atomic_int32_and not implemented! #endif } static OF_INLINE unsigned int -of_atomic_xor_int(volatile unsigned int *p, unsigned int i) +of_atomic_int_xor(volatile unsigned int *p, unsigned int i) { #if !defined(OF_HAVE_THREADS) return (*p ^= i); #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) if (sizeof(int) == 4) @@ -566,16 +566,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_xor_and_fetch(p, i); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicXor32Barrier(i, p); #else -# error of_atomic_xor_int not implemented! +# error of_atomic_int_xor not implemented! #endif } static OF_INLINE uint32_t -of_atomic_xor_32(volatile uint32_t *p, uint32_t i) +of_atomic_int32_xor(volatile uint32_t *p, uint32_t i) { #if !defined(OF_HAVE_THREADS) return (*p ^= i); #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM) __asm__ __volatile__ ( @@ -595,16 +595,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_xor_and_fetch(p, i); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicXor32Barrier(i, p); #else -# error of_atomic_xor_32 not implemented! +# error of_atomic_int32_xor not implemented! 
#endif } static OF_INLINE bool -of_atomic_cmpswap_int(volatile int *p, int o, int n) +of_atomic_int_cmpswap(volatile int *p, int o, int n) { #if !defined(OF_HAVE_THREADS) if (*p == o) { *p = n; return true; @@ -628,16 +628,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_bool_compare_and_swap(p, o, n); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicCompareAndSwapIntBarrier(o, n, p); #else -# error of_atomic_cmpswap_int not implemented! +# error of_atomic_int_cmpswap not implemented! #endif } static OF_INLINE bool -of_atomic_cmpswap_32(volatile int32_t *p, int32_t o, int32_t n) +of_atomic_int32_cmpswap(volatile int32_t *p, int32_t o, int32_t n) { #if !defined(OF_HAVE_THREADS) if (*p == o) { *p = n; return true; @@ -661,16 +661,16 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_bool_compare_and_swap(p, o, n); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicCompareAndSwap32Barrier(o, n, p); #else -# error of_atomic_cmpswap_32 not implemented! +# error of_atomic_int32_cmpswap not implemented! #endif } static OF_INLINE bool -of_atomic_cmpswap_ptr(void* volatile *p, void *o, void *n) +of_atomic_ptr_cmpswap(void* volatile *p, void *o, void *n) { #if !defined(OF_HAVE_THREADS) if (*p == o) { *p = n; return true; @@ -694,11 +694,11 @@ #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_bool_compare_and_swap(p, o, n); #elif defined(OF_HAVE_OSATOMIC) return OSAtomicCompareAndSwapPtrBarrier(o, n, p); #else -# error of_atomic_cmpswap_ptr not implemented! +# error of_atomic_ptr_cmpswap not implemented! 
#endif } static OF_INLINE void of_memory_barrier(void) Index: src/threading.h ================================================================== --- src/threading.h +++ src/threading.h @@ -264,18 +264,18 @@ return !pthread_cond_wait(condition, mutex); #elif defined(_WIN32) if (!of_mutex_unlock(mutex)) return false; - of_atomic_inc_int(&condition->count); + of_atomic_int_inc(&condition->count); if (WaitForSingleObject(condition->event, INFINITE) != WAIT_OBJECT_0) { of_mutex_lock(mutex); return false; } - of_atomic_dec_int(&condition->count); + of_atomic_int_dec(&condition->count); if (!of_mutex_lock(mutex)) return false; return true; @@ -297,19 +297,19 @@ return !pthread_cond_timedwait(condition, mutex, &ts); #elif defined(_WIN32) if (!of_mutex_unlock(mutex)) return false; - of_atomic_inc_int(&condition->count); + of_atomic_int_inc(&condition->count); if (WaitForSingleObject(condition->event, timeout * 1000) != WAIT_OBJECT_0) { of_mutex_lock(mutex); return false; } - of_atomic_dec_int(&condition->count); + of_atomic_int_dec(&condition->count); if (!of_mutex_lock(mutex)) return false; return true; @@ -426,11 +426,11 @@ static OF_INLINE bool of_spinlock_trylock(of_spinlock_t *spinlock) { #if defined(OF_HAVE_ATOMIC_OPS) - return of_atomic_cmpswap_int(spinlock, 0, 1); + return of_atomic_int_cmpswap(spinlock, 0, 1); #elif defined(OF_HAVE_PTHREAD_SPINLOCKS) return !pthread_spin_trylock(spinlock); #else return of_mutex_trylock(spinlock); #endif @@ -467,11 +467,11 @@ static OF_INLINE bool of_spinlock_unlock(of_spinlock_t *spinlock) { #if defined(OF_HAVE_ATOMIC_OPS) - return of_atomic_cmpswap_int(spinlock, 1, 0); + return of_atomic_int_cmpswap(spinlock, 1, 0); #elif defined(OF_HAVE_PTHREAD_SPINLOCKS) return !pthread_spin_unlock(spinlock); #else return of_mutex_unlock(spinlock); #endif