Index: src/OFBlock.m ================================================================== --- src/OFBlock.m +++ src/OFBlock.m @@ -14,11 +14,10 @@ #include #include #include #import "OFBlock.h" -#import "OFAutoreleasePool.h" #if defined(OF_GNU_RUNTIME) || defined(OF_OBJFW_RUNTIME) struct objc_abi_class { struct objc_abi_metaclass *metaclass; const char *superclass, *name; Index: src/atomic.h ================================================================== --- src/atomic.h +++ src/atomic.h @@ -121,18 +121,24 @@ of_atomic_or_32(volatile uint32_t *p, uint32_t i) { #if !defined(OF_THREADS) return (*p |= i); #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM) - uint32_t r = *p | i; - __asm__ volatile ( + __asm__ volatile ( + "0:\n\t" + "movl %2, %0\n\t" + "movl %0, %%eax\n\t" + "orl %1, %0\n\t" "lock\n\t" - "orl %0, (%1)" - : - : "r"(i), "r"(p), "m"(*p) + "cmpxchg %0, %2\n\t" + "jne 0b\n\t" + : "=&r"(i) + : "r"(i), "m"(*p) + : "eax", "cc" ); - return r; + + return i; #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_or_and_fetch(p, i); #elif defined(OF_HAVE_LIBKERN_OSATOMIC_H) return OSAtomicOr32Barrier(i, p); #endif @@ -142,18 +148,24 @@ of_atomic_and_32(volatile uint32_t *p, uint32_t i) { #if !defined(OF_THREADS) return (*p &= i); #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM) - uint32_t r = *p & i; - __asm__ volatile ( + __asm__ volatile ( + "0:\n\t" + "movl %2, %0\n\t" + "movl %0, %%eax\n\t" + "andl %1, %0\n\t" "lock\n\t" - "andl %0, (%1)" - : - : "r"(i), "r"(p), "m"(*p) + "cmpxchg %0, %2\n\t" + "jne 0b\n\t" + : "=&r"(i) + : "r"(i), "m"(*p) + : "eax", "cc" ); - return r; + + return i; #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_and_and_fetch(p, i); #elif defined(OF_HAVE_LIBKERN_OSATOMIC_H) return OSAtomicAnd32Barrier(i, p); #endif @@ -163,18 +175,24 @@ of_atomic_xor_32(volatile uint32_t *p, uint32_t i) { #if !defined(OF_THREADS) return (*p ^= i); #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM) - uint32_t r = *p ^ i; - __asm__ volatile ( + __asm__ volatile ( + "0:\n\t" + "movl %2, %0\n\t" + 
"movl %2, %%eax\n\t" + "xorl %1, %0\n\t" "lock\n\t" - "xorl %0, (%1)" - : - : "r"(i), "r"(p), "m"(*p) + "cmpxchgl %0, %2\n\t" + "jne 0\n\t" + : "=&r"(i) + : "r"(i), "m"(*p) + : "eax" ); - return r; + + return i; #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_xor_and_fetch(p, i); #elif defined(OF_HAVE_LIBKERN_OSATOMIC_H) return OSAtomicXor32Barrier(i, p); #endif @@ -190,19 +208,22 @@ } return NO; #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM) uint32_t r; + __asm__ ( - "lock; cmpxchg %2, (%3)\n\t" + "lock\n\t" + "cmpxchg %2, %3\n\t" "lahf\n\t" "andb $64, %%ah\n\t" "shrb $6, %%ah\n\t" "movzx %%ah, %0\n\t" : "=a"(r) - : "a"(o), "r"(n), "r"(p), "m"(*p) + : "a"(o), "r"(n), "m"(*p) ); + return r; #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_bool_compare_and_swap(p, o, n); #elif defined(OF_HAVE_LIBKERN_OSATOMIC_H) return OSAtomicCompareAndSwap32Barrier(o, n, p); @@ -219,21 +240,24 @@ } return NO; #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM) uint32_t r; + __asm__ ( - "lock; cmpxchg %2, (%3)\n\t" + "lock\n\t" + "cmpxchg %2, %3\n\t" "lahf\n\t" "andb $64, %%ah\n\t" "shrb $6, %%ah\n\t" "movzx %%ah, %0\n\t" : "=a"(r) - : "a"(o), "q"(n), "q"(p), "m"(*p) + : "a"(o), "q"(n), "m"(*p) ); + return r; #elif defined(OF_HAVE_GCC_ATOMIC_OPS) return __sync_bool_compare_and_swap(p, o, n); #elif defined(OF_HAVE_LIBKERN_OSATOMIC_H) return OSAtomicCompareAndSwapPtrBarrier(o, n, p); #endif }