@@ -31,20 +31,20 @@
 static OF_INLINE int
 of_atomic_add_int(volatile int *p, int i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p += i);
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "lock\n\t"
 		    "xaddl %0, %2\n\t"
 		    "addl %1, %0"
 		    : "+&r"(i)
 		    : "r"(i), "m"(*p)
 		);
-# ifdef OF_AMD64_ASM
+# ifdef OF_X86_64_ASM
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "lock\n\t"
 		    "xaddq %0, %2\n\t"
 		    "addq %1, %0"
@@ -68,11 +68,11 @@
 static OF_INLINE int32_t
 of_atomic_add_32(volatile int32_t *p, int32_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p += i);
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "xaddl %0, %2\n\t"
 	    "addl %1, %0"
 	    : "+&r"(i)
@@ -92,25 +92,25 @@
 static OF_INLINE void*
 of_atomic_add_ptr(void* volatile *p, intptr_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*(char* volatile*)p += i);
-#elif defined(OF_X86_ASM)
+#elif defined(OF_X86_64_ASM)
 	__asm__ __volatile__ (
 	    "lock\n\t"
-	    "xaddl %0, %2\n\t"
-	    "addl %1, %0"
+	    "xaddq %0, %2\n\t"
+	    "addq %1, %0"
 	    : "+&r"(i)
 	    : "r"(i), "m"(*p)
 	);
 
 	return (void*)i;
-#elif defined(OF_AMD64_ASM)
+#elif defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "lock\n\t"
-	    "xaddq %0, %2\n\t"
-	    "addq %1, %0"
+	    "xaddl %0, %2\n\t"
+	    "addl %1, %0"
 	    : "+&r"(i)
 	    : "r"(i), "m"(*p)
 	);
 
 	return (void*)i;
@@ -130,21 +130,21 @@
 static OF_INLINE int
 of_atomic_sub_int(volatile int *p, int i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p -= i);
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "negl %0\n\t"
 		    "lock\n\t"
 		    "xaddl %0, %2\n\t"
 		    "subl %1, %0"
 		    : "+&r"(i)
 		    : "r"(i), "m"(*p)
 		);
-# ifdef OF_AMD64_ASM
+# ifdef OF_X86_64_ASM
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "negq %0\n\t"
 		    "lock\n\t"
 		    "xaddq %0, %2\n\t"
@@ -169,11 +169,11 @@
 static OF_INLINE int32_t
 of_atomic_sub_32(volatile int32_t *p, int32_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p -= i);
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "negl %0\n\t"
 	    "lock\n\t"
 	    "xaddl %0, %2\n\t"
 	    "subl %1, %0"
@@ -194,28 +194,28 @@
 static OF_INLINE void*
 of_atomic_sub_ptr(void* volatile *p, intptr_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*(char* volatile*)p -= i);
+#elif defined(OF_X86_64_ASM)
+	__asm__ __volatile__ (
+	    "negq %0\n\t"
+	    "lock\n\t"
+	    "xaddq %0, %2\n\t"
+	    "subq %1, %0"
+	    : "+&r"(i)
+	    : "r"(i), "m"(*p)
+	);
+
+	return (void*)i;
 #elif defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "negl %0\n\t"
 	    "lock\n\t"
 	    "xaddl %0, %2\n\t"
 	    "subl %1, %0"
 	    : "+&r"(i)
-	    : "r"(i), "m"(*p)
-	);
-
-	return (void*)i;
-#elif defined(OF_AMD64_ASM)
-	__asm__ __volatile__ (
-	    "negq %0\n\t"
-	    "lock\n\t"
-	    "xaddq %0, %2\n\t"
-	    "subq %1, %0"
-	    : "+&r"(i)
 	    : "r"(i), "m"(*p)
 	);
 
 	return (void*)i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
@@ -234,11 +234,11 @@
 static OF_INLINE int
 of_atomic_inc_int(volatile int *p)
 {
 #if !defined(OF_HAVE_THREADS)
 	return ++*p;
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	int i;
 
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "xorl %0, %0\n\t"
@@ -247,11 +247,11 @@
 		    "xaddl %0, %1\n\t"
 		    "incl %0"
 		    : "=&r"(i)
 		    : "m"(*p)
 		);
-# ifdef OF_AMD64_ASM
+# ifdef OF_X86_64_ASM
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "xorq %0, %0\n\t"
 		    "incq %0\n\t"
 		    "lock\n\t"
@@ -277,11 +277,11 @@
 static OF_INLINE int32_t
 of_atomic_inc_32(volatile int32_t *p)
 {
 #if !defined(OF_HAVE_THREADS)
 	return ++*p;
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	uint32_t i;
 
 	__asm__ __volatile__ (
 	    "xorl %0, %0\n\t"
 	    "incl %0\n\t"
@@ -305,11 +305,11 @@
 static OF_INLINE int
 of_atomic_dec_int(volatile int *p)
 {
 #if !defined(OF_HAVE_THREADS)
 	return --*p;
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	int i;
 
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "xorl %0, %0\n\t"
@@ -318,11 +318,11 @@
 		    "xaddl %0, %1\n\t"
 		    "decl %0"
 		    : "=&r"(i)
 		    : "m"(*p)
 		);
-# ifdef OF_AMD64_ASM
+# ifdef OF_X86_64_ASM
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "xorq %0, %0\n\t"
 		    "decq %0\n\t"
 		    "lock\n\t"
@@ -348,11 +348,11 @@
 static OF_INLINE int32_t
 of_atomic_dec_32(volatile int32_t *p)
 {
 #if !defined(OF_HAVE_THREADS)
 	return --*p;
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	uint32_t i;
 
 	__asm__ __volatile__ (
 	    "xorl %0, %0\n\t"
 	    "decl %0\n\t"
@@ -376,11 +376,11 @@
 static OF_INLINE unsigned int
 of_atomic_or_int(volatile unsigned int *p, unsigned int i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p |= i);
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movl %2, %0\n\t"
 		    "movl %0, %%eax\n\t"
@@ -390,11 +390,11 @@
 		    "jne 0\n\t"
 		    : "=&r"(i)
 		    : "r"(i), "m"(*p)
 		    : "eax", "cc"
 		);
-# ifdef OF_AMD64_ASM
+# ifdef OF_X86_64_ASM
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movq %2, %0\n\t"
 		    "movq %0, %%rax\n\t"
@@ -423,11 +423,11 @@
 static OF_INLINE uint32_t
 of_atomic_or_32(volatile uint32_t *p, uint32_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p |= i);
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "movl %2, %0\n\t"
 	    "movl %0, %%eax\n\t"
 	    "orl %1, %0\n\t"
@@ -452,11 +452,11 @@
 static OF_INLINE unsigned int
 of_atomic_and_int(volatile unsigned int *p, unsigned int i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p &= i);
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movl %2, %0\n\t"
 		    "movl %0, %%eax\n\t"
@@ -466,11 +466,11 @@
 		    "jne 0\n\t"
 		    : "=&r"(i)
 		    : "r"(i), "m"(*p)
 		    : "eax", "cc"
 		);
-# ifdef OF_AMD64_ASM
+# ifdef OF_X86_64_ASM
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movq %2, %0\n\t"
 		    "movq %0, %%rax\n\t"
@@ -499,11 +499,11 @@
 static OF_INLINE uint32_t
 of_atomic_and_32(volatile uint32_t *p, uint32_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p &= i);
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "movl %2, %0\n\t"
 	    "movl %0, %%eax\n\t"
 	    "andl %1, %0\n\t"
@@ -528,11 +528,11 @@
 static OF_INLINE unsigned int
 of_atomic_xor_int(volatile unsigned int *p, unsigned int i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p ^= i);
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movl %2, %0\n\t"
 		    "movl %0, %%eax\n\t"
@@ -542,11 +542,11 @@
 		    "jne 0\n\t"
 		    : "=&r"(i)
 		    : "r"(i), "m"(*p)
 		    : "eax", "cc"
 		);
-# ifdef OF_AMD64_ASM
+# ifdef OF_X86_64_ASM
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movq %2, %0\n\t"
 		    "movq %0, %%rax\n\t"
@@ -575,11 +575,11 @@
 static OF_INLINE uint32_t
 of_atomic_xor_32(volatile uint32_t *p, uint32_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p ^= i);
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "movl %2, %0\n\t"
 	    "movl %0, %%eax\n\t"
 	    "xorl %1, %0\n\t"
@@ -609,11 +609,11 @@
 		*p = n;
 		return true;
 	}
 
 	return false;
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	int r;
 
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "cmpxchg %2, %3\n\t"
@@ -642,11 +642,11 @@
 		*p = n;
 		return true;
 	}
 
 	return false;
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	int r;
 
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "cmpxchg %2, %3\n\t"
@@ -675,11 +675,11 @@
 		*p = n;
 		return true;
 	}
 
 	return false;
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	int r;
 
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "cmpxchg %2, %3\n\t"
@@ -702,11 +702,11 @@
 
 static OF_INLINE void
 of_memory_barrier(void)
 {
 #if !defined(OF_HAVE_THREADS)
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "mfence"
 	);
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	__sync_synchronize();
@@ -719,11 +719,11 @@
 
 static OF_INLINE void
 of_memory_read_barrier(void)
 {
 #if !defined(OF_HAVE_THREADS)
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "lfence"
 	);
 #else
 	of_memory_barrier();
@@ -732,13 +732,13 @@
 
 static OF_INLINE void
 of_memory_write_barrier(void)
 {
 #if !defined(OF_HAVE_THREADS)
-#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
+#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "sfence"
 	);
 #else
 	of_memory_barrier();
 #endif
 }
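Below is a minimal usage sketch, not part of the patch above: a toy reference counter built on the of_atomic_inc_int()/of_atomic_dec_int() helpers this diff touches. The header name "atomic.h", the toy_refcount type, and both toy_* functions are hypothetical illustrations; the sketch only assumes, as the code above shows, that the helpers return the updated value.

#include <stdbool.h>
#include "atomic.h"	/* assumed name of the header containing the helpers above */

/* Hypothetical reference counter relying on the atomic increment/decrement helpers. */
typedef struct toy_refcount {
	volatile int count;	/* starts at 1 for the initial owner */
} toy_refcount;

static void
toy_retain(toy_refcount *rc)
{
	of_atomic_inc_int(&rc->count);
}

/* Returns true when the last reference was dropped and the object may be freed. */
static bool
toy_release(toy_refcount *rc)
{
	return (of_atomic_dec_int(&rc->count) == 0);
}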