Index: src/atomic.h
==================================================================
--- src/atomic.h
+++ src/atomic.h
@@ -280,11 +280,11 @@
 #if !defined(OF_HAVE_THREADS)
 	return ++*p;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_add_and_fetch(p, 1);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
-	uint32_t i;
+	int32_t i;
 
 	__asm__ __volatile__ (
 	    "xorl %0, %0\n\t"
 	    "incl %0\n\t"
 	    "lock\n\t"
@@ -351,11 +351,11 @@
 #if !defined(OF_HAVE_THREADS)
 	return --*p;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_sub_and_fetch(p, 1);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
-	uint32_t i;
+	int32_t i;
 
 	__asm__ __volatile__ (
 	    "xorl %0, %0\n\t"
 	    "decl %0\n\t"
 	    "lock\n\t"
@@ -387,11 +387,11 @@
 	    "movl %2, %0\n\t"
 	    "movl %0, %%eax\n\t"
 	    "orl %1, %0\n\t"
 	    "lock\n\t"
 	    "cmpxchg %0, %2\n\t"
-	    "jne 0\n\t"
+	    "jne 0b"
 	    : "=&r"(i)
 	    : "r"(i), "m"(*p)
 	    : "eax", "cc"
 	);
 # ifdef OF_X86_64_ASM
@@ -401,11 +401,11 @@
 	    "movq %2, %0\n\t"
 	    "movq %0, %%rax\n\t"
 	    "orq %1, %0\n\t"
 	    "lock\n\t"
 	    "cmpxchg %0, %2\n\t"
-	    "jne 0\n\t"
+	    "jne 0b"
 	    : "=&r"(i)
 	    : "r"(i), "m"(*p)
 	    : "rax", "cc"
 	);
 # endif
@@ -433,11 +433,11 @@
 	    "movl %2, %0\n\t"
 	    "movl %0, %%eax\n\t"
 	    "orl %1, %0\n\t"
 	    "lock\n\t"
 	    "cmpxchg %0, %2\n\t"
-	    "jne 0\n\t"
+	    "jne 0b"
 	    : "=&r"(i)
 	    : "r"(i), "m"(*p)
 	    : "eax", "cc"
 	);
 
@@ -463,11 +463,11 @@
 	    "movl %2, %0\n\t"
 	    "movl %0, %%eax\n\t"
 	    "andl %1, %0\n\t"
 	    "lock\n\t"
 	    "cmpxchg %0, %2\n\t"
-	    "jne 0\n\t"
+	    "jne 0b"
 	    : "=&r"(i)
 	    : "r"(i), "m"(*p)
 	    : "eax", "cc"
 	);
 # ifdef OF_X86_64_ASM
@@ -477,11 +477,11 @@
 	    "movq %2, %0\n\t"
 	    "movq %0, %%rax\n\t"
 	    "andq %1, %0\n\t"
 	    "lock\n\t"
 	    "cmpxchg %0, %2\n\t"
-	    "jne 0\n\t"
+	    "jne 0b"
 	    : "=&r"(i)
 	    : "r"(i), "m"(*p)
 	    : "rax", "cc"
 	);
 # endif
@@ -509,11 +509,11 @@
 	    "movl %2, %0\n\t"
 	    "movl %0, %%eax\n\t"
 	    "andl %1, %0\n\t"
 	    "lock\n\t"
 	    "cmpxchg %0, %2\n\t"
-	    "jne 0\n\t"
+	    "jne 0b"
 	    : "=&r"(i)
 	    : "r"(i), "m"(*p)
 	    : "eax", "cc"
 	);
 
@@ -539,11 +539,11 @@
 	    "movl %2, %0\n\t"
 	    "movl %0, %%eax\n\t"
 	    "xorl %1, %0\n\t"
 	    "lock\n\t"
 	    "cmpxchg %0, %2\n\t"
-	    "jne 0\n\t"
+	    "jne 0b"
 	    : "=&r"(i)
 	    : "r"(i), "m"(*p)
 	    : "eax", "cc"
 	);
 # ifdef OF_X86_64_ASM
@@ -553,11 +553,11 @@
 	    "movq %2, %0\n\t"
 	    "movq %0, %%rax\n\t"
 	    "xorq %1, %0\n\t"
 	    "lock\n\t"
 	    "cmpxchg %0, %2\n\t"
-	    "jne 0\n\t"
+	    "jne 0b"
 	    : "=&r"(i)
 	    : "r"(i), "m"(*p)
 	    : "rax", "cc"
 	);
 # endif
@@ -585,11 +585,11 @@
 	    "movl %2, %0\n\t"
 	    "movl %0, %%eax\n\t"
 	    "xorl %1, %0\n\t"
 	    "lock\n\t"
 	    "cmpxchgl %0, %2\n\t"
-	    "jne 0\n\t"
+	    "jne 0b"
 	    : "=&r"(i)
 	    : "r"(i), "m"(*p)
 	    : "eax", "cc"
 	);
 
@@ -619,11 +619,11 @@
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "cmpxchg %2, %3\n\t"
 	    "sete %b0\n\t"
 	    "movzbl %b0, %0"
-	    : "=&d"(r), "+a"(o)	/* use d instead of r due to gcc bug */
+	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
 	    : "r"(n), "m"(*p)
 	    : "cc"
 	);
 
 	return r;
@@ -652,11 +652,11 @@
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "cmpxchg %2, %3\n\t"
 	    "sete %b0\n\t"
 	    "movzbl %b0, %0"
-	    : "=&d"(r), "+a"(o)	/* use d instead of r due to gcc bug */
+	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
 	    : "r"(n), "m"(*p)
 	    : "cc"
 	);
 
 	return r;
@@ -685,11 +685,11 @@
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "cmpxchg %2, %3\n\t"
 	    "sete %b0\n\t"
 	    "movzbl %b0, %0"
-	    : "=&d"(r), "+a"(o)	/* use d instead of r due to gcc bug */
+	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
 	    : "r"(n), "m"(*p)
 	    : "cc"
 	);
 
 	return r;
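
Reviewer note (not part of the patch): a minimal standalone sketch of the retry-loop pattern the "jne" hunks fix, assuming GCC or Clang on x86/x86-64. In GNU assembler syntax, "0:" is a local label and a backward jump to it must be written "jne 0b"; the old "jne 0" would be a jump to absolute address 0, not to the label. Likewise, the uint32_t -> int32_t hunks make the temporary match the functions' signed int32_t return type. The helper name atomic_or_32 is made up for illustration, and the operand constraints are simplified slightly ("+m" instead of a separate memory input) relative to the patched file.

#include <stdint.h>
#include <stdio.h>

static int32_t
atomic_or_32(volatile int32_t *p, int32_t v)
{
	int32_t i;

	__asm__ __volatile__ (
	    "0:\n\t"			/* local label: top of the retry loop */
	    "movl %1, %0\n\t"		/* i = *p */
	    "movl %0, %%eax\n\t"	/* eax = value we expect in *p */
	    "orl %2, %0\n\t"		/* i = old value | v */
	    "lock\n\t"
	    "cmpxchg %0, %1\n\t"	/* if (*p == eax) *p = i, else ZF = 0 */
	    "jne 0b"			/* lost a race: "0b" = label "0:" backwards */
	    : "=&r"(i), "+m"(*p)
	    : "r"(v)
	    : "eax", "cc"
	);

	return i;
}

int
main(void)
{
	volatile int32_t x = 0x0F;

	/* prints 0xFF: the OR is applied atomically and the new value returned */
	printf("0x%X\n", (unsigned)atomic_or_32(&x, 0xF0));
	return 0;
}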