@@ -20,12 +20,13 @@
 {
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "xadd{l} { %0, %2 | %2, %0 }\n\t"
 	    "add{l} { %1, %0 | %0, %1 }"
-	    : "+&r"(i)
-	    : "r"(i), "m"(*p)
+	    : "+&r" (i)
+	    : "r" (i),
+	      "m" (*p)
 	);
 
 	return i;
 }
 
@@ -38,12 +39,13 @@
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "lock\n\t"
 		    "xadd{q} { %0, %2 | %2, %0 }\n\t"
 		    "add{q} { %1, %0 | %0, %1 }"
-		    : "+&r"(i)
-		    : "r"(i), "m"(*p)
+		    : "+&r" (i)
+		    : "r" (i),
+		      "m" (*p)
 		);
 #endif
 	else
 		abort();
 
@@ -56,22 +58,24 @@
 #if defined(OF_AMD64)
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "xadd{q} { %0, %2 | %2, %0 }\n\t"
 	    "add{q} { %1, %0 | %0, %1 }"
-	    : "+&r"(i)
-	    : "r"(i), "m"(*p)
+	    : "+&r" (i)
+	    : "r" (i),
+	      "m" (*p)
 	);
 
 	return (void *)i;
 #elif defined(OF_X86)
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "xadd{l} { %0, %2 | %2, %0 }\n\t"
 	    "add{l} { %1, %0 | %0, %1 }"
-	    : "+&r"(i)
-	    : "r"(i), "m"(*p)
+	    : "+&r" (i)
+	    : "r" (i),
+	      "m" (*p)
 	);
 
 	return (void *)i;
 #endif
 }
@@ -82,12 +86,13 @@
 	__asm__ __volatile__ (
 	    "neg{l} %0\n\t"
 	    "lock\n\t"
 	    "xadd{l} { %0, %2 | %2, %0 }\n\t"
 	    "sub{l} { %1, %0 | %0, %1 }"
-	    : "+&r"(i)
-	    : "r"(i), "m"(*p)
+	    : "+&r" (i)
+	    : "r" (i),
+	      "m" (*p)
 	);
 
 	return i;
 }
 
@@ -101,12 +106,13 @@
 		__asm__ __volatile__ (
 		    "neg{q} %0\n\t"
 		    "lock\n\t"
 		    "xadd{q} { %0, %2 | %2, %0 }\n\t"
 		    "sub{q} { %1, %0 | %0, %1 }"
-		    : "+&r"(i)
-		    : "r"(i), "m"(*p)
+		    : "+&r" (i)
+		    : "r" (i),
+		      "m" (*p)
 		);
 #endif
 	else
 		abort();
 
@@ -120,23 +126,25 @@
 	__asm__ __volatile__ (
 	    "neg{q} %0\n\t"
 	    "lock\n\t"
 	    "xadd{q} { %0, %2 | %2, %0 }\n\t"
 	    "sub{q} { %1, %0 | %0, %1 }"
-	    : "+&r"(i)
-	    : "r"(i), "m"(*p)
+	    : "+&r" (i)
+	    : "r" (i),
+	      "m" (*p)
 	);
 
 	return (void *)i;
 #elif defined(OF_X86)
 	__asm__ __volatile__ (
 	    "neg{l} %0\n\t"
 	    "lock\n\t"
 	    "xadd{l} { %0, %2 | %2, %0 }\n\t"
 	    "sub{l} { %1, %0 | %0, %1 }"
-	    : "+&r"(i)
-	    : "r"(i), "m"(*p)
+	    : "+&r" (i)
+	    : "r" (i),
+	      "m" (*p)
 	);
 
 	return (void *)i;
 #endif
 }
@@ -150,12 +158,12 @@
 	    "xor{l} %0, %0\n\t"
 	    "inc{l} %0\n\t"
 	    "lock\n\t"
 	    "xadd{l} { %0, %1 | %1, %0 }\n\t"
 	    "inc{l} %0"
-	    : "=&r"(i)
-	    : "m"(*p)
+	    : "=&r" (i)
+	    : "m" (*p)
 	);
 
 	return i;
 }
 
@@ -172,12 +180,12 @@
 		    "xor{q} %0, %0\n\t"
 		    "inc{q} %0\n\t"
 		    "lock\n\t"
 		    "xadd{q} { %0, %1 | %1, %0 }\n\t"
 		    "inc{q} %0"
-		    : "=&r"(i)
-		    : "m"(*p)
+		    : "=&r" (i)
+		    : "m" (*p)
 		);
 #endif
 	else
 		abort();
 
@@ -193,12 +201,12 @@
 	    "xor{l} %0, %0\n\t"
 	    "dec{l} %0\n\t"
 	    "lock\n\t"
 	    "xadd{l} { %0, %1 | %1, %0 }\n\t"
 	    "dec{l} %0"
-	    : "=&r"(i)
-	    : "m"(*p)
+	    : "=&r" (i)
+	    : "m" (*p)
 	);
 
 	return i;
 }
 
@@ -215,12 +223,12 @@
 		    "xor{q} %0, %0\n\t"
 		    "dec{q} %0\n\t"
 		    "lock\n\t"
 		    "xadd{q} { %0, %1 | %1, %0 }\n\t"
 		    "dec{q} %0"
-		    : "=&r"(i)
-		    : "m"(*p)
+		    : "=&r" (i)
+		    : "m" (*p)
 		);
 #endif
 	else
 		abort();
 
@@ -236,12 +244,13 @@
 	    "mov{l} { %0, %%eax | eax, %0 }\n\t"
 	    "or{l} { %1, %0 | %0, %1 }\n\t"
 	    "lock\n\t"
 	    "cmpxchg{l} { %0, %2 | %2, %0 }\n\t"
 	    "jne 0b"
-	    : "=&r"(i)
-	    : "r"(i), "m"(*p)
+	    : "=&r" (i)
+	    : "r" (i),
+	      "m" (*p)
 	    : "eax", "cc"
 	);
 
 	return i;
 }
@@ -259,12 +268,13 @@
 		    "mov{q} { %0, %%rax | rax, %0 }\n\t"
 		    "or{q} { %1, %0 | %0, %1 }\n\t"
 		    "lock\n\t"
 		    "cmpxchg{q} { %0, %2 | %2, %0 }\n\t"
 		    "jne 0b"
-		    : "=&r"(i)
-		    : "r"(i), "m"(*p)
+		    : "=&r" (i)
+		    : "r" (i),
+		      "m" (*p)
 		    : "rax", "cc"
 		);
 #endif
 	else
 		abort();
@@ -281,12 +291,13 @@
 	    "mov{l} { %0, %%eax | eax, %0 }\n\t"
 	    "and{l} { %1, %0 | %0, %1 }\n\t"
 	    "lock\n\t"
 	    "cmpxchg{l} { %0, %2 | %2, %0 }\n\t"
 	    "jne 0b"
-	    : "=&r"(i)
-	    : "r"(i), "m"(*p)
+	    : "=&r" (i)
+	    : "r" (i),
+	      "m" (*p)
 	    : "eax", "cc"
 	);
 
 	return i;
 }
@@ -304,12 +315,13 @@
 		    "mov{q} { %0, %%rax | rax, %0 }\n\t"
 		    "and{q} { %1, %0 | %0, %1 }\n\t"
 		    "lock\n\t"
 		    "cmpxchg{q} { %0, %2 | %2, %0 }\n\t"
 		    "jne 0b"
-		    : "=&r"(i)
-		    : "r"(i), "m"(*p)
+		    : "=&r" (i)
+		    : "r" (i),
+		      "m" (*p)
 		    : "rax", "cc"
 		);
 #endif
 	else
 		abort();
@@ -326,12 +338,13 @@
 	    "mov{l} { %0, %%eax | eax, %0 }\n\t"
 	    "xor{l} { %1, %0 | %0, %1 }\n\t"
 	    "lock\n\t"
 	    "cmpxchg{l} { %0, %2 | %2, %0 }\n\t"
 	    "jne 0b"
-	    : "=&r"(i)
-	    : "r"(i), "m"(*p)
+	    : "=&r" (i)
+	    : "r" (i),
+	      "m" (*p)
 	    : "eax", "cc"
 	);
 
 	return i;
 }
@@ -349,12 +362,13 @@
 		    "mov{q} { %0, %%rax | rax, %0 }\n\t"
 		    "xor{q} { %1, %0 | %0, %1 }\n\t"
 		    "lock\n\t"
 		    "cmpxchg{q} { %0, %2 | %2, %0 }\n\t"
 		    "jne 0b"
-		    : "=&r"(i)
-		    : "r"(i), "m"(*p)
+		    : "=&r" (i)
+		    : "r" (i),
+		      "m" (*p)
 		    : "rax", "cc"
 		);
#endif
 	else
 		abort();
@@ -370,12 +384,14 @@
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "cmpxchg{l} { %2, %3 | %3, %2 }\n\t"
 	    "sete %b0\n\t"
 	    "movz{bl|x} { %b0, %0 | %0, %b0 }"
-	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
-	    : "r"(n), "m"(*p)
+	    : "=&d" (r),	/* use d instead of r to avoid a gcc bug */
+	      "+a" (o)
+	    : "r" (n),
+	      "m" (*p)
 	    : "cc"
 	);
 
 	return r;
 }
@@ -388,12 +404,14 @@
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "cmpxchg { %2, %3 | %3, %2 }\n\t"
 	    "sete %b0\n\t"
 	    "movz{bl|x} { %b0, %0 | %0, %b0 }"
-	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
-	    : "r"(n), "m"(*p)
+	    : "=&d" (r),	/* use d instead of r to avoid a gcc bug */
+	      "+a" (o)
+	    : "r" (n),
+	      "m" (*p)
 	    : "cc"
 	);
 
 	return r;
 }
@@ -407,12 +425,14 @@
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "cmpxchg { %2, %3 | %3, %2 }\n\t"
 	    "sete %b0\n\t"
 	    "movz{bl|x} { %b0, %0 | %0, %b0 }"
-	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
-	    : "r"(n), "m"(*p)
+	    : "=&d" (r),	/* use d instead of r to avoid a gcc bug */
+	      "+a" (o)
+	    : "r" (n),
+	      "m" (*p)
 	    : "cc"
 	);
 
 	return r;
}
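
Note on the change: it is purely cosmetic. GCC's extended-asm grammar ignores whitespace between a constraint string and its parenthesized operand, so `"+&r" (i)` and `"+&r"(i)` compile to identical code; only the layout of the operand lists changes. Below is a minimal, self-contained sketch of the fetch-and-add pattern these hunks reformat, for anyone who wants to test the new style in isolation. The names `atomic_int32_add` and `counter` are illustrative, not taken from the patched file; it assumes x86/AMD64 and a GCC-compatible compiler with asm dialect alternatives (`{att|intel}`).

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static inline int32_t
atomic_int32_add(volatile int32_t *p, int32_t i)
{
	/*
	 * lock xadd atomically exchanges %0 with *p while adding, leaving
	 * the old value of *p in %0; the following add reconstructs the
	 * new value (old + i) so the function returns the post-add result.
	 * As in the patched file, *p is passed as an "m" input and the asm
	 * relies on __volatile__; "+&r" marks i as an early-clobbered
	 * read-write register.
	 */
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xadd{l} { %0, %2 | %2, %0 }\n\t"
	    "add{l} { %1, %0 | %0, %1 }"
	    : "+&r" (i)		/* whitespace before (i) is insignificant */
	    : "r" (i),
	      "m" (*p)
	);

	return i;
}

int
main(void)
{
	volatile int32_t counter = 40;

	/* Prints 42: the value of counter after the atomic add. */
	printf("%" PRId32 "\n", atomic_int32_add(&counter, 2));

	return 0;
}
```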