ObjFW
src/atomic.h
/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012
 *   Jonathan Schleifer <js@webkeks.org>
 *
 * All rights reserved.
 *
 * This file is part of ObjFW. It may be distributed under the terms of the
 * Q Public License 1.0, which can be found in the file LICENSE.QPL included in
 * the packaging of this file.
 *
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

#include <stdint.h>
#include <stdlib.h>

#import "macros.h"

#if defined(OF_THREADS) && !defined(OF_X86_ASM) && !defined(OF_AMD64_ASM) && \
    !defined(OF_HAVE_GCC_ATOMIC_OPS) && !defined(OF_HAVE_OSATOMIC)
# error No atomic operations available!
#endif

#ifdef OF_HAVE_OSATOMIC
# include <libkern/OSAtomic.h>
#endif

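/* Atomically adds i to *p and returns the new value. */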
static OF_INLINE int
of_atomic_add_int(volatile int *p, int i)
{
#if !defined(OF_THREADS)
        return (*p += i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        __asm__ (
            "lock\n\t"
            "xaddl      %0, %2\n\t"
            "addl       %1, %0"
            : "+&r"(i)
            : "r"(i), "m"(*p)
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_add_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        if (sizeof(int) == 4)
                return OSAtomicAdd32Barrier(i, p);
# ifdef OF_HAVE_OSATOMIC_64
        else if (sizeof(int) == 8)
                return OSAtomicAdd64Barrier(i, (int64_t*)p);
# endif
        else
                abort();
#endif
}

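/* Same as of_atomic_add_int(), but for int32_t. */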
static OF_INLINE int32_t
of_atomic_add_32(volatile int32_t *p, int32_t i)
{
#if !defined(OF_THREADS)
        return (*p += i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        __asm__ (
            "lock\n\t"
            "xaddl      %0, %2\n\t"
            "addl       %1, %0"
            : "+&r"(i)
            : "r"(i), "m"(*p)
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_add_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        return OSAtomicAdd32Barrier(i, p);
#endif
}

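/* Atomically adds i bytes to the pointer stored in *p and returns the new
 * pointer. */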
static OF_INLINE void*
of_atomic_add_ptr(void* volatile *p, intptr_t i)
{
#if !defined(OF_THREADS)
        return (*(char* volatile*)p += i);
#elif defined(OF_AMD64_ASM)
        /* Pointers are 8 bytes here, so 64-bit xadd is required. */
        __asm__ (
            "lock\n\t"
            "xaddq      %0, %2\n\t"
            "addq       %1, %0"
            : "+&r"(i)
            : "r"(i), "m"(*p)
        );

        return (void*)i;
#elif defined(OF_X86_ASM)
        __asm__ (
            "lock\n\t"
            "xaddl      %0, %2\n\t"
            "addl       %1, %0"
            : "+&r"(i)
            : "r"(i), "m"(*p)
        );

        return (void*)i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_add_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        if (sizeof(void*) == 4)
                return (void*)OSAtomicAdd32Barrier(i, (int32_t*)p);
# ifdef OF_HAVE_OSATOMIC_64
        else if (sizeof(void*) == 8)
                return (void*)OSAtomicAdd64Barrier(i, (int64_t*)p);
# endif
        else
                abort();
#endif
}

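/* Atomically subtracts i from *p and returns the new value. */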
static OF_INLINE int
of_atomic_sub_int(volatile int *p, int i)
{
#if !defined(OF_THREADS)
        return (*p -= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        __asm__ (
            "negl       %0\n\t"
            "lock\n\t"
            "xaddl      %0, %2\n\t"
            "subl       %1, %0"
            : "+&r"(i)
            : "r"(i), "m"(*p)
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_sub_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        if (sizeof(int) == 4)
                return OSAtomicAdd32Barrier(-i, p);
# ifdef OF_HAVE_OSATOMIC_64
        else if (sizeof(int) == 8)
                return OSAtomicAdd64Barrier(-i, (int64_t*)p);
# endif
        else
                abort();
#endif
}

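/* Same as of_atomic_sub_int(), but for int32_t. */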
static OF_INLINE int32_t
of_atomic_sub_32(volatile int32_t *p, int32_t i)
{
#if !defined(OF_THREADS)
        return (*p -= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        __asm__ (
            "negl       %0\n\t"
            "lock\n\t"
            "xaddl      %0, %2\n\t"
            "subl       %1, %0"
            : "+&r"(i)
            : "r"(i), "m"(*p)
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_sub_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        return OSAtomicAdd32Barrier(-i, p);
#endif
}

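/* Atomically subtracts i bytes from the pointer stored in *p and returns the
 * new pointer. */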
static OF_INLINE void*
of_atomic_sub_ptr(void* volatile *p, intptr_t i)
{
#if !defined(OF_THREADS)
        return (*(char* volatile*)p -= i);
#elif defined(OF_AMD64_ASM)
        /* Pointers are 8 bytes here, so 64-bit xadd is required. */
        __asm__ (
            "negq       %0\n\t"
            "lock\n\t"
            "xaddq      %0, %2\n\t"
            "subq       %1, %0"
            : "+&r"(i)
            : "r"(i), "m"(*p)
        );

        return (void*)i;
#elif defined(OF_X86_ASM)
        __asm__ (
            "negl       %0\n\t"
            "lock\n\t"
            "xaddl      %0, %2\n\t"
            "subl       %1, %0"
            : "+&r"(i)
            : "r"(i), "m"(*p)
        );

        return (void*)i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_sub_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        if (sizeof(void*) == 4)
                return (void*)OSAtomicAdd32Barrier(-i, (int32_t*)p);
# ifdef OF_HAVE_OSATOMIC_64
        else if (sizeof(void*) == 8)
                return (void*)OSAtomicAdd64Barrier(-i, (int64_t*)p);
# endif
        else
                abort();
#endif
}

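/* Atomically increments *p and returns the new value. */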
static OF_INLINE int
of_atomic_inc_int(volatile int *p)
{
#if !defined(OF_THREADS)
        return ++*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        uint32_t i;

        __asm__ (
            "xorl       %0, %0\n\t"
            "incl       %0\n\t"
            "lock\n\t"
            "xaddl      %0, %1\n\t"
            "incl       %0"
            : "=&r"(i)
            : "m"(*p)
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_add_and_fetch(p, 1);
#elif defined(OF_HAVE_OSATOMIC)
        if (sizeof(int) == 4)
                return OSAtomicIncrement32Barrier(p);
# ifdef OF_HAVE_OSATOMIC_64
        else if (sizeof(int) == 8)
                return OSAtomicIncrement64Barrier((int64_t*)p);
# endif
        else
                abort();
#endif
}

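/* Same as of_atomic_inc_int(), but for int32_t. */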
static OF_INLINE int32_t
of_atomic_inc_32(volatile int32_t *p)
{
#if !defined(OF_THREADS)
        return ++*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        uint32_t i;

        __asm__ (
            "xorl       %0, %0\n\t"
            "incl       %0\n\t"
            "lock\n\t"
            "xaddl      %0, %1\n\t"
            "incl       %0"
            : "=&r"(i)
            : "m"(*p)
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_add_and_fetch(p, 1);
#elif defined(OF_HAVE_OSATOMIC)
        return OSAtomicIncrement32Barrier(p);
#endif
}

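/* Atomically decrements *p and returns the new value. */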
static OF_INLINE int
of_atomic_dec_int(volatile int *p)
{
#if !defined(OF_THREADS)
        return --*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        uint32_t i;

        __asm__ (
            "xorl       %0, %0\n\t"
            "decl       %0\n\t"
            "lock\n\t"
            "xaddl      %0, %1\n\t"
            "decl       %0"
            : "=&r"(i)
            : "m"(*p)
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_sub_and_fetch(p, 1);
#elif defined(OF_HAVE_OSATOMIC)
        if (sizeof(int) == 4)
                return OSAtomicDecrement32Barrier(p);
# ifdef OF_HAVE_OSATOMIC_64
        else if (sizeof(int) == 8)
                return OSAtomicDecrement64Barrier((int64_t*)p);
# endif
        else
                abort();
#endif
}

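/* Same as of_atomic_dec_int(), but for int32_t. */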
static OF_INLINE int32_t
of_atomic_dec_32(volatile int32_t *p)
{
#if !defined(OF_THREADS)
        return --*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        uint32_t i;

        __asm__ (
            "xorl       %0, %0\n\t"
            "decl       %0\n\t"
            "lock\n\t"
            "xaddl      %0, %1\n\t"
            "decl       %0"
            : "=&r"(i)
            : "m"(*p)
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_sub_and_fetch(p, 1);
#elif defined(OF_HAVE_OSATOMIC)
        return OSAtomicDecrement32Barrier(p);
#endif
}

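/* Atomically ORs i into *p and returns the new value. */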
static OF_INLINE unsigned int
of_atomic_or_int(volatile unsigned int *p, unsigned int i)
{
#if !defined(OF_THREADS)
        return (*p |= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        __asm__ (
            "0:\n\t"
            "movl       %2, %0\n\t"
            "movl       %2, %%eax\n\t"
            "orl        %1, %0\n\t"
            "lock\n\t"
            "cmpxchg    %0, %2\n\t"
            "jne        0b\n\t"
            : "=&r"(i)
            : "r"(i), "m"(*p)
            : "eax"
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_or_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        if (sizeof(int) == 4)
                return OSAtomicOr32Barrier(i, p);
# ifdef OF_HAVE_OSATOMIC_64
        else if (sizeof(int) == 8)
                return OSAtomicOr64Barrier(i, (uint64_t*)p);
# endif
        else
                abort();
#endif
}

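/* Same as of_atomic_or_int(), but for uint32_t. */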
static OF_INLINE uint32_t
of_atomic_or_32(volatile uint32_t *p, uint32_t i)
{
#if !defined(OF_THREADS)
        return (*p |= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        __asm__ (
            "0:\n\t"
            "movl       %2, %0\n\t"
            "movl       %2, %%eax\n\t"
            "orl        %1, %0\n\t"
            "lock\n\t"
            "cmpxchg    %0, %2\n\t"
            "jne        0b\n\t"
            : "=&r"(i)
            : "r"(i), "m"(*p)
            : "eax"
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_or_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        return OSAtomicOr32Barrier(i, p);
#endif
}

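/* Atomically ANDs i into *p and returns the new value. */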
static OF_INLINE unsigned int
of_atomic_and_int(volatile unsigned int *p, unsigned int i)
{
#if !defined(OF_THREADS)
        return (*p &= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        __asm__ (
            "0:\n\t"
            "movl       %2, %0\n\t"
            "movl       %2, %%eax\n\t"
            "andl       %1, %0\n\t"
            "lock\n\t"
            "cmpxchg    %0, %2\n\t"
            "jne        0b\n\t"
            : "=&r"(i)
            : "r"(i), "m"(*p)
            : "eax"
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_and_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        if (sizeof(int) == 4)
                return OSAtomicAnd32Barrier(i, p);
# ifdef OF_HAVE_OSATOMIC_64
        else if (sizeof(int) == 8)
                return OSAtomicAnd64Barrier(i, (uint64_t*)p);
# endif
        else
                abort();
#endif
}

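/* Same as of_atomic_and_int(), but for uint32_t. */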
static OF_INLINE uint32_t
of_atomic_and_32(volatile uint32_t *p, uint32_t i)
{
#if !defined(OF_THREADS)
        return (*p &= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        __asm__ (
            "0:\n\t"
            "movl       %2, %0\n\t"
            "movl       %2, %%eax\n\t"
            "andl       %1, %0\n\t"
            "lock\n\t"
            "cmpxchg    %0, %2\n\t"
            "jne        0b\n\t"
            : "=&r"(i)
            : "r"(i), "m"(*p)
            : "eax"
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_and_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        return OSAtomicAnd32Barrier(i, p);
#endif
}

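/* Atomically XORs i into *p and returns the new value. */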
static OF_INLINE unsigned int
of_atomic_xor_int(volatile unsigned int *p, unsigned int i)
{
#if !defined(OF_THREADS)
        return (*p ^= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        __asm__ (
            "0:\n\t"
            "movl       %2, %0\n\t"
            "movl       %2, %%eax\n\t"
            "xorl       %1, %0\n\t"
            "lock\n\t"
            "cmpxchgl   %0, %2\n\t"
            "jne        0b\n\t"
            : "=&r"(i)
            : "r"(i), "m"(*p)
            : "eax"
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_xor_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        if (sizeof(int) == 4)
                return OSAtomicXor32Barrier(i, p);
# ifdef OF_HAVE_OSATOMIC_64
        else if (sizeof(int) == 8)
                return OSAtomicXor64Barrier(i, (uint64_t*)p);
# endif
        else
                abort();
#endif
}

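/* Same as of_atomic_xor_int(), but for uint32_t. */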
static OF_INLINE uint32_t
of_atomic_xor_32(volatile uint32_t *p, uint32_t i)
{
#if !defined(OF_THREADS)
        return (*p ^= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        __asm__ (
            "0:\n\t"
            "movl       %2, %0\n\t"
            "movl       %2, %%eax\n\t"
            "xorl       %1, %0\n\t"
            "lock\n\t"
            "cmpxchgl   %0, %2\n\t"
            "jne        0b\n\t"
            : "=&r"(i)
            : "r"(i), "m"(*p)
            : "eax"
        );

        return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_xor_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
        return OSAtomicXor32Barrier(i, p);
#endif
}

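/*
 * Atomically compares *p with o and, if they are equal, replaces it with n.
 * Returns YES if the swap was performed, NO otherwise.
 */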
static OF_INLINE BOOL
of_atomic_cmpswap_int(volatile int *p, int o, int n)
{
#if !defined(OF_THREADS)
        if (*p == o) {
                *p = n;
                return YES;
        }

        return NO;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        int r;

        __asm__ (
            "xorl       %0, %0\n\t"
            "lock\n\t"
            "cmpxchg    %2, %3\n\t"
            "jne        0f\n\t"
            "incl       %0\n"
            "0:"
            : "=&r"(r)
            : "a"(o), "r"(n), "m"(*p)
        );

        return r;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_bool_compare_and_swap(p, o, n);
#elif defined(OF_HAVE_OSATOMIC)
        return OSAtomicCompareAndSwapIntBarrier(o, n, p);
#endif
}

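/* Same as of_atomic_cmpswap_int(), but for int32_t. */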
static OF_INLINE BOOL
of_atomic_cmpswap_32(volatile int32_t *p, int32_t o, int32_t n)
{
#if !defined(OF_THREADS)
        if (*p == o) {
                *p = n;
                return YES;
        }

        return NO;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        int r;

        __asm__ (
            "xorl       %0, %0\n\t"
            "lock\n\t"
            "cmpxchg    %2, %3\n\t"
            "jne        0f\n\t"
            "incl       %0\n"
            "0:"
            : "=&r"(r)
            : "a"(o), "r"(n), "m"(*p)
        );

        return r;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_bool_compare_and_swap(p, o, n);
#elif defined(OF_HAVE_OSATOMIC)
        return OSAtomicCompareAndSwap32Barrier(o, n, p);
#endif
}

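/* Same as of_atomic_cmpswap_int(), but for pointers. */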
static OF_INLINE BOOL
of_atomic_cmpswap_ptr(void* volatile *p, void *o, void *n)
{
#if !defined(OF_THREADS)
        if (*p == o) {
                *p = n;
                return YES;
        }

        return NO;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
        int r;

        __asm__ (
            "xorl       %0, %0\n\t"
            "lock\n\t"
            "cmpxchg    %2, %3\n\t"
            "jne        0f\n\t"
            "incl       %0\n"
            "0:"
            : "=&r"(r)
            : "a"(o), "r"(n), "m"(*p)
        );

        return r;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
        return __sync_bool_compare_and_swap(p, o, n);
#elif defined(OF_HAVE_OSATOMIC)
        return OSAtomicCompareAndSwapPtrBarrier(o, n, p);
#endif
}