Index: configure.ac
==================================================================
--- configure.ac
+++ configure.ac
@@ -745,10 +745,31 @@
 		AC_MSG_RESULT(yes)
 		atomic_ops="assembly implementation"
 	], [
 		AC_MSG_RESULT(no)
 	])
+
+	AC_MSG_CHECKING(whether __atomic_* works)
+	AC_TRY_LINK([
+		#include <stdint.h>
+		#include <stdbool.h>
+	], [
+		int32_t i, j;
+		if (__atomic_add_fetch(&i, 1, __ATOMIC_RELAXED))
+			j = __atomic_sub_fetch(&i, 1, __ATOMIC_RELAXED);
+		while (!__atomic_compare_exchange_n(&i, &j, 1, false,
+		    __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+		__atomic_thread_fence(__ATOMIC_SEQ_CST);
+	], [
+		AC_MSG_RESULT(yes)
+		test x"$atomic_ops" = x"none" && \
+			atomic_ops="__atomic_* builtins"
+		AC_DEFINE(OF_HAVE_ATOMIC_BUILTINS, 1,
+			[Whether __atomic_* builtins are available])
+	], [
+		AC_MSG_RESULT(no)
+	])
 
 	AC_MSG_CHECKING(whether __sync_* works)
 	AC_TRY_LINK([#include <stdint.h>], [
 		int32_t i, j;
 		if (__sync_add_and_fetch(&i, 1))
Index: src/atomic.h
==================================================================
--- src/atomic.h
+++ src/atomic.h
@@ -26,12 +26,14 @@
 # import "atomic_no_threads.h"
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 # import "atomic_x86.h"
 #elif defined(OF_POWERPC_ASM)
 # import "atomic_powerpc.h"
+#elif defined(OF_HAVE_ATOMIC_BUILTINS)
+# import "atomic_builtins.h"
 #elif defined(OF_HAVE_SYNC_BUILTINS)
 # import "atomic_sync_builtins.h"
 #elif defined(OF_HAVE_OSATOMIC)
 # import "atomic_osatomic.h"
 #else
 # error No atomic operations available!
 #endif

ADDED   src/atomic_builtins.h
Index: src/atomic_builtins.h
==================================================================
--- src/atomic_builtins.h
+++ src/atomic_builtins.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016
+ *   Jonathan Schleifer <js@heap.zone>
+ *
+ * All rights reserved.
+ *
+ * This file is part of ObjFW. It may be distributed under the terms of the
+ * Q Public License 1.0, which can be found in the file LICENSE.QPL included in
+ * the packaging of this file.
+ *
+ * Alternatively, it may be distributed under the terms of the GNU General
+ * Public License, either version 2 or 3, which can be found in the file
+ * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
+ * file.
+ */
+
+OF_ASSUME_NONNULL_BEGIN
+
+static OF_INLINE int
+of_atomic_int_add(volatile int *_Nonnull p, int i)
+{
+	return __atomic_add_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE int32_t
+of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i)
+{
+	return __atomic_add_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE void*
+of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
+{
+	return __atomic_add_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE int
+of_atomic_int_sub(volatile int *_Nonnull p, int i)
+{
+	return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE int32_t
+of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i)
+{
+	return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE void*
+of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
+{
+	return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE int
+of_atomic_int_inc(volatile int *_Nonnull p)
+{
+	return __atomic_add_fetch(p, 1, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE int32_t
+of_atomic_int32_inc(volatile int32_t *_Nonnull p)
+{
+	return __atomic_add_fetch(p, 1, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE int
+of_atomic_int_dec(volatile int *_Nonnull p)
+{
+	return __atomic_sub_fetch(p, 1, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE int32_t
+of_atomic_int32_dec(volatile int32_t *_Nonnull p)
+{
+	return __atomic_sub_fetch(p, 1, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE unsigned int
+of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i)
+{
+	return __atomic_or_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE uint32_t
+of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i)
+{
+	return __atomic_or_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE unsigned int
+of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i)
+{
+	return __atomic_and_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE uint32_t
+of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i)
+{
+	return __atomic_and_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE unsigned int
+of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i)
+{
+	return __atomic_xor_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE uint32_t
+of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i)
+{
+	return __atomic_xor_fetch(p, i, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE bool
+of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n)
+{
+	return __atomic_compare_exchange(p, &o, &n, false,
+	    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE bool
+of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
+{
+	return __atomic_compare_exchange(p, &o, &n, false,
+	    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE bool
+of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p,
+    void *_Nullable o, void *_Nullable n)
+{
+	return __atomic_compare_exchange(p, &o, &n, false,
+	    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+}
+
+static OF_INLINE void
+of_memory_barrier_sync(void)
+{
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
+static OF_INLINE void
+of_memory_barrier_enter(void)
+{
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
+static OF_INLINE void
+of_memory_barrier_exit(void)
+{
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
+static OF_INLINE void
+of_memory_barrier_producer(void)
+{
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
+static OF_INLINE void
+of_memory_barrier_consumer(void)
+{
+	__atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
+OF_ASSUME_NONNULL_END

Index: src/objfw-defs.h.in
==================================================================
--- src/objfw-defs.h.in
+++ src/objfw-defs.h.in
@@ -1,8 +1,9 @@
 #undef OF_APPLE_RUNTIME
 #undef OF_BIG_ENDIAN
 #undef OF_FLOAT_BIG_ENDIAN
+#undef OF_HAVE_ATOMIC_BUILTINS
 #undef OF_HAVE_ATOMIC_OPS
 #undef OF_HAVE_BUILTIN_BSWAP16
 #undef OF_HAVE_BUILTIN_BSWAP32
 #undef OF_HAVE_BUILTIN_BSWAP64
 #undef OF_HAVE_CHMOD