@@ -14,11 +14,11 @@
  */
 
 OF_ASSUME_NONNULL_BEGIN
 
 static OF_INLINE int
-of_atomic_int_add(volatile int *_Nonnull p, int i)
+OFAtomicIntAdd(volatile int *_Nonnull p, int i)
 {
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "lock\n\t"
 		    "xaddl	%0, %2\n\t"
@@ -41,11 +41,11 @@
 
 	return i;
 }
 
 static OF_INLINE int32_t
-of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i)
+OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
 {
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "xaddl	%0, %2\n\t"
 	    "addl	%1, %0"
@@ -55,11 +55,11 @@
 
 	return i;
 }
 
 static OF_INLINE void *_Nullable
-of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
+OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
 {
 #if defined(OF_X86_64)
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "xaddq	%0, %2\n\t"
@@ -81,11 +81,11 @@
 	return (void *)i;
 #endif
 }
 
 static OF_INLINE int
-of_atomic_int_sub(volatile int *_Nonnull p, int i)
+OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
 {
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "negl	%0\n\t"
 		    "lock\n\t"
@@ -110,11 +110,11 @@
 
 	return i;
 }
 
 static OF_INLINE int32_t
-of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i)
+OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
 {
 	__asm__ __volatile__ (
 	    "negl	%0\n\t"
 	    "lock\n\t"
 	    "xaddl	%0, %2\n\t"
@@ -125,11 +125,11 @@
 
 	return i;
 }
 
 static OF_INLINE void *_Nullable
-of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
+OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
 {
 #if defined(OF_X86_64)
 	__asm__ __volatile__ (
 	    "negq	%0\n\t"
 	    "lock\n\t"
@@ -153,11 +153,11 @@
 	return (void *)i;
 #endif
 }
 
 static OF_INLINE int
-of_atomic_int_inc(volatile int *_Nonnull p)
+OFAtomicIntIncrease(volatile int *_Nonnull p)
 {
 	int i;
 
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
@@ -186,11 +186,11 @@
 
 	return i;
 }
 
 static OF_INLINE int32_t
-of_atomic_int32_inc(volatile int32_t *_Nonnull p)
+OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
 {
 	int32_t i;
 
 	__asm__ __volatile__ (
 	    "xorl	%0, %0\n\t"
@@ -204,11 +204,11 @@
 
 	return i;
 }
 
 static OF_INLINE int
-of_atomic_int_dec(volatile int *_Nonnull p)
+OFAtomicIntDecrease(volatile int *_Nonnull p)
 {
 	int i;
 
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
@@ -237,11 +237,11 @@
 
 	return i;
 }
 
 static OF_INLINE int32_t
-of_atomic_int32_dec(volatile int32_t *_Nonnull p)
+OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
 {
 	int32_t i;
 
 	__asm__ __volatile__ (
 	    "xorl	%0, %0\n\t"
@@ -255,11 +255,11 @@
 
 	return i;
 }
 
 static OF_INLINE unsigned int
-of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i)
+OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
 {
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movl	%2, %0\n\t"
@@ -292,11 +292,11 @@
 
 	return i;
 }
 
 static OF_INLINE uint32_t
-of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i)
+OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "movl	%2, %0\n\t"
 	    "movl	%0, %%eax\n\t"
@@ -311,11 +311,11 @@
 
 	return i;
 }
 
 static OF_INLINE unsigned int
-of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i)
+OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
 {
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movl	%2, %0\n\t"
@@ -348,11 +348,11 @@
 
 	return i;
 }
 
 static OF_INLINE uint32_t
-of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i)
+OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "movl	%2, %0\n\t"
 	    "movl	%0, %%eax\n\t"
@@ -367,11 +367,11 @@
 
 	return i;
 }
 
 static OF_INLINE unsigned int
-of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i)
+OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
 {
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movl	%2, %0\n\t"
@@ -404,11 +404,11 @@
 
 	return i;
 }
 
 static OF_INLINE uint32_t
-of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i)
+OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
 {
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "movl	%2, %0\n\t"
 	    "movl	%0, %%eax\n\t"
@@ -423,11 +423,11 @@
 
 	return i;
 }
 
 static OF_INLINE bool
-of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n)
+OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
 {
 	int r;
 
 	__asm__ __volatile__ (
 	    "lock\n\t"
@@ -441,11 +441,11 @@
 
 	return r;
 }
 
 static OF_INLINE bool
-of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
+OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
 {
 	int r;
 
 	__asm__ __volatile__ (
 	    "lock\n\t"
@@ -459,11 +459,11 @@
 
 	return r;
 }
 
 static OF_INLINE bool
-of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p,
+OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p,
     void *_Nullable o, void *_Nullable n)
 {
 	int r;
 
 	__asm__ __volatile__ (
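Taken together, the hunks above are a pure rename: each primitive keeps its signature and semantics, with the arithmetic variants returning the updated value and the compare-and-swap variants returning whether the swap took place. Below is a minimal usage sketch of the renamed API, assuming the inlines are reachable via an OFAtomic.h import (the header name is not part of this diff); lockedIncrement and lockFreeIncrement are hypothetical callers, not part of the change:

#import "OFAtomic.h"	/* assumed header name; not shown in this diff */

static volatile int spinlock = 0;	/* 0 = unlocked, 1 = locked */
static volatile int counter = 0;

/* Hypothetical caller: OFAtomicIntCompareAndSwap() returns true only for
 * the thread that observes 0 and swaps in 1, so exactly one thread holds
 * the lock at a time. */
static void
lockedIncrement(void)
{
	while (!OFAtomicIntCompareAndSwap(&spinlock, 0, 1))
		;	/* spin until this thread wins the 0 -> 1 transition */

	counter++;	/* protected by the spinlock */

	spinlock = 0;	/* release; a production lock would add a barrier */
}

/* Hypothetical caller: the arithmetic primitives avoid the lock entirely.
 * OFAtomicIntIncrease() atomically increments and returns the new value. */
static int
lockFreeIncrement(void)
{
	return OFAtomicIntIncrease(&counter);
}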