ObjFW  Check-in [dfaf287249]

Overview
Comment: Rename atomic operations
SHA3-256: dfaf2872492a189bfe3f213ce16c74bc20a0699ce0e610417be92822b4e4b241
User & Date: js on 2021-04-17 20:47:24
Context
2021-04-17
22:45  Rename of_(re)alloc and add OFFreeMemory  (check-in: 498074dab9, user: js, tags: new-naming-convention)
20:47  Rename atomic operations  (check-in: dfaf287249, user: js, tags: new-naming-convention)
18:36  Rename all remaining enums  (check-in: aa74e85220, user: js, tags: new-naming-convention)
Changes
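
This check-in renames the C-level atomic API from the of_atomic_*/of_memory_barrier_* prefix style to the OFAtomic* camel-case style of the new naming convention; the underlying implementations are unchanged. A minimal sketch of the mapping, with a hypothetical call site for illustration (all function names are taken from the hunks below):

	static int object;
	void *volatile forwarding = NULL;
	volatile int refCount = 1;

	/* Before the rename: */
	of_atomic_int_inc(&refCount);
	of_atomic_ptr_cmpswap(&forwarding, NULL, &object);

	/* After the rename, same semantics: */
	OFAtomicIntIncrease(&refCount);
	OFAtomicPointerCompareAndSwap(&forwarding, NULL, &object);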

Modified src/OFBlock.m from [f799cd3c36] to [67452f43b9].

Lines 196-210 (old):
			block->descriptor->copy_helper(copy, block);

		return copy;
	}

	if ([(id)block isMemberOfClass: (Class)&_NSConcreteMallocBlock]) {
#ifdef OF_HAVE_ATOMIC_OPS
		of_atomic_int_inc(&block->flags);
#else
		unsigned hash = SPINLOCK_HASH(block);

		OF_ENSURE(OFSpinlockLock(&blockSpinlocks[hash]) == 0);
		block->flags++;
		OF_ENSURE(OFSpinlockUnlock(&blockSpinlocks[hash]) == 0);
#endif
Lines 196-210 (new):
			block->descriptor->copy_helper(copy, block);

		return copy;
	}

	if ([(id)block isMemberOfClass: (Class)&_NSConcreteMallocBlock]) {
#ifdef OF_HAVE_ATOMIC_OPS
		OFAtomicIntIncrease(&block->flags);
#else
		unsigned hash = SPINLOCK_HASH(block);

		OF_ENSURE(OFSpinlockLock(&blockSpinlocks[hash]) == 0);
		block->flags++;
		OF_ENSURE(OFSpinlockUnlock(&blockSpinlocks[hash]) == 0);
#endif
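
The block's reference count lives in the low bits of block->flags (masked by OF_BLOCK_REFCOUNT_MASK), which is why the retain path above can bump the whole flags word with a single OFAtomicIntIncrease() and the release path in the next hunk can test the masked result of the decrement. A minimal sketch, assuming the count is at least 1 so the decrement never borrows into the flag bits:

	/* Decrement the whole flags word; the flag bits above the mask are
	 * untouched, and masking the returned value yields the new count. */
	if ((OFAtomicIntDecrease(&block->flags) & OF_BLOCK_REFCOUNT_MASK) == 0) {
		/* Last reference dropped: run dispose_helper and free the block. */
	}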
Lines 218-232 (old):
{
	struct block *block = (struct block *)block_;

	if (object_getClass((id)block) != (Class)&_NSConcreteMallocBlock)
		return;

#ifdef OF_HAVE_ATOMIC_OPS

	if ((of_atomic_int_dec(&block->flags) & OF_BLOCK_REFCOUNT_MASK) == 0) {
		if (block->flags & OF_BLOCK_HAS_COPY_DISPOSE)
			block->descriptor->dispose_helper(block);

		free(block);
	}
#else
	unsigned hash = SPINLOCK_HASH(block);
Lines 218-233 (new):
{
	struct block *block = (struct block *)block_;

	if (object_getClass((id)block) != (Class)&_NSConcreteMallocBlock)
		return;

#ifdef OF_HAVE_ATOMIC_OPS
	if ((OFAtomicIntDecrease(&block->flags) &
	    OF_BLOCK_REFCOUNT_MASK) == 0) {
		if (block->flags & OF_BLOCK_HAS_COPY_DISPOSE)
			block->descriptor->dispose_helper(block);

		free(block);
	}
#else
	unsigned hash = SPINLOCK_HASH(block);
Lines 282-297 (old):
			    ((*dst)->flags & ~OF_BLOCK_REFCOUNT_MASK) | 1;
			(*dst)->forwarding = *dst;

			if (src->flags & OF_BLOCK_HAS_COPY_DISPOSE)
				src->byref_keep(*dst, src);

#ifdef OF_HAVE_ATOMIC_OPS
			if (!of_atomic_ptr_cmpswap((void **)&src->forwarding,
			    src, *dst)) {
				src->byref_dispose(*dst);
				free(*dst);

				*dst = src->forwarding;
			}
#else
			unsigned hash = SPINLOCK_HASH(src);
Lines 283-298 (new):
			    ((*dst)->flags & ~OF_BLOCK_REFCOUNT_MASK) | 1;
			(*dst)->forwarding = *dst;

			if (src->flags & OF_BLOCK_HAS_COPY_DISPOSE)
				src->byref_keep(*dst, src);

#ifdef OF_HAVE_ATOMIC_OPS
			if (!OFAtomicPointerCompareAndSwap(
			    (void **)&src->forwarding, src, *dst)) {
				src->byref_dispose(*dst);
				free(*dst);

				*dst = src->forwarding;
			}
#else
			unsigned hash = SPINLOCK_HASH(src);
Lines 308-322 (old):
			OF_ENSURE(
			    OFSpinlockUnlock(&byrefSpinlocks[hash]) == 0);
#endif
		} else
			*dst = src;

#ifdef OF_HAVE_ATOMIC_OPS
		of_atomic_int_inc(&(*dst)->flags);
#else
		unsigned hash = SPINLOCK_HASH(*dst);

		OF_ENSURE(OFSpinlockLock(&byrefSpinlocks[hash]) == 0);
		(*dst)->flags++;
		OF_ENSURE(OFSpinlockUnlock(&byrefSpinlocks[hash]) == 0);
#endif
Lines 309-323 (new):
			OF_ENSURE(
			    OFSpinlockUnlock(&byrefSpinlocks[hash]) == 0);
#endif
		} else
			*dst = src;

#ifdef OF_HAVE_ATOMIC_OPS
		OFAtomicIntIncrease(&(*dst)->flags);
#else
		unsigned hash = SPINLOCK_HASH(*dst);

		OF_ENSURE(OFSpinlockLock(&byrefSpinlocks[hash]) == 0);
		(*dst)->flags++;
		OF_ENSURE(OFSpinlockUnlock(&byrefSpinlocks[hash]) == 0);
#endif
Lines 343-357 (old):
		break;
	case OF_BLOCK_FIELD_IS_BYREF:;
		struct byref *object = (struct byref *)object_;

		object = object->forwarding;

#ifdef OF_HAVE_ATOMIC_OPS
		if ((of_atomic_int_dec(&object->flags) &
		    OF_BLOCK_REFCOUNT_MASK) == 0) {
			if (object->flags & OF_BLOCK_HAS_COPY_DISPOSE)
				object->byref_dispose(object);

			free(object);
		}
#else
Lines 344-358 (new):
		break;
	case OF_BLOCK_FIELD_IS_BYREF:;
		struct byref *object = (struct byref *)object_;

		object = object->forwarding;

#ifdef OF_HAVE_ATOMIC_OPS
		if ((OFAtomicIntDecrease(&object->flags) &
		    OF_BLOCK_REFCOUNT_MASK) == 0) {
			if (object->flags & OF_BLOCK_HAS_COPY_DISPOSE)
				object->byref_dispose(object);

			free(object);
		}
#else

Modified src/OFObject.m from [b4c19ab1e4] to [0c3d27624a].

Lines 1095-1109 (old):
	@throw [OFNotImplementedException exceptionWithSelector: selector
							 object: self];
}

- (instancetype)retain
{
#if defined(OF_HAVE_ATOMIC_OPS)
	of_atomic_int_inc(&PRE_IVARS->retainCount);
#elif defined(OF_AMIGAOS)
	/*
	 * On AmigaOS, we can only have one CPU. As increasing a variable is a
	 * single instruction on M68K, we don't need Forbid() / Permit() on
	 * M68K.
	 */
# ifndef OF_AMIGAOS_M68K
Lines 1095-1109 (new):
	@throw [OFNotImplementedException exceptionWithSelector: selector
							 object: self];
}

- (instancetype)retain
{
#if defined(OF_HAVE_ATOMIC_OPS)
	OFAtomicIntIncrease(&PRE_IVARS->retainCount);
#elif defined(OF_AMIGAOS)
	/*
	 * On AmigaOS, we can only have one CPU. As increasing a variable is a
	 * single instruction on M68K, we don't need Forbid() / Permit() on
	 * M68K.
	 */
# ifndef OF_AMIGAOS_M68K
Lines 1129-1143 (old):
}

- (void)release
{
#if defined(OF_HAVE_ATOMIC_OPS)
	of_memory_barrier_release();

	if (of_atomic_int_dec(&PRE_IVARS->retainCount) <= 0) {
		of_memory_barrier_acquire();

		[self dealloc];
	}
#elif defined(OF_AMIGAOS)
	int retainCount;

Lines 1129-1143 (new):
}

- (void)release
{
#if defined(OF_HAVE_ATOMIC_OPS)
	of_memory_barrier_release();

	if (OFAtomicIntDecrease(&PRE_IVARS->retainCount) <= 0) {
		of_memory_barrier_acquire();

		[self dealloc];
	}
#elif defined(OF_AMIGAOS)
	int retainCount;

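The relaxed decrement alone would not be enough here: of_memory_barrier_release() orders every earlier write to the object before the reference count drops, and the acquire barrier taken only on the final release makes those writes visible to the thread that ends up running -dealloc. Annotated, the pairing in the hunk above reads:

	of_memory_barrier_release();	/* all prior writes happen-before the decrement */

	if (OFAtomicIntDecrease(&PRE_IVARS->retainCount) <= 0) {
		of_memory_barrier_acquire();	/* observe every write made before the last release */

		[self dealloc];
	}
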
Modified src/OFThread.m from [135a1ec8ae] to [48147e27cd].

Lines 464-478 (old):

- (OFRunLoop *)runLoop
{
# if defined(OF_HAVE_ATOMIC_OPS) && !defined(__clang_analyzer__)
	if (_runLoop == nil) {
		OFRunLoop *tmp = [[OFRunLoop alloc] init];


		if (!of_atomic_ptr_cmpswap((void **)&_runLoop, nil, tmp))
			[tmp release];
	}
# else
	@synchronized (self) {
		if (_runLoop == nil)
			_runLoop = [[OFRunLoop alloc] init];
	}

Lines 464-479 (new):

- (OFRunLoop *)runLoop
{
# if defined(OF_HAVE_ATOMIC_OPS) && !defined(__clang_analyzer__)
	if (_runLoop == nil) {
		OFRunLoop *tmp = [[OFRunLoop alloc] init];

		if (!OFAtomicPointerCompareAndSwap(
		    (void **)&_runLoop, nil, tmp))
			[tmp release];
	}
# else
	@synchronized (self) {
		if (_runLoop == nil)
			_runLoop = [[OFRunLoop alloc] init];
	}

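The getter above is the classic lock-free lazy-initialization idiom: build a candidate object, publish it with a single compare-and-swap against nil, and discard the candidate if another thread won the race. A condensed sketch:

	OFRunLoop *tmp = [[OFRunLoop alloc] init];

	/* Publish tmp only if no run loop has been published yet. */
	if (!OFAtomicPointerCompareAndSwap((void **)&_runLoop, nil, tmp))
		[tmp release];	/* lost the race; _runLoop already holds the winner */
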
Modified src/atomic_builtins.h from [f2deeb95f9] to [faa8445463].

Lines 10-134 (old):
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

static OF_INLINE int
of_atomic_int_add(volatile int *_Nonnull p, int i)
{
	return __atomic_add_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE int32_t
of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i)
{
	return __atomic_add_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE void *_Nullable
of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return __atomic_add_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE int
of_atomic_int_sub(volatile int *_Nonnull p, int i)
{
	return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE int32_t
of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i)
{
	return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE void *_Nullable
of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE int
of_atomic_int_inc(volatile int *_Nonnull p)
{
	return __atomic_add_fetch(p, 1, __ATOMIC_RELAXED);
}

static OF_INLINE int32_t
of_atomic_int32_inc(volatile int32_t *_Nonnull p)
{
	return __atomic_add_fetch(p, 1, __ATOMIC_RELAXED);
}

static OF_INLINE int
of_atomic_int_dec(volatile int *_Nonnull p)
{
	return __atomic_sub_fetch(p, 1, __ATOMIC_RELAXED);
}

static OF_INLINE int32_t
of_atomic_int32_dec(volatile int32_t *_Nonnull p)
{
	return __atomic_sub_fetch(p, 1, __ATOMIC_RELAXED);
}

static OF_INLINE unsigned int
of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __atomic_or_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE uint32_t
of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __atomic_or_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE unsigned int
of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __atomic_and_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE uint32_t
of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __atomic_and_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE unsigned int
of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __atomic_xor_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE uint32_t
of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __atomic_xor_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE bool
of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n)
{
	return __atomic_compare_exchange(p, &o, &n, false,
	    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static OF_INLINE bool
of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	return __atomic_compare_exchange(p, &o, &n, false,
	    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static OF_INLINE bool
of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	return __atomic_compare_exchange(p, &o, &n, false,
	    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static OF_INLINE void
Lines 10-134 (new):
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

static OF_INLINE int
OFAtomicIntAdd(volatile int *_Nonnull p, int i)
{
	return __atomic_add_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
	return __atomic_add_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return __atomic_add_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
	return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
	return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return __atomic_sub_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
	return __atomic_add_fetch(p, 1, __ATOMIC_RELAXED);
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
	return __atomic_add_fetch(p, 1, __ATOMIC_RELAXED);
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
	return __atomic_sub_fetch(p, 1, __ATOMIC_RELAXED);
}

static OF_INLINE int32_t
OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
{
	return __atomic_sub_fetch(p, 1, __ATOMIC_RELAXED);
}

static OF_INLINE unsigned int
OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __atomic_or_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE uint32_t
OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __atomic_or_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE unsigned int
OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __atomic_and_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE uint32_t
OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __atomic_and_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE unsigned int
OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __atomic_xor_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE uint32_t
OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __atomic_xor_fetch(p, i, __ATOMIC_RELAXED);
}

static OF_INLINE bool
OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
{
	return __atomic_compare_exchange(p, &o, &n, false,
	    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static OF_INLINE bool
OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	return __atomic_compare_exchange(p, &o, &n, false,
	    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static OF_INLINE bool
OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	return __atomic_compare_exchange(p, &o, &n, false,
	    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

static OF_INLINE void

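Every operation in atomic_builtins.h maps onto the GCC/Clang __atomic builtins with __ATOMIC_RELAXED ordering: the read-modify-write itself is atomic, but no ordering is imposed on surrounding accesses, which is exactly why callers such as -[OFObject release] add explicit barriers. A brief usage sketch:

	volatile int counter = 0;

	OFAtomicIntAdd(&counter, 5);	/* atomically counter += 5, returns the new value (5) */
	OFAtomicIntIncrease(&counter);	/* counter == 6 */

	/* Swap 6 for 0 only if counter is still 6; returns whether it swapped. */
	bool swapped = OFAtomicIntCompareAndSwap(&counter, 6, 0);
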
Modified src/atomic_no_threads.h from [5ca4dac2a4] to [6020e5ba6e].

Lines 10-142 (old):
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

static OF_INLINE int
of_atomic_int_add(volatile int *_Nonnull p, int i)
{
	return (*p += i);
}

static OF_INLINE int32_t
of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i)
{
	return (*p += i);
}

static OF_INLINE void *_Nullable
of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return (*(char *volatile *)p += i);
}

static OF_INLINE int
of_atomic_int_sub(volatile int *_Nonnull p, int i)
{
	return (*p -= i);
}

static OF_INLINE int32_t
of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i)
{
	return (*p -= i);
}

static OF_INLINE void *_Nullable
of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return (*(char *volatile *)p -= i);
}

static OF_INLINE int
of_atomic_int_inc(volatile int *_Nonnull p)
{
	return ++*p;
}

static OF_INLINE int32_t
of_atomic_int32_inc(volatile int32_t *_Nonnull p)
{
	return ++*p;
}

static OF_INLINE int
of_atomic_int_dec(volatile int *_Nonnull p)
{
	return --*p;
}

static OF_INLINE int32_t
of_atomic_int32_dec(volatile int32_t *_Nonnull p)
{
	return --*p;
}

static OF_INLINE unsigned int
of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return (*p |= i);
}

static OF_INLINE uint32_t
of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return (*p |= i);
}

static OF_INLINE unsigned int
of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return (*p &= i);
}

static OF_INLINE uint32_t
of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return (*p &= i);
}

static OF_INLINE unsigned int
of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return (*p ^= i);
}

static OF_INLINE uint32_t
of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return (*p ^= i);
}

static OF_INLINE bool
of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n)
{
	if (*p == o) {
		*p = n;
		return true;
	}

	return false;
}

static OF_INLINE bool
of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	if (*p == o) {
		*p = n;
		return true;
	}

	return false;
}

static OF_INLINE bool
of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	if (*p == o) {
		*p = n;
		return true;
	}

Lines 10-142 (new):
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

static OF_INLINE int
OFAtomicIntAdd(volatile int *_Nonnull p, int i)
{
	return (*p += i);
}

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
	return (*p += i);
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return (*(char *volatile *)p += i);
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
	return (*p -= i);
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
	return (*p -= i);
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return (*(char *volatile *)p -= i);
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
	return ++*p;
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
	return ++*p;
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
	return --*p;
}

static OF_INLINE int32_t
OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
{
	return --*p;
}

static OF_INLINE unsigned int
OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return (*p |= i);
}

static OF_INLINE uint32_t
OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return (*p |= i);
}

static OF_INLINE unsigned int
OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return (*p &= i);
}

static OF_INLINE uint32_t
OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return (*p &= i);
}

static OF_INLINE unsigned int
OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return (*p ^= i);
}

static OF_INLINE uint32_t
OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return (*p ^= i);
}

static OF_INLINE bool
OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
{
	if (*p == o) {
		*p = n;
		return true;
	}

	return false;
}

static OF_INLINE bool
OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	if (*p == o) {
		*p = n;
		return true;
	}

	return false;
}

static OF_INLINE bool
OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	if (*p == o) {
		*p = n;
		return true;
	}

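atomic_no_threads.h is the fallback for builds without threading: with nothing to race against, every operation is an ordinary volatile read-modify-write, and compare-and-swap reduces to a plain compare plus assignment. Because all backends share the same inline signatures, a call site compiles unchanged whichever header is selected, e.g.:

	volatile int flags = 1;

	/* Plain ++*p here; lock xadd on x86; a lwarx/stwcx. loop on PowerPC. */
	OFAtomicIntIncrease(&flags);
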
Modified src/atomic_osatomic.h from [0fe4f3a992] to [9f3398c71a].

Lines 12-142 (old):
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

#include <libkern/OSAtomic.h>

static OF_INLINE int
of_atomic_int_add(volatile int *_Nonnull p, int i)
{
	return OSAtomicAdd32(i, p);
}

static OF_INLINE int32_t
of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i)
{
	return OSAtomicAdd32(i, p);
}

static OF_INLINE void *_Nullable
of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#ifdef __LP64__
	return (void *)OSAtomicAdd64(i, (int64_t *)p);
#else
	return (void *)OSAtomicAdd32(i, (int32_t *)p);
#endif
}

static OF_INLINE int
of_atomic_int_sub(volatile int *_Nonnull p, int i)
{
	return OSAtomicAdd32(-i, p);
}

static OF_INLINE int32_t
of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i)
{
	return OSAtomicAdd32(-i, p);
}

static OF_INLINE void *_Nullable
of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#ifdef __LP64__
	return (void *)OSAtomicAdd64(-i, (int64_t *)p);
#else
	return (void *)OSAtomicAdd32(-i, (int32_t *)p);
#endif
}

static OF_INLINE int
of_atomic_int_inc(volatile int *_Nonnull p)
{
	return OSAtomicIncrement32(p);
}

static OF_INLINE int32_t
of_atomic_int32_inc(volatile int32_t *_Nonnull p)
{
	return OSAtomicIncrement32(p);
}

static OF_INLINE int
of_atomic_int_dec(volatile int *_Nonnull p)
{
	return OSAtomicDecrement32(p);
}

static OF_INLINE int32_t
of_atomic_int32_dec(volatile int32_t *_Nonnull p)
{
	return OSAtomicDecrement32(p);
}

static OF_INLINE unsigned int
of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return OSAtomicOr32(i, p);
}

static OF_INLINE uint32_t
of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return OSAtomicOr32(i, p);
}

static OF_INLINE unsigned int
of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return OSAtomicAnd32(i, p);
}

static OF_INLINE uint32_t
of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return OSAtomicAnd32(i, p);
}

static OF_INLINE unsigned int
of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return OSAtomicXor32(i, p);
}

static OF_INLINE uint32_t
of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return OSAtomicXor32(i, p);
}

static OF_INLINE bool
of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n)
{
	return OSAtomicCompareAndSwapInt(o, n, p);
}

static OF_INLINE bool
of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	return OSAtomicCompareAndSwap32(o, n, p);
}

static OF_INLINE bool
of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	return OSAtomicCompareAndSwapPtr(o, n, p);
}

static OF_INLINE void
of_memory_barrier(void)
Lines 12-142 (new):
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

#include <libkern/OSAtomic.h>

static OF_INLINE int
OFAtomicIntAdd(volatile int *_Nonnull p, int i)
{
	return OSAtomicAdd32(i, p);
}

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
	return OSAtomicAdd32(i, p);
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#ifdef __LP64__
	return (void *)OSAtomicAdd64(i, (int64_t *)p);
#else
	return (void *)OSAtomicAdd32(i, (int32_t *)p);
#endif
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
	return OSAtomicAdd32(-i, p);
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
	return OSAtomicAdd32(-i, p);
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#ifdef __LP64__
	return (void *)OSAtomicAdd64(-i, (int64_t *)p);
#else
	return (void *)OSAtomicAdd32(-i, (int32_t *)p);
#endif
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
	return OSAtomicIncrement32(p);
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
	return OSAtomicIncrement32(p);
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
	return OSAtomicDecrement32(p);
}

static OF_INLINE int32_t
OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
{
	return OSAtomicDecrement32(p);
}

static OF_INLINE unsigned int
OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return OSAtomicOr32(i, p);
}

static OF_INLINE uint32_t
OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return OSAtomicOr32(i, p);
}

static OF_INLINE unsigned int
OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return OSAtomicAnd32(i, p);
}

static OF_INLINE uint32_t
OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return OSAtomicAnd32(i, p);
}

static OF_INLINE unsigned int
OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return OSAtomicXor32(i, p);
}

static OF_INLINE uint32_t
OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return OSAtomicXor32(i, p);
}

static OF_INLINE bool
OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
{
	return OSAtomicCompareAndSwapInt(o, n, p);
}

static OF_INLINE bool
OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	return OSAtomicCompareAndSwap32(o, n, p);
}

static OF_INLINE bool
OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	return OSAtomicCompareAndSwapPtr(o, n, p);
}

static OF_INLINE void
of_memory_barrier(void)

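atomic_osatomic.h delegates to libkern's OSAtomic API. Two quirks are worth noting: OSAtomic takes the operand first and the pointer second (the reverse of the OFAtomic wrappers), and there is no subtract primitive, so subtraction is an add of the negated operand; the pointer variants pick OSAtomicAdd64 or OSAtomicAdd32 depending on __LP64__. A brief sketch:

	volatile int32_t value = 0;

	OFAtomicInt32Add(&value, 3);		/* forwards to OSAtomicAdd32(3, &value) */
	OFAtomicInt32Subtract(&value, 1);	/* OSAtomicAdd32(-1, &value): subtract as negated add */
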
Modified src/atomic_powerpc.h from [a52241434a] to [fb429fc6e7].

Lines 10-304 (old):
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

static OF_INLINE int
of_atomic_int_add(volatile int *_Nonnull p, int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE void *_Nullable
of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return (void *)i;
}

static OF_INLINE int
of_atomic_int_sub(volatile int *_Nonnull p, int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE void *_Nullable
of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return (void *)i;
}

static OF_INLINE int
of_atomic_int_inc(volatile int *_Nonnull p)
{
	int i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "addi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
of_atomic_int32_inc(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "addi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int
of_atomic_int_dec(volatile int *_Nonnull p)
{
	int i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "subi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
of_atomic_int32_dec(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "subi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "or		%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "or		%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "and	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "and	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "xor	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "xor	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE bool
of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n)
{
	int r;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %3\n\t"
	    "cmpw	%0, %1\n\t"
Lines 10-330 (new):
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

static OF_INLINE int
OFAtomicIntAdd(volatile int *_Nonnull p, int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return (void *)i;
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return (void *)i;
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
	int i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "addi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "addi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
	int i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "subi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "subi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "or		%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "or		%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "and	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "and	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "xor	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "xor	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE bool
OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
{
	int r;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %3\n\t"
	    "cmpw	%0, %1\n\t"
	    "bne	1f\n\t"
	    "stwcx.	%2, 0, %3\n\t"
	    "bne-	0b\n\t"
	    "li		%0, 1\n\t"
	    "b		2f\n\t"
	    "1:\n\t"
	    "stwcx.	%0, 0, %3\n\t"
	    "li		%0, 0\n\t"
	    "2:"
	    : "=&r"(r)
	    : "r"(o), "r"(n), "r"(p)
	    : "cc", "memory"
	);

	return r;
}

static OF_INLINE bool
OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	int r;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %3\n\t"
	    "cmpw	%0, %1\n\t"
Lines 316-356 (old):
	    : "cc", "memory"
	);

	return r;
}

static OF_INLINE bool
of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	int r;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %3\n\t"
	    "cmpw	%0, %1\n\t"
	    "bne	1f\n\t"
	    "stwcx.	%2, 0, %3\n\t"
	    "bne-	0b\n\t"
	    "li		%0, 1\n\t"
	    "b		2f\n\t"
	    "1:\n\t"
	    "stwcx.	%0, 0, %3\n\t"
	    "li		%0, 0\n\t"
	    "2:"
	    : "=&r"(r)
	    : "r"(o), "r"(n), "r"(p)
	    : "cc", "memory"
	);

	return r;
}

static OF_INLINE bool
of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	int r;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %3\n\t"
Lines 342-356 (new):
	    : "cc", "memory"
	);

	return r;
}

static OF_INLINE bool
OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	int r;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %3\n\t"

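On PowerPC every primitive is a load-linked/store-conditional loop: lwarx loads the word and sets a reservation on its address, stwcx. stores only if the reservation still holds, and bne- branches back to retry if another CPU touched the word in between (note that the compare-and-swap failure path issues a dummy stwcx. so the reservation is not left dangling). In C terms the add loop behaves like this sketch (the helper name is illustrative, not part of the API):

	static int
	llscAdd(volatile int *p, int i)
	{
		int old, new;

		do {
			old = *p;	/* lwarx: load and reserve */
			new = old + i;	/* add */
		} while (!OFAtomicIntCompareAndSwap(p, old, new));	/* stwcx. failed: retry (bne- 0b) */

		return new;
	}
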
Modified src/atomic_sync_builtins.h from [7ba8dbb2f0] to [ca7c72a3e0].

Lines 10-132 (old):
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

static OF_INLINE int
of_atomic_int_add(volatile int *_Nonnull p, int i)
{
	return __sync_add_and_fetch(p, i);
}

static OF_INLINE int32_t
of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i)
{
	return __sync_add_and_fetch(p, i);
}

static OF_INLINE void *_Nullable
of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return __sync_add_and_fetch(p, (void *)i);
}

static OF_INLINE int
of_atomic_int_sub(volatile int *_Nonnull p, int i)
{
	return __sync_sub_and_fetch(p, i);
}

static OF_INLINE int32_t
of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i)
{
	return __sync_sub_and_fetch(p, i);
}

static OF_INLINE void *_Nullable
of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return __sync_sub_and_fetch(p, (void *)i);
}

static OF_INLINE int
of_atomic_int_inc(volatile int *_Nonnull p)
{
	return __sync_add_and_fetch(p, 1);
}

static OF_INLINE int32_t
of_atomic_int32_inc(volatile int32_t *_Nonnull p)
{
	return __sync_add_and_fetch(p, 1);
}

static OF_INLINE int
of_atomic_int_dec(volatile int *_Nonnull p)
{
	return __sync_sub_and_fetch(p, 1);
}

static OF_INLINE int32_t
of_atomic_int32_dec(volatile int32_t *_Nonnull p)
{
	return __sync_sub_and_fetch(p, 1);
}

static OF_INLINE unsigned int
of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __sync_or_and_fetch(p, i);
}

static OF_INLINE uint32_t
of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __sync_or_and_fetch(p, i);
}

static OF_INLINE unsigned int
of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __sync_and_and_fetch(p, i);
}

static OF_INLINE uint32_t
of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __sync_and_and_fetch(p, i);
}

static OF_INLINE unsigned int
of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __sync_xor_and_fetch(p, i);
}

static OF_INLINE uint32_t
of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __sync_xor_and_fetch(p, i);
}

static OF_INLINE bool
of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n)
{
	return __sync_bool_compare_and_swap(p, o, n);
}

static OF_INLINE bool
of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	return __sync_bool_compare_and_swap(p, o, n);
}

static OF_INLINE bool
of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	return __sync_bool_compare_and_swap(p, o, n);
}

static OF_INLINE void
of_memory_barrier(void)
Lines 10-132 (new):
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

static OF_INLINE int
OFAtomicIntAdd(volatile int *_Nonnull p, int i)
{
	return __sync_add_and_fetch(p, i);
}

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
	return __sync_add_and_fetch(p, i);
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return __sync_add_and_fetch(p, (void *)i);
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
	return __sync_sub_and_fetch(p, i);
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
	return __sync_sub_and_fetch(p, i);
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	return __sync_sub_and_fetch(p, (void *)i);
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
	return __sync_add_and_fetch(p, 1);
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
	return __sync_add_and_fetch(p, 1);
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
	return __sync_sub_and_fetch(p, 1);
}

static OF_INLINE int32_t
OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
{
	return __sync_sub_and_fetch(p, 1);
}

static OF_INLINE unsigned int
OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __sync_or_and_fetch(p, i);
}

static OF_INLINE uint32_t
OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __sync_or_and_fetch(p, i);
}

static OF_INLINE unsigned int
OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __sync_and_and_fetch(p, i);
}

static OF_INLINE uint32_t
OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __sync_and_and_fetch(p, i);
}

static OF_INLINE unsigned int
OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	return __sync_xor_and_fetch(p, i);
}

static OF_INLINE uint32_t
OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	return __sync_xor_and_fetch(p, i);
}

static OF_INLINE bool
OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
{
	return __sync_bool_compare_and_swap(p, o, n);
}

static OF_INLINE bool
OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	return __sync_bool_compare_and_swap(p, o, n);
}

static OF_INLINE bool
OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	return __sync_bool_compare_and_swap(p, o, n);
}

static OF_INLINE void
of_memory_barrier(void)

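atomic_sync_builtins.h uses the legacy GCC __sync builtins, which (unlike the relaxed __atomic backend) are documented to act as full memory barriers in most cases, so this backend is stronger than the interface requires. Usage is identical:

	volatile int counter = 0;

	/* __sync_add_and_fetch updates atomically and implies a full barrier. */
	OFAtomicIntAdd(&counter, 1);
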
Modified src/atomic_x86.h from [3d60327fd6] to [7a6e063b7c].

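On x86 the add/subtract family is a single lock-prefixed xadd. xadd leaves the old value of *p in the register, so each wrapper re-applies the operand afterwards (the addl/subl following the xaddl) to return the new value, as this C sketch of the same fixup shows (the helper name is illustrative):

	static int
	xaddStyleAdd(volatile int *p, int i)
	{
		int old = __sync_fetch_and_add(p, i);	/* what lock xadd yields: the old value */

		return old + i;	/* the trailing addl: turn the old value into the new one */
	}

The or/and/xor family cannot use xadd and instead loops on lock cmpxchg, and the pointer variants switch between the l and q instruction suffixes depending on OF_X86_64.
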
Lines 12-26 (old):
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

OF_ASSUME_NONNULL_BEGIN

static OF_INLINE int
of_atomic_int_add(volatile int *_Nonnull p, int i)
{
	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "lock\n\t"
		    "xaddl	%0, %2\n\t"
		    "addl	%1, %0"
		    : "+&r"(i)
Lines 12-26 (new):
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

OF_ASSUME_NONNULL_BEGIN

static OF_INLINE int
OFAtomicIntAdd(volatile int *_Nonnull p, int i)
{
	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "lock\n\t"
		    "xaddl	%0, %2\n\t"
		    "addl	%1, %0"
		    : "+&r"(i)
Lines 39-67 (old):
	else
		abort();

	return i;
}

static OF_INLINE int32_t
of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xaddl	%0, %2\n\t"
	    "addl	%1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return i;
}

static OF_INLINE void *_Nullable
of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if defined(OF_X86_64)
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xaddq	%0, %2\n\t"
	    "addq	%1, %0"
	    : "+&r"(i)
Lines 39-67 (new):
	else
		abort();

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xaddl	%0, %2\n\t"
	    "addl	%1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if defined(OF_X86_64)
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xaddq	%0, %2\n\t"
	    "addq	%1, %0"
	    : "+&r"(i)
Lines 79-93 (old):
	);

	return (void *)i;
#endif
}

static OF_INLINE int
of_atomic_int_sub(volatile int *_Nonnull p, int i)
{
	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "negl	%0\n\t"
		    "lock\n\t"
		    "xaddl	%0, %2\n\t"
		    "subl	%1, %0"
Lines 79-93 (new):
	);

	return (void *)i;
#endif
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "negl	%0\n\t"
		    "lock\n\t"
		    "xaddl	%0, %2\n\t"
		    "subl	%1, %0"
Lines 108-137 (old):
	else
		abort();

	return i;
}

static OF_INLINE int32_t
of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "negl	%0\n\t"
	    "lock\n\t"
	    "xaddl	%0, %2\n\t"
	    "subl	%1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return i;
}

static OF_INLINE void *_Nullable
of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if defined(OF_X86_64)
	__asm__ __volatile__ (
	    "negq	%0\n\t"
	    "lock\n\t"
	    "xaddq	%0, %2\n\t"
	    "subq	%1, %0"
Lines 108-137 (new):
	else
		abort();

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "negl	%0\n\t"
	    "lock\n\t"
	    "xaddl	%0, %2\n\t"
	    "subl	%1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if defined(OF_X86_64)
	__asm__ __volatile__ (
	    "negq	%0\n\t"
	    "lock\n\t"
	    "xaddq	%0, %2\n\t"
	    "subq	%1, %0"
Lines 151-165 (old):
	);

	return (void *)i;
#endif
}

static OF_INLINE int
of_atomic_int_inc(volatile int *_Nonnull p)
{
	int i;

	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "xorl	%0, %0\n\t"
		    "incl	%0\n\t"
Lines 151-165 (new):
	);

	return (void *)i;
#endif
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
	int i;

	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "xorl	%0, %0\n\t"
		    "incl	%0\n\t"
Lines 184-216 (old):
	else
		abort();

	return i;
}

static OF_INLINE int32_t
of_atomic_int32_inc(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "xorl	%0, %0\n\t"
	    "incl	%0\n\t"
	    "lock\n\t"
	    "xaddl	%0, %1\n\t"
	    "incl	%0"
	    : "=&r"(i)
	    : "m"(*p)
	);

	return i;
}

static OF_INLINE int
of_atomic_int_dec(volatile int *_Nonnull p)
{
	int i;

	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "xorl	%0, %0\n\t"
		    "decl	%0\n\t"
Lines 184-216 (new):
	else
		abort();

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "xorl	%0, %0\n\t"
	    "incl	%0\n\t"
	    "lock\n\t"
	    "xaddl	%0, %1\n\t"
	    "incl	%0"
	    : "=&r"(i)
	    : "m"(*p)
	);

	return i;
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
	int i;

	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "xorl	%0, %0\n\t"
		    "decl	%0\n\t"
Lines 235-267 (old):
	else
		abort();

	return i;
}

static OF_INLINE int32_t
of_atomic_int32_dec(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "xorl	%0, %0\n\t"
	    "decl	%0\n\t"
	    "lock\n\t"
	    "xaddl	%0, %1\n\t"
	    "decl	%0"
	    : "=&r"(i)
	    : "m"(*p)
	);

	return i;
}

static OF_INLINE unsigned int
of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i)
{
	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "0:\n\t"
		    "movl	%2, %0\n\t"
		    "movl	%0, %%eax\n\t"
		    "orl	%1, %0\n\t"
Lines 235-267 (new):
	else
		abort();

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "xorl	%0, %0\n\t"
	    "decl	%0\n\t"
	    "lock\n\t"
	    "xaddl	%0, %1\n\t"
	    "decl	%0"
	    : "=&r"(i)
	    : "m"(*p)
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
{
	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "0:\n\t"
		    "movl	%2, %0\n\t"
		    "movl	%0, %%eax\n\t"
		    "orl	%1, %0\n\t"
Lines 290-323 (old):
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "movl	%2, %0\n\t"
	    "movl	%0, %%eax\n\t"
	    "orl	%1, %0\n\t"
	    "lock\n\t"
	    "cmpxchg	%0, %2\n\t"
	    "jne	0b"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
}

static OF_INLINE unsigned int
of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i)
{
	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "0:\n\t"
		    "movl	%2, %0\n\t"
		    "movl	%0, %%eax\n\t"
		    "andl	%1, %0\n\t"
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "movl	%2, %0\n\t"
	    "movl	%0, %%eax\n\t"
	    "orl	%1, %0\n\t"
	    "lock\n\t"
	    "cmpxchg	%0, %2\n\t"
	    "jne	0b"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
}
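
Unlike increase/decrease, x86 has no instruction that both ORs and returns the result, so the bitwise operations are built as a cmpxchg retry loop: load the current value into eax, compute the OR, and attempt to publish it; if another thread changed the value in between, cmpxchg fails and the loop restarts. The same loop written with the GCC/Clang __atomic builtins (an assumption, shown only for comparison; the function name is made up):

#include <stdbool.h>
#include <stdint.h>

static inline uint32_t
or_with_builtins(volatile uint32_t *p, uint32_t i)
{
	uint32_t old, desired;

	do {
		old = __atomic_load_n(p, __ATOMIC_SEQ_CST);
		desired = old | i;
	} while (!__atomic_compare_exchange_n(p, &old, desired, false,
	    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

	/* Like the assembly above, the new value is returned. */
	return desired;
}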

static OF_INLINE unsigned int
OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
{
	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "0:\n\t"
		    "movl	%2, %0\n\t"
		    "movl	%0, %%eax\n\t"
		    "andl	%1, %0\n\t"
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "movl	%2, %0\n\t"
	    "movl	%0, %%eax\n\t"
	    "andl	%1, %0\n\t"
	    "lock\n\t"
	    "cmpxchg	%0, %2\n\t"
	    "jne	0b"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
}

static OF_INLINE unsigned int
of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "0:\n\t"
		    "movl	%2, %0\n\t"
		    "movl	%0, %%eax\n\t"
		    "xorl	%1, %0\n\t"
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "movl	%2, %0\n\t"
	    "movl	%0, %%eax\n\t"
	    "andl	%1, %0\n\t"
	    "lock\n\t"
	    "cmpxchg	%0, %2\n\t"
	    "jne	0b"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	if (sizeof(int) == 4)
		__asm__ __volatile__ (
		    "0:\n\t"
		    "movl	%2, %0\n\t"
		    "movl	%0, %%eax\n\t"
		    "xorl	%1, %0\n\t"
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "movl	%2, %0\n\t"
	    "movl	%0, %%eax\n\t"
	    "xorl	%1, %0\n\t"
	    "lock\n\t"
	    "cmpxchgl	%0, %2\n\t"
	    "jne	0b"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
}

static OF_INLINE bool
of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg	%2, %3\n\t"
	    "sete	%b0\n\t"
	    "movzbl	%b0, %0"
	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
	    : "r"(n), "m"(*p)
	    : "cc"
	);

	return r;
}

static OF_INLINE bool
of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg	%2, %3\n\t"
	    "sete	%b0\n\t"
	    "movzbl	%b0, %0"
	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
	    : "r"(n), "m"(*p)
	    : "cc"
	);

	return r;
}

static OF_INLINE bool
of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg	%2, %3\n\t"
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "movl	%2, %0\n\t"
	    "movl	%0, %%eax\n\t"
	    "xorl	%1, %0\n\t"
	    "lock\n\t"
	    "cmpxchgl	%0, %2\n\t"
	    "jne	0b"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
}
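
Taken together, the Or/And/Xor operations cover the usual atomic flag-word idioms. A hypothetical usage sketch (the flag constants and the variable are made up for illustration):

#include <stdint.h>

#define MY_FLAG_READY	0x1U
#define MY_FLAG_DIRTY	0x2U

static volatile uint32_t myFlags;

static void
flag_examples(void)
{
	OFAtomicInt32Or(&myFlags, MY_FLAG_READY);	/* set a bit */
	OFAtomicInt32And(&myFlags, ~MY_FLAG_DIRTY);	/* clear a bit */
	OFAtomicInt32Xor(&myFlags, MY_FLAG_READY);	/* toggle a bit */
}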

static OF_INLINE bool
OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg	%2, %3\n\t"
	    "sete	%b0\n\t"
	    "movzbl	%b0, %0"
	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
	    : "r"(n), "m"(*p)
	    : "cc"
	);

	return r;
}

static OF_INLINE bool
OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg	%2, %3\n\t"
	    "sete	%b0\n\t"
	    "movzbl	%b0, %0"
	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
	    : "r"(n), "m"(*p)
	    : "cc"
	);

	return r;
}
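
Compare-and-swap is the primitive everything else here reduces to, and it also enables lock-free structures directly. Below is a minimal Treiber-style stack push built on the pointer variant that follows (struct MyNode, myTop and my_push are made-up names; pop and the ABA problem need more care, this only shows the retry shape):

struct MyNode {
	struct MyNode *next;
	void *value;
};

static void *volatile myTop;

static void
my_push(struct MyNode *node)
{
	do {
		/* Snapshot the head and link the new node in front of
		 * it; retry if another thread moved the head first. */
		node->next = myTop;
	} while (!OFAtomicPointerCompareAndSwap(&myTop, node->next, node));
}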

static OF_INLINE bool
OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg	%2, %3\n\t"

Modified src/mutex.h from [89d7ed7ceb] to [d443bf1d19].

#endif
}

static OF_INLINE int
OFSpinlockTryLock(OFSpinlock *spinlock)
{
#if defined(OF_HAVE_ATOMIC_OPS)
	if (of_atomic_int_cmpswap(spinlock, 0, 1)) {
		of_memory_barrier_acquire();
		return 0;
	}

	return EBUSY;
#elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
	return pthread_spin_trylock(spinlock);
#endif
}

static OF_INLINE int
OFSpinlockTryLock(OFSpinlock *spinlock)
{
#if defined(OF_HAVE_ATOMIC_OPS)
	if (OFAtomicIntCompareAndSwap(spinlock, 0, 1)) {
		of_memory_barrier_acquire();
		return 0;
	}

	return EBUSY;
#elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
	return pthread_spin_trylock(spinlock);
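
The try-lock above is a single CAS from 0 (unlocked) to 1 (locked) with an acquire barrier on success. A blocking lock can be sketched by spinning on it and yielding between attempts (a sketch only; the real OFSpinlockLock is not shown in this hunk):

static void
lock_by_spinning(OFSpinlock *spinlock)
{
	/* Spin until the 0 -> 1 transition succeeds, yielding the CPU
	 * so the current holder can make progress. */
	while (OFSpinlockTryLock(spinlock) != 0)
		OFYieldThread();
}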
#endif
}

static OF_INLINE int
OFSpinlockUnlock(OFSpinlock *spinlock)
{
#if defined(OF_HAVE_ATOMIC_OPS)
	bool ret = of_atomic_int_cmpswap(spinlock, 1, 0);

	of_memory_barrier_release();

	return (ret ? 0 : EINVAL);
#elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
	return pthread_spin_unlock(spinlock);
#else
#endif
}

static OF_INLINE int
OFSpinlockUnlock(OFSpinlock *spinlock)
{
#if defined(OF_HAVE_ATOMIC_OPS)
	bool ret = OFAtomicIntCompareAndSwap(spinlock, 1, 0);

	of_memory_barrier_release();

	return (ret ? 0 : EINVAL);
#elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
	return pthread_spin_unlock(spinlock);
#else

Modified src/once.m from [8f91aff896] to [00fe24ae69].

#elif defined(OF_HAVE_PTHREADS)
	pthread_once(control, func);
#elif defined(OF_HAVE_ATOMIC_OPS)
	/* Avoid atomic operations in case it's already done. */
	if (*control == 2)
		return;

	if (of_atomic_int_cmpswap(control, 0, 1)) {
		func();

		of_memory_barrier();

		of_atomic_int_inc(control);
	} else
		while (*control == 1)
			OFYieldThread();
#elif defined(OF_AMIGAOS)
	bool run = false;

	/* Avoid Forbid() in case it's already done. */
#elif defined(OF_HAVE_PTHREADS)
	pthread_once(control, func);
#elif defined(OF_HAVE_ATOMIC_OPS)
	/* Avoid atomic operations in case it's already done. */
	if (*control == 2)
		return;

	if (OFAtomicIntCompareAndSwap(control, 0, 1)) {
		func();

		of_memory_barrier();

		OFAtomicIntIncrease(control);
	} else
		while (*control == 1)
			OFYieldThread();
#elif defined(OF_AMIGAOS)
	bool run = false;

	/* Avoid Forbid() in case it's already done. */
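
The atomic-ops branch above implements a three-state once protocol: 0 = not started, 1 = running, 2 = done. The CAS elects exactly one thread to run func(), the memory barrier publishes func()'s side effects, and the final increase moves the state from 1 to 2 so spinning waiters can proceed. The same protocol sketched with the __atomic builtins, which make the barrier placement explicit (an assumption, for illustration; the function name is made up):

#include <stdbool.h>

static void
once_sketch(volatile int *control, void (*func)(void))
{
	int expected = 0;

	/* Fast path: initialization already completed. */
	if (__atomic_load_n(control, __ATOMIC_ACQUIRE) == 2)
		return;

	if (__atomic_compare_exchange_n(control, &expected, 1, false,
	    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
		func();
		/* Release store: everything func() wrote becomes
		 * visible before other threads can observe state 2. */
		__atomic_store_n(control, 2, __ATOMIC_RELEASE);
	} else
		/* Another thread won the race; wait for it. */
		while (__atomic_load_n(control, __ATOMIC_ACQUIRE) != 2)
			;
}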

Modified src/platform/windows/condition.m from [5e19c9c3c0] to [bd016e33b7].

{
	int error;
	DWORD status;

	if ((error = OFPlainMutexUnlock(mutex)) != 0)
		return error;

	of_atomic_int_inc(&condition->count);
	status = WaitForSingleObject(condition->event, INFINITE);
	of_atomic_int_dec(&condition->count);

	switch (status) {
	case WAIT_OBJECT_0:
		return OFPlainMutexLock(mutex);
	case WAIT_FAILED:
		switch (GetLastError()) {
		case ERROR_INVALID_HANDLE:
{
	int error;
	DWORD status;

	if ((error = OFPlainMutexUnlock(mutex)) != 0)
		return error;

	OFAtomicIntIncrease(&condition->count);
	status = WaitForSingleObject(condition->event, INFINITE);
	OFAtomicIntDecrease(&condition->count);

	switch (status) {
	case WAIT_OBJECT_0:
		return OFPlainMutexLock(mutex);
	case WAIT_FAILED:
		switch (GetLastError()) {
		case ERROR_INVALID_HANDLE:
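
The waiter count maintained atomically around WaitForSingleObject() lets the signalling side know whether any thread is actually blocked on the event. A hedged sketch of what a matching signal could look like (the OFPlainCondition type name and its fields are inferred from the hunk above; the function name, return convention and error handling are assumptions, not the file's actual code):

#include <errno.h>

int
example_condition_signal(OFPlainCondition *condition)
{
	/* Only set the event if somebody is waiting; with no waiters
	 * there is nothing to wake up. */
	if (condition->count > 0)
		if (!SetEvent(condition->event))
			return EINVAL;

	return 0;
}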
{
	int error;
	DWORD status;

	if ((error = OFPlainMutexUnlock(mutex)) != 0)
		return error;

	of_atomic_int_inc(&condition->count);
	status = WaitForSingleObject(condition->event, timeout * 1000);
	of_atomic_int_dec(&condition->count);

	switch (status) {
	case WAIT_OBJECT_0:
		return OFPlainMutexLock(mutex);
	case WAIT_TIMEOUT:
		return ETIMEDOUT;
	case WAIT_FAILED:
{
	int error;
	DWORD status;

	if ((error = OFPlainMutexUnlock(mutex)) != 0)
		return error;

	OFAtomicIntIncrease(&condition->count);
	status = WaitForSingleObject(condition->event, timeout * 1000);
	OFAtomicIntDecrease(&condition->count);

	switch (status) {
	case WAIT_OBJECT_0:
		return OFPlainMutexLock(mutex);
	case WAIT_TIMEOUT:
		return ETIMEDOUT;
	case WAIT_FAILED: