ObjFW Check-in [a92844520a]

Overview
Comment: Improve atomics API
SHA3-256: a92844520ad6c2e2f7b533f64b4eb01181d3618a6a40f41cbd57b252707dabd7
User & Date: js on 2014-05-05 01:08:47
Context
2014-05-05
01:42  atomic.h: Prefer GCC builtins over ASM  (check-in: a199313c53, user: js, tags: trunk)
01:08  Improve atomics API  (check-in: a92844520a, user: js, tags: trunk) (this check-in)
00:14  Always build tests  (check-in: 974b1b203f, user: js, tags: trunk)
Changes
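In substance, this check-in mechanically renames the whole atomics API from operation-first to type-first names (of_atomic_add_int → of_atomic_int_add, of_atomic_inc_32 → of_atomic_int32_inc, of_atomic_cmpswap_ptr → of_atomic_ptr_cmpswap, and so on) and updates every call site to match. As a minimal sketch of the mapping, a hypothetical compatibility shim (not part of this check-in) would read:

/* Hypothetical shim: old operation-first names forwarded to the
 * new type-first names introduced by this check-in. */
#define of_atomic_add_int(p, i)		of_atomic_int_add(p, i)
#define of_atomic_sub_int(p, i)		of_atomic_int_sub(p, i)
#define of_atomic_inc_int(p)		of_atomic_int_inc(p)
#define of_atomic_dec_int(p)		of_atomic_int_dec(p)
#define of_atomic_or_int(p, i)		of_atomic_int_or(p, i)
#define of_atomic_and_int(p, i)		of_atomic_int_and(p, i)
#define of_atomic_xor_int(p, i)		of_atomic_int_xor(p, i)
#define of_atomic_cmpswap_int(p, o, n)	of_atomic_int_cmpswap(p, o, n)
/* ...likewise for the remaining _32 → int32 and _ptr → ptr variants. */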

Modified src/OFBlock.m from [aa21105e0f] to [5efc26dc00].

@@ -186,15 +186,15 @@
 			block->descriptor->copy_helper(copy, block);
 
 		return copy;
 	}
 
 	if (object_getClass((id)block) == (Class)&_NSConcreteMallocBlock) {
 #ifdef OF_HAVE_ATOMIC_OPS
-		of_atomic_inc_int(&block->flags);
+		of_atomic_int_inc(&block->flags);
 #else
 		unsigned hash = SPINLOCK_HASH(block);
 
 		OF_ENSURE(of_spinlock_lock(&spinlocks[hash]));
 		block->flags++;
 		OF_ENSURE(of_spinlock_unlock(&spinlocks[hash]));
 #endif
@@ -208,15 +208,15 @@
 {
 	of_block_literal_t *block = (of_block_literal_t*)block_;
 
 	if (object_getClass((id)block) != (Class)&_NSConcreteMallocBlock)
 		return;
 
 #ifdef OF_HAVE_ATOMIC_OPS
-	if ((of_atomic_dec_int(&block->flags) & OF_BLOCK_REFCOUNT_MASK) == 0) {
+	if ((of_atomic_int_dec(&block->flags) & OF_BLOCK_REFCOUNT_MASK) == 0) {
 		if (block->flags & OF_BLOCK_HAS_COPY_DISPOSE)
 			block->descriptor->dispose_helper(block);
 
 		free(block);
 	}
 #else
 	unsigned hash = SPINLOCK_HASH(block);
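The retain/release paths above keep a malloc'd block's reference count in the low bits of block->flags, masked with OF_BLOCK_REFCOUNT_MASK; since of_atomic_int_dec() returns the new value, a masked result of zero means the last reference is gone. A minimal standalone sketch of that pattern, assuming a simplified literal and an illustrative mask value:

#include <stdlib.h>
#import "atomic.h"

#define REFCOUNT_MASK 0xFFFF	/* illustrative; not ObjFW's actual mask */

struct toy_block {
	volatile int flags;	/* refcount lives in the masked low bits */
};

static void
toy_block_retain(struct toy_block *block)
{
	of_atomic_int_inc(&block->flags);
}

static void
toy_block_release(struct toy_block *block)
{
	/* of_atomic_int_dec() returns the new value, so a masked zero
	 * means this was the final release. */
	if ((of_atomic_int_dec(&block->flags) & REFCOUNT_MASK) == 0)
		free(block);
}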

Modified src/OFObject.m from [f3bed53382] to [e8aca19951].

@@ -68,19 +68,19 @@
 extern struct stret of_forward_stret(id, SEL, ...);
 #else
 # define of_forward of_method_not_found
 # define of_forward_stret of_method_not_found_stret
 #endif
 
 struct pre_ivar {
-	int32_t retainCount;
-	struct pre_mem *firstMem, *lastMem;
+	int retainCount;
+
 #if !defined(OF_HAVE_ATOMIC_OPS) && defined(OF_HAVE_THREADS)
 	of_spinlock_t retainCountSpinlock;
 #endif
-
-};
+	struct pre_mem *firstMem, *lastMem;
+};
 
 struct pre_mem {
 	struct pre_mem *prev, *next;
 	id owner;
 };

@@ -956,34 +956,34 @@
 	@throw [OFNotImplementedException exceptionWithSelector: selector
 							 object: self];
 }
 
 - retain
 {
 #if defined(OF_HAVE_ATOMIC_OPS)
-	of_atomic_inc_32(&PRE_IVARS->retainCount);
+	of_atomic_int_inc(&PRE_IVARS->retainCount);
 #else
 	OF_ENSURE(of_spinlock_lock(&PRE_IVARS->retainCountSpinlock));
 	PRE_IVARS->retainCount++;
 	OF_ENSURE(of_spinlock_unlock(&PRE_IVARS->retainCountSpinlock));
 #endif
 
 	return self;
 }
 
 - (unsigned int)retainCount
 {
 	assert(PRE_IVARS->retainCount >= 0);
 	return PRE_IVARS->retainCount;
 }
 
 - (void)release
 {
 #if defined(OF_HAVE_ATOMIC_OPS)
-	if (of_atomic_dec_32(&PRE_IVARS->retainCount) <= 0)
+	if (of_atomic_int_dec(&PRE_IVARS->retainCount) <= 0)
 		[self dealloc];
 #else
 	size_t c;
 
 	OF_ENSURE(of_spinlock_lock(&PRE_IVARS->retainCountSpinlock));
 	c = --PRE_IVARS->retainCount;
 	OF_ENSURE(of_spinlock_unlock(&PRE_IVARS->retainCountSpinlock));
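Besides the call-site renames, the struct change above makes retainCount a plain int rather than int32_t, so its address matches the volatile int * that the of_atomic_int_* family takes (the pointer fields simply move below the spinlock). A tiny illustration of why the types must line up, with hypothetical names:

#import "atomic.h"

struct toy_ivar {
	int retainCount;	/* matches of_atomic_int_inc()'s
				 * volatile int * parameter; an int32_t
				 * field would need a cast wherever
				 * int32_t is not a typedef of int */
};

static void
toy_retain(struct toy_ivar *ivar)
{
	of_atomic_int_inc(&ivar->retainCount);
}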

Modified src/OFThread.m from [d65d9ab0c1] to [68ca15bbc7].

@@ -343,15 +343,15 @@
 
 - (OFRunLoop*)runLoop
 {
 # ifdef OF_HAVE_ATOMIC_OPS
 	if (_runLoop == nil) {
 		OFRunLoop *tmp = [[OFRunLoop alloc] init];
 
-		if (!of_atomic_cmpswap_ptr((void**)&_runLoop, nil, tmp))
+		if (!of_atomic_ptr_cmpswap((void**)&_runLoop, nil, tmp))
 			[tmp release];
 	}
 # else
 	@synchronized (self) {
 		if (_runLoop == nil)
 			_runLoop = [[OFRunLoop alloc] init];
 	}
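-[OFThread runLoop] above is the classic lock-free lazy initialization: any racing thread may allocate a candidate, but only the one whose of_atomic_ptr_cmpswap() succeeds installs it; the losers release their candidate and use the winner's. The same idiom in plain C, with create()/destroy() as hypothetical stand-ins for alloc/init and release:

#import "atomic.h"

extern void *create(void);	/* hypothetical allocator */
extern void destroy(void *);	/* hypothetical cleanup */

static void *volatile shared;

static void *
get_shared(void)
{
	if (shared == NULL) {
		void *tmp = create();

		/* Exactly one thread swaps NULL -> tmp; every loser
		 * destroys its extra instance and falls through. */
		if (!of_atomic_ptr_cmpswap(&shared, NULL, tmp))
			destroy(tmp);
	}

	return shared;
}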

Modified src/atomic.h from [9f476a30f6] to [838c97203e].

@@ -25,15 +25,15 @@
 #import "macros.h"
 
 #ifdef OF_HAVE_OSATOMIC
 # include <libkern/OSAtomic.h>
 #endif
 
 static OF_INLINE int
-of_atomic_add_int(volatile int *p, int i)
+of_atomic_int_add(volatile int *p, int i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p += i);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "lock\n\t"
@@ -57,44 +57,44 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_add_and_fetch(p, i);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicAdd32Barrier(i, p);
 #else
-# error of_atomic_add_int not implemented!
+# error of_atomic_int_add not implemented!
 #endif
 }
 
 static OF_INLINE int32_t
-of_atomic_add_32(volatile int32_t *p, int32_t i)
+of_atomic_int32_add(volatile int32_t *p, int32_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p += i);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "xaddl	%0, %2\n\t"
 	    "addl	%1, %0"
 	    : "+&r"(i)
 	    : "r"(i), "m"(*p)
 	);
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_add_and_fetch(p, i);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicAdd32Barrier(i, p);
 #else
-# error of_atomic_add_32 not implemented!
+# error of_atomic_int32_add not implemented!
 #endif
 }
 
 static OF_INLINE void*
-of_atomic_add_ptr(void* volatile *p, intptr_t i)
+of_atomic_ptr_add(void* volatile *p, intptr_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*(char* volatile*)p += i);
 #elif defined(OF_X86_64_ASM)
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "xaddq	%0, %2\n\t"
@@ -119,20 +119,20 @@
 #elif defined(OF_HAVE_OSATOMIC)
 # ifdef __LP64__
 	return (void*)OSAtomicAdd64Barrier(i, (int64_t*)p);
 # else
 	return (void*)OSAtomicAdd32Barrier(i, (int32_t*)p);
 # endif
 #else
-# error of_atomic_add_ptr not implemented!
+# error of_atomic_ptr_add not implemented!
 #endif
 }
 
 static OF_INLINE int
-of_atomic_sub_int(volatile int *p, int i)
+of_atomic_int_sub(volatile int *p, int i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p -= i);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "negl	%0\n\t"
@@ -158,45 +158,45 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_sub_and_fetch(p, i);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicAdd32Barrier(-i, p);
 #else
-# error of_atomic_sub_int not implemented!
+# error of_atomic_int_sub not implemented!
 #endif
 }
 
 static OF_INLINE int32_t
-of_atomic_sub_32(volatile int32_t *p, int32_t i)
+of_atomic_int32_sub(volatile int32_t *p, int32_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p -= i);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "negl	%0\n\t"
 	    "lock\n\t"
 	    "xaddl	%0, %2\n\t"
 	    "subl	%1, %0"
 	    : "+&r"(i)
 	    : "r"(i), "m"(*p)
 	);
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_sub_and_fetch(p, i);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicAdd32Barrier(-i, p);
 #else
-# error of_atomic_sub_32 not implemented!
+# error of_atomic_int32_sub not implemented!
 #endif
 }
 
 static OF_INLINE void*
-of_atomic_sub_ptr(void* volatile *p, intptr_t i)
+of_atomic_ptr_sub(void* volatile *p, intptr_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*(char* volatile*)p -= i);
 #elif defined(OF_X86_64_ASM)
 	__asm__ __volatile__ (
 	    "negq	%0\n\t"
 	    "lock\n\t"
@@ -223,20 +223,20 @@
 #elif defined(OF_HAVE_OSATOMIC)
 # ifdef __LP64__
 	return (void*)OSAtomicAdd64Barrier(-i, (int64_t*)p);
 # else
 	return (void*)OSAtomicAdd32Barrier(-i, (int32_t*)p);
 # endif
 #else
-# error of_atomic_sub_ptr not implemented!
+# error of_atomic_ptr_sub not implemented!
 #endif
 }
 
 static OF_INLINE int
-of_atomic_inc_int(volatile int *p)
+of_atomic_int_inc(volatile int *p)
 {
 #if !defined(OF_HAVE_THREADS)
 	return ++*p;
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	int i;
 
 	if (sizeof(int) == 4)
@@ -266,20 +266,20 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_add_and_fetch(p, 1);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicIncrement32Barrier(p);
 #else
-# error of_atomic_inc_int not implemented!
+# error of_atomic_int_inc not implemented!
 #endif
 }
 
 static OF_INLINE int32_t
-of_atomic_inc_32(volatile int32_t *p)
+of_atomic_int32_inc(volatile int32_t *p)
 {
 #if !defined(OF_HAVE_THREADS)
 	return ++*p;
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	uint32_t i;
 
 	__asm__ __volatile__ (
@@ -294,20 +294,20 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_add_and_fetch(p, 1);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicIncrement32Barrier(p);
 #else
-# error of_atomic_inc_32 not implemented!
+# error of_atomic_int32_inc not implemented!
 #endif
 }
 
 static OF_INLINE int
-of_atomic_dec_int(volatile int *p)
+of_atomic_int_dec(volatile int *p)
 {
 #if !defined(OF_HAVE_THREADS)
 	return --*p;
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	int i;
 
 	if (sizeof(int) == 4)
@@ -337,20 +337,20 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_sub_and_fetch(p, 1);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicDecrement32Barrier(p);
 #else
-# error of_atomic_dec_int not implemented!
+# error of_atomic_int_dec not implemented!
 #endif
 }
 
 static OF_INLINE int32_t
-of_atomic_dec_32(volatile int32_t *p)
+of_atomic_int32_dec(volatile int32_t *p)
 {
 #if !defined(OF_HAVE_THREADS)
 	return --*p;
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	uint32_t i;
 
 	__asm__ __volatile__ (
@@ -365,20 +365,20 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_sub_and_fetch(p, 1);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicDecrement32Barrier(p);
 #else
-# error of_atomic_dec_32 not implemented!
+# error of_atomic_int32_dec not implemented!
 #endif
 }
 
 static OF_INLINE unsigned int
-of_atomic_or_int(volatile unsigned int *p, unsigned int i)
+of_atomic_int_or(volatile unsigned int *p, unsigned int i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p |= i);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "0:\n\t"
@@ -412,20 +412,20 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_or_and_fetch(p, i);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicOr32Barrier(i, p);
 #else
-# error of_atomic_or_int not implemented!
+# error of_atomic_int_or not implemented!
 #endif
 }
 
 static OF_INLINE uint32_t
-of_atomic_or_32(volatile uint32_t *p, uint32_t i)
+of_atomic_int32_or(volatile uint32_t *p, uint32_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p |= i);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "movl	%2, %0\n\t"
@@ -441,20 +441,20 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_or_and_fetch(p, i);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicOr32Barrier(i, p);
 #else
-# error of_atomic_or_32 not implemented!
+# error of_atomic_int32_or not implemented!
 #endif
 }
 
 static OF_INLINE unsigned int
-of_atomic_and_int(volatile unsigned int *p, unsigned int i)
+of_atomic_int_and(volatile unsigned int *p, unsigned int i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p &= i);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "0:\n\t"
@@ -488,20 +488,20 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_and_and_fetch(p, i);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicAnd32Barrier(i, p);
 #else
-# error of_atomic_and_int not implemented!
+# error of_atomic_int_and not implemented!
 #endif
 }
 
 static OF_INLINE uint32_t
-of_atomic_and_32(volatile uint32_t *p, uint32_t i)
+of_atomic_int32_and(volatile uint32_t *p, uint32_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p &= i);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "movl	%2, %0\n\t"
@@ -517,20 +517,20 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_and_and_fetch(p, i);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicAnd32Barrier(i, p);
 #else
-# error of_atomic_and_32 not implemented!
+# error of_atomic_int32_and not implemented!
 #endif
 }
 
 static OF_INLINE unsigned int
-of_atomic_xor_int(volatile unsigned int *p, unsigned int i)
+of_atomic_int_xor(volatile unsigned int *p, unsigned int i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p ^= i);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	if (sizeof(int) == 4)
 		__asm__ __volatile__ (
 		    "0:\n\t"
@@ -564,20 +564,20 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_xor_and_fetch(p, i);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicXor32Barrier(i, p);
 #else
-# error of_atomic_xor_int not implemented!
+# error of_atomic_int_xor not implemented!
 #endif
 }
 
 static OF_INLINE uint32_t
-of_atomic_xor_32(volatile uint32_t *p, uint32_t i)
+of_atomic_int32_xor(volatile uint32_t *p, uint32_t i)
 {
 #if !defined(OF_HAVE_THREADS)
 	return (*p ^= i);
 #elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
 	__asm__ __volatile__ (
 	    "0:\n\t"
 	    "movl	%2, %0\n\t"
@@ -593,20 +593,20 @@
 
 	return i;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_xor_and_fetch(p, i);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicXor32Barrier(i, p);
 #else
-# error of_atomic_xor_32 not implemented!
+# error of_atomic_int32_xor not implemented!
 #endif
 }
 
 static OF_INLINE bool
-of_atomic_cmpswap_int(volatile int *p, int o, int n)
+of_atomic_int_cmpswap(volatile int *p, int o, int n)
 {
 #if !defined(OF_HAVE_THREADS)
 	if (*p == o) {
 		*p = n;
 		return true;
 	}
 
@@ -626,20 +626,20 @@
 
 	return r;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_bool_compare_and_swap(p, o, n);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicCompareAndSwapIntBarrier(o, n, p);
 #else
-# error of_atomic_cmpswap_int not implemented!
+# error of_atomic_int_cmpswap not implemented!
 #endif
 }
 
 static OF_INLINE bool
-of_atomic_cmpswap_32(volatile int32_t *p, int32_t o, int32_t n)
+of_atomic_int32_cmpswap(volatile int32_t *p, int32_t o, int32_t n)
 {
 #if !defined(OF_HAVE_THREADS)
 	if (*p == o) {
 		*p = n;
 		return true;
 	}
 
@@ -659,20 +659,20 @@
 
 	return r;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_bool_compare_and_swap(p, o, n);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicCompareAndSwap32Barrier(o, n, p);
 #else
-# error of_atomic_cmpswap_32 not implemented!
+# error of_atomic_int32_cmpswap not implemented!
 #endif
 }
 
 static OF_INLINE bool
-of_atomic_cmpswap_ptr(void* volatile *p, void *o, void *n)
+of_atomic_ptr_cmpswap(void* volatile *p, void *o, void *n)
 {
 #if !defined(OF_HAVE_THREADS)
 	if (*p == o) {
 		*p = n;
 		return true;
 	}
 
@@ -692,15 +692,15 @@
 
 	return r;
 #elif defined(OF_HAVE_GCC_ATOMIC_OPS)
 	return __sync_bool_compare_and_swap(p, o, n);
 #elif defined(OF_HAVE_OSATOMIC)
 	return OSAtomicCompareAndSwapPtrBarrier(o, n, p);
 #else
-# error of_atomic_cmpswap_ptr not implemented!
+# error of_atomic_ptr_cmpswap not implemented!
 #endif
 }
 
 static OF_INLINE void
 of_memory_barrier(void)
 {
 #if !defined(OF_HAVE_THREADS)

Modified src/threading.h from [1628bdd768] to [a65488fd6f].

@@ -262,22 +262,22 @@
 {
 #if defined(OF_HAVE_PTHREADS)
 	return !pthread_cond_wait(condition, mutex);
 #elif defined(_WIN32)
 	if (!of_mutex_unlock(mutex))
 		return false;
 
-	of_atomic_inc_int(&condition->count);
+	of_atomic_int_inc(&condition->count);
 
 	if (WaitForSingleObject(condition->event, INFINITE) != WAIT_OBJECT_0) {
 		of_mutex_lock(mutex);
 		return false;
 	}
 
-	of_atomic_dec_int(&condition->count);
+	of_atomic_int_dec(&condition->count);
 
 	if (!of_mutex_lock(mutex))
 		return false;
 
 	return true;
 #else
 # error of_condition_wait not implemented!
@@ -295,23 +295,23 @@
 	ts.tv_nsec = lrint((timeout - ts.tv_sec) * 1000000000);
 
 	return !pthread_cond_timedwait(condition, mutex, &ts);
 #elif defined(_WIN32)
 	if (!of_mutex_unlock(mutex))
 		return false;
 
-	of_atomic_inc_int(&condition->count);
+	of_atomic_int_inc(&condition->count);
 
 	if (WaitForSingleObject(condition->event,
 	    timeout * 1000) != WAIT_OBJECT_0) {
 		of_mutex_lock(mutex);
 		return false;
 	}
 
-	of_atomic_dec_int(&condition->count);
+	of_atomic_int_dec(&condition->count);
 
 	if (!of_mutex_lock(mutex))
 		return false;
 
 	return true;
 #else
 # error of_condition_timed_wait not implemented!
@@ -424,15 +424,15 @@
 #endif
 }
 
 static OF_INLINE bool
 of_spinlock_trylock(of_spinlock_t *spinlock)
 {
 #if defined(OF_HAVE_ATOMIC_OPS)
-	return of_atomic_cmpswap_int(spinlock, 0, 1);
+	return of_atomic_int_cmpswap(spinlock, 0, 1);
 #elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
 	return !pthread_spin_trylock(spinlock);
 #else
 	return of_mutex_trylock(spinlock);
 #endif
 }
 
@@ -465,15 +465,15 @@
 #endif
 }
 
 static OF_INLINE bool
 of_spinlock_unlock(of_spinlock_t *spinlock)
 {
 #if defined(OF_HAVE_ATOMIC_OPS)
-	return of_atomic_cmpswap_int(spinlock, 1, 0);
+	return of_atomic_int_cmpswap(spinlock, 1, 0);
 #elif defined(OF_HAVE_PTHREAD_SPINLOCKS)
 	return !pthread_spin_unlock(spinlock);
 #else
 	return of_mutex_unlock(spinlock);
 #endif
 }
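On the atomic-ops path, the spinlock is just of_atomic_int_cmpswap() in disguise: trylock attempts the 0 → 1 transition and unlock the 1 → 0 transition, each of which can succeed in only one thread at a time. A usage sketch in the style of the OFBlock.m call sites above, guarding a hypothetical shared counter (the lock is assumed to have been initialized beforehand):

#import "threading.h"

static of_spinlock_t lock;	/* assumed already initialized */
static int sharedCounter;

static void
increment_shared(void)
{
	OF_ENSURE(of_spinlock_lock(&lock));
	sharedCounter++;
	OF_ENSURE(of_spinlock_unlock(&lock));
}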