ObjFW Diff

Differences From Artifact [db489edc3a]:
To Artifact [07765f0312]:

@@ -1,9 +1,9 @@
 /*
- * Copyright (c) 2008-2022 Jonathan Schleifer <js@nil.im>
+ * Copyright (c) 2008-2023 Jonathan Schleifer <js@nil.im>
  *
  * All rights reserved.
  *
  * This file is part of ObjFW. It may be distributed under the terms of the
  * Q Public License 1.0, which can be found in the file LICENSE.QPL included in
  * the packaging of this file.
  *

@@ -22,15 +22,15 @@
 		__asm__ __volatile__ (
 		    "lock\n\t"
 		    "xaddl	%0, %2\n\t"
 		    "addl	%1, %0"
 		    : "+&r"(i)
 		    : "r"(i), "m"(*p)
 		);
-#ifdef OF_X86_64
+#ifdef OF_AMD64
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "lock\n\t"
 		    "xaddq	%0, %2\n\t"
 		    "addq	%1, %0"
 		    : "+&r"(i)
 		    : "r"(i), "m"(*p)

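A note on the add hunk above: xadd atomically exchanges the source register with *p and leaves the old value of *p in the register, and the following addl/addq adds i back in, so the function returns the new value of *p. A rough sketch of the same semantics using the GCC/Clang __atomic builtins (illustrative only, with a hypothetical helper name, not necessarily what ObjFW uses on other platforms):

	static inline int
	AtomicIntAddSketch(volatile int *p, int i)
	{
		/* Atomically do *p += i and return the new value, matching
		 * what the lock xadd + add sequence above computes. */
		return __atomic_add_fetch(p, i, __ATOMIC_SEQ_CST);
	}
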
@@ -55,15 +55,15 @@
 
 	return i;
 }
 
 static OF_INLINE void *_Nullable
 OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
 {
-#if defined(OF_X86_64)
+#if defined(OF_AMD64)
 	__asm__ __volatile__ (
 	    "lock\n\t"
 	    "xaddq	%0, %2\n\t"
 	    "addq	%1, %0"
 	    : "+&r"(i)
 	    : "r"(i), "m"(*p)
 	);

@@ -90,15 +90,15 @@
 		    "negl	%0\n\t"
 		    "lock\n\t"
 		    "xaddl	%0, %2\n\t"
 		    "subl	%1, %0"
 		    : "+&r"(i)
 		    : "r"(i), "m"(*p)
 		);
-#ifdef OF_X86_64
+#ifdef OF_AMD64
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "negq	%0\n\t"
 		    "lock\n\t"
 		    "xaddq	%0, %2\n\t"
 		    "subq	%1, %0"
 		    : "+&r"(i)

@@ -125,15 +125,15 @@
 
 	return i;
 }
 
 static OF_INLINE void *_Nullable
 OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
 {
-#if defined(OF_X86_64)
+#if defined(OF_AMD64)
 	__asm__ __volatile__ (
 	    "negq	%0\n\t"
 	    "lock\n\t"
 	    "xaddq	%0, %2\n\t"
 	    "subq	%1, %0"
 	    : "+&r"(i)
 	    : "r"(i), "m"(*p)

@@ -165,15 +165,15 @@
 		    "incl	%0\n\t"
 		    "lock\n\t"
 		    "xaddl	%0, %1\n\t"
 		    "incl	%0"
 		    : "=&r"(i)
 		    : "m"(*p)
 		);
-#ifdef OF_X86_64
+#ifdef OF_AMD64
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "xorq	%0, %0\n\t"
 		    "incq	%0\n\t"
 		    "lock\n\t"
 		    "xaddq	%0, %1\n\t"
 		    "incq	%0"

@@ -216,15 +216,15 @@
 		    "decl	%0\n\t"
 		    "lock\n\t"
 		    "xaddl	%0, %1\n\t"
 		    "decl	%0"
 		    : "=&r"(i)
 		    : "m"(*p)
 		);
-#ifdef OF_X86_64
+#ifdef OF_AMD64
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "xorq	%0, %0\n\t"
 		    "decq	%0\n\t"
 		    "lock\n\t"
 		    "xaddq	%0, %1\n\t"
 		    "decq	%0"

@@ -268,15 +268,15 @@
 		    "lock\n\t"
 		    "cmpxchg	%0, %2\n\t"
 		    "jne	0b"
 		    : "=&r"(i)
 		    : "r"(i), "m"(*p)
 		    : "eax", "cc"
 		);
-#ifdef OF_X86_64
+#ifdef OF_AMD64
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movq	%2, %0\n\t"
 		    "movq	%0, %%rax\n\t"
 		    "orq	%1, %0\n\t"
 		    "lock\n\t"

@@ -324,15 +324,15 @@
 		    "lock\n\t"
 		    "cmpxchg	%0, %2\n\t"
 		    "jne	0b"
 		    : "=&r"(i)
 		    : "r"(i), "m"(*p)
 		    : "eax", "cc"
 		);
-#ifdef OF_X86_64
+#ifdef OF_AMD64
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movq	%2, %0\n\t"
 		    "movq	%0, %%rax\n\t"
 		    "andq	%1, %0\n\t"
 		    "lock\n\t"

@@ -380,15 +380,15 @@
 		    "lock\n\t"
 		    "cmpxchg	%0, %2\n\t"
 		    "jne	0b"
 		    : "=&r"(i)
 		    : "r"(i), "m"(*p)
 		    : "eax", "cc"
 		);
-#ifdef OF_X86_64
+#ifdef OF_AMD64
 	else if (sizeof(int) == 8)
 		__asm__ __volatile__ (
 		    "0:\n\t"
 		    "movq	%2, %0\n\t"
 		    "movq	%0, %%rax\n\t"
 		    "xorq	%1, %0\n\t"
 		    "lock\n\t"

@@ -478,17 +478,23 @@
 
 	return r;
 }
 
 static OF_INLINE void
 OFMemoryBarrier(void)
 {
+#ifdef OF_AMD64
 	__asm__ __volatile__ (
-	    "mfence" ::: "memory"
+	    "lock orq	$0, (%%rsp)" ::: "memory", "cc"
 	);
+#else
+	__asm__ __volatile__ (
+	    "lock orl	$0, (%%esp)" ::: "memory", "cc"
+	);
+#endif
 }
 
 static OF_INLINE void
 OFAcquireMemoryBarrier(void)
 {
 	__asm__ __volatile__ ("" ::: "memory");
 }

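A note on the OFMemoryBarrier hunk above: on x86 and AMD64, any lock-prefixed read-modify-write instruction acts as a full memory barrier, so a lock or of 0 into a location on the stack gives the same ordering guarantee as mfence, including the StoreLoad ordering that plain x86 loads and stores do not provide, and it is cheaper on many CPUs; the "cc" clobber is needed because or writes the flags. A hedged usage sketch with hypothetical variables (not taken from ObjFW) showing a case where the full barrier actually matters:

	/* Dekker-style handshake: each side must publish its own flag
	 * before reading the other's, which needs a StoreLoad barrier. */
	static volatile int flag0, flag1;

	static int
	TrySide0(void)
	{
		flag0 = 1;
		OFMemoryBarrier();	/* order the store to flag0 before
					 * the load of flag1 */
		return !flag1;		/* nonzero: side 0 may proceed */
	}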