ObjFW Check-in [1b22456db6]

Overview
Comment: Use named operands for __asm__
SHA3-256: 1b22456db61822269acd3e8bc0afba6f6e9715e7e420ccca792d711c5bfe4b27
User & Date: js on 2023-11-09 21:09:29
Context
2023-11-10 00:25  OFSystemInfo: Check OS support for SSE/AVX/AVX512 (check-in: 42efa8c05a, user: js, tags: trunk)
2023-11-09 21:09  Use named operands for __asm__ (this check-in: 1b22456db6, user: js, tags: trunk)
2023-11-06 20:17  OFMatrix4x4: Use an extra SSE register on AMD64 (check-in: 5edf0d083d, user: js, tags: trunk)
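
This check-in switches the inline assembly from positional operands (%0, %1,
...) to GCC's symbolic operand names: %[name] in the template, and a [name]
prefix before the constraint in the operand lists. A minimal sketch of the
pattern in isolation (illustrative, not code from this check-in):

	int dst, src = 42;

	__asm__ (
	    "mov	%[in], %[out]"	/* previously "mov %1, %0" */
	    : [out] "=r" (dst)
	    : [in] "r" (src)
	);
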
Changes

Modified src/OFMatrix4x4.m from [a4f9a5f812] to [c5d68e8b03].

Old (lines 37-105):

static void
transformVectors_SSE(OFMatrix4x4 *self, SEL _cmd, OFVector4D *vectors,
    size_t count)
{
	OF_ALIGN(16) float tmp[4];

	__asm__ __volatile__ (
	    "test	%0, %0\n\t"
	    "jz		0f\n"
	    "\n\t"
	    "movaps	(%2), %%xmm0\n\t"
	    "movaps	16(%2), %%xmm1\n\t"
	    "movaps	32(%2), %%xmm2\n\t"
# ifdef OF_AMD64
	    "movaps	48(%2), %%xmm8\n"
# endif
	    "\n\t"
	    "0:\n\t"
	    "movaps	(%1), %%xmm3\n"
	    "\n\t"
	    "movaps	%%xmm0, %%xmm4\n\t"
	    "mulps	%%xmm3, %%xmm4\n\t"
	    "movaps	%%xmm4, (%3)\n\t"
	    "addss	4(%3), %%xmm4\n\t"
	    "addss	8(%3), %%xmm4\n\t"
	    "addss	12(%3), %%xmm4\n"
	    "\n\t"
	    "movaps	%%xmm1, %%xmm5\n\t"
	    "mulps	%%xmm3, %%xmm5\n\t"
	    "movaps	%%xmm5, (%3)\n\t"
	    "addss	4(%3), %%xmm5\n\t"
	    "addss	8(%3), %%xmm5\n\t"
	    "addss	12(%3), %%xmm5\n"
	    "\n\t"
	    "movaps	%%xmm2, %%xmm6\n\t"
	    "mulps	%%xmm3, %%xmm6\n\t"
	    "movaps	%%xmm6, (%3)\n\t"
	    "addss	4(%3), %%xmm6\n\t"
	    "addss	8(%3), %%xmm6\n\t"
	    "addss	12(%3), %%xmm6\n"
	    "\n\t"
# ifdef OF_AMD64
	    "movaps	%%xmm8, %%xmm7\n\t"
# else
	    "movaps	48(%2), %%xmm7\n\t"
# endif
	    "mulps	%%xmm3, %%xmm7\n\t"
	    "movaps	%%xmm7, (%3)\n\t"
	    "addss	4(%3), %%xmm7\n\t"
	    "addss	8(%3), %%xmm7\n\t"
	    "addss	12(%3), %%xmm7\n"
	    "\n\t"
	    "movss	%%xmm4, (%1)\n\t"
	    "movss	%%xmm5, 4(%1)\n\t"
	    "movss	%%xmm6, 8(%1)\n\t"
	    "movss	%%xmm7, 12(%1)\n"
	    "\n\t"
	    "add	$16, %1\n\t"
	    "dec	%0\n\t"
	    "jnz	0b\n"
	    : "+r"(count), "+r"(vectors)
	    : "r"(self->_values), "r"(&tmp)
	    : "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
# ifdef OF_AMD64
	      "xmm8",
# endif
	      "memory"
	);
}

New (lines 37-107):

static void
transformVectors_SSE(OFMatrix4x4 *self, SEL _cmd, OFVector4D *vectors,
    size_t count)
{
	OF_ALIGN(16) float tmp[4];

	__asm__ __volatile__ (
	    "test	%[count], %[count]\n\t"
	    "jz		0f\n"
	    "\n\t"
	    "movaps	(%[matrix]), %%xmm0\n\t"
	    "movaps	16(%[matrix]), %%xmm1\n\t"
	    "movaps	32(%[matrix]), %%xmm2\n\t"
# ifdef OF_AMD64
	    "movaps	48(%[matrix]), %%xmm8\n"
# endif
	    "\n\t"
	    "0:\n\t"
	    "movaps	(%[vectors]), %%xmm3\n"
	    "\n\t"
	    "movaps	%%xmm0, %%xmm4\n\t"
	    "mulps	%%xmm3, %%xmm4\n\t"
	    "movaps	%%xmm4, (%[tmp])\n\t"
	    "addss	4(%[tmp]), %%xmm4\n\t"
	    "addss	8(%[tmp]), %%xmm4\n\t"
	    "addss	12(%[tmp]), %%xmm4\n"
	    "\n\t"
	    "movaps	%%xmm1, %%xmm5\n\t"
	    "mulps	%%xmm3, %%xmm5\n\t"
	    "movaps	%%xmm5, (%[tmp])\n\t"
	    "addss	4(%[tmp]), %%xmm5\n\t"
	    "addss	8(%[tmp]), %%xmm5\n\t"
	    "addss	12(%[tmp]), %%xmm5\n"
	    "\n\t"
	    "movaps	%%xmm2, %%xmm6\n\t"
	    "mulps	%%xmm3, %%xmm6\n\t"
	    "movaps	%%xmm6, (%[tmp])\n\t"
	    "addss	4(%[tmp]), %%xmm6\n\t"
	    "addss	8(%[tmp]), %%xmm6\n\t"
	    "addss	12(%[tmp]), %%xmm6\n"
	    "\n\t"
# ifdef OF_AMD64
	    "movaps	%%xmm8, %%xmm7\n\t"
# else
	    "movaps	48(%[matrix]), %%xmm7\n\t"
# endif
	    "mulps	%%xmm3, %%xmm7\n\t"
	    "movaps	%%xmm7, (%[tmp])\n\t"
	    "addss	4(%[tmp]), %%xmm7\n\t"
	    "addss	8(%[tmp]), %%xmm7\n\t"
	    "addss	12(%[tmp]), %%xmm7\n"
	    "\n\t"
	    "movss	%%xmm4, (%[vectors])\n\t"
	    "movss	%%xmm5, 4(%[vectors])\n\t"
	    "movss	%%xmm6, 8(%[vectors])\n\t"
	    "movss	%%xmm7, 12(%[vectors])\n"
	    "\n\t"
	    "add	$16, %[vectors]\n\t"
	    "dec	%[count]\n\t"
	    "jnz	0b\n"
	    : [count] "+r" (count),
	      [vectors] "+r" (vectors)
	    : [matrix] "r" (self->_values),
	      [tmp] "r" (&tmp)
	    : "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
# ifdef OF_AMD64
	      "xmm8",
# endif
	      "memory"
	);
}
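
For reference, the SSE loop above and the 3DNow! loop further below implement
the same transform as this portable C sketch (a hypothetical helper, assuming
_values stores the four matrix rows contiguously): each output component is
the dot product of one matrix row with the input vector.

	static void
	transformVectors_C(OFMatrix4x4 *self, SEL _cmd, OFVector4D *vectors,
	    size_t count)
	{
		float (*m)[4] = self->_values;

		for (size_t i = 0; i < count; i++) {
			OFVector4D v = vectors[i];

			vectors[i].x = m[0][0] * v.x + m[0][1] * v.y +
			    m[0][2] * v.z + m[0][3] * v.w;
			vectors[i].y = m[1][0] * v.x + m[1][1] * v.y +
			    m[1][2] * v.z + m[1][3] * v.w;
			vectors[i].z = m[2][0] * v.x + m[2][1] * v.y +
			    m[2][2] * v.z + m[2][3] * v.w;
			vectors[i].w = m[3][0] * v.x + m[3][1] * v.y +
			    m[3][2] * v.z + m[3][3] * v.w;
		}
	}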

Old (lines 117-232):

	float (*left)[4] = matrix->_values, (*right)[4] = self->_values;
	float result[4][4], (*resultPtr)[4] = result;

	__asm__ __volatile__ (
	    "movl	$4, %%ecx\n\t"
	    "\n\t"
	    "0:\n\t"
	    "movd	(%2), %%mm0\n\t"
	    "punpckldq	16(%2), %%mm0\n\t"
	    "pfmul	(%1), %%mm0\n\t"
	    "movd	32(%2), %%mm1\n\t"
	    "punpckldq  48(%2), %%mm1\n\t"
	    "pfmul	8(%1), %%mm1\n\t"
	    "pfacc	%%mm1, %%mm0\n\t"
	    "pfacc	%%mm0, %%mm0\n\t"
	    "movd	%%mm0, (%0)\n\t"
	    "movd	4(%2), %%mm0\n\t"
	    "punpckldq	20(%2), %%mm0\n\t"
	    "pfmul	(%1), %%mm0\n\t"
	    "movd	36(%2), %%mm1\n\t"
	    "punpckldq  52(%2), %%mm1\n\t"
	    "pfmul	8(%1), %%mm1\n\t"
	    "pfacc	%%mm1, %%mm0\n\t"
	    "pfacc	%%mm0, %%mm0\n\t"
	    "movd	%%mm0, 4(%0)\n\t"
	    "movd	8(%2), %%mm0\n\t"
	    "punpckldq	24(%2), %%mm0\n\t"
	    "pfmul	(%1), %%mm0\n\t"
	    "movd	40(%2), %%mm1\n\t"
	    "punpckldq  56(%2), %%mm1\n\t"
	    "pfmul	8(%1), %%mm1\n\t"
	    "pfacc	%%mm1, %%mm0\n\t"
	    "pfacc	%%mm0, %%mm0\n\t"
	    "movd	%%mm0, 8(%0)\n\t"
	    "movd	12(%2), %%mm0\n\t"
	    "punpckldq	28(%2), %%mm0\n\t"
	    "pfmul	(%1), %%mm0\n\t"
	    "movd	44(%2), %%mm1\n\t"
	    "punpckldq  60(%2), %%mm1\n\t"
	    "pfmul	8(%1), %%mm1\n\t"
	    "pfacc	%%mm1, %%mm0\n\t"
	    "pfacc	%%mm0, %%mm0\n\t"
	    "movd	%%mm0, 12(%0)\n"
	    "\n\t"
	    "add	$16, %0\n\t"
	    "add	$16, %1\n\t"
	    "decl	%%ecx\n\t"
	    "jnz	0b\n"
	    "\n\t"
	    "femms"
	    : "+r"(resultPtr), "+r"(left), "+r"(right)
	    :: "ecx", "mm0", "mm1", "memory"
	);

	memcpy(self->_values, result, 16 * sizeof(float));
}

static void
transformVectors_3DNow(OFMatrix4x4 *self, SEL _cmd, OFVector4D *vectors,
    size_t count)
{
	__asm__ __volatile__ (
	    "test	%0, %0\n\t"
	    "jz		0f\n"
	    "\n\t"
	    "0:\n\t"
	    "movq	(%1), %%mm0\n\t"
	    "movq	8(%1), %%mm1\n"
	    "\n\t"
	    "movq	%%mm0, %%mm2\n\t"
	    "movq	%%mm1, %%mm3\n\t"
	    "pfmul	(%2), %%mm2\n\t"
	    "pfmul	8(%2), %%mm3\n\t"
	    "pfacc	%%mm3, %%mm2\n\t"
	    "pfacc	%%mm2, %%mm2\n\t"
	    "\n\t"
	    "movq	%%mm0, %%mm3\n\t"
	    "movq	%%mm1, %%mm4\n\t"
	    "pfmul	16(%2), %%mm3\n\t"
	    "pfmul	24(%2), %%mm4\n\t"
	    "pfacc	%%mm4, %%mm3\n\t"
	    "pfacc	%%mm3, %%mm3\n\t"
	    "\n\t"
	    "punpckldq	%%mm3, %%mm2\n\t"
	    "movq	%%mm2, (%1)\n"
	    "\n\t"
	    "movq	%%mm0, %%mm2\n\t"
	    "movq	%%mm1, %%mm3\n\t"
	    "pfmul	32(%2), %%mm2\n\t"
	    "pfmul	40(%2), %%mm3\n\t"
	    "pfacc	%%mm3, %%mm2\n\t"
	    "pfacc	%%mm2, %%mm2\n\t"
	    "\n\t"
	    "pfmul	48(%2), %%mm0\n\t"
	    "pfmul	56(%2), %%mm1\n\t"
	    "pfacc	%%mm1, %%mm0\n\t"
	    "pfacc	%%mm0, %%mm0\n\t"
	    "\n\t"
	    "punpckldq	%%mm0, %%mm2\n\t"
	    "movq	%%mm2, 8(%1)\n"
	    "\n\t"
	    "add	$16, %1\n\t"
	    "dec	%0\n\t"
	    "jnz	0b\n"
	    "\n\t"
	    "0:\n\t"
	    "femms"
	    : "+r"(count), "+r"(vectors)
	    : "r"(self->_values)
	    : "mm0", "mm1", "mm2", "mm3", "mm4", "memory"
	);
}
# ifndef __clang__
#  pragma GCC pop_options
# endif

New (lines 119-238):

	float (*left)[4] = matrix->_values, (*right)[4] = self->_values;
	float result[4][4], (*resultPtr)[4] = result;

	__asm__ __volatile__ (
	    "movl	$4, %%ecx\n\t"
	    "\n\t"
	    "0:\n\t"
	    "movd	(%[right]), %%mm0\n\t"
	    "punpckldq	16(%[right]), %%mm0\n\t"
	    "pfmul	(%[left]), %%mm0\n\t"
	    "movd	32(%[right]), %%mm1\n\t"
	    "punpckldq  48(%[right]), %%mm1\n\t"
	    "pfmul	8(%[left]), %%mm1\n\t"
	    "pfacc	%%mm1, %%mm0\n\t"
	    "pfacc	%%mm0, %%mm0\n\t"
	    "movd	%%mm0, (%[result])\n\t"
	    "movd	4(%[right]), %%mm0\n\t"
	    "punpckldq	20(%[right]), %%mm0\n\t"
	    "pfmul	(%[left]), %%mm0\n\t"
	    "movd	36(%[right]), %%mm1\n\t"
	    "punpckldq  52(%[right]), %%mm1\n\t"
	    "pfmul	8(%[left]), %%mm1\n\t"
	    "pfacc	%%mm1, %%mm0\n\t"
	    "pfacc	%%mm0, %%mm0\n\t"
	    "movd	%%mm0, 4(%[result])\n\t"
	    "movd	8(%[right]), %%mm0\n\t"
	    "punpckldq	24(%[right]), %%mm0\n\t"
	    "pfmul	(%[left]), %%mm0\n\t"
	    "movd	40(%[right]), %%mm1\n\t"
	    "punpckldq  56(%[right]), %%mm1\n\t"
	    "pfmul	8(%[left]), %%mm1\n\t"
	    "pfacc	%%mm1, %%mm0\n\t"
	    "pfacc	%%mm0, %%mm0\n\t"
	    "movd	%%mm0, 8(%[result])\n\t"
	    "movd	12(%[right]), %%mm0\n\t"
	    "punpckldq	28(%[right]), %%mm0\n\t"
	    "pfmul	(%[left]), %%mm0\n\t"
	    "movd	44(%[right]), %%mm1\n\t"
	    "punpckldq  60(%[right]), %%mm1\n\t"
	    "pfmul	8(%[left]), %%mm1\n\t"
	    "pfacc	%%mm1, %%mm0\n\t"
	    "pfacc	%%mm0, %%mm0\n\t"
	    "movd	%%mm0, 12(%[result])\n"
	    "\n\t"
	    "add	$16, %[result]\n\t"
	    "add	$16, %[left]\n\t"
	    "decl	%%ecx\n\t"
	    "jnz	0b\n"
	    "\n\t"
	    "femms"
	    : [result] "+r" (resultPtr),
	      [left] "+r" (left),
	      [right] "+r" (right)
	    :
	    : "ecx", "mm0", "mm1", "memory"
	);

	memcpy(self->_values, result, 16 * sizeof(float));
}

static void
transformVectors_3DNow(OFMatrix4x4 *self, SEL _cmd, OFVector4D *vectors,
    size_t count)
{
	__asm__ __volatile__ (
	    "test	%[count], %[count]\n\t"
	    "jz		0f\n"
	    "\n\t"
	    "0:\n\t"
	    "movq	(%[vectors]), %%mm0\n\t"
	    "movq	8(%[vectors]), %%mm1\n"
	    "\n\t"
	    "movq	%%mm0, %%mm2\n\t"
	    "movq	%%mm1, %%mm3\n\t"
	    "pfmul	(%[matrix]), %%mm2\n\t"
	    "pfmul	8(%[matrix]), %%mm3\n\t"
	    "pfacc	%%mm3, %%mm2\n\t"
	    "pfacc	%%mm2, %%mm2\n\t"
	    "\n\t"
	    "movq	%%mm0, %%mm3\n\t"
	    "movq	%%mm1, %%mm4\n\t"
	    "pfmul	16(%[matrix]), %%mm3\n\t"
	    "pfmul	24(%[matrix]), %%mm4\n\t"
	    "pfacc	%%mm4, %%mm3\n\t"
	    "pfacc	%%mm3, %%mm3\n\t"
	    "\n\t"
	    "punpckldq	%%mm3, %%mm2\n\t"
	    "movq	%%mm2, (%[vectors])\n"
	    "\n\t"
	    "movq	%%mm0, %%mm2\n\t"
	    "movq	%%mm1, %%mm3\n\t"
	    "pfmul	32(%[matrix]), %%mm2\n\t"
	    "pfmul	40(%[matrix]), %%mm3\n\t"
	    "pfacc	%%mm3, %%mm2\n\t"
	    "pfacc	%%mm2, %%mm2\n\t"
	    "\n\t"
	    "pfmul	48(%[matrix]), %%mm0\n\t"
	    "pfmul	56(%[matrix]), %%mm1\n\t"
	    "pfacc	%%mm1, %%mm0\n\t"
	    "pfacc	%%mm0, %%mm0\n\t"
	    "\n\t"
	    "punpckldq	%%mm0, %%mm2\n\t"
	    "movq	%%mm2, 8(%[vectors])\n"
	    "\n\t"
	    "add	$16, %[vectors]\n\t"
	    "dec	%[count]\n\t"
	    "jnz	0b\n"
	    "\n\t"
	    "0:\n\t"
	    "femms"
	    : [count] "+r" (count),
	      [vectors] "+r" (vectors)
	    : [matrix] "r" (self->_values)
	    : "mm0", "mm1", "mm2", "mm3", "mm4", "memory"
	);
}
# ifndef __clang__
#  pragma GCC pop_options
# endif
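
A note on the 3DNow! idiom used throughout this file: pfacc is a horizontal
add, so after two pfmul steps multiply the two vector halves with half a
matrix row each, two chained pfacc steps reduce the four products to a full
dot product in the low word. An annotated excerpt from the loop above (the
comments are added here for explanation and are not in the source):

	"pfmul	(%[matrix]), %%mm2\n\t"		/* mm2 = { x*m0, y*m1 } */
	"pfmul	8(%[matrix]), %%mm3\n\t"	/* mm3 = { z*m2, w*m3 } */
	"pfacc	%%mm3, %%mm2\n\t"	/* mm2 = { mm2[0]+mm2[1], mm3[0]+mm3[1] } */
	"pfacc	%%mm2, %%mm2\n\t"	/* mm2[0] = x*m0 + y*m1 + z*m2 + w*m3 */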

Modified src/OFSystemInfo.m from [491407fc38] to [4a5cc32620].

Old (lines 283-312):

x86CPUID(uint32_t eax, uint32_t ecx)
{
	struct X86Regs regs;

# if defined(OF_AMD64) && defined(__GNUC__)
	__asm__ (
	    "cpuid"
	    : "=a"(regs.eax), "=b"(regs.ebx), "=c"(regs.ecx), "=d"(regs.edx)
	    : "a"(eax), "c"(ecx)
	);
# elif defined(OF_X86) && defined(__GNUC__)
	/*
	 * This workaround is required by older GCC versions when using -fPIC,
	 * as ebx is a special register in PIC code. Yes, GCC is indeed not
	 * able to just push a register onto the stack before the __asm__ block
	 * and to pop it afterwards.
	 */
	__asm__ (
	    "xchgl	%%ebx, %%edi\n\t"
	    "cpuid\n\t"
	    "xchgl	%%edi, %%ebx"
	    : "=a"(regs.eax), "=D"(regs.ebx), "=c"(regs.ecx), "=d"(regs.edx)
	    : "a"(eax), "c"(ecx)
	);
# else
	memset(&regs, 0, sizeof(regs));
# endif

	return regs;
}

New (lines 283-320):

x86CPUID(uint32_t eax, uint32_t ecx)
{
	struct X86Regs regs;

# if defined(OF_AMD64) && defined(__GNUC__)
	__asm__ (
	    "cpuid"
	    : "=a" (regs.eax),
	      "=b" (regs.ebx),
	      "=c" (regs.ecx),
	      "=d" (regs.edx)
	    : "a" (eax),
	      "c" (ecx)
	);
# elif defined(OF_X86) && defined(__GNUC__)
	/*
	 * This workaround is required by older GCC versions when using -fPIC,
	 * as ebx is a special register in PIC code. Yes, GCC is indeed not
	 * able to just push a register onto the stack before the __asm__ block
	 * and to pop it afterwards.
	 */
	__asm__ (
	    "xchgl	%%ebx, %%edi\n\t"
	    "cpuid\n\t"
	    "xchgl	%%edi, %%ebx"
	    : "=a" (regs.eax),
	      "=D" (regs.ebx),
	      "=c" (regs.ecx),
	      "=d" (regs.edx)
	    : "a" (eax),
	      "c" (ecx)
	);
# else
	memset(&regs, 0, sizeof(regs));
# endif

	return regs;
}
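
A hypothetical usage sketch (not part of this check-in): CPUID leaf 1 returns
feature flags, e.g. EDX bit 25 indicates SSE and ECX bit 28 indicates AVX, so
a caller could test for SSE support like this:

	static bool
	x86SupportsSSE(void)
	{
		struct X86Regs regs = x86CPUID(1, 0);

		return (regs.edx & (1u << 25)) != 0;
	}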

Modified src/macros.h from [c4d1f32dd0] to [779db83f80].

Old (lines 494-579):

OFByteSwap16NonConst(uint16_t i)
{
#if defined(OF_HAVE_BUILTIN_BSWAP16)
	return __builtin_bswap16(i);
#elif (defined(OF_AMD64) || defined(OF_X86)) && defined(__GNUC__)
	__asm__ (
	    "xchg{b}	{ %h0, %b0 | %b0, %h0 }"
	    : "=Q"(i)
	    : "0"(i)
	);
#elif defined(OF_POWERPC) && defined(__GNUC__)
	__asm__ (
	    "lhbrx	%0, 0, %1"
	    : "=r"(i)
	    : "r"(&i), "m"(i)
	);
#elif defined(OF_ARMV6) && defined(__GNUC__)
	__asm__ (
	    "rev16	%0, %0"
	    : "=r"(i)
	    : "0"(i)
	);
#else
	i = (i & UINT16_C(0xFF00)) >> 8 |
	    (i & UINT16_C(0x00FF)) << 8;
#endif
	return i;
}

static OF_INLINE uint32_t OF_CONST_FUNC
OFByteSwap32NonConst(uint32_t i)
{
#if defined(OF_HAVE_BUILTIN_BSWAP32)
	return __builtin_bswap32(i);
#elif (defined(OF_AMD64) || defined(OF_X86)) && defined(__GNUC__)
	__asm__ (
	    "bswap	%0"
	    : "=q"(i)
	    : "0"(i)
	);
#elif defined(OF_POWERPC) && defined(__GNUC__)
	__asm__ (
	    "lwbrx	%0, 0, %1"
	    : "=r"(i)
	    : "r"(&i), "m"(i)
	);
#elif defined(OF_ARMV6) && defined(__GNUC__)
	__asm__ (
	    "rev	%0, %0"
	    : "=r"(i)
	    : "0"(i)
	);
#else
	i = (i & UINT32_C(0xFF000000)) >> 24 |
	    (i & UINT32_C(0x00FF0000)) >>  8 |
	    (i & UINT32_C(0x0000FF00)) <<  8 |
	    (i & UINT32_C(0x000000FF)) << 24;
#endif
	return i;
}

static OF_INLINE uint64_t OF_CONST_FUNC
OFByteSwap64NonConst(uint64_t i)
{
#if defined(OF_HAVE_BUILTIN_BSWAP64)
	return __builtin_bswap64(i);
#elif defined(OF_AMD64) && defined(__GNUC__)
	__asm__ (
	    "bswap	%0"
	    : "=r"(i)
	    : "0"(i)
	);
#elif defined(OF_X86) && defined(__GNUC__)
	__asm__ (
	    "bswap	{%%}eax\n\t"
	    "bswap	{%%}edx\n\t"
	    "xchg{l}	{ %%eax, %%edx | edx, eax }"
	    : "=A"(i)
	    : "0"(i)
	);
#else
	i = (uint64_t)OFByteSwap32NonConst(
	    (uint32_t)(i & UINT32_C(0xFFFFFFFF))) << 32 |
	    OFByteSwap32NonConst((uint32_t)(i >> 32));
#endif
	return i;

New (lines 494-581):

OFByteSwap16NonConst(uint16_t i)
{
#if defined(OF_HAVE_BUILTIN_BSWAP16)
	return __builtin_bswap16(i);
#elif (defined(OF_AMD64) || defined(OF_X86)) && defined(__GNUC__)
	__asm__ (
	    "xchg{b}	{ %h0, %b0 | %b0, %h0 }"
	    : "=Q" (i)
	    : "0" (i)
	);
#elif defined(OF_POWERPC) && defined(__GNUC__)
	__asm__ (
	    "lhbrx	%0, 0, %1"
	    : "=r" (i)
	    : "r" (&i),
	      "m" (i)
	);
#elif defined(OF_ARMV6) && defined(__GNUC__)
	__asm__ (
	    "rev16	%0, %0"
	    : "=r" (i)
	    : "0" (i)
	);
#else
	i = (i & UINT16_C(0xFF00)) >> 8 |
	    (i & UINT16_C(0x00FF)) << 8;
#endif
	return i;
}

static OF_INLINE uint32_t OF_CONST_FUNC
OFByteSwap32NonConst(uint32_t i)
{
#if defined(OF_HAVE_BUILTIN_BSWAP32)
	return __builtin_bswap32(i);
#elif (defined(OF_AMD64) || defined(OF_X86)) && defined(__GNUC__)
	__asm__ (
	    "bswap	%0"
	    : "=q" (i)
	    : "0" (i)
	);
#elif defined(OF_POWERPC) && defined(__GNUC__)
	__asm__ (
	    "lwbrx	%0, 0, %1"
	    : "=r" (i)
	    : "r" (&i),
	      "m" (i)
	);
#elif defined(OF_ARMV6) && defined(__GNUC__)
	__asm__ (
	    "rev	%0, %0"
	    : "=r" (i)
	    : "0" (i)
	);
#else
	i = (i & UINT32_C(0xFF000000)) >> 24 |
	    (i & UINT32_C(0x00FF0000)) >>  8 |
	    (i & UINT32_C(0x0000FF00)) <<  8 |
	    (i & UINT32_C(0x000000FF)) << 24;
#endif
	return i;
}

static OF_INLINE uint64_t OF_CONST_FUNC
OFByteSwap64NonConst(uint64_t i)
{
#if defined(OF_HAVE_BUILTIN_BSWAP64)
	return __builtin_bswap64(i);
#elif defined(OF_AMD64) && defined(__GNUC__)
	__asm__ (
	    "bswap	%0"
	    : "=r" (i)
	    : "0" (i)
	);
#elif defined(OF_X86) && defined(__GNUC__)
	__asm__ (
	    "bswap	{%%}eax\n\t"
	    "bswap	{%%}edx\n\t"
	    "xchg{l}	{ %%eax, %%edx | edx, eax }"
	    : "=A" (i)
	    : "0" (i)
	);
#else
	i = (uint64_t)OFByteSwap32NonConst(
	    (uint32_t)(i & UINT32_C(0xFFFFFFFF))) << 32 |
	    OFByteSwap32NonConst((uint32_t)(i >> 32));
#endif
	return i;
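
All three helpers simply reverse the byte order of their argument; as a quick
sanity check of the expected results (example values chosen here for
illustration):

	uint16_t a = OFByteSwap16NonConst(0xABCD);		/* 0xCDAB */
	uint32_t b = OFByteSwap32NonConst(0x12345678);		/* 0x78563412 */
	uint64_t c = OFByteSwap64NonConst(0x0102030405060708);	/* 0x0807060504030201 */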

Modified src/platform/PowerPC/OFAtomic.h from [70aa2844b9] to [1d1e0e8ad4].

Old (lines 18-296):

{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return (void *)i;
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return (void *)i;
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
	int i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "addi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "addi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
	int i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "subi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "subi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "or		%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "or		%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "and	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "and	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "xor	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "xor	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r"(i)
	    : "r"(i), "r"(p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE bool

New (lines 18-308):

{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "add	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return (void *)i;
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "sub	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return (void *)i;
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
	int i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "addi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "addi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
	int i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "subi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %1\n\t"
	    "subi	%0, %0, 1\n\t"
	    "stwcx.	%0, 0, %1\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "or		%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "or		%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "and	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "and	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE unsigned int
OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "xor	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "lwarx	%0, 0, %2\n\t"
	    "xor	%0, %0, %1\n\t"
	    "stwcx.	%0, 0, %2\n\t"
	    "bne-	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "r" (p)
	    : "cc", "memory"
	);

	return i;
}

static OF_INLINE bool

Old (lines 307-322):

	    "bne-	0b\n\t"
	    "li		%0, 1\n\t"
	    "b		2f\n\t"
	    "1:\n\t"
	    "stwcx.	%0, 0, %3\n\t"
	    "li		%0, 0\n\t"
	    "2:"
	    : "=&r"(r)
	    : "r"(o), "r"(n), "r"(p)
	    : "cc", "memory"
	);

	return r;
}

static OF_INLINE bool

New (lines 319-336):

	    "bne-	0b\n\t"
	    "li		%0, 1\n\t"
	    "b		2f\n\t"
	    "1:\n\t"
	    "stwcx.	%0, 0, %3\n\t"
	    "li		%0, 0\n\t"
	    "2:"
	    : "=&r" (r)
	    : "r" (o),
	      "r" (n),
	      "r" (p)
	    : "cc", "memory"
	);

	return r;
}

static OF_INLINE bool

Old (lines 333-348):

	    "bne-	0b\n\t"
	    "li		%0, 1\n\t"
	    "b		2f\n\t"
	    "1:\n\t"
	    "stwcx.	%0, 0, %3\n\t"
	    "li		%0, 0\n\t"
	    "2:"
	    : "=&r"(r)
	    : "r"(o), "r"(n), "r"(p)
	    : "cc", "memory"
	);

	return r;
}

static OF_INLINE bool

New (lines 347-364):

	    "bne-	0b\n\t"
	    "li		%0, 1\n\t"
	    "b		2f\n\t"
	    "1:\n\t"
	    "stwcx.	%0, 0, %3\n\t"
	    "li		%0, 0\n\t"
	    "2:"
	    : "=&r" (r)
	    : "r" (o),
	      "r" (n),
	      "r" (p)
	    : "cc", "memory"
	);

	return r;
}

static OF_INLINE bool

Old (lines 360-375):

	    "bne-	0b\n\t"
	    "li		%0, 1\n\t"
	    "b		2f\n\t"
	    "1:\n\t"
	    "stwcx.	%0, 0, %3\n\t"
	    "li		%0, 0\n\t"
	    "2:"
	    : "=&r"(r)
	    : "r"(o), "r"(n), "r"(p)
	    : "cc", "memory"
	);

	return r;
}

static OF_INLINE void

New (lines 376-393):

	    "bne-	0b\n\t"
	    "li		%0, 1\n\t"
	    "b		2f\n\t"
	    "1:\n\t"
	    "stwcx.	%0, 0, %3\n\t"
	    "li		%0, 0\n\t"
	    "2:"
	    : "=&r" (r)
	    : "r" (o),
	      "r" (n),
	      "r" (p)
	    : "cc", "memory"
	);

	return r;
}

static OF_INLINE void
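
All of the PowerPC primitives above share the same load-linked/
store-conditional shape: lwarx loads the word and places a reservation on it,
the arithmetic runs on the loaded value, and stwcx. only stores if the
reservation still holds; otherwise bne- branches back and retries. Roughly,
in C (an illustrative sketch that uses a GCC builtin as a stand-in for the
reservation check):

	static int
	atomicAddSketch(volatile int *p, int i)
	{
		int old, new;

		do {
			old = *p;	/* lwarx: load and reserve */
			new = old + i;	/* add */
		} while (!__sync_bool_compare_and_swap(p, old, new));
					/* stwcx. failed -> bne- retries */

		return new;
	}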

Modified src/platform/x86/OFAtomic.h from [5dfd999b87] to [2f1d39434a].

Old (lines 18-249):

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xadd{l}	{ %0, %2 | %2, %0 }\n\t"
	    "add{l}	{ %1, %0 | %0, %1 }"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return i;
}

static OF_INLINE int
OFAtomicIntAdd(volatile int *_Nonnull p, int i)
{
	if (sizeof(int) == 4)
		return OFAtomicInt32Add(p, i);
#ifdef OF_AMD64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "lock\n\t"
		    "xadd{q}	{ %0, %2 | %2, %0 }\n\t"
		    "add{q}	{ %1, %0 | %0, %1 }"
		    : "+&r"(i)
		    : "r"(i), "m"(*p)
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if defined(OF_AMD64)
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xadd{q}	{ %0, %2 | %2, %0 }\n\t"
	    "add{q}	{ %1, %0 | %0, %1 }"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return (void *)i;
#elif defined(OF_X86)
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xadd{l}	{ %0, %2 | %2, %0 }\n\t"
	    "add{l}	{ %1, %0 | %0, %1 }"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return (void *)i;
#endif
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "neg{l}	%0\n\t"
	    "lock\n\t"
	    "xadd{l}	{ %0, %2 | %2, %0 }\n\t"
	    "sub{l}	{ %1, %0 | %0, %1 }"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return i;
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
	if (sizeof(int) == 4)
		return OFAtomicInt32Subtract(p, i);
#ifdef OF_AMD64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "neg{q}	%0\n\t"
		    "lock\n\t"
		    "xadd{q}	{ %0, %2 | %2, %0 }\n\t"
		    "sub{q}	{ %1, %0 | %0, %1 }"
		    : "+&r"(i)
		    : "r"(i), "m"(*p)
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if defined(OF_AMD64)
	__asm__ __volatile__ (
	    "neg{q}	%0\n\t"
	    "lock\n\t"
	    "xadd{q}	{ %0, %2 | %2, %0 }\n\t"
	    "sub{q}	{ %1, %0 | %0, %1 }"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return (void *)i;
#elif defined(OF_X86)
	__asm__ __volatile__ (
	    "neg{l}	%0\n\t"
	    "lock\n\t"
	    "xadd{l}	{ %0, %2 | %2, %0 }\n\t"
	    "sub{l}	{ %1, %0 | %0, %1 }"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return (void *)i;
#endif
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "xor{l}	%0, %0\n\t"
	    "inc{l}	%0\n\t"
	    "lock\n\t"
	    "xadd{l}	{ %0, %1 | %1, %0 }\n\t"
	    "inc{l}	%0"
	    : "=&r"(i)
	    : "m"(*p)
	);

	return i;
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
	int i;

	if (sizeof(int) == 4)
		return OFAtomicInt32Increase(p);
#ifdef OF_AMD64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "xor{q}	%0, %0\n\t"
		    "inc{q}	%0\n\t"
		    "lock\n\t"
		    "xadd{q}	{ %0, %1 | %1, %0 }\n\t"
		    "inc{q}	%0"
		    : "=&r"(i)
		    : "m"(*p)
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "xor{l}	%0, %0\n\t"
	    "dec{l}	%0\n\t"
	    "lock\n\t"
	    "xadd{l}	{ %0, %1 | %1, %0 }\n\t"
	    "dec{l}	%0"
	    : "=&r"(i)
	    : "m"(*p)
	);

	return i;
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
	int i;

	if (sizeof(int) == 4)
		return OFAtomicInt32Decrease(p);
#ifdef OF_AMD64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "xor{q}	%0, %0\n\t"
		    "dec{q}	%0\n\t"
		    "lock\n\t"
		    "xadd{q}	{ %0, %1 | %1, %0 }\n\t"
		    "dec{q}	%0"
		    : "=&r"(i)
		    : "m"(*p)
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "mov{l}	{ %2, %0 | %0, %2 }\n\t"
	    "mov{l}	{ %0, %%eax | eax, %0 }\n\t"
	    "or{l}	{ %1, %0 | %0, %1 }\n\t"
	    "lock\n\t"
	    "cmpxchg{l}	{ %0, %2 | %2, %0 }\n\t"
	    "jne	0b"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
}

static OF_INLINE unsigned int

New (lines 18-258):

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xadd{l}	{ %0, %2 | %2, %0 }\n\t"
	    "add{l}	{ %1, %0 | %0, %1 }"
	    : "+&r" (i)
	    : "r" (i),
	      "m" (*p)
	);

	return i;
}

static OF_INLINE int
OFAtomicIntAdd(volatile int *_Nonnull p, int i)
{
	if (sizeof(int) == 4)
		return OFAtomicInt32Add(p, i);
#ifdef OF_AMD64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "lock\n\t"
		    "xadd{q}	{ %0, %2 | %2, %0 }\n\t"
		    "add{q}	{ %1, %0 | %0, %1 }"
		    : "+&r" (i)
		    : "r" (i),
		      "m" (*p)
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if defined(OF_AMD64)
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xadd{q}	{ %0, %2 | %2, %0 }\n\t"
	    "add{q}	{ %1, %0 | %0, %1 }"
	    : "+&r" (i)
	    : "r" (i),
	      "m" (*p)
	);

	return (void *)i;
#elif defined(OF_X86)
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xadd{l}	{ %0, %2 | %2, %0 }\n\t"
	    "add{l}	{ %1, %0 | %0, %1 }"
	    : "+&r" (i)
	    : "r" (i),
	      "m" (*p)
	);

	return (void *)i;
#endif
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
	__asm__ __volatile__ (
	    "neg{l}	%0\n\t"
	    "lock\n\t"
	    "xadd{l}	{ %0, %2 | %2, %0 }\n\t"
	    "sub{l}	{ %1, %0 | %0, %1 }"
	    : "+&r" (i)
	    : "r" (i),
	      "m" (*p)
	);

	return i;
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
	if (sizeof(int) == 4)
		return OFAtomicInt32Subtract(p, i);
#ifdef OF_AMD64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "neg{q}	%0\n\t"
		    "lock\n\t"
		    "xadd{q}	{ %0, %2 | %2, %0 }\n\t"
		    "sub{q}	{ %1, %0 | %0, %1 }"
		    : "+&r" (i)
		    : "r" (i),
		      "m" (*p)
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if defined(OF_AMD64)
	__asm__ __volatile__ (
	    "neg{q}	%0\n\t"
	    "lock\n\t"
	    "xadd{q}	{ %0, %2 | %2, %0 }\n\t"
	    "sub{q}	{ %1, %0 | %0, %1 }"
	    : "+&r" (i)
	    : "r" (i),
	      "m" (*p)
	);

	return (void *)i;
#elif defined(OF_X86)
	__asm__ __volatile__ (
	    "neg{l}	%0\n\t"
	    "lock\n\t"
	    "xadd{l}	{ %0, %2 | %2, %0 }\n\t"
	    "sub{l}	{ %1, %0 | %0, %1 }"
	    : "+&r" (i)
	    : "r" (i),
	      "m" (*p)
	);

	return (void *)i;
#endif
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "xor{l}	%0, %0\n\t"
	    "inc{l}	%0\n\t"
	    "lock\n\t"
	    "xadd{l}	{ %0, %1 | %1, %0 }\n\t"
	    "inc{l}	%0"
	    : "=&r" (i)
	    : "m" (*p)
	);

	return i;
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
	int i;

	if (sizeof(int) == 4)
		return OFAtomicInt32Increase(p);
#ifdef OF_AMD64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "xor{q}	%0, %0\n\t"
		    "inc{q}	%0\n\t"
		    "lock\n\t"
		    "xadd{q}	{ %0, %1 | %1, %0 }\n\t"
		    "inc{q}	%0"
		    : "=&r" (i)
		    : "m" (*p)
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE int32_t
OFAtomicInt32Decrease(volatile int32_t *_Nonnull p)
{
	int32_t i;

	__asm__ __volatile__ (
	    "xor{l}	%0, %0\n\t"
	    "dec{l}	%0\n\t"
	    "lock\n\t"
	    "xadd{l}	{ %0, %1 | %1, %0 }\n\t"
	    "dec{l}	%0"
	    : "=&r" (i)
	    : "m" (*p)
	);

	return i;
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
	int i;

	if (sizeof(int) == 4)
		return OFAtomicInt32Decrease(p);
#ifdef OF_AMD64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "xor{q}	%0, %0\n\t"
		    "dec{q}	%0\n\t"
		    "lock\n\t"
		    "xadd{q}	{ %0, %1 | %1, %0 }\n\t"
		    "dec{q}	%0"
		    : "=&r" (i)
		    : "m" (*p)
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Or(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "mov{l}	{ %2, %0 | %0, %2 }\n\t"
	    "mov{l}	{ %0, %%eax | eax, %0 }\n\t"
	    "or{l}	{ %1, %0 | %0, %1 }\n\t"
	    "lock\n\t"
	    "cmpxchg{l}	{ %0, %2 | %2, %0 }\n\t"
	    "jne	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "m" (*p)
	    : "eax", "cc"
	);

	return i;
}

static OF_INLINE unsigned int

Old (lines 257-294):

		    "0:\n\t"
		    "mov{q}	{ %2, %0 | %0, %2 }\n\t"
		    "mov{q}	{ %0, %%rax | rax, %0 }\n\t"
		    "or{q}	{ %1, %0 | %0, %1 }\n\t"
		    "lock\n\t"
		    "cmpxchg{q}	{ %0, %2 | %2, %0 }\n\t"
		    "jne	0b"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "rax", "cc"
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "mov{l}	{ %2, %0 | %0, %2 }\n\t"
	    "mov{l}	{ %0, %%eax | eax, %0 }\n\t"
	    "and{l}	{ %1, %0 | %0, %1 }\n\t"
	    "lock\n\t"
	    "cmpxchg{l}	{ %0, %2 | %2, %0 }\n\t"
	    "jne	0b"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
}

static OF_INLINE unsigned int

New (lines 266-305):

		    "0:\n\t"
		    "mov{q}	{ %2, %0 | %0, %2 }\n\t"
		    "mov{q}	{ %0, %%rax | rax, %0 }\n\t"
		    "or{q}	{ %1, %0 | %0, %1 }\n\t"
		    "lock\n\t"
		    "cmpxchg{q}	{ %0, %2 | %2, %0 }\n\t"
		    "jne	0b"
		    : "=&r" (i)
		    : "r" (i),
		      "m" (*p)
		    : "rax", "cc"
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32And(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "mov{l}	{ %2, %0 | %0, %2 }\n\t"
	    "mov{l}	{ %0, %%eax | eax, %0 }\n\t"
	    "and{l}	{ %1, %0 | %0, %1 }\n\t"
	    "lock\n\t"
	    "cmpxchg{l}	{ %0, %2 | %2, %0 }\n\t"
	    "jne	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "m" (*p)
	    : "eax", "cc"
	);

	return i;
}

static OF_INLINE unsigned int

Old (lines 302-339):

		    "0:\n\t"
		    "mov{q}	{ %2, %0 | %0, %2 }\n\t"
		    "mov{q}	{ %0, %%rax | rax, %0 }\n\t"
		    "and{q}	{ %1, %0 | %0, %1 }\n\t"
		    "lock\n\t"
		    "cmpxchg{q}	{ %0, %2 | %2, %0 }\n\t"
		    "jne	0b"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "rax", "cc"
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "mov{l}	{ %2, %0 | %0, %2 }\n\t"
	    "mov{l}	{ %0, %%eax | eax, %0 }\n\t"
	    "xor{l}	{ %1, %0 | %0, %1 }\n\t"
	    "lock\n\t"
	    "cmpxchg{l}	{ %0, %2 | %2, %0 }\n\t"
	    "jne	0b"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
}

static OF_INLINE unsigned int

New (lines 313-352):

		    "0:\n\t"
		    "mov{q}	{ %2, %0 | %0, %2 }\n\t"
		    "mov{q}	{ %0, %%rax | rax, %0 }\n\t"
		    "and{q}	{ %1, %0 | %0, %1 }\n\t"
		    "lock\n\t"
		    "cmpxchg{q}	{ %0, %2 | %2, %0 }\n\t"
		    "jne	0b"
		    : "=&r" (i)
		    : "r" (i),
		      "m" (*p)
		    : "rax", "cc"
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE uint32_t
OFAtomicInt32Xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
	__asm__ __volatile__ (
	    "0:\n\t"
	    "mov{l}	{ %2, %0 | %0, %2 }\n\t"
	    "mov{l}	{ %0, %%eax | eax, %0 }\n\t"
	    "xor{l}	{ %1, %0 | %0, %1 }\n\t"
	    "lock\n\t"
	    "cmpxchg{l}	{ %0, %2 | %2, %0 }\n\t"
	    "jne	0b"
	    : "=&r" (i)
	    : "r" (i),
	      "m" (*p)
	    : "eax", "cc"
	);

	return i;
}

static OF_INLINE unsigned int

Old (lines 347-420):

		    "0:\n\t"
		    "mov{q}	{ %2, %0 | %0, %2 }\n\t"
		    "mov{q}	{ %0, %%rax | rax, %0 }\n\t"
		    "xor{q}	{ %1, %0 | %0, %1 }\n\t"
		    "lock\n\t"
		    "cmpxchg{q}	{ %0, %2 | %2, %0 }\n\t"
		    "jne	0b"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "rax", "cc"
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE bool
OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg{l}	{ %2, %3 | %3, %2 }\n\t"
	    "sete	%b0\n\t"
	    "movz{bl|x}	{ %b0, %0 | %0, %b0 }"
	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
	    : "r"(n), "m"(*p)
	    : "cc"
	);

	return r;
}

static OF_INLINE bool
OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg	{ %2, %3 | %3, %2 }\n\t"
	    "sete	%b0\n\t"
	    "movz{bl|x}	{ %b0, %0 | %0, %b0 }"
	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
	    : "r"(n), "m"(*p)
	    : "cc"
	);

	return r;
}

static OF_INLINE bool
OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg	{ %2, %3 | %3, %2 }\n\t"
	    "sete	%b0\n\t"
	    "movz{bl|x}	{ %b0, %0 | %0, %b0 }"
	    : "=&d"(r), "+a"(o)	/* use d instead of r to avoid a gcc bug */
	    : "r"(n), "m"(*p)
	    : "cc"
	);

	return r;
}

static OF_INLINE void

New (lines 360-440):

		    "0:\n\t"
		    "mov{q}	{ %2, %0 | %0, %2 }\n\t"
		    "mov{q}	{ %0, %%rax | rax, %0 }\n\t"
		    "xor{q}	{ %1, %0 | %0, %1 }\n\t"
		    "lock\n\t"
		    "cmpxchg{q}	{ %0, %2 | %2, %0 }\n\t"
		    "jne	0b"
		    : "=&r" (i)
		    : "r" (i),
		      "m" (*p)
		    : "rax", "cc"
		);
#endif
	else
		abort();

	return i;
}

static OF_INLINE bool
OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg{l}	{ %2, %3 | %3, %2 }\n\t"
	    "sete	%b0\n\t"
	    "movz{bl|x}	{ %b0, %0 | %0, %b0 }"
	    : "=&d" (r),	/* use d instead of r to avoid a gcc bug */
	      "+a" (o)
	    : "r" (n),
	      "m" (*p)
	    : "cc"
	);

	return r;
}

static OF_INLINE bool
OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg	{ %2, %3 | %3, %2 }\n\t"
	    "sete	%b0\n\t"
	    "movz{bl|x}	{ %b0, %0 | %0, %b0 }"
	    : "=&d" (r),	/* use d instead of r to avoid a gcc bug */
	      "+a" (o)
	    : "r" (n),
	      "m" (*p)
	    : "cc"
	);

	return r;
}

static OF_INLINE bool
OFAtomicPointerCompareAndSwap(void *volatile _Nullable *_Nonnull p,
    void *_Nullable o, void *_Nullable n)
{
	int r;

	__asm__ __volatile__ (
	    "lock\n\t"
	    "cmpxchg	{ %2, %3 | %3, %2 }\n\t"
	    "sete	%b0\n\t"
	    "movz{bl|x}	{ %b0, %0 | %0, %b0 }"
	    : "=&d" (r),	/* use d instead of r to avoid a gcc bug */
	      "+a" (o)
	    : "r" (n),
	      "m" (*p)
	    : "cc"
	);

	return r;
}

static OF_INLINE void
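
The compare-and-swap helpers return whether the swap took place, which is all
that is needed to build higher-level constructs on top. A hypothetical sketch
(illustration only; a production lock would also need memory barriers):

	static volatile int lock = 0;

	static void
	spinLock(void)
	{
		/* Atomically turn 0 (unlocked) into 1 (locked). */
		while (!OFAtomicIntCompareAndSwap(&lock, 0, 1))
			;
	}

	static void
	spinUnlock(void)
	{
		lock = 0;
	}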