Old version of the atomic add/subtract/increment/decrement helpers (file lines 22-106):
static OF_INLINE int32_t
of_atomic_add_32(volatile int32_t *p, int32_t i)
{
#if !defined(OF_THREADS)
	return (*p += i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	int32_t r = *p + i;

	__asm__ volatile (
	    "lock\n\t"
	    "addl %0, (%1)"
	    :
	    : "r"(i), "r"(p), "m"(*p)
	);

	return r;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, i);
#elif defined(OF_HAVE_LIBKERN_OSATOMIC_H)
	return OSAtomicAdd32Barrier(i, p);
#endif
}

static OF_INLINE int32_t
of_atomic_sub_32(volatile int32_t *p, int32_t i)
{
#if !defined(OF_THREADS)
	return (*p -= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	int32_t r = *p - i;

	__asm__ volatile (
	    "lock\n\t"
	    "subl %0, (%1)"
	    :
	    : "r"(i), "r"(p), "m"(*p)
	);

	return r;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, i);
#elif defined(OF_HAVE_LIBKERN_OSATOMIC_H)
	return OSAtomicAdd32Barrier(-i, p);
#endif
}

static OF_INLINE int32_t
of_atomic_inc_32(volatile int32_t *p)
{
#if !defined(OF_THREADS)
	return ++*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	int32_t r = *p + 1;

	__asm__ volatile (
	    "lock\n\t"
	    "incl (%0)"
	    :
	    : "r"(p), "m"(*p)
	);

	return r;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, 1);
#elif defined(OF_HAVE_LIBKERN_OSATOMIC_H)
	return OSAtomicIncrement32Barrier(p);
#endif
}

static OF_INLINE int32_t
of_atomic_dec_32(volatile int32_t *p)
{
#if !defined(OF_THREADS)
	return --*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	int32_t r = *p - 1;

	__asm__ volatile (
	    "lock\n\t"
	    "decl (%0)"
	    :
	    : "r"(p), "m"(*p)
	);

	return r;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, 1);
#elif defined(OF_HAVE_LIBKERN_OSATOMIC_H)
	return OSAtomicDecrement32Barrier(p);
#endif
}
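In this old version, the return value is computed from a plain read of *p before the locked instruction runs, so only the memory update is atomic: two threads adding concurrently can both return stale results. A minimal C11 sketch of the same flawed pattern (old_style_add_32 is a hypothetical name, not part of this file):

#include <stdatomic.h>
#include <stdint.h>

static inline int32_t
old_style_add_32(volatile _Atomic int32_t *p, int32_t i)
{
	int32_t r = atomic_load(p) + i;	/* separate, racy read of *p */

	atomic_fetch_add(p, i);		/* the actual atomic update */

	/* r was derived from a value that may already be stale, so it
	 * can differ from the value the fetch-add just produced. */
	return r;
}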
New version (file lines 22-119):
static OF_INLINE int32_t
of_atomic_add_32(volatile int32_t *p, int32_t i)
{
#if !defined(OF_THREADS)
	return (*p += i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	__asm__ (
	    "lock\n\t"
	    "xaddl %0, %2\n\t"
	    "addl %1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, i);
#elif defined(OF_HAVE_LIBKERN_OSATOMIC_H)
	return OSAtomicAdd32Barrier(i, p);
#endif
}

static OF_INLINE int32_t
of_atomic_sub_32(volatile int32_t *p, int32_t i)
{
#if !defined(OF_THREADS)
	return (*p -= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	__asm__ (
	    "negl %0\n\t"
	    "lock\n\t"
	    "xaddl %0, %2\n\t"
	    "subl %1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, i);
#elif defined(OF_HAVE_LIBKERN_OSATOMIC_H)
	return OSAtomicAdd32Barrier(-i, p);
#endif
}

static OF_INLINE int32_t
of_atomic_inc_32(volatile int32_t *p)
{
#if !defined(OF_THREADS)
	return ++*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	uint32_t i;

	__asm__ (
	    "xorl %0, %0\n\t"
	    "incl %0\n\t"
	    "lock\n\t"
	    "xaddl %0, %1\n\t"
	    "incl %0"
	    : "=&r"(i)
	    : "m"(*p)
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, 1);
#elif defined(OF_HAVE_LIBKERN_OSATOMIC_H)
	return OSAtomicIncrement32Barrier(p);
#endif
}

static OF_INLINE int32_t
of_atomic_dec_32(volatile int32_t *p)
{
#if !defined(OF_THREADS)
	return --*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	uint32_t i;

	__asm__ (
	    "xorl %0, %0\n\t"
	    "decl %0\n\t"
	    "lock\n\t"
	    "xaddl %0, %1\n\t"
	    "decl %0"
	    : "=&r"(i)
	    : "m"(*p)
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, 1);
#elif defined(OF_HAVE_LIBKERN_OSATOMIC_H)
	return OSAtomicDecrement32Barrier(p);
#endif
}
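The rewrite works because lock xaddl exchanges the source register with the memory operand while adding, leaving the previous value of *p in the register; adding i (or 1, or -1) to that old value reconstructs exactly the value the atomic operation stored, with no separate read of *p. The same contract in C11 form (new_style_add_32 is a hypothetical name, assuming a compiler with <stdatomic.h>):

#include <stdatomic.h>
#include <stdint.h>

static inline int32_t
new_style_add_32(volatile _Atomic int32_t *p, int32_t i)
{
	/* atomic_fetch_add returns the old value, like lock xaddl;
	 * old + i is the value the atomic add actually stored. */
	return atomic_fetch_add(p, i) + i;
}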
First compare-and-swap hunk, old version (file lines 175-189):
		*p = n;
		return YES;
	}
	return NO;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	uint32_t r;

	__asm__ volatile (
	    "lock; cmpxchg %2, (%3)\n\t"
	    "lahf\n\t"
	    "andb $64, %%ah\n\t"
	    "shrb $6, %%ah\n\t"
	    "movzx %%ah, %0\n\t"
	    : "=a"(r)
	    : "a"(o), "r"(n), "r"(p), "m"(*p)
First compare-and-swap hunk, new version (file lines 188-202):
		*p = n;
		return YES;
	}
	return NO;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	uint32_t r;

	__asm__ (
	    "lock; cmpxchg %2, (%3)\n\t"
	    "lahf\n\t"
	    "andb $64, %%ah\n\t"
	    "shrb $6, %%ah\n\t"
	    "movzx %%ah, %0\n\t"
	    : "=a"(r)
	    : "a"(o), "r"(n), "r"(p), "m"(*p)
Second compare-and-swap hunk, old version (file lines 204-218):
		*p = n;
		return YES;
	}
	return NO;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	uint32_t r;

	__asm__ volatile (
	    "lock; cmpxchg %2, (%3)\n\t"
	    "lahf\n\t"
	    "andb $64, %%ah\n\t"
	    "shrb $6, %%ah\n\t"
	    "movzx %%ah, %0\n\t"
	    : "=a"(r)
	    : "a"(o), "q"(n), "q"(p), "m"(*p)
Second compare-and-swap hunk, new version (file lines 217-231):
		*p = n;
		return YES;
	}
	return NO;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	uint32_t r;

	__asm__ (
	    "lock; cmpxchg %2, (%3)\n\t"
	    "lahf\n\t"
	    "andb $64, %%ah\n\t"
	    "shrb $6, %%ah\n\t"
	    "movzx %%ah, %0\n\t"
	    : "=a"(r)
	    : "a"(o), "q"(n), "q"(p), "m"(*p)