@@ -21,15 +21,15 @@
 #ifdef OF_HAVE_OSATOMIC
 # include <libkern/OSAtomic.h>
 #endif
 
 static OF_INLINE int
 of_atomic_add_int(volatile int *p, int i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*p += i);
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	if (sizeof(int) == 4)
 		__asm__ (
 		    "lock\n\t"
 		    "xaddl %0, %2\n\t"
 		    "addl %1, %0"
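A usage sketch may help here: of_atomic_add_int() returns the new value, which is why the x86 path re-adds %1 after lock xadd has left the old value in %0. The counter below is hypothetical and assumes this header is included as "atomic.h".

#include "atomic.h"

static volatile int requests;

static int
count_request(void)
{
	/* lock xadd atomically fetches the old value; the trailing
	 * addl converts it into the new value before returning. */
	return of_atomic_add_int(&requests, 1);
}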
@@ -65,15 +65,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE int32_t
 of_atomic_add_32(volatile int32_t *p, int32_t i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*p += i);
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	__asm__ (
 	    "lock\n\t"
 	    "xaddl %0, %2\n\t"
 	    "addl %1, %0"
 	    : "+&r"(i)
@@ -89,15 +89,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE void*
 of_atomic_add_ptr(void* volatile *p, intptr_t i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*(char* volatile*)p += i);
 #elif defined(OF_X86_ASM)
 	__asm__ (
 	    "lock\n\t"
 	    "xaddl %0, %2\n\t"
 	    "addl %1, %0"
 	    : "+&r"(i)
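Note the char* volatile* cast in the non-threaded path: the offset is interpreted in bytes, not in pointee-sized elements. A hypothetical bump allocator making use of that, under the same header assumption as above:

#include <stddef.h>

static void *volatile bump;

static void *
alloc_bytes(size_t size)
{
	/* of_atomic_add_ptr() returns the *new* pointer, so the
	 * caller's block starts size bytes before it. */
	char *end = of_atomic_add_ptr(&bump, (intptr_t)size);

	return end - size;
}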
@@ -130,15 +130,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE int
 of_atomic_sub_int(volatile int *p, int i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*p -= i);
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	if (sizeof(int) == 4)
 		__asm__ (
 		    "negl %0\n\t"
 		    "lock\n\t"
 		    "xaddl %0, %2\n\t"
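The negl before lock xadd exists because x86 has no fetch-and-subtract: negating the operand turns the subtraction into an addition, and the trailing subl recovers the new value. A hypothetical check of the resulting identity (away from the INT_MIN overflow edge case):

#include <assert.h>

static void
check_sub_is_negated_add(void)
{
	volatile int a = 10, b = 10;

	assert(of_atomic_sub_int(&a, 3) == of_atomic_add_int(&b, -3));
	assert(a == 7 && b == 7);
}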
@@ -176,15 +176,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE int32_t
 of_atomic_sub_32(volatile int32_t *p, int32_t i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*p -= i);
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	__asm__ (
 	    "negl %0\n\t"
 	    "lock\n\t"
 	    "xaddl %0, %2\n\t"
 	    "subl %1, %0"
@@ -201,15 +201,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE void*
 of_atomic_sub_ptr(void* volatile *p, intptr_t i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*(char* volatile*)p -= i);
 #elif defined(OF_X86_ASM)
 	__asm__ (
 	    "negl %0\n\t"
 	    "lock\n\t"
 	    "xaddl %0, %2\n\t"
 	    "subl %1, %0"
@@ -244,15 +244,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE int
 of_atomic_inc_int(volatile int *p)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return ++*p;
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	int i;
 
 	if (sizeof(int) == 4)
 		__asm__ (
 		    "xorl %0, %0\n\t"
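of_atomic_inc_int() is add-by-one: the xorl %0, %0 merely zeroes the register that incl then turns into the constant 1 before the usual lock xadd. A hypothetical sequence counter:

static volatile int generation;

static int
next_generation(void)
{
	/* Returns the incremented (new) value, like the other
	 * of_atomic_* arithmetic helpers. */
	return of_atomic_inc_int(&generation);
}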
@@ -294,15 +294,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE int32_t
 of_atomic_inc_32(volatile int32_t *p)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return ++*p;
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	uint32_t i;
 
 	__asm__ (
 	    "xorl %0, %0\n\t"
 	    "incl %0\n\t"
@@ -322,15 +322,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE int
 of_atomic_dec_int(volatile int *p)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return --*p;
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	int i;
 
 	if (sizeof(int) == 4)
 		__asm__ (
 		    "xorl %0, %0\n\t"
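Because of_atomic_dec_int() returns the new count, exactly one thread observes zero, which is the property reference counting needs. A hypothetical release path (object_t and its refCount field are illustrative only, not from this header):

#include <stdlib.h>

typedef struct {
	volatile int refCount;
	/* ... payload ... */
} object_t;

static void
release(object_t *object)
{
	/* Only the caller that decrements the count to zero frees. */
	if (of_atomic_dec_int(&object->refCount) == 0)
		free(object);
}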
@@ -372,15 +372,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE int32_t
 of_atomic_dec_32(volatile int32_t *p)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return --*p;
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	uint32_t i;
 
 	__asm__ (
 	    "xorl %0, %0\n\t"
 	    "decl %0\n\t"
@@ -400,15 +400,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE unsigned int
 of_atomic_or_int(volatile unsigned int *p, unsigned int i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*p |= i);
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	if (sizeof(int) == 4)
 		__asm__ (
 		    "0:\n\t"
 		    "movl %2, %0\n\t"
 		    "movl %2, %%eax\n\t"
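The "0:" label and the copies of %2 into %0 and %eax mark the start of a compare-and-swap retry loop (the elided lines presumably or into %0, lock cmpxchg, and jump back to 0: on failure); the and and xor variants below follow the same shape. In portable C the loop looks roughly like this sketch, built on of_atomic_cmpswap_32() from this same header (the wrapper name is hypothetical):

#include <stdint.h>

static uint32_t
atomic_or_via_cas(volatile uint32_t *p, uint32_t mask)
{
	int32_t o, n;

	/* Retry until no other thread modified *p between the read
	 * and the compare-and-swap. */
	do {
		o = (int32_t)*p;
		n = o | (int32_t)mask;
	} while (!of_atomic_cmpswap_32((volatile int32_t*)p, o, n));

	return (uint32_t)n;
}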
@@ -454,15 +454,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE uint32_t
 of_atomic_or_32(volatile uint32_t *p, uint32_t i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*p |= i);
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	__asm__ (
 	    "0:\n\t"
 	    "movl %2, %0\n\t"
 	    "movl %2, %%eax\n\t"
 	    "orl %1, %0\n\t"
@@ -483,15 +483,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE unsigned int
 of_atomic_and_int(volatile unsigned int *p, unsigned int i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*p &= i);
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	if (sizeof(int) == 4)
 		__asm__ (
 		    "0:\n\t"
 		    "movl %2, %0\n\t"
 		    "movl %2, %%eax\n\t"
@@ -537,15 +537,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE uint32_t
 of_atomic_and_32(volatile uint32_t *p, uint32_t i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*p &= i);
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	__asm__ (
 	    "0:\n\t"
 	    "movl %2, %0\n\t"
 	    "movl %2, %%eax\n\t"
 	    "andl %1, %0\n\t"
@@ -566,15 +566,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE unsigned int
 of_atomic_xor_int(volatile unsigned int *p, unsigned int i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*p ^= i);
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	if (sizeof(int) == 4)
 		__asm__ (
 		    "0:\n\t"
 		    "movl %2, %0\n\t"
 		    "movl %2, %%eax\n\t"
@@ -620,15 +620,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE uint32_t
 of_atomic_xor_32(volatile uint32_t *p, uint32_t i)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	return (*p ^= i);
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
 	__asm__ (
 	    "0:\n\t"
 	    "movl %2, %0\n\t"
 	    "movl %2, %%eax\n\t"
 	    "xorl %1, %0\n\t"
@@ -649,15 +649,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE BOOL
 of_atomic_cmpswap_int(volatile int *p, int o, int n)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	if (*p == o) {
 		*p = n;
 		return YES;
 	}
 
 	return NO;
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
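of_atomic_cmpswap_int() returns a BOOL indicating whether the swap happened, which is enough to build arbitrary read-modify-write operations. A hypothetical atomic maximum:

static void
atomic_max(volatile int *p, int v)
{
	int o;

	do {
		o = *p;
		/* Another thread already stored a larger value. */
		if (o >= v)
			return;
	} while (!of_atomic_cmpswap_int(p, o, v));
}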
@@ -683,15 +683,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE BOOL
 of_atomic_cmpswap_32(volatile int32_t *p, int32_t o, int32_t n)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	if (*p == o) {
 		*p = n;
 		return YES;
 	}
 
 	return NO;
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
@@ -717,15 +717,15 @@
 # error No atomic operations available!
 #endif
 }
 
 static OF_INLINE BOOL
 of_atomic_cmpswap_ptr(void* volatile *p, void *o, void *n)
 {
-#if !defined(OF_THREADS)
+#if !defined(OF_HAVE_THREADS)
 	if (*p == o) {
 		*p = n;
 		return YES;
 	}
 
 	return NO;
 #elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
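Finally, of_atomic_cmpswap_ptr() is the building block for lock-free data structures. A hypothetical Treiber-style stack push, retrying until the head is unchanged between the read and the swap:

struct node {
	struct node *next;
};

static void *volatile top;

static void
push(struct node *n)
{
	void *o;

	do {
		o = top;
		n->next = o;
	} while (!of_atomic_cmpswap_ptr(&top, o, n));
}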