ObjFW Diff

Differences From Artifact [6e95bb1842]:

To Artifact [5acf14775c]:

  • File src/atomic.h — part of check-in [d526d938d7] at 2015-11-21 18:35:39 on branch trunk — Remove OF_NULLABLE / OF_NONNULL

    Now that Clang has switched to using _Nullable and _Nonnull instead of
    __nullable / __nonnull, there is no longer a conflict with glibc. This
    means we can simply define _Nullable / _Nonnull to nothing if the
    compiler does not understand them, which did not work with __nullable /
    __nonnull because of that conflict.

    This also defines _Null_unspecified to nothing if unavailable. (user: js, size: 20349)
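
As a minimal sketch of the fallback the message describes (the detection logic here is an assumption; ObjFW's actual headers may test for support differently), qualifiers the compiler does not understand can simply be defined away:

#ifndef __has_feature
/* Compilers without Clang's __has_feature get a stub that always
 * reports 0, so the check below is safe everywhere. */
# define __has_feature(x) 0
#endif

#if !__has_feature(nullability)
/* No nullability support: define the qualifiers to nothing so that
 * annotated declarations still compile. */
# define _Nullable
# define _Nonnull
# define _Null_unspecified
#endif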


@@ -25,15 +25,15 @@
#ifdef OF_HAVE_OSATOMIC
# include <libkern/OSAtomic.h>
#endif

OF_ASSUME_NONNULL_BEGIN

static OF_INLINE int
-of_atomic_int_add(volatile int *OF_NONNULL p, int i)
+of_atomic_int_add(volatile int *_Nonnull p, int i)
{
#if !defined(OF_HAVE_THREADS)
	return (*p += i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, i);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	if (sizeof(int) == 4)
@@ -74,15 +74,15 @@
	return OSAtomicAdd32Barrier(i, p);
#else
# error of_atomic_int_add not implemented!
#endif
}
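
For reference, a hypothetical caller (the counter variable is invented for illustration); of_atomic_int_add returns the value after the addition, matching the __sync_add_and_fetch fallback:

static volatile int counter;

static int
bump_counter(void)
{
	/* Atomically adds 1 and returns the new value. */
	return of_atomic_int_add(&counter, 1);
}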

static OF_INLINE int32_t
-of_atomic_int32_add(volatile int32_t *OF_NONNULL p, int32_t i)
+of_atomic_int32_add(volatile int32_t *_Nonnull p, int32_t i)
{
#if !defined(OF_HAVE_THREADS)
	return (*p += i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, i);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	__asm__ __volatile__ (
@@ -110,15 +110,15 @@
	return OSAtomicAdd32Barrier(i, p);
#else
# error of_atomic_int32_add not implemented!
#endif
}

static OF_INLINE void*
-of_atomic_ptr_add(void *volatile OF_NULLABLE *OF_NONNULL p, intptr_t i)
+of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if !defined(OF_HAVE_THREADS)
	return (*(char* volatile*)p += i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, (void*)i);
#elif defined(OF_X86_64_ASM)
	__asm__ __volatile__ (
@@ -160,15 +160,15 @@
# endif
#else
# error of_atomic_ptr_add not implemented!
#endif
}
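
One detail worth noting: the non-threaded fallback casts through char* volatile*, so the intptr_t argument acts as a byte displacement. A hypothetical caller (the cursor variable is invented for illustration):

#include <stdint.h>

static void *volatile cursor;

static void *
advance_cursor(intptr_t nbytes)
{
	/* Moves the shared pointer forward by nbytes bytes and returns
	 * the new value. */
	return of_atomic_ptr_add(&cursor, nbytes);
}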

static OF_INLINE int
-of_atomic_int_sub(volatile int *OF_NONNULL p, int i)
+of_atomic_int_sub(volatile int *_Nonnull p, int i)
{
#if !defined(OF_HAVE_THREADS)
	return (*p -= i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, i);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	if (sizeof(int) == 4)
@@ -211,15 +211,15 @@
	return OSAtomicAdd32Barrier(-i, p);
#else
# error of_atomic_int_sub not implemented!
#endif
}

static OF_INLINE int32_t
-of_atomic_int32_sub(volatile int32_t *OF_NONNULL p, int32_t i)
+of_atomic_int32_sub(volatile int32_t *_Nonnull p, int32_t i)
{
#if !defined(OF_HAVE_THREADS)
	return (*p -= i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, i);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	__asm__ __volatile__ (
@@ -248,15 +248,15 @@
	return OSAtomicAdd32Barrier(-i, p);
#else
# error of_atomic_int32_sub not implemented!
#endif
}

static OF_INLINE void*
-of_atomic_ptr_sub(void *volatile OF_NULLABLE *OF_NONNULL p, intptr_t i)
+of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if !defined(OF_HAVE_THREADS)
	return (*(char* volatile*)p -= i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, (void*)i);
#elif defined(OF_X86_64_ASM)
	__asm__ __volatile__ (
@@ -300,15 +300,15 @@
# endif
#else
# error of_atomic_ptr_sub not implemented!
#endif
}

static OF_INLINE int
-of_atomic_int_inc(volatile int *OF_NONNULL p)
+of_atomic_int_inc(volatile int *_Nonnull p)
{
#if !defined(OF_HAVE_THREADS)
	return ++*p;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, 1);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	int i;
@@ -357,15 +357,15 @@
	return OSAtomicIncrement32Barrier(p);
#else
# error of_atomic_int_inc not implemented!
#endif
}

static OF_INLINE int32_t
-of_atomic_int32_inc(volatile int32_t *OF_NONNULL p)
+of_atomic_int32_inc(volatile int32_t *_Nonnull p)
{
#if !defined(OF_HAVE_THREADS)
	return ++*p;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, 1);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	int32_t i;
@@ -399,15 +399,15 @@
	return OSAtomicIncrement32Barrier(p);
#else
# error of_atomic_int32_inc not implemented!
#endif
}

static OF_INLINE int
-of_atomic_int_dec(volatile int *OF_NONNULL p)
+of_atomic_int_dec(volatile int *_Nonnull p)
{
#if !defined(OF_HAVE_THREADS)
	return --*p;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, 1);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	int i;
@@ -456,15 +456,15 @@
	return OSAtomicDecrement32Barrier(p);
#else
# error of_atomic_int_dec not implemented!
#endif
}
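
The increment/decrement pair is the classic building block for reference counting. A hypothetical sketch (my_object_t, my_retain and my_release are invented names, not part of ObjFW):

#include <stdbool.h>

typedef struct {
	volatile int refcount;
} my_object_t;

static void
my_retain(my_object_t *obj)
{
	of_atomic_int_inc(&obj->refcount);
}

static bool
my_release(my_object_t *obj)
{
	/* True once the last reference is gone and obj can be freed. */
	return (of_atomic_int_dec(&obj->refcount) == 0);
}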

static OF_INLINE int32_t
-of_atomic_int32_dec(volatile int32_t *OF_NONNULL p)
+of_atomic_int32_dec(volatile int32_t *_Nonnull p)
{
#if !defined(OF_HAVE_THREADS)
	return --*p;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, 1);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	int32_t i;
@@ -498,15 +498,15 @@
	return OSAtomicDecrement32Barrier(p);
#else
# error of_atomic_int32_dec not implemented!
#endif
}

static OF_INLINE unsigned int
-of_atomic_int_or(volatile unsigned int *OF_NONNULL p, unsigned int i)
+of_atomic_int_or(volatile unsigned int *_Nonnull p, unsigned int i)
{
#if !defined(OF_HAVE_THREADS)
	return (*p |= i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_or_and_fetch(p, i);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	if (sizeof(int) == 4)
@@ -557,15 +557,15 @@
	return OSAtomicOr32Barrier(i, p);
#else
# error of_atomic_int_or not implemented!
#endif
}

static OF_INLINE uint32_t
-of_atomic_int32_or(volatile uint32_t *OF_NONNULL p, uint32_t i)
+of_atomic_int32_or(volatile uint32_t *_Nonnull p, uint32_t i)
{
#if !defined(OF_HAVE_THREADS)
	return (*p |= i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_or_and_fetch(p, i);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	__asm__ __volatile__ (
@@ -598,15 +598,15 @@
	return OSAtomicOr32Barrier(i, p);
#else
# error of_atomic_int32_or not implemented!
#endif
}

static OF_INLINE unsigned int
-of_atomic_int_and(volatile unsigned int *OF_NONNULL p, unsigned int i)
+of_atomic_int_and(volatile unsigned int *_Nonnull p, unsigned int i)
{
#if !defined(OF_HAVE_THREADS)
	return (*p &= i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_and_and_fetch(p, i);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	if (sizeof(int) == 4)
@@ -657,15 +657,15 @@
	return OSAtomicAnd32Barrier(i, p);
#else
# error of_atomic_int_and not implemented!
#endif
}
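
The or/and pair maps naturally onto shared flag words: or sets bits, and with the complement clears them. A hypothetical sketch (FLAG_READY and flags are invented for illustration):

enum { FLAG_READY = 1u << 0 };
static volatile unsigned int flags;

static void
set_ready(void)
{
	of_atomic_int_or(&flags, FLAG_READY);
}

static void
clear_ready(void)
{
	/* Clearing a bit is an atomic AND with the bit's complement. */
	of_atomic_int_and(&flags, ~FLAG_READY);
}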

static OF_INLINE uint32_t
-of_atomic_int32_and(volatile uint32_t *OF_NONNULL p, uint32_t i)
+of_atomic_int32_and(volatile uint32_t *_Nonnull p, uint32_t i)
{
#if !defined(OF_HAVE_THREADS)
	return (*p &= i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_and_and_fetch(p, i);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	__asm__ __volatile__ (
@@ -698,15 +698,15 @@
	return OSAtomicAnd32Barrier(i, p);
#else
# error of_atomic_int32_and not implemented!
#endif
}

static OF_INLINE unsigned int
-of_atomic_int_xor(volatile unsigned int *OF_NONNULL p, unsigned int i)
+of_atomic_int_xor(volatile unsigned int *_Nonnull p, unsigned int i)
{
#if !defined(OF_HAVE_THREADS)
	return (*p ^= i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_xor_and_fetch(p, i);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	if (sizeof(int) == 4)
@@ -757,15 +757,15 @@
	return OSAtomicXor32Barrier(i, p);
#else
# error of_atomic_int_xor not implemented!
#endif
}

static OF_INLINE uint32_t
-of_atomic_int32_xor(volatile uint32_t *OF_NONNULL p, uint32_t i)
+of_atomic_int32_xor(volatile uint32_t *_Nonnull p, uint32_t i)
{
#if !defined(OF_HAVE_THREADS)
	return (*p ^= i);
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_xor_and_fetch(p, i);
#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
	__asm__ __volatile__ (
@@ -798,15 +798,15 @@
	return OSAtomicXor32Barrier(i, p);
#else
# error of_atomic_int32_xor not implemented!
#endif
}

static OF_INLINE bool
-of_atomic_int_cmpswap(volatile int *OF_NONNULL p, int o, int n)
+of_atomic_int_cmpswap(volatile int *_Nonnull p, int o, int n)
{
#if !defined(OF_HAVE_THREADS)
	if (*p == o) {
		*p = n;
		return true;
	}

@@ -853,15 +853,15 @@
	return OSAtomicCompareAndSwapIntBarrier(o, n, p);
#else
# error of_atomic_int_cmpswap not implemented!
#endif
}
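
Compare-and-swap is the primitive the other operations can be built from: a failed swap means another thread won the race, so the caller reloads and retries. A hypothetical sketch computing an atomic maximum (atomic_store_max is an invented helper):

static void
atomic_store_max(volatile int *p, int value)
{
	int old;

	do {
		old = *p;
		if (old >= value)
			return;	/* already at least as large */
	} while (!of_atomic_int_cmpswap(p, old, value));
}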

static OF_INLINE bool
-of_atomic_int32_cmpswap(volatile int32_t *OF_NONNULL p, int32_t o, int32_t n)
+of_atomic_int32_cmpswap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
#if !defined(OF_HAVE_THREADS)
	if (*p == o) {
		*p = n;
		return true;
	}

@@ -908,16 +908,16 @@
	return OSAtomicCompareAndSwap32Barrier(o, n, p);
#else
# error of_atomic_int32_cmpswap not implemented!
#endif
}

static OF_INLINE bool
-of_atomic_ptr_cmpswap(void *volatile OF_NULLABLE *OF_NONNULL p,
-    void *OF_NULLABLE o, void *OF_NULLABLE n)
+of_atomic_ptr_cmpswap(void *volatile _Nullable *_Nonnull p,
+    void *_Nullable o, void *_Nullable n)
{
#if !defined(OF_HAVE_THREADS)
	if (*p == o) {
		*p = n;
		return true;
	}