ObjFW
atomic.h
/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012
 *   Jonathan Schleifer <js@webkeks.org>
 *
 * All rights reserved.
 *
 * This file is part of ObjFW. It may be distributed under the terms of the
 * Q Public License 1.0, which can be found in the file LICENSE.QPL included in
 * the packaging of this file.
 *
 * Alternatively, it may be distributed under the terms of the GNU General
 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of
 * this file.
 */

#include <stdlib.h>

#import "macros.h"

#ifdef OF_HAVE_OSATOMIC
# include <libkern/OSAtomic.h>
#endif
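
/*
 * Each operation below picks its implementation in the same order: plain
 * non-atomic C when OF_THREADS is disabled, hand-written x86/AMD64 inline
 * assembly, GCC's __sync builtins, or Mac OS X's OSAtomic*Barrier functions.
 * If none of these backends is available, compilation stops with an #error.
 */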

static OF_INLINE int
of_atomic_add_int(volatile int *p, int i)
{
#if !defined(OF_THREADS)
	return (*p += i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	if (sizeof(int) == 4)
		__asm__ (
		    "lock\n\t"
		    "xaddl %0, %2\n\t"
		    "addl %1, %0"
		    : "+&r"(i)
		    : "r"(i), "m"(*p)
		);
# ifdef OF_AMD64_ASM
	else if (sizeof(int) == 8)
		__asm__ (
		    "lock\n\t"
		    "xaddq %0, %2\n\t"
		    "addq %1, %0"
		    : "+&r"(i)
		    : "r"(i), "m"(*p)
		);
# endif
	else
		abort();

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
	if (sizeof(int) == 4)
		return OSAtomicAdd32Barrier(i, p);
# ifdef OF_HAVE_OSATOMIC_64
	else if (sizeof(int) == 8)
		return OSAtomicAdd64Barrier(i, p);
# endif
	else
		abort();
#else
# error No atomic operations available!
#endif
}
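
/*
 * Illustrative sketch: a hypothetical helper that bumps a shared counter
 * with of_atomic_add_int. The function and parameter names are examples
 * only and are not part of ObjFW.
 */
static OF_INLINE int
example_add_to_counter(volatile int *counter, int n)
{
	/* Returns the new value of *counter after atomically adding n. */
	return of_atomic_add_int(counter, n);
}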

static OF_INLINE int32_t
of_atomic_add_32(volatile int32_t *p, int32_t i)
{
#if !defined(OF_THREADS)
	return (*p += i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	__asm__ (
	    "lock\n\t"
	    "xaddl %0, %2\n\t"
	    "addl %1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
	return OSAtomicAdd32Barrier(i, p);
#else
# error No atomic operations available!
#endif
}

static OF_INLINE void*
of_atomic_add_ptr(void* volatile *p, intptr_t i)
{
#if !defined(OF_THREADS)
	return (*(char* volatile*)p += i);
#elif defined(OF_X86_ASM)
	__asm__ (
	    "lock\n\t"
	    "xaddl %0, %2\n\t"
	    "addl %1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return (void*)i;
#elif defined(OF_AMD64_ASM)
	__asm__ (
	    "lock\n\t"
	    "xaddq %0, %2\n\t"
	    "addq %1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return (void*)i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, (void*)i);
#elif defined(OF_HAVE_OSATOMIC)
	if (sizeof(void*) == 4)
		return (void*)OSAtomicAdd32Barrier(i, (int32_t*)p);
# ifdef OF_HAVE_OSATOMIC_64
	else if (sizeof(void*) == 8)
		return (void*)OSAtomicAdd64Barrier(i, (int64_t*)p);
# endif
	else
		abort();
#else
# error No atomic operations available!
#endif
}

static OF_INLINE int
of_atomic_sub_int(volatile int *p, int i)
{
#if !defined(OF_THREADS)
	return (*p -= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	if (sizeof(int) == 4)
		__asm__ (
		    "negl %0\n\t"
		    "lock\n\t"
		    "xaddl %0, %2\n\t"
		    "subl %1, %0"
		    : "+&r"(i)
		    : "r"(i), "m"(*p)
		);
# ifdef OF_AMD64_ASM
	else if (sizeof(int) == 8)
		__asm__ (
		    "negq %0\n\t"
		    "lock\n\t"
		    "xaddq %0, %2\n\t"
		    "subq %1, %0"
		    : "+&r"(i)
		    : "r"(i), "m"(*p)
		);
# endif
	else
		abort();

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
	if (sizeof(int) == 4)
		return OSAtomicAdd32Barrier(-i, p);
# ifdef OF_HAVE_OSATOMIC_64
	else if (sizeof(int) == 8)
		return OSAtomicAdd64Barrier(-i, p);
# endif
	else
		abort();
#else
# error No atomic operations available!
#endif
}

static OF_INLINE int32_t
of_atomic_sub_32(volatile int32_t *p, int32_t i)
{
#if !defined(OF_THREADS)
	return (*p -= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	__asm__ (
	    "negl %0\n\t"
	    "lock\n\t"
	    "xaddl %0, %2\n\t"
	    "subl %1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
	return OSAtomicAdd32Barrier(-i, p);
#else
# error No atomic operations available!
#endif
}

static OF_INLINE void*
of_atomic_sub_ptr(void* volatile *p, intptr_t i)
{
#if !defined(OF_THREADS)
	return (*(char* volatile*)p -= i);
#elif defined(OF_X86_ASM)
	__asm__ (
	    "negl %0\n\t"
	    "lock\n\t"
	    "xaddl %0, %2\n\t"
	    "subl %1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return (void*)i;
#elif defined(OF_AMD64_ASM)
	__asm__ (
	    "negq %0\n\t"
	    "lock\n\t"
	    "xaddq %0, %2\n\t"
	    "subq %1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);

	return (void*)i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, (void*)i);
#elif defined(OF_HAVE_OSATOMIC)
	if (sizeof(void*) == 4)
		return (void*)OSAtomicAdd32Barrier(-i, (int32_t*)p);
# ifdef OF_HAVE_OSATOMIC_64
	else if (sizeof(void*) == 8)
		return (void*)OSAtomicAdd64Barrier(-i, (int64_t*)p);
# endif
	else
		abort();
#else
# error No atomic operations available!
#endif
}

static OF_INLINE int
of_atomic_inc_int(volatile int *p)
{
#if !defined(OF_THREADS)
	return ++*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	int i;

	if (sizeof(int) == 4)
		__asm__ (
		    "xorl %0, %0\n\t"
		    "incl %0\n\t"
		    "lock\n\t"
		    "xaddl %0, %1\n\t"
		    "incl %0"
		    : "=&r"(i)
		    : "m"(*p)
		);
# ifdef OF_AMD64_ASM
	else if (sizeof(int) == 8)
		__asm__ (
		    "xorq %0, %0\n\t"
		    "incq %0\n\t"
		    "lock\n\t"
		    "xaddq %0, %1\n\t"
		    "incq %0"
		    : "=&r"(i)
		    : "m"(*p)
		);
# endif
	else
		abort();

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, 1);
#elif defined(OF_HAVE_OSATOMIC)
	if (sizeof(int) == 4)
		return OSAtomicIncrement32Barrier(p);
# ifdef OF_HAVE_OSATOMIC_64
	else if (sizeof(int) == 8)
		return OSAtomicIncrement64Barrier(p);
# endif
	else
		abort();
#else
# error No atomic operations available!
#endif
}

static OF_INLINE int32_t
of_atomic_inc_32(volatile int32_t *p)
{
#if !defined(OF_THREADS)
	return ++*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	uint32_t i;

	__asm__ (
	    "xorl %0, %0\n\t"
	    "incl %0\n\t"
	    "lock\n\t"
	    "xaddl %0, %1\n\t"
	    "incl %0"
	    : "=&r"(i)
	    : "m"(*p)
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_add_and_fetch(p, 1);
#elif defined(OF_HAVE_OSATOMIC)
	return OSAtomicIncrement32Barrier(p);
#else
# error No atomic operations available!
#endif
}

static OF_INLINE int
of_atomic_dec_int(volatile int *p)
{
#if !defined(OF_THREADS)
	return --*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	int i;

	if (sizeof(int) == 4)
		__asm__ (
		    "xorl %0, %0\n\t"
		    "decl %0\n\t"
		    "lock\n\t"
		    "xaddl %0, %1\n\t"
		    "decl %0"
		    : "=&r"(i)
		    : "m"(*p)
		);
# ifdef OF_AMD64_ASM
	else if (sizeof(int) == 8)
		__asm__ (
		    "xorq %0, %0\n\t"
		    "decq %0\n\t"
		    "lock\n\t"
		    "xaddq %0, %1\n\t"
		    "decq %0"
		    : "=&r"(i)
		    : "m"(*p)
		);
# endif
	else
		abort();

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, 1);
#elif defined(OF_HAVE_OSATOMIC)
	if (sizeof(int) == 4)
		return OSAtomicDecrement32Barrier(p);
# ifdef OF_HAVE_OSATOMIC_64
	else if (sizeof(int) == 8)
		return OSAtomicDecrement64Barrier(p);
# endif
	else
		abort();
#else
# error No atomic operations available!
#endif
}
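
/*
 * Illustrative sketch: a hypothetical reference count driven by
 * of_atomic_inc_int and of_atomic_dec_int. The struct and function names
 * are examples only and are not part of ObjFW.
 */
struct example_refcounted {
	volatile int retainCount;
};

static OF_INLINE void
example_retain(struct example_refcounted *object)
{
	of_atomic_inc_int(&object->retainCount);
}

static OF_INLINE BOOL
example_release(struct example_refcounted *object)
{
	/* Returns YES when the last reference has been dropped. */
	return (of_atomic_dec_int(&object->retainCount) == 0);
}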

static OF_INLINE int32_t
of_atomic_dec_32(volatile int32_t *p)
{
#if !defined(OF_THREADS)
	return --*p;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	uint32_t i;

	__asm__ (
	    "xorl %0, %0\n\t"
	    "decl %0\n\t"
	    "lock\n\t"
	    "xaddl %0, %1\n\t"
	    "decl %0"
	    : "=&r"(i)
	    : "m"(*p)
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_sub_and_fetch(p, 1);
#elif defined(OF_HAVE_OSATOMIC)
	return OSAtomicDecrement32Barrier(p);
#else
# error No atomic operations available!
#endif
}

static OF_INLINE unsigned int
of_atomic_or_int(volatile unsigned int *p, unsigned int i)
{
#if !defined(OF_THREADS)
	return (*p |= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	if (sizeof(int) == 4)
		__asm__ (
		    "0:\n\t"
		    "movl %2, %0\n\t"
		    "movl %2, %%eax\n\t"
		    "orl %1, %0\n\t"
		    "lock\n\t"
		    "cmpxchg %0, %2\n\t"
		    "jne 0b\n\t"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "eax", "cc"
		);
# ifdef OF_AMD64_ASM
	else if (sizeof(int) == 8)
		__asm__ (
		    "0:\n\t"
		    "movq %2, %0\n\t"
		    "movq %2, %%rax\n\t"
		    "orq %1, %0\n\t"
		    "lock\n\t"
		    "cmpxchg %0, %2\n\t"
		    "jne 0b\n\t"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "rax", "cc"
		);
# endif
	else
		abort();

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_or_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
	if (sizeof(int) == 4)
		return OSAtomicOr32Barrier(i, p);
# ifdef OF_HAVE_OSATOMIC_64
	else if (sizeof(int) == 8)
		return OSAtomicOr64Barrier(i, p);
# endif
	else
		abort();
#else
# error No atomic operations available!
#endif
}

static OF_INLINE uint32_t
of_atomic_or_32(volatile uint32_t *p, uint32_t i)
{
#if !defined(OF_THREADS)
	return (*p |= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	__asm__ (
	    "0:\n\t"
	    "movl %2, %0\n\t"
	    "movl %2, %%eax\n\t"
	    "orl %1, %0\n\t"
	    "lock\n\t"
	    "cmpxchg %0, %2\n\t"
	    "jne 0b\n\t"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_or_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
	return OSAtomicOr32Barrier(i, p);
#else
# error No atomic operations available!
#endif
}

static OF_INLINE unsigned int
of_atomic_and_int(volatile unsigned int *p, unsigned int i)
{
#if !defined(OF_THREADS)
	return (*p &= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	if (sizeof(int) == 4)
		__asm__ (
		    "0:\n\t"
		    "movl %2, %0\n\t"
		    "movl %2, %%eax\n\t"
		    "andl %1, %0\n\t"
		    "lock\n\t"
		    "cmpxchg %0, %2\n\t"
		    "jne 0b\n\t"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "eax", "cc"
		);
# ifdef OF_AMD64_ASM
	else if (sizeof(int) == 8)
		__asm__ (
		    "0:\n\t"
		    "movq %2, %0\n\t"
		    "movq %2, %%rax\n\t"
		    "andq %1, %0\n\t"
		    "lock\n\t"
		    "cmpxchg %0, %2\n\t"
		    "jne 0b\n\t"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "rax", "cc"
		);
# endif
	else
		abort();

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_and_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
	if (sizeof(int) == 4)
		return OSAtomicAnd32Barrier(i, p);
# ifdef OF_HAVE_OSATOMIC_64
	else if (sizeof(int) == 8)
		return OSAtomicAnd64Barrier(i, p);
# endif
	else
		abort();
#else
# error No atomic operations available!
#endif
}

static OF_INLINE uint32_t
of_atomic_and_32(volatile uint32_t *p, uint32_t i)
{
#if !defined(OF_THREADS)
	return (*p &= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	__asm__ (
	    "0:\n\t"
	    "movl %2, %0\n\t"
	    "movl %2, %%eax\n\t"
	    "andl %1, %0\n\t"
	    "lock\n\t"
	    "cmpxchg %0, %2\n\t"
	    "jne 0b\n\t"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_and_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
	return OSAtomicAnd32Barrier(i, p);
#else
# error No atomic operations available!
#endif
}

static OF_INLINE unsigned int
of_atomic_xor_int(volatile unsigned int *p, unsigned int i)
{
#if !defined(OF_THREADS)
	return (*p ^= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	if (sizeof(int) == 4)
		__asm__ (
		    "0:\n\t"
		    "movl %2, %0\n\t"
		    "movl %2, %%eax\n\t"
		    "xorl %1, %0\n\t"
		    "lock\n\t"
		    "cmpxchg %0, %2\n\t"
		    "jne 0b\n\t"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "eax", "cc"
		);
# ifdef OF_AMD64_ASM
	else if (sizeof(int) == 8)
		__asm__ (
		    "0:\n\t"
		    "movq %2, %0\n\t"
		    "movq %2, %%rax\n\t"
		    "xorq %1, %0\n\t"
		    "lock\n\t"
		    "cmpxchg %0, %2\n\t"
		    "jne 0b\n\t"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "rax", "cc"
		);
# endif
	else
		abort();

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_xor_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
	if (sizeof(int) == 4)
		return OSAtomicXor32Barrier(i, p);
# ifdef OF_HAVE_OSATOMIC_64
	else if (sizeof(int) == 8)
		return OSAtomicXor64Barrier(i, p);
# endif
	else
		abort();
#else
# error No atomic operations available!
#endif
}

static OF_INLINE uint32_t
of_atomic_xor_32(volatile uint32_t *p, uint32_t i)
{
#if !defined(OF_THREADS)
	return (*p ^= i);
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	__asm__ (
	    "0:\n\t"
	    "movl %2, %0\n\t"
	    "movl %2, %%eax\n\t"
	    "xorl %1, %0\n\t"
	    "lock\n\t"
	    "cmpxchgl %0, %2\n\t"
	    "jne 0b\n\t"
	    : "=&r"(i)
	    : "r"(i), "m"(*p)
	    : "eax", "cc"
	);

	return i;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_xor_and_fetch(p, i);
#elif defined(OF_HAVE_OSATOMIC)
	return OSAtomicXor32Barrier(i, p);
#else
# error No atomic operations available!
#endif
}

static OF_INLINE BOOL
of_atomic_cmpswap_int(volatile int *p, int o, int n)
{
#if !defined(OF_THREADS)
	if (*p == o) {
		*p = n;
		return YES;
	}

	return NO;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	int r;

	__asm__ (
	    "xorl %0, %0\n\t"
	    "lock\n\t"
	    "cmpxchg %2, %3\n\t"
	    "sete %b0\n\t"
	    "movzbl %b0, %0"
	    : "=&d"(r) /* use d instead of r due to gcc bug */
	    : "a"(o), "r"(n), "m"(*p)
	    : "cc"
	);

	return r;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_bool_compare_and_swap(p, o, n);
#elif defined(OF_HAVE_OSATOMIC)
	return OSAtomicCompareAndSwapIntBarrier(o, n, p);
#else
# error No atomic operations available!
#endif
}
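
/*
 * Illustrative sketch: a hypothetical test-and-set spinlock built on
 * of_atomic_cmpswap_int. The function names are examples only and are not
 * part of ObjFW.
 */
static OF_INLINE void
example_spinlock_lock(volatile int *lock)
{
	/* Spin until the lock word changes from 0 (unlocked) to 1 (locked). */
	while (!of_atomic_cmpswap_int(lock, 0, 1));
}

static OF_INLINE void
example_spinlock_unlock(volatile int *lock)
{
	/* Swap 1 back to 0 so the release goes through the same barrier. */
	of_atomic_cmpswap_int(lock, 1, 0);
}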

static OF_INLINE BOOL
of_atomic_cmpswap_32(volatile int32_t *p, int32_t o, int32_t n)
{
#if !defined(OF_THREADS)
	if (*p == o) {
		*p = n;
		return YES;
	}

	return NO;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	int r;

	__asm__ (
	    "xorl %0, %0\n\t"
	    "lock\n\t"
	    "cmpxchg %2, %3\n\t"
	    "sete %b0\n\t"
	    "movzbl %b0, %0"
	    : "=&d"(r) /* use d instead of r due to gcc bug */
	    : "a"(o), "r"(n), "m"(*p)
	    : "cc"
	);

	return r;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_bool_compare_and_swap(p, o, n);
#elif defined(OF_HAVE_OSATOMIC)
	return OSAtomicCompareAndSwap32Barrier(o, n, p);
#else
# error No atomic operations available!
#endif
}

static OF_INLINE BOOL
of_atomic_cmpswap_ptr(void* volatile *p, void *o, void *n)
{
#if !defined(OF_THREADS)
	if (*p == o) {
		*p = n;
		return YES;
	}

	return NO;
#elif defined(OF_X86_ASM) || defined(OF_AMD64_ASM)
	int r;

	__asm__ (
	    "xorl %0, %0\n\t"
	    "lock\n\t"
	    "cmpxchg %2, %3\n\t"
	    "sete %b0\n\t"
	    "movzbl %b0, %0"
	    : "=&d"(r) /* use d instead of r due to gcc bug */
	    : "a"(o), "r"(n), "m"(*p)
	    : "cc"
	);

	return r;
#elif defined(OF_HAVE_GCC_ATOMIC_OPS)
	return __sync_bool_compare_and_swap(p, o, n);
#elif defined(OF_HAVE_OSATOMIC)
	return OSAtomicCompareAndSwapPtrBarrier(o, n, p);
#else
# error No atomic operations available!
#endif
}
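
/*
 * Illustrative sketch: a hypothetical lock-free push onto a singly-linked
 * stack using of_atomic_cmpswap_ptr. The node type and function name are
 * examples only and are not part of ObjFW.
 */
struct example_node {
	struct example_node *next;
	void *value;
};

static OF_INLINE void
example_stack_push(struct example_node *volatile *top,
    struct example_node *node)
{
	/* Retry until the head is swapped from the snapshot we just read. */
	do {
		node->next = *top;
	} while (!of_atomic_cmpswap_ptr((void* volatile*)top,
	    node->next, node));
}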