Overview
Comment:     platform/x86/OFAtomic.h: Reduce code duplication
Downloads:   Tarball | ZIP archive | SQL archive
Timelines:   family | ancestors | descendants | both | trunk
Files:       files | file ages | folders
SHA3-256:    3751618dd1a36e204e8846daf53cfbfd
User & Date: js on 2023-10-26 19:16:07
Other Links: manifest | tags
Context
2023-10-26
  19:42  Make headers compatible with -masm=intel  (check-in: a1ab43e0b3, user: js, tags: trunk)
  19:16  platform/x86/OFAtomic.h: Reduce code duplication  (check-in: 3751618dd1, user: js, tags: trunk)

2023-10-25
  22:26  Move x86/ELF assembly back to Intel syntax  (check-in: a338982b3b, user: js, tags: trunk)
Changes
Modified src/platform/x86/OFAtomic.h from [07765f0312] to [b697a98325]. The hunks below show the new version of each changed region; ︙ marks unchanged lines that are skipped.
︙

 * Public License, either version 2 or 3, which can be found in the file
 * LICENSE.GPLv2 or LICENSE.GPLv3 respectively included in the packaging of this
 * file.
 */

OF_ASSUME_NONNULL_BEGIN

static OF_INLINE int32_t
OFAtomicInt32Add(volatile int32_t *_Nonnull p, int32_t i)
{
    __asm__ __volatile__ (
        "lock\n\t"
        "xaddl %0, %2\n\t"
        "addl %1, %0"
        : "+&r"(i)
        : "r"(i), "m"(*p)
    );

    return i;
}

static OF_INLINE int
OFAtomicIntAdd(volatile int *_Nonnull p, int i)
{
    if (sizeof(int) == 4)
        return OFAtomicInt32Add(p, i);
#ifdef OF_AMD64
    else if (sizeof(int) == 8)
        __asm__ __volatile__ (
            "lock\n\t"
            "xaddq %0, %2\n\t"
            "addq %1, %0"
            : "+&r"(i)
            : "r"(i), "m"(*p)
        );
#endif
    else
        abort();

    return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerAdd(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if defined(OF_AMD64)
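This hunk is the core of the deduplication: OFAtomicIntAdd no longer carries its own 32-bit assembly but forwards to OFAtomicInt32Add when sizeof(int) == 4, a compile-time constant the compiler folds away, so only the AMD64 sizeof(int) == 8 case keeps a separate asm block. A minimal caller sketch (hypothetical: bumpCounter and the include path are illustrative, not part of the check-in):

#include <stdint.h>
#include "OFAtomic.h" /* assumed include path */

static volatile int counter;

int
bumpCounter(void)
{
    /* xadd leaves the old value in the register and the trailing add
     * turns it into the new one, so this returns counter after the +1. */
    return OFAtomicIntAdd(&counter, 1);
}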
︙

        : "r"(i), "m"(*p)
    );

    return (void *)i;
#endif
}

static OF_INLINE int32_t
OFAtomicInt32Subtract(volatile int32_t *_Nonnull p, int32_t i)
{
    __asm__ __volatile__ (
        "negl %0\n\t"
        "lock\n\t"
        "xaddl %0, %2\n\t"
        "subl %1, %0"
        : "+&r"(i)
        : "r"(i), "m"(*p)
    );

    return i;
}

static OF_INLINE int
OFAtomicIntSubtract(volatile int *_Nonnull p, int i)
{
    if (sizeof(int) == 4)
        return OFAtomicInt32Subtract(p, i);
#ifdef OF_AMD64
    else if (sizeof(int) == 8)
        __asm__ __volatile__ (
            "negq %0\n\t"
            "lock\n\t"
            "xaddq %0, %2\n\t"
            "subq %1, %0"
            : "+&r"(i)
            : "r"(i), "m"(*p)
        );
#endif
    else
        abort();

    return i;
}

static OF_INLINE void *_Nullable
OFAtomicPointerSubtract(void *volatile _Nullable *_Nonnull p, intptr_t i)
{
#if defined(OF_AMD64)
︙

        : "r"(i), "m"(*p)
    );

    return (void *)i;
#endif
}

static OF_INLINE int32_t
OFAtomicInt32Increase(volatile int32_t *_Nonnull p)
{
    int32_t i;

    __asm__ __volatile__ (
        "xorl %0, %0\n\t"
        "incl %0\n\t"
        "lock\n\t"
        "xaddl %0, %1\n\t"
        "incl %0"
        : "=&r"(i)
        : "m"(*p)
    );

    return i;
}

static OF_INLINE int
OFAtomicIntIncrease(volatile int *_Nonnull p)
{
    int i;

    if (sizeof(int) == 4)
        return OFAtomicInt32Increase(p);
#ifdef OF_AMD64
    else if (sizeof(int) == 8)
        __asm__ __volatile__ (
            "xorq %0, %0\n\t"
            "incq %0\n\t"
            "lock\n\t"
            "xaddq %0, %1\n\t"
            "incq %0"
            : "=&r"(i)
            : "m"(*p)
        );
#endif
    else
        abort();
︙

        : "=&r"(i)
        : "m"(*p)
    );

    return i;
}

static OF_INLINE int
OFAtomicIntDecrease(volatile int *_Nonnull p)
{
    int i;

    if (sizeof(int) == 4)
        return OFAtomicInt32Decrease(p);
#ifdef OF_AMD64
    else if (sizeof(int) == 8)
        __asm__ __volatile__ (
            "xorq %0, %0\n\t"
            "decq %0\n\t"
            "lock\n\t"
            "xaddq %0, %1\n\t"
            "decq %0"
            : "=&r"(i)
            : "m"(*p)
        );
#endif
    else
        abort();

    return i;
}
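Increase and Decrease return the updated value: xadd yields the previous value, and the extra inc/dec corrects it to the new one. That makes the pair directly usable for reference counting. A hedged sketch, assuming a hypothetical MyObject type that is not part of this file:

#include <stdlib.h>

typedef struct {
    volatile int refCount;
} MyObject;

static void
retain(MyObject *object)
{
    OFAtomicIntIncrease(&object->refCount);
}

static void
release(MyObject *object)
{
    /* Since the new value is returned, exactly one thread observes the
     * drop to zero and becomes responsible for freeing the object. */
    if (OFAtomicIntDecrease(&object->refCount) == 0)
        free(object);
}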
︙

        : "eax", "cc"
    );

    return i;
}

static OF_INLINE unsigned int
OFAtomicIntOr(volatile unsigned int *_Nonnull p, unsigned int i)
{
    if (sizeof(int) == 4)
        return OFAtomicInt32Or(p, i);
#ifdef OF_AMD64
    else if (sizeof(int) == 8)
        __asm__ __volatile__ (
            "0:\n\t"
            "movq %2, %0\n\t"
            "movq %0, %%rax\n\t"
            "orq %1, %0\n\t"
            "lock\n\t"
            "cmpxchg %0, %2\n\t"
            "jne 0b"
            : "=&r"(i)
            : "r"(i), "m"(*p)
            : "rax", "cc"
        );
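x86 has no xadd-style fetch variant of the bitwise instructions, so the 64-bit path above loads the value, ORs in the operand, and retries with lock cmpxchg until no other thread modified *p in between, finally returning the new value. The same loop expressed in C for illustration, using the GCC/Clang __sync builtin rather than this header's assembly:

static unsigned long
fetchOrEquivalent(volatile unsigned long *p, unsigned long i)
{
    unsigned long old, new;

    /* Retry until the CAS succeeds, i.e. *p still held `old` when we
     * tried to install `old | i`. */
    do {
        old = *p;
        new = old | i;
    } while (!__sync_bool_compare_and_swap(p, old, new));

    return new; /* like the asm above, returns the new value */
}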
︙

        : "eax", "cc"
    );

    return i;
}

static OF_INLINE unsigned int
OFAtomicIntAnd(volatile unsigned int *_Nonnull p, unsigned int i)
{
    if (sizeof(int) == 4)
        return OFAtomicInt32And(p, i);
#ifdef OF_AMD64
    else if (sizeof(int) == 8)
        __asm__ __volatile__ (
            "0:\n\t"
            "movq %2, %0\n\t"
            "movq %0, %%rax\n\t"
            "andq %1, %0\n\t"
            "lock\n\t"
            "cmpxchg %0, %2\n\t"
            "jne 0b"
            : "=&r"(i)
            : "r"(i), "m"(*p)
            : "rax", "cc"
        );
︙

        : "=&r"(i)
        : "r"(i), "m"(*p)
        : "eax", "cc"
    );

    return i;
}

static OF_INLINE unsigned int
OFAtomicIntXor(volatile unsigned int *_Nonnull p, unsigned int i)
{
    if (sizeof(int) == 4)
        return OFAtomicInt32Xor(p, i);
#ifdef OF_AMD64
    else if (sizeof(int) == 8)
        __asm__ __volatile__ (
            "0:\n\t"
            "movq %2, %0\n\t"
            "movq %0, %%rax\n\t"
            "xorq %1, %0\n\t"
            "lock\n\t"
            "cmpxchg %0, %2\n\t"
            "jne 0b"
            : "=&r"(i)
            : "r"(i), "m"(*p)
            : "rax", "cc"
        );
#endif
    else
        abort();

    return i;
}

static OF_INLINE bool
OFAtomicInt32CompareAndSwap(volatile int32_t *_Nonnull p, int32_t o, int32_t n)
{
    int r;

    __asm__ __volatile__ (
        "lock\n\t"
        "cmpxchg %2, %3\n\t"
        "sete %b0\n\t"
        "movzbl %b0, %0"
        : "=&d"(r), "+a"(o) /* use d instead of r to avoid a gcc bug */
        : "r"(n), "m"(*p)
        : "cc"
    );

    return r;
}

static OF_INLINE bool
OFAtomicIntCompareAndSwap(volatile int *_Nonnull p, int o, int n)
{
    int r;

    __asm__ __volatile__ (
        "lock\n\t"
        "cmpxchg %2, %3\n\t"
        "sete %b0\n\t"
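OFAtomicInt32CompareAndSwap returns whether the exchange happened (sete captures the ZF that cmpxchg sets on success), which is the building block for arbitrary lock-free read-modify-write loops. A hedged sketch with a hypothetical atomicStoreMax helper:

static void
atomicStoreMax(volatile int *p, int value)
{
    int old;

    do {
        old = *p;
        if (old >= value)
            return; /* current value is already at least `value` */
    } while (!OFAtomicIntCompareAndSwap(p, old, value));
}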
︙