Index: src/OFSystemInfo.m
==================================================================
--- src/OFSystemInfo.m
+++ src/OFSystemInfo.m
@@ -242,17 +242,17 @@
 static OF_INLINE struct x86_regs OF_CONST_FUNC
 x86_cpuid(uint32_t eax, uint32_t ecx)
 {
	struct x86_regs regs;
 
-# if defined(OF_X86_64_ASM)
+# if defined(OF_X86_64) && defined(__GNUC__)
	__asm__ (
	    "cpuid"
	    : "=a"(regs.eax), "=b"(regs.ebx), "=c"(regs.ecx), "=d"(regs.edx)
	    : "a"(eax), "c"(ecx)
	);
-# elif defined(OF_X86_ASM)
+# elif defined(OF_X86) && defined(__GNUC__)
	/*
	 * This workaround is required by older GCC versions when using -fPIC,
	 * as ebx is a special register in PIC code. Yes, GCC is indeed not
	 * able to just push a register onto the stack before the __asm__ block
	 * and to pop it afterwards.
@@ -530,11 +530,11 @@
 }
 #endif
 
 + (OFString *)CPUVendor
 {
-#if defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
+#if (defined(OF_X86_64) || defined(OF_X86)) && defined(__GNUC__)
	struct x86_regs regs = x86_cpuid(0, 0);
	uint32_t buffer[3];
 
	if (regs.eax == 0)
		return nil;
@@ -551,11 +551,11 @@
 #endif
 }
 
 + (OFString *)CPUModel
 {
-#if defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
+#if (defined(OF_X86_64) || defined(OF_X86)) && defined(__GNUC__)
	uint32_t buffer[12];
	size_t i;
 
	i = 0;
	for (uint32_t eax = 0x80000002; eax <= 0x80000004; eax++) {

Index: src/atomic.h
==================================================================
--- src/atomic.h
+++ src/atomic.h
@@ -23,13 +23,14 @@
 # error No atomic operations available!
 #endif
 
 #if !defined(OF_HAVE_THREADS)
 # import "atomic_no_threads.h"
-#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
+#elif (defined(OF_X86_64) || defined(OF_X86)) && defined(__GNUC__)
 # import "atomic_x86.h"
-#elif defined(OF_POWERPC_ASM) && !defined(__APPLE_CC__) && !defined(OF_AIX)
+#elif defined(OF_POWERPC) && defined(__GNUC__) && !defined(__APPLE_CC__) && \
+    !defined(OF_AIX)
 # import "atomic_powerpc.h"
 #elif defined(OF_HAVE_ATOMIC_BUILTINS)
 # import "atomic_builtins.h"
 #elif defined(OF_HAVE_SYNC_BUILTINS)
 # import "atomic_sync_builtins.h"

Index: src/atomic_x86.h
==================================================================
--- src/atomic_x86.h
+++ src/atomic_x86.h
@@ -26,11 +26,11 @@
		    "xaddl %0, %2\n\t"
		    "addl %1, %0"
		    : "+&r"(i)
		    : "r"(i), "m"(*p)
		);
-#ifdef OF_X86_64_ASM
+#ifdef OF_X86_64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "lock\n\t"
		    "xaddq %0, %2\n\t"
		    "addq %1, %0"
@@ -59,21 +59,21 @@
 }
 
 static OF_INLINE void *_Nullable
 of_atomic_ptr_add(void *volatile _Nullable *_Nonnull p, intptr_t i)
 {
-#if defined(OF_X86_64_ASM)
+#if defined(OF_X86_64)
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xaddq %0, %2\n\t"
	    "addq %1, %0"
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);
 
	return (void *)i;
-#elif defined(OF_X86_ASM)
+#elif defined(OF_X86)
	__asm__ __volatile__ (
	    "lock\n\t"
	    "xaddl %0, %2\n\t"
	    "addl %1, %0"
	    : "+&r"(i)
@@ -94,11 +94,11 @@
		    "xaddl %0, %2\n\t"
		    "subl %1, %0"
		    : "+&r"(i)
		    : "r"(i), "m"(*p)
		);
-#ifdef OF_X86_64_ASM
+#ifdef OF_X86_64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "negq %0\n\t"
		    "lock\n\t"
		    "xaddq %0, %2\n\t"
@@ -129,11 +129,11 @@
 }
 
 static OF_INLINE void *_Nullable
 of_atomic_ptr_sub(void *volatile _Nullable *_Nonnull p, intptr_t i)
 {
-#if defined(OF_X86_64_ASM)
+#if defined(OF_X86_64)
	__asm__ __volatile__ (
	    "negq %0\n\t"
	    "lock\n\t"
	    "xaddq %0, %2\n\t"
	    "subq %1, %0"
@@ -140,11 +140,11 @@
	    : "+&r"(i)
	    : "r"(i), "m"(*p)
	);
 
	return (void *)i;
-#elif defined(OF_X86_ASM)
+#elif defined(OF_X86)
	__asm__ __volatile__ (
	    "negl %0\n\t"
	    "lock\n\t"
	    "xaddl %0, %2\n\t"
	    "subl %1, %0"
@@ -169,11 +169,11 @@
		    "xaddl %0, %1\n\t"
		    "incl %0"
		    : "=&r"(i)
		    : "m"(*p)
		);
-#ifdef OF_X86_64_ASM
+#ifdef OF_X86_64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "xorq %0, %0\n\t"
		    "incq %0\n\t"
		    "lock\n\t"
@@ -220,11 +220,11 @@
		    "xaddl %0, %1\n\t"
		    "decl %0"
		    : "=&r"(i)
		    : "m"(*p)
		);
-#ifdef OF_X86_64_ASM
+#ifdef OF_X86_64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "xorq %0, %0\n\t"
		    "decq %0\n\t"
		    "lock\n\t"
@@ -272,11 +272,11 @@
		    "jne 0b"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "eax", "cc"
		);
-#ifdef OF_X86_64_ASM
+#ifdef OF_X86_64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "0:\n\t"
		    "movq %2, %0\n\t"
		    "movq %0, %%rax\n\t"
@@ -328,11 +328,11 @@
		    "jne 0b"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "eax", "cc"
		);
-#ifdef OF_X86_64_ASM
+#ifdef OF_X86_64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "0:\n\t"
		    "movq %2, %0\n\t"
		    "movq %0, %%rax\n\t"
@@ -384,11 +384,11 @@
		    "jne 0b"
		    : "=&r"(i)
		    : "r"(i), "m"(*p)
		    : "eax", "cc"
		);
-#ifdef OF_X86_64_ASM
+#ifdef OF_X86_64
	else if (sizeof(int) == 8)
		__asm__ __volatile__ (
		    "0:\n\t"
		    "movq %2, %0\n\t"
		    "movq %0, %%rax\n\t"

Index: src/macros.h
==================================================================
--- src/macros.h
+++ src/macros.h
@@ -319,46 +319,10 @@
 # define OF_DIRECT_MEMBERS __attribute__((__objc_direct_members__))
 #else
 # define OF_DIRECT_MEMBERS
 #endif
 
-#ifdef __GNUC__
-# ifdef OF_X86_64
-#  define OF_X86_64_ASM
-# endif
-# ifdef OF_X86
-#  define OF_X86_ASM
-# endif
-# ifdef OF_POWERPC
-#  define OF_POWERPC_ASM
-# endif
-# ifdef OF_ARM64
-#  define OF_ARM64_ASM
-# endif
-# ifdef OF_ARM
-#  define OF_ARM_ASM
-# endif
-# ifdef OF_ARMV7
-#  define OF_ARMV7_ASM
-# endif
-# ifdef OF_ARMV6
-#  define OF_ARMV6_ASM
-# endif
-# ifdef OF_MIPS64
-#  define OF_MIPS64_ASM
-# endif
-# ifdef OF_MIPS
-#  define OF_MIPS_ASM
-# endif
-# ifdef OF_PA_RISC
-#  define OF_PA_RISC_ASM
-# endif
-# ifdef OF_ITANIUM
-#  define OF_ITANIUM_ASM
-# endif
-#endif
-
 #ifdef OF_APPLE_RUNTIME
 # if defined(OF_X86_64) || defined(OF_X86) || defined(OF_ARM64) || \
     defined(OF_ARM) || defined(OF_POWERPC)
 #  define OF_HAVE_FORWARDING_TARGET_FOR_SELECTOR
 #  define OF_HAVE_FORWARDING_TARGET_FOR_SELECTOR_STRET
@@ -467,23 +431,23 @@
 static OF_INLINE uint16_t OF_CONST_FUNC
 OF_BSWAP16_NONCONST(uint16_t i)
 {
 #if defined(OF_HAVE_BUILTIN_BSWAP16)
	return __builtin_bswap16(i);
-#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
+#elif (defined(OF_X86_64) || defined(OF_X86)) && defined(__GNUC__)
	__asm__ (
	    "xchgb %h0, %b0"
	    : "=Q"(i)
	    : "0"(i)
	);
-#elif defined(OF_POWERPC_ASM)
+#elif defined(OF_POWERPC) && defined(__GNUC__)
	__asm__ (
	    "lhbrx %0, 0, %1"
	    : "=r"(i)
	    : "r"(&i), "m"(i)
	);
-#elif defined(OF_ARMV6_ASM)
+#elif defined(OF_ARMV6) && defined(__GNUC__)
	__asm__ (
	    "rev16 %0, %0"
	    : "=r"(i)
	    : "0"(i)
	);
@@ -497,23 +461,23 @@
 static OF_INLINE uint32_t OF_CONST_FUNC
 OF_BSWAP32_NONCONST(uint32_t i)
 {
 #if defined(OF_HAVE_BUILTIN_BSWAP32)
	return __builtin_bswap32(i);
-#elif defined(OF_X86_64_ASM) || defined(OF_X86_ASM)
+#elif (defined(OF_X86_64) || defined(OF_X86)) && defined(__GNUC__)
	__asm__ (
	    "bswap %0"
	    : "=q"(i)
	    : "0"(i)
	);
-#elif defined(OF_POWERPC_ASM)
+#elif defined(OF_POWERPC) && defined(__GNUC__)
	__asm__ (
	    "lwbrx %0, 0, %1"
	    : "=r"(i)
	    : "r"(&i), "m"(i)
	);
-#elif defined(OF_ARMV6_ASM)
+#elif defined(OF_ARMV6) && defined(__GNUC__)
	__asm__ (
	    "rev %0, %0"
	    : "=r"(i)
	    : "0"(i)
	);
@@ -529,17 +493,17 @@
 static OF_INLINE uint64_t OF_CONST_FUNC
 OF_BSWAP64_NONCONST(uint64_t i)
 {
 #if defined(OF_HAVE_BUILTIN_BSWAP64)
	return __builtin_bswap64(i);
-#elif defined(OF_X86_64_ASM)
+#elif defined(OF_X86_64) && defined(__GNUC__)
	__asm__ (
	    "bswap %0"
	    : "=r"(i)
	    : "0"(i)
	);
-#elif defined(OF_X86_ASM)
+#elif defined(OF_X86) && defined(__GNUC__)
	__asm__ (
	    "bswap %%eax\n\t"
	    "bswap %%edx\n\t"
	    "xchgl %%eax, %%edx"
	    : "=A"(i)
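
The pattern this patch establishes is to test the architecture macro and the
compiler together at each use site, rather than folding both checks into a
single OF_*_ASM macro in macros.h. A minimal self-contained sketch of that
shape, mirroring the OF_BSWAP32_NONCONST case above (swap32 is a hypothetical
illustration, not part of the patch; OF_X86_64 and OF_X86 are the architecture
macros from ObjFW's platform detection, and the fallback shows what a compiler
without GNU-style inline assembly would use):

	#include <stdint.h>

	static inline uint32_t
	swap32(uint32_t i)
	{
	#if (defined(OF_X86_64) || defined(OF_X86)) && defined(__GNUC__)
		/* GNU C inline assembly: bswap reverses all four bytes. */
		__asm__ (
		    "bswap %0"
		    : "=q"(i)
		    : "0"(i)
		);
		return i;
	#else
		/* Portable fallback for compilers without GNU inline asm. */
		return (i >> 24) | ((i >> 8) & 0x0000FF00) |
		    ((i << 8) & 0x00FF0000) | (i << 24);
	#endif
	}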