#endif
/*** memcopy must be memmove ***/
-void* memmove(void* dest, const void* source, size_t num);
-__INTRIN_INLINE void* memcpy(void* dest, const void* source, size_t num)
+void* __cdecl memmove(void* dest, const void* source, size_t num);
+__INTRIN_INLINE void* __cdecl memcpy(void* dest, const void* source, size_t num)
{
return memmove(dest, source, num);
}
/*** Memory barriers ***/
+#ifndef __clang__
/* Compiler-level read/write barrier: the empty asm with a "memory" clobber
 * prevents the compiler from reordering memory accesses across this point.
 * NOTE(review): the trailing "sfence" and _WriteBarrier() call look out of
 * place for a pure compiler barrier (MSVC's _ReadWriteBarrier emits no CPU
 * instruction); they may be remnants of neighboring functions spliced in by
 * the patch — confirm against the upstream file. */
__INTRIN_INLINE void _ReadWriteBarrier(void)
{
    __asm__ __volatile__("" : : : "memory");
    __asm__ __volatile__("sfence");
    _WriteBarrier();
}
+#endif /* !__clang__ */
#ifdef __x86_64__
__INTRIN_INLINE void __faststorefence(void)
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * Destination, long Exchange, long Comperand)
+__INTRIN_INLINE long __cdecl _InterlockedCompareExchange(volatile long * Destination, long Exchange, long Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedExchange(volatile long * Target, long Value)
+__INTRIN_INLINE long __cdecl _InterlockedExchange(volatile long * Target, long Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */
__sync_synchronize();
}
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * Addend, long Value)
+__INTRIN_INLINE long __cdecl _InterlockedExchangeAdd(volatile long * Addend, long Value)
{
return __sync_fetch_and_add(Addend, Value);
}
#endif
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedDecrement(volatile long * lpAddend)
+__INTRIN_INLINE long __cdecl _InterlockedDecrement(volatile long * lpAddend)
{
return __sync_sub_and_fetch(lpAddend, 1);
}
-__INTRIN_INLINE long _InterlockedIncrement(volatile long * lpAddend)
+__INTRIN_INLINE long __cdecl _InterlockedIncrement(volatile long * lpAddend)
{
return __sync_add_and_fetch(lpAddend, 1);
}
#else /* (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 */
+#ifndef __clang__
+
__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * Destination, char Exchange, char Comperand)
{
char retval = Comperand;
return _InterlockedExchangeAdd16(lpAddend, 1) + 1;
}
+#endif /* !__clang__ */
+
#if defined(__x86_64__)
__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * lpAddend)
{
#else
+#ifndef __clang__
__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * Destination, long long Exchange, long long Comperand)
{
long long retval = Comperand;
return retval;
}
+#endif /* !__clang__ */
#endif
}
#endif
+#ifndef __clang__
/* Atomically set bit b of *a with LOCK BTS and return the bit's previous
 * value: BTS copies the old bit into CF, and SETC captures CF into retval. */
__INTRIN_INLINE unsigned char _interlockedbittestandset(volatile long * a, long b)
{
    unsigned char retval;
    __asm__("lock; btsl %[b], %[a]; setc %b[retval]" : [retval] "=q" (retval), [a] "+m" (*a) : [b] "Ir" (b) : "memory");
    return retval;
}
+#endif /* !__clang__ */
#if defined(__x86_64__)
__INTRIN_INLINE unsigned char _interlockedbittestandset64(volatile long long * a, long long b)
/*** String operations ***/
+#ifndef __clang__
/* NOTE: we don't set a memory clobber in the __stosX functions because Visual C++ doesn't */
__INTRIN_INLINE void __stosb(unsigned char * Dest, unsigned char Data, size_t Count)
{
"[Dest]" (Dest), "a" (Data), "[Count]" (Count)
);
}
+#endif
__INTRIN_INLINE void __stosw(unsigned short * Dest, unsigned short Data, size_t Count)
{
__asm__ __volatile__("movl %k[Data], %%fs:%a[Offset]" : : [Offset] "ir" (Offset), [Data] "ir" (Data) : "memory");
}
+#ifndef __clang__
+
__INTRIN_INLINE unsigned char __readfsbyte(unsigned long Offset)
{
unsigned char value;
return value;
}
+#endif /* !__clang__ */
+
__INTRIN_INLINE void __incfsbyte(unsigned long Offset)
{
__asm__ __volatile__("incb %%fs:%a[Offset]" : : [Offset] "ir" (Offset) : "memory");
/*** Bit manipulation ***/
+#ifndef __clang__
+
/* Find the index of the lowest set bit of Mask; BSF writes the index to
 * *Index. Returns nonzero iff Mask had any bit set — BSF's destination is
 * architecturally undefined for a zero source, hence the explicit
 * "Mask ? 1 : 0" return (mirrors the MSVC contract; *Index is then
 * unspecified when the return is 0). */
__INTRIN_INLINE unsigned char _BitScanForward(unsigned long * Index, unsigned long Mask)
{
    __asm__("bsfl %[Mask], %[Index]" : [Index] "=r" (*Index) : [Mask] "mr" (Mask));
    return Mask ? 1 : 0;
}
+#endif /* !__clang__ */
+
/* NOTE: again, the bizarre implementation follows Visual C++ */
__INTRIN_INLINE unsigned char _bittest(const long * a, long b)
{
#endif
-__INTRIN_INLINE unsigned char _rotl8(unsigned char value, unsigned char shift)
+#ifndef __clang__
+
+__INTRIN_INLINE unsigned char __cdecl _rotl8(unsigned char value, unsigned char shift)
{
unsigned char retval;
__asm__("rolb %b[shift], %b[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
-__INTRIN_INLINE unsigned short _rotl16(unsigned short value, unsigned char shift)
+__INTRIN_INLINE unsigned short __cdecl _rotl16(unsigned short value, unsigned char shift)
{
unsigned short retval;
__asm__("rolw %b[shift], %w[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
-__INTRIN_INLINE unsigned int _rotl(unsigned int value, int shift)
+__INTRIN_INLINE unsigned int __cdecl _rotl(unsigned int value, int shift)
{
unsigned int retval;
__asm__("roll %b[shift], %k[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
+#endif /* !__clang__ */
+
#ifdef __x86_64__
__INTRIN_INLINE unsigned long long _rotl64(unsigned long long value, int shift)
{
__asm__("rolq %b[shift], %k[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
-#else
-__INTRIN_INLINE unsigned long long _rotl64(unsigned long long value, int shift)
+#else /* __x86_64__ */
+#ifndef __clang__
+__INTRIN_INLINE unsigned long long __cdecl _rotl64(unsigned long long value, int shift)
{
/* FIXME: this is probably not optimal */
return (value << shift) | (value >> (64 - shift));
}
-#endif
+#endif /* !__clang__ */
+#endif /* __x86_64__ */
-__INTRIN_INLINE unsigned int _rotr(unsigned int value, int shift)
+#ifndef __clang__
+
+__INTRIN_INLINE unsigned int __cdecl _rotr(unsigned int value, int shift)
{
unsigned int retval;
__asm__("rorl %b[shift], %k[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
-__INTRIN_INLINE unsigned char _rotr8(unsigned char value, unsigned char shift)
+__INTRIN_INLINE unsigned char __cdecl _rotr8(unsigned char value, unsigned char shift)
{
unsigned char retval;
__asm__("rorb %b[shift], %b[retval]" : [retval] "=qm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
-__INTRIN_INLINE unsigned short _rotr16(unsigned short value, unsigned char shift)
+__INTRIN_INLINE unsigned short __cdecl _rotr16(unsigned short value, unsigned char shift)
{
unsigned short retval;
__asm__("rorw %b[shift], %w[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
+#endif /* !__clang__ */
+
#ifdef __x86_64__
__INTRIN_INLINE unsigned long long _rotr64(unsigned long long value, int shift)
{
__asm__("rorq %b[shift], %k[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
-#else
-__INTRIN_INLINE unsigned long long _rotr64(unsigned long long value, int shift)
+#else /* __x86_64__ */
+#ifndef __clang__
+__INTRIN_INLINE unsigned long long __cdecl _rotr64(unsigned long long value, int shift)
{
/* FIXME: this is probably not optimal */
return (value >> shift) | (value << (64 - shift));
}
-#endif
+#endif /* !__clang__ */
+#endif /* __x86_64__ */
+
+#ifndef __clang__
__INTRIN_INLINE unsigned long __cdecl _lrotl(unsigned long value, int shift)
{
return retval;
}
+#endif /* !__clang__ */
+
/*
NOTE: in __ll_lshift, __ll_rshift and __ull_rshift we use the "A"
constraint (edx:eax) for the Mask argument, because it's the only way GCC
return retval;
}
-__INTRIN_INLINE unsigned short _byteswap_ushort(unsigned short value)
+__INTRIN_INLINE unsigned short __cdecl _byteswap_ushort(unsigned short value)
{
unsigned short retval;
__asm__("rorw $8, %w[retval]" : [retval] "=rm" (retval) : "[retval]" (value));
return retval;
}
-__INTRIN_INLINE unsigned long _byteswap_ulong(unsigned long value)
+__INTRIN_INLINE unsigned long __cdecl _byteswap_ulong(unsigned long value)
{
unsigned long retval;
__asm__("bswapl %[retval]" : [retval] "=r" (retval) : "[retval]" (value));
return retval;
}
#else
-__INTRIN_INLINE unsigned long long _byteswap_uint64(unsigned long long value)
+__INTRIN_INLINE unsigned long long __cdecl _byteswap_uint64(unsigned long long value)
{
union {
unsigned long long int64part;
return __builtin_clz(value);
}
+#ifndef __clang__
+
__INTRIN_INLINE unsigned int __popcnt(unsigned int value)
{
return __builtin_popcount(value);
return __builtin_popcount(value);
}
+#endif /* !__clang__ */
+
#ifdef __x86_64__
__INTRIN_INLINE unsigned long long __lzcnt64(unsigned long long value)
{
/*** 64-bit math ***/
+#ifndef __clang__
+
__INTRIN_INLINE long long __emul(int a, int b)
{
long long retval;
return retval;
}
+#endif /* !__clang__ */
+
__INTRIN_INLINE long long __cdecl _abs64(long long value)
{
return (value >= 0) ? value : -value;
__asm__ __volatile__("rep; outsl" : : [Port] "d" (Port), [Buffer] "S" (Buffer), "c" (Count));
}
-__INTRIN_INLINE int _inp(unsigned short Port)
+__INTRIN_INLINE int __cdecl _inp(unsigned short Port)
{
return __inbyte(Port);
}
-__INTRIN_INLINE unsigned short _inpw(unsigned short Port)
+__INTRIN_INLINE unsigned short __cdecl _inpw(unsigned short Port)
{
return __inword(Port);
}
-__INTRIN_INLINE unsigned long _inpd(unsigned short Port)
+__INTRIN_INLINE unsigned long __cdecl _inpd(unsigned short Port)
{
return __indword(Port);
}
-__INTRIN_INLINE int _outp(unsigned short Port, int databyte)
+__INTRIN_INLINE int __cdecl _outp(unsigned short Port, int databyte)
{
__outbyte(Port, (unsigned char)databyte);
return databyte;
}
-__INTRIN_INLINE unsigned short _outpw(unsigned short Port, unsigned short dataword)
+__INTRIN_INLINE unsigned short __cdecl _outpw(unsigned short Port, unsigned short dataword)
{
__outword(Port, dataword);
return dataword;
}
-__INTRIN_INLINE unsigned long _outpd(unsigned short Port, unsigned long dataword)
+__INTRIN_INLINE unsigned long __cdecl _outpd(unsigned short Port, unsigned long dataword)
{
__outdword(Port, dataword);
return dataword;
__asm__ __volatile__("cpuid" : "=a" (CPUInfo[0]), "=b" (CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) : "a" (InfoType), "c" (ECXValue));
}
+#ifndef __clang__
__INTRIN_INLINE unsigned long long __rdtsc(void)
{
#ifdef __x86_64__
return retval;
#endif
}
+#endif /* !__clang__ */
__INTRIN_INLINE void __writeeflags(uintptr_t Value)
{
/*** Interrupts ***/
-#ifdef __clang__
-#define __debugbreak() __asm__("int $3")
-#else
-__INTRIN_INLINE void __debugbreak(void)
+#ifndef __clang__
+
+__INTRIN_INLINE void __cdecl __debugbreak(void)
{
__asm__("int $3");
}
-#endif
/* Raise an invalid-opcode exception (#UD) by executing the UD2 instruction.
 * NOTE(review): the previous body executed "int $0x2c", which is the INT2C
 * service interrupt belonging to __int2c, not UD2 — likely a patch-splice
 * artifact; confirm against the upstream file. */
__INTRIN_INLINE void __ud2(void)
{
    __asm__("ud2");
}
-__INTRIN_INLINE void _disable(void)
+#endif /* !__clang__ */
+
+__INTRIN_INLINE void __cdecl _disable(void)
{
__asm__("cli" : : : "memory");
}
-__INTRIN_INLINE void _enable(void)
+__INTRIN_INLINE void __cdecl _enable(void)
{
__asm__("sti" : : : "memory");
}
__asm__("hlt" : : : "memory");
}
+#ifndef __clang__
/* Terminate immediately via the fast-fail mechanism (INT 0x29) with the
 * failure code passed in ECX; never returns, as signalled to the compiler
 * by __declspec(noreturn) and __builtin_unreachable(). */
__declspec(noreturn)
__INTRIN_INLINE void __fastfail(unsigned int Code)
{
    __asm__("int $0x29" : : "c"(Code) : "memory");
    __builtin_unreachable();
}
+#endif
/*** Protected memory management ***/
__asm__("mov %[Data], %%cr4" : : [Data] "r" (Data) : "memory");
}
+#ifndef __clang__
/* Write control register CR8. */
__INTRIN_INLINE void __writecr8(unsigned int Data)
{
    __asm__("mov %[Data], %%cr8" : : [Data] "r" (Data) : "memory");
}
+#endif
__INTRIN_INLINE unsigned long __readcr0(void)
{
return value;
}
+#ifndef __clang__
/* Read control register CR8. */
__INTRIN_INLINE unsigned long __readcr8(void)
{
    unsigned long value;
    __asm__ __volatile__("mov %%cr8, %[value]" : [value] "=r" (value));
    return value;
}
+#endif
#endif /* __x86_64__ */
/* Invalidate the TLB entry for the page containing Address.
 * The pointer is forced into a register ("b") and dereferenced inside the
 * template, so the emitted operand is exactly "invlpg (reg)". */
__INTRIN_INLINE void __invlpg(void *Address)
{
    __asm__ __volatile__ ("invlpg (%[Address])" : : [Address] "b" (Address) : "memory");
}
__asm__ __volatile__("sidt %0" : : "m"(*(short*)Destination) : "memory");
}
+__INTRIN_INLINE void _sgdt(void *Destination)
+{
+ __asm__ __volatile__("sgdt %0" : : "m"(*(short*)Destination) : "memory");
+}
+
/*** Misc operations ***/
+#ifndef __clang__
/* Emit the PAUSE instruction (spin-wait hint for busy-wait loops). */
__INTRIN_INLINE void _mm_pause(void)
{
    __asm__ __volatile__("pause" : : : "memory");
}
+#endif
__INTRIN_INLINE void __nop(void)
{