extern "C" {
#endif
+/*** memcpy must be memmove: forward to memmove so that overlapping
+     source/destination buffers are still copied correctly (plain memcpy
+     semantics make overlap undefined behavior) ***/
+__INTRIN_INLINE void* memcpy(void* dest, const void* source, size_t num)
+{
+ return memmove(dest, source, num);
+}
+
+
/*** Stack frame juggling ***/
+/* _ReturnAddress: the address the current function will return to. */
#define _ReturnAddress() (__builtin_return_address(0))
+/* _AddressOfReturnAddress: address of the return-address slot, taken as the
+   word just above the frame pointer.  NOTE(review): assumes a conventional
+   [frame ptr][return addr] layout -- confirm under -fomit-frame-pointer. */
#define _AddressOfReturnAddress() (&(((void **)(__builtin_frame_address(0)))[1]))
/* TODO: __getcallerseflags but how??? */
/* Maybe the same for x86? */
-#ifdef _x86_64
+#ifdef __x86_64__
#define _alloca(s) __builtin_alloca(s)
#endif
-/*** Atomic operations ***/
+/*** Memory barriers ***/
-#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
-#define _ReadWriteBarrier() __sync_synchronize()
-#else
-__INTRIN_INLINE void _MemoryBarrier(void)
+/* _ReadWriteBarrier: compiler-only barrier.  The empty asm with a "memory"
+   clobber stops the compiler from reordering or caching memory accesses
+   across this point; it emits no CPU fence instruction. */
+__INTRIN_INLINE void _ReadWriteBarrier(void)
{
 __asm__ __volatile__("" : : : "memory");
}
-#define _ReadWriteBarrier() _MemoryBarrier()
-#endif
-/* BUGBUG: GCC only supports full barriers */
+/* GCC only supports full barriers: read-only / write-only barriers are
+   mapped to the full read/write barrier. */
#define _ReadBarrier _ReadWriteBarrier
#define _WriteBarrier _ReadWriteBarrier
+/* _mm_mfence: full hardware memory fence (MFENCE); the "memory" clobber
+   additionally makes it a compiler barrier. */
+__INTRIN_INLINE void _mm_mfence(void)
+{
+ __asm__ __volatile__("mfence" : : : "memory");
+}
+
+/* _mm_lfence: load fence (LFENCE).  The asm itself carries no "memory"
+   clobber, so the compiler-level barrier is supplied explicitly on both
+   sides via _ReadBarrier. */
+__INTRIN_INLINE void _mm_lfence(void)
+{
+ _ReadBarrier();
+ __asm__ __volatile__("lfence");
+ _ReadBarrier();
+}
+
+/* _mm_sfence: store fence (SFENCE), bracketed by compiler write barriers
+   for the same reason as _mm_lfence above. */
+__INTRIN_INLINE void _mm_sfence(void)
+{
+ _WriteBarrier();
+ __asm__ __volatile__("sfence");
+ _WriteBarrier();
+}
+
+#ifdef __x86_64__
+/* __faststorefence: serialize stores with a locked read-modify-write on a
+   private stack slot; on x86-64 this drains the store buffer like MFENCE
+   but is typically cheaper.  The slot must be an in/out operand ("+m"):
+   LOCK OR both reads and writes it, so an input-only "m" would read an
+   uninitialized variable.  The "memory" clobber makes the asm a compiler
+   barrier, which a fence must be to be useful. */
+__INTRIN_INLINE void __faststorefence(void)
+{
+ long local;
+ __asm__ __volatile__("lock; orl $0, %0;" : "+m"(local) : : "memory");
+}
+#endif
+
+
+/*** Atomic operations ***/
+
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
}
#if defined(_M_AMD64)
-__INTRIN_INLINE long _InterlockedAnd64(volatile long long * const value, const long long mask)
+/* Atomically AND *value with mask; returns the PREVIOUS value (MSVC
+   contract).  Return type must be long long -- the old 'long' silently
+   truncated the 64-bit result. */
+__INTRIN_INLINE long long _InterlockedAnd64(volatile long long * const value, const long long mask)
{
 return __sync_fetch_and_and(value, mask);
}
}
#if defined(_M_AMD64)
-__INTRIN_INLINE long _InterlockedOr64(volatile long long * const value, const long long mask)
+/* Atomically OR *value with mask; returns the PREVIOUS value (MSVC
+   contract).  Return type widened to long long to match the operand. */
+__INTRIN_INLINE long long _InterlockedOr64(volatile long long * const value, const long long mask)
{
 return __sync_fetch_and_or(value, mask);
}
return __sync_fetch_and_xor(value, mask);
}
+#if defined(_M_AMD64)
+/* Atomically XOR *value with mask; returns the PREVIOUS value (MSVC
+   contract), which is exactly what __sync_fetch_and_xor provides. */
+__INTRIN_INLINE long long _InterlockedXor64(volatile long long * const value, const long long mask)
+{
+ return __sync_fetch_and_xor(value, mask);
+}
+#endif
+
+/* Interlocked increment/decrement via GCC __sync builtins.  MSVC semantics
+   return the RESULTING value, hence the *_and_fetch (not fetch_and_*)
+   variants. */
+__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
+{
+ return __sync_sub_and_fetch(lpAddend, 1);
+}
+
+__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
+{
+ return __sync_add_and_fetch(lpAddend, 1);
+}
+
+__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend)
+{
+ return __sync_sub_and_fetch(lpAddend, 1);
+}
+
+__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend)
+{
+ return __sync_add_and_fetch(lpAddend, 1);
+}
+
+/* 64-bit variants are provided only on AMD64 builds. */
+#if defined(_M_AMD64)
+__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend)
+{
+ return __sync_sub_and_fetch(lpAddend, 1);
+}
+
+__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend)
+{
+ return __sync_add_and_fetch(lpAddend, 1);
+}
+#endif
+
#else
__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
return y;
}
+/* Fallback path (old GCC without __sync builtins): build inc/dec on top of
+   the hand-rolled _InterlockedExchangeAdd*, which return the PREVIOUS
+   value, then adjust by the addend so the MSVC "returns resulting value"
+   contract still holds. */
+__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
+{
+ return _InterlockedExchangeAdd(lpAddend, -1) - 1;
+}
+
+__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
+{
+ return _InterlockedExchangeAdd(lpAddend, 1) + 1;
+}
+
+__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend)
+{
+ return _InterlockedExchangeAdd16(lpAddend, -1) - 1;
+}
+
+__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend)
+{
+ return _InterlockedExchangeAdd16(lpAddend, 1) + 1;
+}
+
+/* 64-bit variants are provided only on AMD64 builds. */
+#if defined(_M_AMD64)
+__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend)
+{
+ return _InterlockedExchangeAdd64(lpAddend, -1) - 1;
+}
+
+__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend)
+{
+ return _InterlockedExchangeAdd64(lpAddend, 1) + 1;
+}
+#endif
+
#endif
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 && defined(__x86_64__)
return Value;
}
-__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
-{
- return _InterlockedExchangeAdd(lpAddend, -1) - 1;
-}
-
-__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
-{
- return _InterlockedExchangeAdd(lpAddend, 1) + 1;
-}
-
-__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend)
-{
- return _InterlockedExchangeAdd16(lpAddend, -1) - 1;
-}
-
-__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend)
-{
- return _InterlockedExchangeAdd16(lpAddend, 1) + 1;
-}
-
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend)
-{
- return _InterlockedExchangeAdd64(lpAddend, -1) - 1;
-}
-
-__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend)
-{
- return _InterlockedExchangeAdd64(lpAddend, 1) + 1;
-}
-#endif
-
__INTRIN_INLINE unsigned char _interlockedbittestandreset(volatile long * a, const long b)
{
unsigned char retval;
+/* _rotr8: rotate the byte 'value' right by 'shift' bits (RORB).  The output
+   constraint must be "q" (a register with a byte subregister: AL/BL/CL/DL
+   class) rather than "r": on i386, "=rm" could select a register with no
+   %b form, producing invalid assembly. */
__INTRIN_INLINE unsigned char _rotr8(unsigned char value, unsigned char shift)
{
 unsigned char retval;
-	__asm__("rorb %b[shift], %b[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
+	__asm__("rorb %b[shift], %b[retval]" : [retval] "=qm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
 return retval;
}
__asm__ __volatile__("rep; outsl" : : [Port] "d" (Port), [Buffer] "S" (Buffer), "c" (Count));
}
+/* MSVC conio-style port I/O wrappers layered on the __inbyte/__inword/
+   __indword and __outbyte/__outword/__outdword primitives.  Per the MSVC
+   contract, the _outp* family returns the value that was written. */
+__INTRIN_INLINE int _inp(unsigned short Port)
+{
+ return __inbyte(Port);
+}
+
+__INTRIN_INLINE unsigned short _inpw(unsigned short Port)
+{
+ return __inword(Port);
+}
+
+__INTRIN_INLINE unsigned long _inpd(unsigned short Port)
+{
+ return __indword(Port);
+}
+
+__INTRIN_INLINE int _outp(unsigned short Port, int databyte)
+{
+ __outbyte(Port, databyte);
+ return databyte;
+}
+
+__INTRIN_INLINE unsigned short _outpw(unsigned short Port, unsigned short dataword)
+{
+ __outword(Port, dataword);
+ return dataword;
+}
+
+__INTRIN_INLINE unsigned long _outpd(unsigned short Port, unsigned long dataword)
+{
+ __outdword(Port, dataword);
+ return dataword;
+}
+
/*** System information ***/
__INTRIN_INLINE void __cpuid(int CPUInfo[], const int InfoType)
}
/*** Interrupts ***/
+/* __debugbreak: raise a breakpoint trap (INT3). */
+#ifdef __clang__
+/* NOTE(review): defined as a macro under clang, presumably so the INT3 is
+   emitted at the use site rather than inside a callee frame -- confirm. */
+#define __debugbreak() __asm__("int $3")
+#else
__INTRIN_INLINE void __debugbreak(void)
{
 __asm__("int $3");
}
+#endif
__INTRIN_INLINE void __int2c(void)
{
+/* _disable: clear the interrupt flag (CLI).  The "memory" clobber keeps the
+   compiler from moving memory accesses into or out of the interrupts-off
+   region. */
__INTRIN_INLINE void _disable(void)
{
-	__asm__("cli");
+	__asm__("cli" : : : "memory");
}
+/* _enable: set the interrupt flag (STI); barrier for the same reason. */
__INTRIN_INLINE void _enable(void)
{
-	__asm__("sti");
+	__asm__("sti" : : : "memory");
}
+/* __halt: HLT until the next interrupt; the barrier ensures stores issued
+   before halting are not sunk past the instruction. */
__INTRIN_INLINE void __halt(void)
{
-	__asm__("hlt\n\t");
+	__asm__("hlt\n\t" : : : "memory");
}
/*** Protected memory management ***/
+#ifdef _M_AMD64
__INTRIN_INLINE void __writecr0(const unsigned __int64 Data)
{
__asm__("mov %[Data], %%cr0" : : [Data] "r" (Data) : "memory");
__asm__("mov %[Data], %%cr4" : : [Data] "r" (Data) : "memory");
}
-#ifdef _M_AMD64
__INTRIN_INLINE void __writecr8(const unsigned __int64 Data)
{
__asm__("mov %[Data], %%cr8" : : [Data] "r" (Data) : "memory");
return value;
}
#else
+/* 32-bit (i386) control-register writers.  The "memory" clobber is needed
+   because writing CR0/CR3/CR4 can change paging and caching behavior,
+   invalidating any memory assumptions the compiler has cached. */
+__INTRIN_INLINE void __writecr0(const unsigned int Data)
+{
+ __asm__("mov %[Data], %%cr0" : : [Data] "r" (Data) : "memory");
+}
+
+__INTRIN_INLINE void __writecr3(const unsigned int Data)
+{
+ __asm__("mov %[Data], %%cr3" : : [Data] "r" (Data) : "memory");
+}
+
+__INTRIN_INLINE void __writecr4(const unsigned int Data)
+{
+ __asm__("mov %[Data], %%cr4" : : [Data] "r" (Data) : "memory");
+}
+
__INTRIN_INLINE unsigned long __readcr0(void)
{
unsigned long value;
+/* __invlpg: invalidate the TLB entry for the page containing Address.  The
+   "m" operand hands the asm a concrete addressable byte; the added "memory"
+   clobber prevents reordering of accesses that depend on the (now stale)
+   translation. */
__INTRIN_INLINE void __invlpg(void * const Address)
{
-	__asm__("invlpg %[Address]" : : [Address] "m" (*((unsigned char *)(Address))));
+	__asm__("invlpg %[Address]" : : [Address] "m" (*((unsigned char *)(Address))) : "memory");
}
+/* __wbinvd: write back and invalidate all caches (WBINVD); "memory" clobber
+   forces pending compiler-cached stores out before the flush. */
__INTRIN_INLINE void __wbinvd(void)
{
-	__asm__ __volatile__("wbinvd");
+	__asm__ __volatile__("wbinvd" : : : "memory");
}
__INTRIN_INLINE void __lidt(void *Source)
__asm__ __volatile__("sidt %0" : : "m"(*(short*)Destination) : "memory");
}
+/*** Misc operations ***/
+
+/* _mm_pause: PAUSE (REP NOP) spin-wait hint.  The "memory" clobber forces
+   the spin condition to be re-read from memory on every loop iteration. */
__INTRIN_INLINE void _mm_pause(void)
{
-	__asm__ __volatile__("pause");
+	__asm__ __volatile__("pause" : : : "memory");
+}
+
+/* __nop: emit a single NOP instruction. */
+__INTRIN_INLINE void __nop(void)
+{
+ __asm__ __volatile__("nop");
+}
#ifdef __cplusplus