extern "C" {
#endif
+/*** memcpy must be implemented as memmove (overlapping buffers must work) ***/
+__INTRIN_INLINE void* memcpy(void* dest, const void* source, size_t num)
+{
+ return memmove(dest, source, num);
+}
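+/* Consequently, overlapping copies such as memcpy(buf + 1, buf, n - 1) are
+   safe with this implementation, since memmove handles overlap. */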
+
/*** Stack frame juggling ***/
#define _ReturnAddress() (__builtin_return_address(0))
#define _AddressOfReturnAddress() (&(((void **)(__builtin_frame_address(0)))[1]))
/* TODO: __getcallerseflags but how??? */
/* Maybe the same for x86? */
-#ifdef _x86_64
+#ifdef __x86_64__
#define _alloca(s) __builtin_alloca(s)
#endif
_WriteBarrier();
}
-#ifdef _x86_64
+#ifdef __x86_64__
__INTRIN_INLINE void __faststorefence(void)
{
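+ /* A lock-prefixed read-modify-write on a dummy stack slot acts as a full
+    memory barrier (including StoreLoad) and is often cheaper than mfence. */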
- long local;
+ long local;
__asm__ __volatile__("lock; orl $0, %0;" : : "m"(local));
}
#endif
}
#if defined(_M_AMD64)
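+/* Note: on Win64 (LLP64), long is only 32 bits, so the 64-bit intrinsics
+   must take and return long long to avoid truncating the upper 32 bits. */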
-__INTRIN_INLINE long _InterlockedAnd64(volatile long long * const value, const long long mask)
+__INTRIN_INLINE long long _InterlockedAnd64(volatile long long * const value, const long long mask)
{
return __sync_fetch_and_and(value, mask);
}
}
#if defined(_M_AMD64)
-__INTRIN_INLINE long _InterlockedOr64(volatile long long * const value, const long long mask)
+__INTRIN_INLINE long long _InterlockedOr64(volatile long long * const value, const long long mask)
{
return __sync_fetch_and_or(value, mask);
}
return __sync_fetch_and_xor(value, mask);
}
+#if defined(_M_AMD64)
+__INTRIN_INLINE long long _InterlockedXor64(volatile long long * const value, const long long mask)
+{
+ return __sync_fetch_and_xor(value, mask);
+}
+#endif
+
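+/* The Interlocked(In|De)crement family returns the *new* value, which is
+   exactly what __sync_add_and_fetch / __sync_sub_and_fetch provide. */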
+__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
+{
+ return __sync_sub_and_fetch(lpAddend, 1);
+}
+
+__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
+{
+ return __sync_add_and_fetch(lpAddend, 1);
+}
+
+__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend)
+{
+ return __sync_sub_and_fetch(lpAddend, 1);
+}
+
+__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend)
+{
+ return __sync_add_and_fetch(lpAddend, 1);
+}
+
+#if defined(_M_AMD64)
+__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend)
+{
+ return __sync_sub_and_fetch(lpAddend, 1);
+}
+
+__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend)
+{
+ return __sync_add_and_fetch(lpAddend, 1);
+}
+#endif
+
#else
__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
return y;
}
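+/* Fallback path: _InterlockedExchangeAdd returns the value *before* the
+   addition, so adjust by the addend to match the return-new-value contract
+   of Interlocked(In|De)crement. */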
+__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
+{
+ return _InterlockedExchangeAdd(lpAddend, -1) - 1;
+}
+
+__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
+{
+ return _InterlockedExchangeAdd(lpAddend, 1) + 1;
+}
+
+__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend)
+{
+ return _InterlockedExchangeAdd16(lpAddend, -1) - 1;
+}
+
+__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend)
+{
+ return _InterlockedExchangeAdd16(lpAddend, 1) + 1;
+}
+
+#if defined(_M_AMD64)
+__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend)
+{
+ return _InterlockedExchangeAdd64(lpAddend, -1) - 1;
+}
+
+__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend)
+{
+ return _InterlockedExchangeAdd64(lpAddend, 1) + 1;
+}
+#endif
+
#endif
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 && defined(__x86_64__)
return Value;
}
-__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
-{
- return _InterlockedExchangeAdd(lpAddend, -1) - 1;
-}
-
-__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
-{
- return _InterlockedExchangeAdd(lpAddend, 1) + 1;
-}
-
-__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend)
-{
- return _InterlockedExchangeAdd16(lpAddend, -1) - 1;
-}
-
-__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend)
-{
- return _InterlockedExchangeAdd16(lpAddend, 1) + 1;
-}
-
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend)
-{
- return _InterlockedExchangeAdd64(lpAddend, -1) - 1;
-}
-
-__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend)
-{
- return _InterlockedExchangeAdd64(lpAddend, 1) + 1;
-}
-#endif
-
__INTRIN_INLINE unsigned char _interlockedbittestandreset(volatile long * a, const long b)
{
unsigned char retval;
}
/*** Interrupts ***/
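+/* Presumably clang already provides __debugbreak as a builtin, so it cannot
+   be redefined as a function here; fall back to a macro instead. */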
+#ifdef __clang__
+#define __debugbreak() __asm__("int $3")
+#else
__INTRIN_INLINE void __debugbreak(void)
{
__asm__("int $3");
}
+#endif
__INTRIN_INLINE void __int2c(void)
{
/*** Protected memory management ***/
+#ifdef _M_AMD64
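+/* Control registers are 64 bits wide in long mode, so the unsigned __int64
+   variants are x64-only; 32-bit variants are provided in the #else branch. */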
__INTRIN_INLINE void __writecr0(const unsigned __int64 Data)
{
__asm__("mov %[Data], %%cr0" : : [Data] "r" (Data) : "memory");
__asm__("mov %[Data], %%cr4" : : [Data] "r" (Data) : "memory");
}
-#ifdef _M_AMD64
__INTRIN_INLINE void __writecr8(const unsigned __int64 Data)
{
__asm__("mov %[Data], %%cr8" : : [Data] "r" (Data) : "memory");
return value;
}
#else
+__INTRIN_INLINE void __writecr0(const unsigned int Data)
+{
+ __asm__("mov %[Data], %%cr0" : : [Data] "r" (Data) : "memory");
+}
+
+__INTRIN_INLINE void __writecr3(const unsigned int Data)
+{
+ __asm__("mov %[Data], %%cr3" : : [Data] "r" (Data) : "memory");
+}
+
+__INTRIN_INLINE void __writecr4(const unsigned int Data)
+{
+ __asm__("mov %[Data], %%cr4" : : [Data] "r" (Data) : "memory");
+}
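+/* Illustrative sketch only: kernel code might briefly clear CR0.WP (bit 16)
+   to write through read-only pages: __writecr0(__readcr0() & ~0x10000); */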
+
__INTRIN_INLINE unsigned long __readcr0(void)
{
unsigned long value;