#endif
/*** memcpy must be memmove ***/
+void* memmove(void* dest, const void* source, size_t num);
__INTRIN_INLINE void* memcpy(void* dest, const void* source, size_t num)
{
return memmove(dest, source, num);
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
-__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
+__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * Destination, char Exchange, char Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
-__INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * const Destination, const short Exchange, const short Comperand)
+__INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * Destination, short Exchange, short Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * const Destination, const long Exchange, const long Comperand)
+__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * Destination, long Exchange, long Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
-__INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * const Destination, void * const Exchange, void * const Comperand)
+__INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * Destination, void * Exchange, void * Comperand)
{
return (void *)__sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
#endif
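+/* Illustrative usage sketch, not part of this header: the canonical
+   compare-exchange retry loop. _InterlockedCompareExchange returns the value
+   it actually found, so comparing against the snapshot tells us whether
+   another thread got in between. atomic_max_long is a hypothetical name. */
+static __inline__ long atomic_max_long(volatile long * target, long value)
+{
+ long snapshot;
+ do
+ {
+  snapshot = *target;
+  if (snapshot >= value)
+   return snapshot;
+ }
+ while (_InterlockedCompareExchange(target, value, snapshot) != snapshot);
+ return value;
+}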
-__INTRIN_INLINE char _InterlockedExchange8(volatile char * const Target, const char Value)
+__INTRIN_INLINE char _InterlockedExchange8(volatile char * Target, char Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */
__sync_synchronize();
return __sync_lock_test_and_set(Target, Value);
}
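+/* Illustrative only, not part of this header: the exchange above is the
+   classic test-and-set primitive. A minimal spinlock sketch under a
+   hypothetical lock convention (0 = free, 1 = held): */
+static __inline__ void example_spin_lock(volatile char * lock)
+{
+ while (_InterlockedExchange8(lock, 1) != 0)
+ {
+  /* spin until the previous owner writes 0 back */
+ }
+}
+static __inline__ void example_spin_unlock(volatile char * lock)
+{
+ _InterlockedExchange8(lock, 0);
+}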
-__INTRIN_INLINE short _InterlockedExchange16(volatile short * const Target, const short Value)
+__INTRIN_INLINE short _InterlockedExchange16(volatile short * Target, short Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */
__sync_synchronize();
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedExchange(volatile long * const Target, const long Value)
+__INTRIN_INLINE long _InterlockedExchange(volatile long * Target, long Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */
__sync_synchronize();
return __sync_lock_test_and_set(Target, Value);
}
-__INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * const Target, void * const Value)
+__INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * Target, void * Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */
__sync_synchronize();
#endif
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedExchange64(volatile long long * const Target, const long long Value)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedExchange64(volatile long long * Target, long long Value)
{
/* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */
__sync_synchronize();
}
#endif
-__INTRIN_INLINE short _InterlockedExchangeAdd16(volatile short * const Addend, const short Value)
+__INTRIN_INLINE char _InterlockedExchangeAdd8(volatile char * Addend, char Value)
+{
+ return __sync_fetch_and_add(Addend, Value);
+}
+
+__INTRIN_INLINE short _InterlockedExchangeAdd16(volatile short * Addend, short Value)
{
return __sync_fetch_and_add(Addend, Value);
}
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * const Addend, const long Value)
+__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * Addend, long Value)
{
return __sync_fetch_and_add(Addend, Value);
}
#endif
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedExchangeAdd64(volatile long long * const Addend, const long long Value)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedExchangeAdd64(volatile long long * Addend, long long Value)
{
return __sync_fetch_and_add(Addend, Value);
}
#endif
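+/* Illustrative only: the *ExchangeAdd family returns the value *before* the
+   addition, which is exactly what a ticket dispenser needs. Hypothetical
+   example, not part of this header: */
+static __inline__ long example_next_ticket(volatile long * counter)
+{
+ return _InterlockedExchangeAdd(counter, 1);
+}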
-__INTRIN_INLINE char _InterlockedAnd8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedAnd8(volatile char * value, char mask)
{
return __sync_fetch_and_and(value, mask);
}
-__INTRIN_INLINE short _InterlockedAnd16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedAnd16(volatile short * value, short mask)
{
return __sync_fetch_and_and(value, mask);
}
-__INTRIN_INLINE long _InterlockedAnd(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedAnd(volatile long * value, long mask)
{
return __sync_fetch_and_and(value, mask);
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedAnd64(volatile long long * const value, const long long mask)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedAnd64(volatile long long * value, long long mask)
{
return __sync_fetch_and_and(value, mask);
}
#endif
-__INTRIN_INLINE char _InterlockedOr8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedOr8(volatile char * value, char mask)
{
return __sync_fetch_and_or(value, mask);
}
-__INTRIN_INLINE short _InterlockedOr16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedOr16(volatile short * value, short mask)
{
return __sync_fetch_and_or(value, mask);
}
-__INTRIN_INLINE long _InterlockedOr(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedOr(volatile long * value, long mask)
{
return __sync_fetch_and_or(value, mask);
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedOr64(volatile long long * const value, const long long mask)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedOr64(volatile long long * value, long long mask)
{
return __sync_fetch_and_or(value, mask);
}
#endif
-__INTRIN_INLINE char _InterlockedXor8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedXor8(volatile char * value, char mask)
{
return __sync_fetch_and_xor(value, mask);
}
-__INTRIN_INLINE short _InterlockedXor16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedXor16(volatile short * value, short mask)
{
return __sync_fetch_and_xor(value, mask);
}
-__INTRIN_INLINE long _InterlockedXor(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedXor(volatile long * value, long mask)
{
return __sync_fetch_and_xor(value, mask);
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedXor64(volatile long long * const value, const long long mask)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedXor64(volatile long long * value, long long mask)
{
return __sync_fetch_and_xor(value, mask);
}
#endif
#ifndef __clang__
-__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
+__INTRIN_INLINE long _InterlockedDecrement(volatile long * lpAddend)
{
return __sync_sub_and_fetch(lpAddend, 1);
}
-__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
+__INTRIN_INLINE long _InterlockedIncrement(volatile long * lpAddend)
{
return __sync_add_and_fetch(lpAddend, 1);
}
#endif
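+/* Note the contrast with the *ExchangeAdd family: Increment/Decrement return
+   the value *after* the operation. Starting from 0, _InterlockedIncrement
+   yields 1 where _InterlockedExchangeAdd(..., 1) yields 0. */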
-__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend)
+__INTRIN_INLINE short _InterlockedDecrement16(volatile short * lpAddend)
{
return __sync_sub_and_fetch(lpAddend, 1);
}
-__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend)
+__INTRIN_INLINE short _InterlockedIncrement16(volatile short * lpAddend)
{
return __sync_add_and_fetch(lpAddend, 1);
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * lpAddend)
{
return __sync_sub_and_fetch(lpAddend, 1);
}
-__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend)
+__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * lpAddend)
{
return __sync_add_and_fetch(lpAddend, 1);
}
#else /* (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 */
-__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * const Destination, const char Exchange, const char Comperand)
+__INTRIN_INLINE char _InterlockedCompareExchange8(volatile char * Destination, char Exchange, char Comperand)
{
char retval = Comperand;
__asm__("lock; cmpxchgb %b[Exchange], %[Destination]" : [retval] "+a" (retval) : [Destination] "m" (*Destination), [Exchange] "q" (Exchange) : "memory");
return retval;
}
-__INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * const Destination, const short Exchange, const short Comperand)
+__INTRIN_INLINE short _InterlockedCompareExchange16(volatile short * Destination, short Exchange, short Comperand)
{
short retval = Comperand;
__asm__("lock; cmpxchgw %w[Exchange], %[Destination]" : [retval] "+a" (retval) : [Destination] "m" (*Destination), [Exchange] "q" (Exchange): "memory");
return retval;
}
-__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * const Destination, const long Exchange, const long Comperand)
+__INTRIN_INLINE long _InterlockedCompareExchange(volatile long * Destination, long Exchange, long Comperand)
{
long retval = Comperand;
__asm__("lock; cmpxchgl %k[Exchange], %[Destination]" : [retval] "+a" (retval) : [Destination] "m" (*Destination), [Exchange] "q" (Exchange): "memory");
return retval;
}
-__INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * const Destination, void * const Exchange, void * const Comperand)
+__INTRIN_INLINE void * _InterlockedCompareExchangePointer(void * volatile * Destination, void * Exchange, void * Comperand)
{
void * retval = (void *)Comperand;
__asm__("lock; cmpxchgl %k[Exchange], %[Destination]" : [retval] "=a" (retval) : "[retval]" (retval), [Destination] "m" (*Destination), [Exchange] "q" (Exchange) : "memory");
return retval;
}
-__INTRIN_INLINE char _InterlockedExchange8(volatile char * const Target, const char Value)
+__INTRIN_INLINE char _InterlockedExchange8(volatile char * Target, char Value)
{
char retval = Value;
__asm__("xchgb %[retval], %[Target]" : [retval] "+r" (retval) : [Target] "m" (*Target) : "memory");
return retval;
}
-__INTRIN_INLINE short _InterlockedExchange16(volatile short * const Target, const short Value)
+__INTRIN_INLINE short _InterlockedExchange16(volatile short * Target, short Value)
{
short retval = Value;
__asm__("xchgw %[retval], %[Target]" : [retval] "+r" (retval) : [Target] "m" (*Target) : "memory");
return retval;
}
-__INTRIN_INLINE long _InterlockedExchange(volatile long * const Target, const long Value)
+__INTRIN_INLINE long _InterlockedExchange(volatile long * Target, long Value)
{
long retval = Value;
__asm__("xchgl %[retval], %[Target]" : [retval] "+r" (retval) : [Target] "m" (*Target) : "memory");
return retval;
}
-__INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * const Target, void * const Value)
+__INTRIN_INLINE void * _InterlockedExchangePointer(void * volatile * Target, void * Value)
{
void * retval = Value;
__asm__("xchgl %[retval], %[Target]" : [retval] "+r" (retval) : [Target] "m" (*Target) : "memory");
return retval;
}
-__INTRIN_INLINE long _InterlockedExchangeAdd16(volatile short * const Addend, const short Value)
+__INTRIN_INLINE char _InterlockedExchangeAdd8(volatile char * Addend, char Value)
{
- long retval = Value;
+ char retval = Value;
+ __asm__("lock; xaddb %[retval], %[Addend]" : [retval] "+r" (retval) : [Addend] "m" (*Addend) : "memory");
+ return retval;
+}
+
+__INTRIN_INLINE short _InterlockedExchangeAdd16(volatile short * Addend, short Value)
+{
+ short retval = Value;
__asm__("lock; xaddw %[retval], %[Addend]" : [retval] "+r" (retval) : [Addend] "m" (*Addend) : "memory");
return retval;
}
-__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * const Addend, const long Value)
+__INTRIN_INLINE long _InterlockedExchangeAdd(volatile long * Addend, long Value)
{
long retval = Value;
__asm__("lock; xaddl %[retval], %[Addend]" : [retval] "+r" (retval) : [Addend] "m" (*Addend) : "memory");
return retval;
}
-__INTRIN_INLINE char _InterlockedAnd8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedAnd8(volatile char * value, char mask)
{
char x;
char y;
return y;
}
-__INTRIN_INLINE short _InterlockedAnd16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedAnd16(volatile short * value, short mask)
{
short x;
short y;
return y;
}
-__INTRIN_INLINE long _InterlockedAnd(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedAnd(volatile long * value, long mask)
{
long x;
long y;
return y;
}
-__INTRIN_INLINE char _InterlockedOr8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedOr8(volatile char * value, char mask)
{
char x;
char y;
return y;
}
-__INTRIN_INLINE short _InterlockedOr16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedOr16(volatile short * value, short mask)
{
short x;
short y;
return y;
}
-__INTRIN_INLINE long _InterlockedOr(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedOr(volatile long * value, long mask)
{
long x;
long y;
return y;
}
-__INTRIN_INLINE char _InterlockedXor8(volatile char * const value, const char mask)
+__INTRIN_INLINE char _InterlockedXor8(volatile char * value, char mask)
{
char x;
char y;
return y;
}
-__INTRIN_INLINE short _InterlockedXor16(volatile short * const value, const short mask)
+__INTRIN_INLINE short _InterlockedXor16(volatile short * value, short mask)
{
short x;
short y;
return y;
}
-__INTRIN_INLINE long _InterlockedXor(volatile long * const value, const long mask)
+__INTRIN_INLINE long _InterlockedXor(volatile long * value, long mask)
{
long x;
long y;
return y;
}
-__INTRIN_INLINE long _InterlockedDecrement(volatile long * const lpAddend)
+__INTRIN_INLINE long _InterlockedDecrement(volatile long * lpAddend)
{
return _InterlockedExchangeAdd(lpAddend, -1) - 1;
}
-__INTRIN_INLINE long _InterlockedIncrement(volatile long * const lpAddend)
+__INTRIN_INLINE long _InterlockedIncrement(volatile long * lpAddend)
{
return _InterlockedExchangeAdd(lpAddend, 1) + 1;
}
-__INTRIN_INLINE short _InterlockedDecrement16(volatile short * const lpAddend)
+__INTRIN_INLINE short _InterlockedDecrement16(volatile short * lpAddend)
{
return _InterlockedExchangeAdd16(lpAddend, -1) - 1;
}
-__INTRIN_INLINE short _InterlockedIncrement16(volatile short * const lpAddend)
+__INTRIN_INLINE short _InterlockedIncrement16(volatile short * lpAddend)
{
return _InterlockedExchangeAdd16(lpAddend, 1) + 1;
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * const lpAddend)
+#if defined(__x86_64__)
+__INTRIN_INLINE long long _InterlockedDecrement64(volatile long long * lpAddend)
{
return _InterlockedExchangeAdd64(lpAddend, -1) - 1;
}
-__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * const lpAddend)
+__INTRIN_INLINE long long _InterlockedIncrement64(volatile long long * lpAddend)
{
return _InterlockedExchangeAdd64(lpAddend, 1) + 1;
}
#if (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 && defined(__x86_64__)
-__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * const Destination, const long long Exchange, const long long Comperand)
+__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * Destination, long long Exchange, long long Comperand)
{
return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}
#else
-__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * const Destination, const long long Exchange, const long long Comperand)
+__INTRIN_INLINE long long _InterlockedCompareExchange64(volatile long long * Destination, long long Exchange, long long Comperand)
{
long long retval = Comperand;
#endif
-__INTRIN_INLINE long _InterlockedAddLargeStatistic(volatile long long * const Addend, const long Value)
+#ifdef __i386__
+__INTRIN_INLINE long _InterlockedAddLargeStatistic(volatile long long * Addend, long Value)
{
__asm__
(
return Value;
}
+#endif /* __i386__ */
-__INTRIN_INLINE unsigned char _interlockedbittestandreset(volatile long * a, const long b)
+__INTRIN_INLINE unsigned char _interlockedbittestandreset(volatile long * a, long b)
{
unsigned char retval;
__asm__("lock; btrl %[b], %[a]; setb %b[retval]" : [retval] "=q" (retval), [a] "+m" (*a) : [b] "Ir" (b) : "memory");
return retval;
}
-#if defined(_M_AMD64)
-__INTRIN_INLINE unsigned char _interlockedbittestandreset64(volatile long long * a, const long long b)
+#if defined(__x86_64__)
+__INTRIN_INLINE unsigned char _interlockedbittestandreset64(volatile long long * a, long long b)
{
unsigned char retval;
__asm__("lock; btrq %[b], %[a]; setb %b[retval]" : [retval] "=r" (retval), [a] "+m" (*a) : [b] "Ir" (b) : "memory");
}
#endif
-__INTRIN_INLINE unsigned char _interlockedbittestandset(volatile long * a, const long b)
+__INTRIN_INLINE unsigned char _interlockedbittestandset(volatile long * a, long b)
{
unsigned char retval;
__asm__("lock; btsl %[b], %[a]; setc %b[retval]" : [retval] "=q" (retval), [a] "+m" (*a) : [b] "Ir" (b) : "memory");
return retval;
}
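+/* Illustrative only: the returned byte is the bit's previous state, so a
+   zero result means this caller claimed the slot. Hypothetical example, not
+   part of this header: */
+static __inline__ int example_claim_slot(volatile long * bitmap, long bit)
+{
+ return _interlockedbittestandset(bitmap, bit) == 0;
+}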
-#if defined(_M_AMD64)
-__INTRIN_INLINE unsigned char _interlockedbittestandset64(volatile long long * a, const long long b)
+#if defined(__x86_64__)
+__INTRIN_INLINE unsigned char _interlockedbittestandset64(volatile long long * a, long long b)
{
unsigned char retval;
__asm__("lock; btsq %[b], %[a]; setc %b[retval]" : [retval] "=r" (retval), [a] "+m" (*a) : [b] "Ir" (b) : "memory");
/*** String operations ***/
/* NOTE: we don't set a memory clobber in the __stosX functions because Visual C++ doesn't */
-__INTRIN_INLINE void __stosb(unsigned char * Dest, const unsigned char Data, size_t Count)
+__INTRIN_INLINE void __stosb(unsigned char * Dest, unsigned char Data, size_t Count)
{
__asm__ __volatile__
(
);
}
-__INTRIN_INLINE void __stosw(unsigned short * Dest, const unsigned short Data, size_t Count)
+__INTRIN_INLINE void __stosw(unsigned short * Dest, unsigned short Data, size_t Count)
{
__asm__ __volatile__
(
);
}
-__INTRIN_INLINE void __stosd(unsigned long * Dest, const unsigned long Data, size_t Count)
+__INTRIN_INLINE void __stosd(unsigned long * Dest, unsigned long Data, size_t Count)
{
__asm__ __volatile__
(
);
}
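+/* Illustrative only: __stosd is "rep stosl", i.e. a 32-bit-element memset.
+   Hypothetical example, not part of this header: */
+static __inline__ void example_clear_entries(unsigned long * table, size_t count)
+{
+ __stosd(table, 0, count);
+}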
-#ifdef _M_AMD64
-__INTRIN_INLINE void __stosq(unsigned __int64 * Dest, const unsigned __int64 Data, size_t Count)
+#ifdef __x86_64__
+__INTRIN_INLINE void __stosq(unsigned long long * Dest, unsigned long long Data, size_t Count)
{
__asm__ __volatile__
(
);
}
-#ifdef _M_AMD64
+#ifdef __x86_64__
__INTRIN_INLINE void __movsq(unsigned long long * Destination, const unsigned long long * Source, size_t Count)
{
__asm__ __volatile__
}
#endif
-#if defined(_M_AMD64)
+#if defined(__x86_64__)
/*** GS segment addressing ***/
__asm__ __volatile__("movl %k[Data], %%gs:%a[Offset]" : : [Offset] "ir" (Offset), [Data] "ir" (Data) : "memory");
}
-__INTRIN_INLINE void __writegsqword(unsigned long Offset, unsigned __int64 Data)
+__INTRIN_INLINE void __writegsqword(unsigned long Offset, unsigned long long Data)
{
__asm__ __volatile__("movq %q[Data], %%gs:%a[Offset]" : : [Offset] "ir" (Offset), [Data] "ir" (Data) : "memory");
}
return value;
}
-__INTRIN_INLINE unsigned __int64 __readgsqword(unsigned long Offset)
+__INTRIN_INLINE unsigned long long __readgsqword(unsigned long Offset)
{
- unsigned __int64 value;
+ unsigned long long value;
__asm__ __volatile__("movq %%gs:%a[Offset], %q[value]" : [value] "=r" (value) : [Offset] "ir" (Offset));
return value;
}
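+/* Illustrative only: on x64 Windows the TEB is GS-based and its self pointer
+   sits at offset 0x30 in the NT_TIB, so the usual NtCurrentTeb() idiom
+   reduces to __readgsqword(0x30) cast to the TEB pointer type. */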
__asm__ __volatile__("incl %%gs:%a[Offset]" : : [Offset] "ir" (Offset) : "memory");
}
+__INTRIN_INLINE void __incgsqword(unsigned long Offset)
+{
+ __asm__ __volatile__("incq %%gs:%a[Offset]" : : [Offset] "ir" (Offset) : "memory");
+}
+
__INTRIN_INLINE void __addgsbyte(unsigned long Offset, unsigned char Data)
{
__asm__ __volatile__("addb %b[Data], %%gs:%a[Offset]" : : [Offset] "ir" (Offset), [Data] "ir" (Data) : "memory");
__asm__ __volatile__("addl %k[Data], %%gs:%a[Offset]" : : [Offset] "ir" (Offset), [Data] "ir" (Data) : "memory");
}
-__INTRIN_INLINE void __addgsqword(unsigned long Offset, unsigned __int64 Data)
+__INTRIN_INLINE void __addgsqword(unsigned long Offset, unsigned long long Data)
{
__asm__ __volatile__("addq %k[Data], %%gs:%a[Offset]" : : [Offset] "ir" (Offset), [Data] "ir" (Data) : "memory");
}
-#else /* defined(_M_AMD64) */
+#else /* defined(__x86_64__) */
/*** FS segment addressing ***/
__asm__ __volatile__("addl %k[Data], %%fs:%a[Offset]" : : [Offset] "ir" (Offset), [Data] "iq" (Data) : "memory");
}
-#endif /* defined(_M_AMD64) */
+#endif /* defined(__x86_64__) */
/*** Bit manipulation ***/
-__INTRIN_INLINE unsigned char _BitScanForward(unsigned long * const Index, const unsigned long Mask)
+__INTRIN_INLINE unsigned char _BitScanForward(unsigned long * Index, unsigned long Mask)
{
__asm__("bsfl %[Mask], %[Index]" : [Index] "=r" (*Index) : [Mask] "mr" (Mask));
return Mask ? 1 : 0;
}
-__INTRIN_INLINE unsigned char _BitScanReverse(unsigned long * const Index, const unsigned long Mask)
+__INTRIN_INLINE unsigned char _BitScanReverse(unsigned long * Index, unsigned long Mask)
{
__asm__("bsrl %[Mask], %[Index]" : [Index] "=r" (*Index) : [Mask] "mr" (Mask));
return Mask ? 1 : 0;
}
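+/* Illustrative only: the reverse scan returns the index of the highest set
+   bit, i.e. floor(log2(x)) for nonzero x. Hypothetical example, not part of
+   this header: */
+static __inline__ unsigned long example_ulog2(unsigned long x)
+{
+ unsigned long index;
+ return _BitScanReverse(&index, x) ? index : 0;
+}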
/* NOTE: again, the bizarre implementation follows Visual C++ */
-__INTRIN_INLINE unsigned char _bittest(const long * const a, const long b)
+__INTRIN_INLINE unsigned char _bittest(const long * a, long b)
{
unsigned char retval;
return retval;
}
-#ifdef _M_AMD64
-__INTRIN_INLINE unsigned char _bittest64(const __int64 * const a, const __int64 b)
+#ifdef __x86_64__
+__INTRIN_INLINE unsigned char _BitScanForward64(unsigned long * Index, unsigned long long Mask)
+{
+ unsigned long long Index64;
+ __asm__("bsfq %[Mask], %[Index64]" : [Index64] "=r" (Index64) : [Mask] "mr" (Mask));
+ *Index = (unsigned long)Index64;
+ return Mask ? 1 : 0;
+}
+
+__INTRIN_INLINE unsigned char _BitScanReverse64(unsigned long * Index, unsigned long long Mask)
+{
+ unsigned long long Index64;
+ __asm__("bsrq %[Mask], %[Index64]" : [Index64] "=r" (Index64) : [Mask] "mr" (Mask));
+ *Index = (unsigned long)Index64;
+ return Mask ? 1 : 0;
+}
+
+__INTRIN_INLINE unsigned char _bittest64(const long long * a, long long b)
{
unsigned char retval;
}
#endif
-__INTRIN_INLINE unsigned char _bittestandcomplement(long * const a, const long b)
+__INTRIN_INLINE unsigned char _bittestandcomplement(long * a, long b)
{
unsigned char retval;
return retval;
}
-__INTRIN_INLINE unsigned char _bittestandreset(long * const a, const long b)
+__INTRIN_INLINE unsigned char _bittestandreset(long * a, long b)
{
unsigned char retval;
return retval;
}
-__INTRIN_INLINE unsigned char _bittestandset(long * const a, const long b)
+__INTRIN_INLINE unsigned char _bittestandset(long * a, long b)
{
unsigned char retval;
return retval;
}
+#ifdef __x86_64__
+
+__INTRIN_INLINE unsigned char _bittestandset64(long long * a, long long b)
+{
+ unsigned char retval;
+
+ if(__builtin_constant_p(b))
+ __asm__("btsq %[b], %[a]; setb %b[retval]" : [a] "+mr" (*(a + (b / 64))), [retval] "=q" (retval) : [b] "Ir" (b % 64));
+ else
+ __asm__("btsq %[b], %[a]; setb %b[retval]" : [a] "+m" (*a), [retval] "=q" (retval) : [b] "r" (b));
+
+ return retval;
+}
+
+__INTRIN_INLINE unsigned char _bittestandreset64(long long * a, long long b)
+{
+ unsigned char retval;
+
+ if(__builtin_constant_p(b))
+ __asm__("btrq %[b], %[a]; setb %b[retval]" : [a] "+mr" (*(a + (b / 64))), [retval] "=q" (retval) : [b] "Ir" (b % 64));
+ else
+ __asm__("btrq %[b], %[a]; setb %b[retval]" : [a] "+m" (*a), [retval] "=q" (retval) : [b] "r" (b));
+
+ return retval;
+}
+
+__INTRIN_INLINE unsigned char _bittestandcomplement64(long long * a, long long b)
+{
+ unsigned char retval;
+
+ if(__builtin_constant_p(b))
+ __asm__("btcq %[b], %[a]; setb %b[retval]" : [a] "+mr" (*(a + (b / 64))), [retval] "=q" (retval) : [b] "Ir" (b % 64));
+ else
+ __asm__("btcq %[b], %[a]; setb %b[retval]" : [a] "+m" (*a), [retval] "=q" (retval) : [b] "r" (b));
+
+ return retval;
+}
+
+#endif
+
__INTRIN_INLINE unsigned char _rotl8(unsigned char value, unsigned char shift)
{
unsigned char retval;
__INTRIN_INLINE unsigned int _rotl(unsigned int value, int shift)
{
- unsigned long retval;
+ unsigned int retval;
__asm__("roll %b[shift], %k[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
-#ifdef _M_AMD64
-__INTRIN_INLINE unsigned __int64 _rotl64(unsigned __int64 value, int shift)
+#ifdef __x86_64__
+__INTRIN_INLINE unsigned long long _rotl64(unsigned long long value, int shift)
{
- unsigned __int64 retval;
+ unsigned long long retval;
__asm__("rolq %b[shift], %k[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
#else
-__INTRIN_INLINE unsigned __int64 _rotl64(unsigned __int64 value, int shift)
+__INTRIN_INLINE unsigned long long _rotl64(unsigned long long value, int shift)
{
/* FIXME: this is probably not optimal */
return (value << shift) | (value >> (64 - shift));
__INTRIN_INLINE unsigned int _rotr(unsigned int value, int shift)
{
- unsigned long retval;
+ unsigned int retval;
__asm__("rorl %b[shift], %k[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
return retval;
}
return retval;
}
+#ifdef __x86_64__
+__INTRIN_INLINE unsigned long long _rotr64(unsigned long long value, int shift)
+{
+ unsigned long long retval;
+ __asm__("rorq %b[shift], %k[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
+ return retval;
+}
+#else
+__INTRIN_INLINE unsigned long long _rotr64(unsigned long long value, int shift)
+{
+ /* FIXME: this is probably not optimal */
+ return (value >> shift) | (value << (64 - shift));
+}
+#endif
+
+__INTRIN_INLINE unsigned long __cdecl _lrotl(unsigned long value, int shift)
+{
+ unsigned long retval;
+ __asm__("roll %b[shift], %k[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
+ return retval;
+}
+
+__INTRIN_INLINE unsigned long __cdecl _lrotr(unsigned long value, int shift)
+{
+ unsigned long retval;
+ __asm__("rorl %b[shift], %k[retval]" : [retval] "=rm" (retval) : "[retval]" (value), [shift] "Nc" (shift));
+ return retval;
+}
+
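+/* Quick sanity check for the rotates above: _rotl8(0x81, 1) == 0x03, and
+   with a 32-bit long, _lrotr(1UL, 1) == 0x80000000UL. */
+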
/*
NOTE: in __ll_lshift, __ll_rshift and __ull_rshift we use the "A"
constraint (edx:eax) for the Mask argument, because it's the only way GCC
can pass 64-bit operands around - passing the two 32-bit parts separately
just confuses it. Also we declare Bit as an int and then truncate it to
match Visual C++ behavior
*/
-__INTRIN_INLINE unsigned long long __ll_lshift(const unsigned long long Mask, const int Bit)
+__INTRIN_INLINE unsigned long long __ll_lshift(unsigned long long Mask, int Bit)
{
unsigned long long retval = Mask;
return retval;
}
-__INTRIN_INLINE long long __ll_rshift(const long long Mask, const int Bit)
+__INTRIN_INLINE long long __ll_rshift(long long Mask, int Bit)
{
long long retval = Mask;
return retval;
}
-__INTRIN_INLINE unsigned long long __ull_rshift(const unsigned long long Mask, int Bit)
+__INTRIN_INLINE unsigned long long __ull_rshift(unsigned long long Mask, int Bit)
{
unsigned long long retval = Mask;
return retval;
}
-#ifdef _M_AMD64
-__INTRIN_INLINE unsigned __int64 _byteswap_uint64(unsigned __int64 value)
+#ifdef __x86_64__
+__INTRIN_INLINE unsigned long long _byteswap_uint64(unsigned long long value)
{
- unsigned __int64 retval;
+ unsigned long long retval;
__asm__("bswapq %[retval]" : [retval] "=r" (retval) : "[retval]" (value));
return retval;
}
#else
-__INTRIN_INLINE unsigned __int64 _byteswap_uint64(unsigned __int64 value)
+__INTRIN_INLINE unsigned long long _byteswap_uint64(unsigned long long value)
{
union {
- unsigned __int64 int64part;
+ unsigned long long int64part;
struct {
unsigned long lowpart;
unsigned long hipart;
}
#endif
+__INTRIN_INLINE unsigned int __lzcnt(unsigned int value)
+{
+ return __builtin_clz(value);
+}
+
+__INTRIN_INLINE unsigned short __lzcnt16(unsigned short value)
+{
+ /* __builtin_clz counts within the promoted 32-bit operand, so rebase to 16 */
+ return __builtin_clz(value) - 16;
+}
+
+__INTRIN_INLINE unsigned int __popcnt(unsigned int value)
+{
+ return __builtin_popcount(value);
+}
+
+__INTRIN_INLINE unsigned short __popcnt16(unsigned short value)
+{
+ return __builtin_popcount(value);
+}
+
+#ifdef __x86_64__
+__INTRIN_INLINE unsigned long long __lzcnt64(unsigned long long value)
+{
+ return __builtin_clzll(value);
+}
+
+__INTRIN_INLINE unsigned long long __popcnt64(unsigned long long value)
+{
+ return __builtin_popcountll(value);
+}
+#endif
+
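+/* Illustrative only: with these wrappers, the Hamming distance of two words
+   is __popcnt(a ^ b). Note that only the popcount builtins are well defined
+   for a zero operand; __builtin_clz(0) is undefined, whereas the lzcnt
+   instruction would return the operand width. */
+static __inline__ unsigned int example_hamming32(unsigned int a, unsigned int b)
+{
+ return __popcnt(a ^ b);
+}
+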
/*** 64-bit math ***/
-__INTRIN_INLINE long long __emul(const int a, const int b)
+__INTRIN_INLINE long long __emul(int a, int b)
{
long long retval;
__asm__("imull %[b]" : "=A" (retval) : [a] "a" (a), [b] "rm" (b));
return retval;
}
-__INTRIN_INLINE unsigned long long __emulu(const unsigned int a, const unsigned int b)
+__INTRIN_INLINE unsigned long long __emulu(unsigned int a, unsigned int b)
{
unsigned long long retval;
__asm__("mull %[b]" : "=A" (retval) : [a] "a" (a), [b] "rm" (b));
return retval;
}
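+/* Illustrative only: the full 64-bit product makes overflow-free 0.32
+   fixed-point scaling a one-liner. Hypothetical example, not part of this
+   header: */
+static __inline__ unsigned int example_mulfrac32(unsigned int x, unsigned int frac)
+{
+ /* floor(x * frac / 2^32) */
+ return (unsigned int)(__emulu(x, frac) >> 32);
+}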
-#ifdef _M_AMD64
+__INTRIN_INLINE long long __cdecl _abs64(long long value)
+{
+ return (value >= 0) ? value : -value;
+}
+
+#ifdef __x86_64__
-__INTRIN_INLINE __int64 __mulh(__int64 a, __int64 b)
+__INTRIN_INLINE long long __mulh(long long a, long long b)
{
- __int64 retval;
+ long long retval;
__asm__("imulq %[b]" : "=d" (retval) : [a] "a" (a), [b] "rm" (b));
return retval;
}
-__INTRIN_INLINE unsigned __int64 __umulh(unsigned __int64 a, unsigned __int64 b)
+__INTRIN_INLINE unsigned long long __umulh(unsigned long long a, unsigned long long b)
{
- unsigned __int64 retval;
+ unsigned long long retval;
__asm__("mulq %[b]" : "=d" (retval) : [a] "a" (a), [b] "rm" (b));
return retval;
}
/*** Port I/O ***/
-__INTRIN_INLINE unsigned char __inbyte(const unsigned short Port)
+__INTRIN_INLINE unsigned char __inbyte(unsigned short Port)
{
unsigned char byte;
__asm__ __volatile__("inb %w[Port], %b[byte]" : [byte] "=a" (byte) : [Port] "Nd" (Port));
return byte;
}
-__INTRIN_INLINE unsigned short __inword(const unsigned short Port)
+__INTRIN_INLINE unsigned short __inword(unsigned short Port)
{
unsigned short word;
__asm__ __volatile__("inw %w[Port], %w[word]" : [word] "=a" (word) : [Port] "Nd" (Port));
return word;
}
-__INTRIN_INLINE unsigned long __indword(const unsigned short Port)
+__INTRIN_INLINE unsigned long __indword(unsigned short Port)
{
unsigned long dword;
__asm__ __volatile__("inl %w[Port], %k[dword]" : [dword] "=a" (dword) : [Port] "Nd" (Port));
);
}
-__INTRIN_INLINE void __outbyte(unsigned short const Port, const unsigned char Data)
+__INTRIN_INLINE void __outbyte(unsigned short Port, unsigned char Data)
{
__asm__ __volatile__("outb %b[Data], %w[Port]" : : [Port] "Nd" (Port), [Data] "a" (Data));
}
-__INTRIN_INLINE void __outword(unsigned short const Port, const unsigned short Data)
+__INTRIN_INLINE void __outword(unsigned short Port, unsigned short Data)
{
__asm__ __volatile__("outw %w[Data], %w[Port]" : : [Port] "Nd" (Port), [Data] "a" (Data));
}
-__INTRIN_INLINE void __outdword(unsigned short const Port, const unsigned long Data)
+__INTRIN_INLINE void __outdword(unsigned short Port, unsigned long Data)
{
__asm__ __volatile__("outl %k[Data], %w[Port]" : : [Port] "Nd" (Port), [Data] "a" (Data));
}
-__INTRIN_INLINE void __outbytestring(unsigned short Port, unsigned char * const Buffer, const unsigned long Count)
+__INTRIN_INLINE void __outbytestring(unsigned short Port, unsigned char * Buffer, unsigned long Count)
{
__asm__ __volatile__("rep; outsb" : : [Port] "d" (Port), [Buffer] "S" (Buffer), "c" (Count));
}
-__INTRIN_INLINE void __outwordstring(unsigned short Port, unsigned short * const Buffer, const unsigned long Count)
+__INTRIN_INLINE void __outwordstring(unsigned short Port, unsigned short * Buffer, unsigned long Count)
{
__asm__ __volatile__("rep; outsw" : : [Port] "d" (Port), [Buffer] "S" (Buffer), "c" (Count));
}
-__INTRIN_INLINE void __outdwordstring(unsigned short Port, unsigned long * const Buffer, const unsigned long Count)
+__INTRIN_INLINE void __outdwordstring(unsigned short Port, unsigned long * Buffer, unsigned long Count)
{
__asm__ __volatile__("rep; outsl" : : [Port] "d" (Port), [Buffer] "S" (Buffer), "c" (Count));
}
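+/* Illustrative only: the classic legacy-hardware poll, reading the i8042
+   status port (0x64) until the output-buffer-full bit is set, then fetching
+   the byte from the data port (0x60). Conventional PC/AT port numbers, shown
+   purely as an example: */
+static __inline__ unsigned char example_read_kbd_byte(void)
+{
+ while ((__inbyte(0x64) & 0x01) == 0)
+ {
+  /* wait for OBF */
+ }
+ return __inbyte(0x60);
+}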
/*** System information ***/
-__INTRIN_INLINE void __cpuid(int CPUInfo[], const int InfoType)
+__INTRIN_INLINE void __cpuid(int CPUInfo[4], int InfoType)
{
__asm__ __volatile__("cpuid" : "=a" (CPUInfo[0]), "=b" (CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) : "a" (InfoType));
}
+__INTRIN_INLINE void __cpuidex(int CPUInfo[4], int InfoType, int ECXValue)
+{
+ __asm__ __volatile__("cpuid" : "=a" (CPUInfo[0]), "=b" (CPUInfo[1]), "=c" (CPUInfo[2]), "=d" (CPUInfo[3]) : "a" (InfoType), "c" (ECXValue));
+}
+
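+/* Illustrative only: leaf 0 places the vendor string in EBX, EDX, ECX (in
+   that order). Hypothetical example, not part of this header: */
+static __inline__ void example_cpu_vendor(char vendor[13])
+{
+ int info[4];
+ __cpuid(info, 0);
+ memcpy(&vendor[0], &info[1], 4); /* EBX */
+ memcpy(&vendor[4], &info[3], 4); /* EDX */
+ memcpy(&vendor[8], &info[2], 4); /* ECX */
+ vendor[12] = '\0';
+}
+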
__INTRIN_INLINE unsigned long long __rdtsc(void)
{
-#ifdef _M_AMD64
+#ifdef __x86_64__
unsigned long long low, high;
__asm__ __volatile__("rdtsc" : "=a"(low), "=d"(high));
return low | (high << 32);
}
#endif
+__INTRIN_INLINE void __ud2(void)
+{
+ __asm__("ud2");
+}
+
__INTRIN_INLINE void __int2c(void)
{
__asm__("int $0x2c");
/*** Protected memory management ***/
-#ifdef _M_AMD64
+#ifdef __x86_64__
-__INTRIN_INLINE void __writecr0(const unsigned __int64 Data)
+__INTRIN_INLINE void __writecr0(unsigned long long Data)
{
__asm__("mov %[Data], %%cr0" : : [Data] "r" (Data) : "memory");
}
-__INTRIN_INLINE void __writecr3(const unsigned __int64 Data)
+__INTRIN_INLINE void __writecr3(unsigned long long Data)
{
__asm__("mov %[Data], %%cr3" : : [Data] "r" (Data) : "memory");
}
-__INTRIN_INLINE void __writecr4(const unsigned __int64 Data)
+__INTRIN_INLINE void __writecr4(unsigned long long Data)
{
__asm__("mov %[Data], %%cr4" : : [Data] "r" (Data) : "memory");
}
-__INTRIN_INLINE void __writecr8(const unsigned __int64 Data)
+__INTRIN_INLINE void __writecr8(unsigned long long Data)
{
__asm__("mov %[Data], %%cr8" : : [Data] "r" (Data) : "memory");
}
-__INTRIN_INLINE unsigned __int64 __readcr0(void)
+__INTRIN_INLINE unsigned long long __readcr0(void)
{
- unsigned __int64 value;
+ unsigned long long value;
__asm__ __volatile__("mov %%cr0, %[value]" : [value] "=r" (value));
return value;
}
-__INTRIN_INLINE unsigned __int64 __readcr2(void)
+__INTRIN_INLINE unsigned long long __readcr2(void)
{
- unsigned __int64 value;
+ unsigned long long value;
__asm__ __volatile__("mov %%cr2, %[value]" : [value] "=r" (value));
return value;
}
-__INTRIN_INLINE unsigned __int64 __readcr3(void)
+__INTRIN_INLINE unsigned long long __readcr3(void)
{
- unsigned __int64 value;
+ unsigned long long value;
__asm__ __volatile__("mov %%cr3, %[value]" : [value] "=r" (value));
return value;
}
-__INTRIN_INLINE unsigned __int64 __readcr4(void)
+__INTRIN_INLINE unsigned long long __readcr4(void)
{
- unsigned __int64 value;
+ unsigned long long value;
__asm__ __volatile__("mov %%cr4, %[value]" : [value] "=r" (value));
return value;
}
-__INTRIN_INLINE unsigned __int64 __readcr8(void)
+__INTRIN_INLINE unsigned long long __readcr8(void)
{
- unsigned __int64 value;
+ unsigned long long value;
__asm__ __volatile__("movq %%cr8, %q[value]" : [value] "=r" (value));
return value;
}
-#else /* _M_AMD64 */
+#else /* __x86_64__ */
-__INTRIN_INLINE void __writecr0(const unsigned int Data)
+__INTRIN_INLINE void __writecr0(unsigned int Data)
{
__asm__("mov %[Data], %%cr0" : : [Data] "r" (Data) : "memory");
}
-__INTRIN_INLINE void __writecr3(const unsigned int Data)
+__INTRIN_INLINE void __writecr3(unsigned int Data)
{
__asm__("mov %[Data], %%cr3" : : [Data] "r" (Data) : "memory");
}
-__INTRIN_INLINE void __writecr4(const unsigned int Data)
+__INTRIN_INLINE void __writecr4(unsigned int Data)
{
__asm__("mov %[Data], %%cr4" : : [Data] "r" (Data) : "memory");
}
+__INTRIN_INLINE void __writecr8(unsigned int Data)
+{
+ __asm__("mov %[Data], %%cr8" : : [Data] "r" (Data) : "memory");
+}
+
__INTRIN_INLINE unsigned long __readcr0(void)
{
unsigned long value;
return value;
}
-#endif /* _M_AMD64 */
+__INTRIN_INLINE unsigned long __readcr8(void)
+{
+ unsigned long value;
+ __asm__ __volatile__("mov %%cr8, %[value]" : [value] "=r" (value));
+ return value;
+}
-#ifdef _M_AMD64
+#endif /* __x86_64__ */
-__INTRIN_INLINE unsigned __int64 __readdr(unsigned int reg)
+#ifdef __x86_64__
+
+__INTRIN_INLINE unsigned long long __readdr(unsigned int reg)
{
- unsigned __int64 value;
+ unsigned long long value;
switch (reg)
{
case 0:
return value;
}
-__INTRIN_INLINE void __writedr(unsigned reg, unsigned __int64 value)
+__INTRIN_INLINE void __writedr(unsigned reg, unsigned long long value)
{
switch (reg)
{
}
}
-#else /* _M_AMD64 */
+#else /* __x86_64__ */
__INTRIN_INLINE unsigned int __readdr(unsigned int reg)
{
}
}
-#endif /* _M_AMD64 */
+#endif /* __x86_64__ */
-__INTRIN_INLINE void __invlpg(void * const Address)
+__INTRIN_INLINE void __invlpg(void *Address)
{
__asm__("invlpg %[Address]" : : [Address] "m" (*((unsigned char *)(Address))) : "memory");
}
__INTRIN_INLINE unsigned long long __readmsr(unsigned long reg)
{
-#ifdef _M_AMD64
+#ifdef __x86_64__
unsigned long low, high;
__asm__ __volatile__("rdmsr" : "=a" (low), "=d" (high) : "c" (reg));
return ((unsigned long long)high << 32) | low;
#endif
}
-__INTRIN_INLINE void __writemsr(const unsigned long Register, const unsigned long long Value)
+__INTRIN_INLINE void __writemsr(unsigned long Register, unsigned long long Value)
{
-#ifdef _M_AMD64
+#ifdef __x86_64__
__asm__ __volatile__("wrmsr" : : "a" (Value), "d" (Value >> 32), "c" (Register));
#else
__asm__ __volatile__("wrmsr" : : "A" (Value), "c" (Register));
}
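+/* Illustrative only: IA32_EFER is MSR 0xC0000080 and its NXE flag is bit 11;
+   a kernel enabling no-execute support would do something like this
+   (architectural numbers shown purely as an example): */
+static __inline__ void example_enable_nx(void)
+{
+ __writemsr(0xC0000080, __readmsr(0xC0000080) | (1ULL << 11));
+}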
/* NOTE: an immediate value for 'a' will raise an ICE in Visual C++ */
-__INTRIN_INLINE unsigned long __segmentlimit(const unsigned long a)
+__INTRIN_INLINE unsigned long __segmentlimit(unsigned long a)
{
unsigned long retval;
__asm__ __volatile__("lsl %[a], %[retval]" : [retval] "=r" (retval) : [a] "rm" (a));