Merge from amd64-branch:
diff --git a/reactos/ntoskrnl/include/internal/ntoskrnl.h b/reactos/ntoskrnl/include/internal/ntoskrnl.h
index fe841c0..0823f1d 100644
@@ -6,7 +6,7 @@
  */
 #define PLACE_IN_SECTION(s)    __attribute__((section (s)))
 #ifdef __GNUC__
-#define INIT_FUNCTION          PLACE_IN_SECTION("init")
+#define INIT_FUNCTION          PLACE_IN_SECTION("INIT")
 #define PAGE_LOCKED_FUNCTION   PLACE_IN_SECTION("pagelk")
 #define PAGE_UNLOCKED_FUNCTION PLACE_IN_SECTION("pagepo")
 #else
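
Note on the hunk above: the GCC-only INIT_FUNCTION decoration now places one-time
initialization code in a section named "INIT" instead of "init". PE section names
are matched case-sensitively, and "INIT" is presumably the spelling the memory
manager looks for when it discards initialization code after boot, so the
lowercase name would have kept that code resident. A minimal sketch of the
intended use (the routine name is hypothetical):

    INIT_FUNCTION
    NTSTATUS
    NTAPI
    ExpInitializeSomething(VOID)
    {
        /* Runs exactly once during kernel startup; after boot, the
           pages backing the "INIT" section can be freed. */
        return STATUS_SUCCESS;
    }
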
@@ -24,6 +24,8 @@
 #undef  PsGetCurrentProcess
 #define PsGetCurrentProcess _PsGetCurrentProcess
 
+#define RVA(m, b) ((PVOID)((ULONG_PTR)(b) + (ULONG_PTR)(m)))
+
 //
 // We are very lazy on ARM -- we just import intrinsics
 // Question: Why wasn't this done for x86 too? (see fastintrlck.asm)
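
Note on the RVA() addition above: this is the standard image-offset idiom, adding
a byte offset to a module base and returning a pointer. Both operands are cast to
ULONG_PTR before the addition, so the macro behaves the same on 32-bit and 64-bit
builds, which is presumably why it arrives with the amd64 merge. A sketch of
typical use, walking from a mapped image base to its NT headers (ImageBase is a
hypothetical variable here):

    PIMAGE_DOS_HEADER DosHeader = (PIMAGE_DOS_HEADER)ImageBase;
    PIMAGE_NT_HEADERS NtHeaders = RVA(ImageBase, DosHeader->e_lfanew);
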
 #define InterlockedOr                _InterlockedOr
 #define InterlockedAnd               _InterlockedAnd
 
+//
+// Use inlined versions of fast/guarded mutex routines
+//
+#define ExEnterCriticalRegionAndAcquireFastMutexUnsafe _ExEnterCriticalRegionAndAcquireFastMutexUnsafe
+#define ExReleaseFastMutexUnsafeAndLeaveCriticalRegion _ExReleaseFastMutexUnsafeAndLeaveCriticalRegion
+#define ExAcquireFastMutex _ExAcquireFastMutex
+#define ExReleaseFastMutex _ExReleaseFastMutex
+#define ExAcquireFastMutexUnsafe _ExAcquireFastMutexUnsafe
+#define ExReleaseFastMutexUnsafe _ExReleaseFastMutexUnsafe
+#define ExTryToAcquireFastMutex _ExTryToAcquireFastMutex
+
+#define KeInitializeGuardedMutex _KeInitializeGuardedMutex
+#define KeAcquireGuardedMutex _KeAcquireGuardedMutex
+#define KeReleaseGuardedMutex _KeReleaseGuardedMutex
+#define KeAcquireGuardedMutexUnsafe _KeAcquireGuardedMutexUnsafe
+#define KeReleaseGuardedMutexUnsafe _KeReleaseGuardedMutexUnsafe
+#define KeTryToAcquireGuardedMutex _KeTryToAcquireGuardedMutex
+
 #include "ke.h"
 #include "ob.h"
 #include "mm.h"
@@ -63,6 +83,7 @@
 #include "../kdbg/kdb.h"
 #endif
 #include "dbgk.h"
+#include "spinlock.h"
 #include "tag.h"
 #include "test.h"
 #include "inbv.h"
@@ -125,6 +146,7 @@ typedef struct _INFORMATION_CLASS_INFO
 
 #endif
 
+#if defined (_M_IX86) || defined(_M_AMD64)
 C_ASSERT(FIELD_OFFSET(KUSER_SHARED_DATA, SystemCall) == 0x300);
 C_ASSERT(FIELD_OFFSET(KTHREAD, InitialStack) == KTHREAD_INITIAL_STACK);
 C_ASSERT(FIELD_OFFSET(KTHREAD, Teb) == KTHREAD_TEB);
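
Note on the guard added above: these layout asserts check offsets that are
hard-coded into the x86/amd64 assembly stubs, and they simply do not hold on
other targets such as ARM, so fencing them with _M_IX86/_M_AMD64 is the right
call. For reference, C_ASSERT() is the usual compile-time check from the Windows
headers: an array type whose size turns negative when the condition is false, so
any layout drift breaks the build instead of silently corrupting the assembly:

    #define C_ASSERT(e) typedef char __C_ASSERT__[(e) ? 1 : -1]
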
@@ -136,10 +158,11 @@ C_ASSERT(FIELD_OFFSET(KTHREAD, TrapFrame) == KTHREAD_TRAP_FRAME);
 C_ASSERT(FIELD_OFFSET(KTHREAD, CallbackStack) == KTHREAD_CALLBACK_STACK);
 C_ASSERT(FIELD_OFFSET(KTHREAD, ApcState.Process) == KTHREAD_APCSTATE_PROCESS);
 C_ASSERT(FIELD_OFFSET(KPROCESS, DirectoryTableBase) == KPROCESS_DIRECTORY_TABLE_BASE);
-C_ASSERT(FIELD_OFFSET(KPCR, Tib.ExceptionList) == KPCR_EXCEPTION_LIST);
+#endif
 
-C_ASSERT(FIELD_OFFSET(KPCR, Self) == KPCR_SELF);
 #ifdef _M_IX86
+C_ASSERT(FIELD_OFFSET(KPCR, Tib.ExceptionList) == KPCR_EXCEPTION_LIST);
+C_ASSERT(FIELD_OFFSET(KPCR, Self) == KPCR_SELF);
 C_ASSERT(FIELD_OFFSET(KPCR, IRR) == KPCR_IRR);
 C_ASSERT(FIELD_OFFSET(KPCR, IDR) == KPCR_IDR);
 C_ASSERT(FIELD_OFFSET(KPCR, Irql) == KPCR_IRQL);
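
Note on the final hunk: moving the Tib.ExceptionList and Self asserts under
_M_IX86 is consistent, since the SEH registration list in the TIB and the
FS-relative self pointer are x86 conventions (amd64 addresses its PCR through GS
and unwinds via tables, so its KPCR is laid out differently). A simplified sketch
of why KPCR_SELF must equal FIELD_OFFSET(KPCR, Self) on x86: the current PCR is
recovered by reading the self pointer back through FS at that fixed offset:

    FORCEINLINE
    PKPCR
    KeGetPcr(VOID)
    {
        return (PKPCR)__readfsdword(FIELD_OFFSET(KPCR, Self));
    }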