[NTOS:OB] Rename object types to their official names to satisfy WinDbg
[reactos.git] / ntoskrnl / include / internal / ntoskrnl.h
#pragma once

/*
 * Use these to place a function in a specific section of the executable
 */
#ifdef __GNUC__
#define INIT_SECTION __attribute__((section ("INIT")))
#define INIT_FUNCTION INIT_SECTION
#else
#define INIT_SECTION /* Done via alloc_text for MSC */
#define INIT_FUNCTION INIT_SECTION
#endif
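
/*
 * Usage sketch (illustrative only; ExpInitializeExample is a hypothetical
 * routine): tagging a one-time boot function with INIT_FUNCTION lets GCC
 * place it in the discardable INIT section, while MSVC builds rely on
 * #pragma alloc_text(INIT, ...) instead, as noted above.
 */
#if 0
INIT_FUNCTION
VOID
NTAPI
ExpInitializeExample(VOID)
{
    /* one-time initialization; the INIT section is discarded after boot */
}
#endif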

#ifdef _NTOSKRNL_

#ifndef _ARM_
#define KeGetCurrentThread _KeGetCurrentThread
#define KeGetPreviousMode _KeGetPreviousMode
#endif
#undef PsGetCurrentProcess
#define PsGetCurrentProcess _PsGetCurrentProcess

#define RVA(m, b) ((PVOID)((ULONG_PTR)(b) + (ULONG_PTR)(m)))
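
/*
 * Composition sketch for the RVA helper (illustrative only; ImageBase and
 * ExportDirRva are hypothetical): adding a relative virtual address to a
 * module base yields an absolute pointer into that image.
 */
#if 0
    PIMAGE_EXPORT_DIRECTORY ExportDir = RVA(ExportDirRva, ImageBase);
#endif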

//
// We are very lazy on ARM -- we just import intrinsics
// Question: Why wasn't this done for x86 too? (see fastintrlck.asm)
//
#define InterlockedDecrement _InterlockedDecrement
#define InterlockedDecrement16 _InterlockedDecrement16
#define InterlockedIncrement _InterlockedIncrement
#define InterlockedIncrement16 _InterlockedIncrement16
#define InterlockedCompareExchange _InterlockedCompareExchange
#define InterlockedCompareExchange16 _InterlockedCompareExchange16
#define InterlockedCompareExchange64 _InterlockedCompareExchange64
#define InterlockedExchange _InterlockedExchange
#define InterlockedExchangeAdd _InterlockedExchangeAdd
#define InterlockedOr _InterlockedOr
#define InterlockedAnd _InterlockedAnd
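
/*
 * With the mappings above, a plain call like the sketch below compiles to the
 * corresponding compiler intrinsic instead of an imported helper (Counter is
 * a hypothetical variable, shown only for illustration).
 */
#if 0
    static LONG Counter;
    LONG NewValue = InterlockedIncrement(&Counter);   /* emits _InterlockedIncrement */
#endif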

//
// Use inlined versions of fast/guarded mutex routines
//
#define ExEnterCriticalRegionAndAcquireFastMutexUnsafe _ExEnterCriticalRegionAndAcquireFastMutexUnsafe
#define ExReleaseFastMutexUnsafeAndLeaveCriticalRegion _ExReleaseFastMutexUnsafeAndLeaveCriticalRegion
#define ExAcquireFastMutex _ExAcquireFastMutex
#define ExReleaseFastMutex _ExReleaseFastMutex
#define ExAcquireFastMutexUnsafe _ExAcquireFastMutexUnsafe
#define ExReleaseFastMutexUnsafe _ExReleaseFastMutexUnsafe
#define ExTryToAcquireFastMutex _ExTryToAcquireFastMutex

#define KeInitializeGuardedMutex _KeInitializeGuardedMutex
#define KeAcquireGuardedMutex _KeAcquireGuardedMutex
#define KeReleaseGuardedMutex _KeReleaseGuardedMutex
#define KeAcquireGuardedMutexUnsafe _KeAcquireGuardedMutexUnsafe
#define KeReleaseGuardedMutexUnsafe _KeReleaseGuardedMutexUnsafe
#define KeTryToAcquireGuardedMutex _KeTryToAcquireGuardedMutex
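
/*
 * Callers keep using the public names; the defines above route them to the
 * inlined implementations. A minimal sketch (ExampleMutex is a hypothetical
 * FAST_MUTEX initialized elsewhere):
 */
#if 0
    ExAcquireFastMutex(&ExampleMutex);    /* expands to _ExAcquireFastMutex, raises to APC_LEVEL */
    /* ... access the data protected by the mutex ... */
    ExReleaseFastMutex(&ExampleMutex);    /* expands to _ExReleaseFastMutex */
#endif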

#include "tag.h"
#include "ke.h"
#include "ob.h"
#include "mm.h"
#include "ex.h"
#include "cm.h"
#include "ps.h"
#include "cc.h"
#include "io.h"
#include "po.h"
#include "se.h"
#include "ldr.h"
#ifndef _WINKD_
#include "kd.h"
#else
#include "kd64.h"
#endif
#include "fsrtl.h"
#include "lpc.h"
#include "rtl.h"
#ifdef KDBG
#include <kdbg/kdb.h>
#endif
#include "dbgk.h"
#include "spinlock.h"
#include "test.h"
#include "inbv.h"
#include "vdm.h"
#include "hal.h"
#include "hdl.h"
#include "arch/intrin_i.h"

/*
 * generic information class probing code
 */

#define ICIF_QUERY               0x1
#define ICIF_SET                 0x2
#define ICIF_QUERY_SIZE_VARIABLE 0x4
#define ICIF_SET_SIZE_VARIABLE   0x8
#define ICIF_SIZE_VARIABLE (ICIF_QUERY_SIZE_VARIABLE | ICIF_SET_SIZE_VARIABLE)

typedef struct _INFORMATION_CLASS_INFO
{
    ULONG RequiredSizeQUERY;
    ULONG RequiredSizeSET;
    ULONG AlignmentSET;
    ULONG AlignmentQUERY;
    ULONG Flags;
} INFORMATION_CLASS_INFO, *PINFORMATION_CLASS_INFO;

#define ICI_SQ_SAME(Type, Alignment, Flags) \
    { Type, Type, Alignment, Alignment, Flags }

#define ICI_SQ(TypeQuery, TypeSet, AlignmentQuery, AlignmentSet, Flags) \
    { TypeQuery, TypeSet, AlignmentQuery, AlignmentSet, Flags }

//
// TEMPORARY
//
#define IQS_SAME(Type, Alignment, Flags) \
    { sizeof(Type), sizeof(Type), sizeof(Alignment), sizeof(Alignment), Flags }

#define IQS(TypeQuery, TypeSet, AlignmentQuery, AlignmentSet, Flags) \
    { sizeof(TypeQuery), sizeof(TypeSet), sizeof(AlignmentQuery), sizeof(AlignmentSet), Flags }
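
/*
 * Sketch of how an information class probe table is built from these macros
 * (the EXAMPLE_* types and classes are hypothetical): each entry records the
 * required buffer size, the alignment type and the valid operations.
 */
#if 0
static const INFORMATION_CLASS_INFO ExiExampleClasses[] =
{
    /* ExampleBasicInformation: fixed-size, valid for query and set */
    IQS_SAME(EXAMPLE_BASIC_INFORMATION, ULONG, ICIF_QUERY | ICIF_SET),

    /* ExampleNameInformation: query only, caller buffer may be larger */
    IQS_SAME(EXAMPLE_NAME_INFORMATION, ULONG, ICIF_QUERY | ICIF_QUERY_SIZE_VARIABLE),
};
#endif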

/*
 * Use IsPointerOffset to test whether a pointer should be interpreted as an
 * offset or as a pointer
 */
#if defined(_X86_) || defined(_M_AMD64) || defined(_MIPS_) || defined(_PPC_) || defined(_ARM_)

/* on these architectures kernel-mode pointers have the most significant bit
   set, so a non-negative value can only be an offset */
#define IsPointerOffset(Ptr) ((LONG_PTR)(Ptr) >= 0)

#elif defined(_IA64_)

/* on Itanium, if any of the 24 most significant bits are set, we're not
   dealing with an offset anymore */
#define IsPointerOffset(Ptr) (((ULONG_PTR)(Ptr) & 0xFFFFFF0000000000ULL) == 0)

#else
#error IsPointerOffset() needs to be defined for this architecture
#endif
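
/*
 * Usage sketch (purely illustrative; ExampleStruct and its Owner field are
 * hypothetical): a field that may hold either an offset from the structure
 * base or an absolute kernel pointer can be disambiguated and, if needed,
 * converted with the RVA helper defined above.
 */
#if 0
    if (IsPointerOffset(ExampleStruct->Owner))
    {
        /* MSB clear: interpret the value as an offset from the base */
        Owner = RVA(ExampleStruct->Owner, ExampleStruct);
    }
    else
    {
        /* MSB set: it is already an absolute kernel-mode pointer */
        Owner = ExampleStruct->Owner;
    }
#endif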

#endif

#ifdef _M_IX86
C_ASSERT(FIELD_OFFSET(KUSER_SHARED_DATA, SystemCall) == 0x300);

C_ASSERT(FIELD_OFFSET(KTHREAD, InitialStack) == KTHREAD_INITIAL_STACK);
C_ASSERT(FIELD_OFFSET(KTHREAD, KernelStack) == KTHREAD_KERNEL_STACK);
C_ASSERT(FIELD_OFFSET(KTHREAD, SystemAffinityActive) == FIELD_OFFSET(KTHREAD, WaitBlock) + FIELD_OFFSET(KWAIT_BLOCK, SpareByte));
C_ASSERT(FIELD_OFFSET(KTHREAD, ApcState.Process) == KTHREAD_APCSTATE_PROCESS);
C_ASSERT(FIELD_OFFSET(KTHREAD, ApcQueueable) == FIELD_OFFSET(KTHREAD, ApcState.UserApcPending) + 1);
C_ASSERT(FIELD_OFFSET(KTHREAD, ApcQueueable) == 0x3F);
C_ASSERT(FIELD_OFFSET(KTHREAD, NextProcessor) == 0x40);
C_ASSERT(FIELD_OFFSET(KTHREAD, DeferredProcessor) == 0x41);
C_ASSERT(FIELD_OFFSET(KTHREAD, AdjustReason) == 0x42);
C_ASSERT(FIELD_OFFSET(KTHREAD, NpxState) == KTHREAD_NPX_STATE);
C_ASSERT(FIELD_OFFSET(KTHREAD, Alertable) == 0x58);
C_ASSERT(FIELD_OFFSET(KTHREAD, SwapBusy) == 0x05D);
C_ASSERT(FIELD_OFFSET(KTHREAD, Teb) == KTHREAD_TEB);
C_ASSERT(FIELD_OFFSET(KTHREAD, Timer) == 0x078);
C_ASSERT(FIELD_OFFSET(KTHREAD, ThreadFlags) == 0x0A0);
C_ASSERT(FIELD_OFFSET(KTHREAD, WaitBlock) == 0x0A8);
C_ASSERT(FIELD_OFFSET(KTHREAD, WaitBlockFill0) == 0x0A8);
C_ASSERT(FIELD_OFFSET(KTHREAD, QueueListEntry) == 0x108);
C_ASSERT(FIELD_OFFSET(KTHREAD, PreviousMode) == KTHREAD_PREVIOUS_MODE);
C_ASSERT(FIELD_OFFSET(KTHREAD, PreviousMode) == FIELD_OFFSET(KTHREAD, WaitBlock) + sizeof(KWAIT_BLOCK) + FIELD_OFFSET(KWAIT_BLOCK, SpareByte));
C_ASSERT(FIELD_OFFSET(KTHREAD, ResourceIndex) == FIELD_OFFSET(KTHREAD, WaitBlock) + 2*sizeof(KWAIT_BLOCK) + FIELD_OFFSET(KWAIT_BLOCK, SpareByte));
C_ASSERT(FIELD_OFFSET(KTHREAD, LargeStack) == FIELD_OFFSET(KTHREAD, WaitBlock) + 3*sizeof(KWAIT_BLOCK) + FIELD_OFFSET(KWAIT_BLOCK, SpareByte));
C_ASSERT(FIELD_OFFSET(KTHREAD, TrapFrame) == KTHREAD_TRAP_FRAME);
C_ASSERT(FIELD_OFFSET(KTHREAD, CallbackStack) == KTHREAD_CALLBACK_STACK);
C_ASSERT(FIELD_OFFSET(KTHREAD, ServiceTable) == KTHREAD_SERVICE_TABLE);
C_ASSERT(FIELD_OFFSET(KTHREAD, FreezeCount) == FIELD_OFFSET(KTHREAD, SavedApcState.UserApcPending) + 1);
C_ASSERT(FIELD_OFFSET(KTHREAD, Quantum) == FIELD_OFFSET(KTHREAD, SuspendApc.SpareByte0));
C_ASSERT(FIELD_OFFSET(KTHREAD, QuantumReset) == FIELD_OFFSET(KTHREAD, SuspendApc.SpareByte1));
C_ASSERT(FIELD_OFFSET(KTHREAD, KernelTime) == FIELD_OFFSET(KTHREAD, SuspendApc.SpareLong0));
C_ASSERT(FIELD_OFFSET(KTHREAD, TlsArray) == FIELD_OFFSET(KTHREAD, SuspendApc.SystemArgument1));
C_ASSERT(FIELD_OFFSET(KTHREAD, LegoData) == FIELD_OFFSET(KTHREAD, SuspendApc.SystemArgument2));
C_ASSERT(FIELD_OFFSET(KTHREAD, PowerState) == FIELD_OFFSET(KTHREAD, SuspendApc.Inserted) + 1);
C_ASSERT(sizeof(KTHREAD) == 0x1B8);

C_ASSERT(FIELD_OFFSET(KPROCESS, DirectoryTableBase) == KPROCESS_DIRECTORY_TABLE_BASE);

C_ASSERT(FIELD_OFFSET(KPCR, NtTib.ExceptionList) == KPCR_EXCEPTION_LIST);
C_ASSERT(FIELD_OFFSET(KPCR, SelfPcr) == KPCR_SELF);
C_ASSERT(FIELD_OFFSET(KPCR, IRR) == KPCR_IRR);
C_ASSERT(FIELD_OFFSET(KPCR, IDR) == KPCR_IDR);
C_ASSERT(FIELD_OFFSET(KPCR, Irql) == KPCR_IRQL);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, CurrentThread) == KPCR_CURRENT_THREAD);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, NextThread) == KPCR_PRCB_NEXT_THREAD);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, NpxThread) == KPCR_NPX_THREAD);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) == KPCR_PRCB_DATA);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, KeSystemCalls) == KPCR_SYSTEM_CALLS);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, DpcData) + FIELD_OFFSET(KDPC_DATA, DpcQueueDepth) == KPCR_PRCB_DPC_QUEUE_DEPTH);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, DpcData) + 16 == KPCR_PRCB_DPC_COUNT);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, DpcStack) == KPCR_PRCB_DPC_STACK);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, TimerRequest) == KPCR_PRCB_TIMER_REQUEST);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, MaximumDpcQueueDepth) == KPCR_PRCB_MAXIMUM_DPC_QUEUE_DEPTH);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, DpcRequestRate) == KPCR_PRCB_DPC_REQUEST_RATE);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, DpcInterruptRequested) == KPCR_PRCB_DPC_INTERRUPT_REQUESTED);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, DpcRoutineActive) == KPCR_PRCB_DPC_ROUTINE_ACTIVE);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, DpcLastCount) == KPCR_PRCB_DPC_LAST_COUNT);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, TimerRequest) == KPCR_PRCB_TIMER_REQUEST);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, QuantumEnd) == KPCR_PRCB_QUANTUM_END);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, DeferredReadyListHead) == KPCR_PRCB_DEFERRED_READY_LIST_HEAD);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, PowerState) == KPCR_PRCB_POWER_STATE_IDLE_FUNCTION);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, PrcbLock) == KPCR_PRCB_PRCB_LOCK);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, DpcStack) == KPCR_PRCB_DPC_STACK);
C_ASSERT(FIELD_OFFSET(KIPCR, PrcbData) + FIELD_OFFSET(KPRCB, IdleSchedule) == KPCR_PRCB_IDLE_SCHEDULE);
C_ASSERT(sizeof(FX_SAVE_AREA) == SIZEOF_FX_SAVE_AREA);

/* Platform specific checks */
C_ASSERT(FIELD_OFFSET(KPROCESS, IopmOffset) == KPROCESS_IOPM_OFFSET);
C_ASSERT(FIELD_OFFSET(KPROCESS, LdtDescriptor) == KPROCESS_LDT_DESCRIPTOR0);
C_ASSERT(FIELD_OFFSET(KTSS, Esp0) == KTSS_ESP0);
C_ASSERT(FIELD_OFFSET(KTSS, IoMapBase) == KTSS_IOMAPBASE);
#endif