/* ntoskrnl/include/internal/i386/ke.h */

#pragma once

#ifndef __ASM__

#include "intrin_i.h"

//
// Thread Dispatcher Header DebugActive Mask
//
#define DR_MASK(x) (1 << (x))
#define DR_REG_MASK 0x4F

//
// INT3 is 1 byte long
//
#define KD_BREAKPOINT_TYPE UCHAR
#define KD_BREAKPOINT_SIZE sizeof(UCHAR)
#define KD_BREAKPOINT_VALUE 0xCC

//
// Macros for getting and setting special purpose registers in portable code
//
#define KeGetContextPc(Context) \
    ((Context)->Eip)

#define KeSetContextPc(Context, ProgramCounter) \
    ((Context)->Eip = (ProgramCounter))

#define KeGetTrapFramePc(TrapFrame) \
    ((TrapFrame)->Eip)

#define KiGetLinkedTrapFrame(x) \
    (PKTRAP_FRAME)((x)->Edx)

#define KeGetContextReturnRegister(Context) \
    ((Context)->Eax)

#define KeSetContextReturnRegister(Context, ReturnValue) \
    ((Context)->Eax = (ReturnValue))

//
// Macro to get trap and exception frame from a thread stack
//
#define KeGetTrapFrame(Thread) \
    (PKTRAP_FRAME)((ULONG_PTR)((Thread)->InitialStack) - \
                   sizeof(KTRAP_FRAME) - \
                   sizeof(FX_SAVE_AREA))

#define KeGetExceptionFrame(Thread) \
    NULL

//
// Macro to get context switches from the PRCB
// All architectures but x86 have it in the PRCB's KeContextSwitches
//
#define KeGetContextSwitches(Prcb) \
    CONTAINING_RECORD(Prcb, KIPCR, PrcbData)->ContextSwitches

//
// Macro to get the second-level cache size. The field name differs between
// CISC and RISC architectures, as the former has a unified I/D cache.
//
#define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelCacheSize

//
// Returns the Interrupt State from a Trap Frame.
// ON = TRUE, OFF = FALSE
//
#define KeGetTrapFrameInterruptState(TrapFrame) \
    BooleanFlagOn((TrapFrame)->EFlags, EFLAGS_INTERRUPT_MASK)

//
// Flags for exiting a trap
//
#define KTE_SKIP_PM_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipPreviousMode = TRUE } }).Bits)
#define KTE_SKIP_SEG_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipSegments = TRUE } }).Bits)
#define KTE_SKIP_VOL_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipVolatiles = TRUE } }).Bits)

typedef union _KTRAP_EXIT_SKIP_BITS
{
    struct
    {
        UCHAR SkipPreviousMode:1;
        UCHAR SkipSegments:1;
        UCHAR SkipVolatiles:1;
        UCHAR Reserved:5;
    };
    UCHAR Bits;
} KTRAP_EXIT_SKIP_BITS, *PKTRAP_EXIT_SKIP_BITS;


//
// Flags used by the VDM/V8086 emulation engine for determining instruction prefixes
//
#define PFX_FLAG_ES 0x00000100
#define PFX_FLAG_CS 0x00000200
#define PFX_FLAG_SS 0x00000400
#define PFX_FLAG_DS 0x00000800
#define PFX_FLAG_FS 0x00001000
#define PFX_FLAG_GS 0x00002000
#define PFX_FLAG_OPER32 0x00004000
#define PFX_FLAG_ADDR32 0x00008000
#define PFX_FLAG_LOCK 0x00010000
#define PFX_FLAG_REPNE 0x00020000
#define PFX_FLAG_REP 0x00040000

//
// VDM Helper Macros
//
// All VDM/V8086 opcode emulators have the same FASTCALL function definition.
// We need to keep 2 parameters while the original ASM implementation uses 4:
// TrapFrame, PrefixFlags, Eip, InstructionSize.
//
// We pass the trap frame and prefix flags in our two parameters.
//
// We then realize that since the smallest prefix flag is 0x100, this gives us
// a count of up to 0xFF. So we OR the instruction size into the prefix flags.
//
// We further realize that we always have access to EIP from the trap frame, and
// that if we want the *current instruction* EIP, we simply have to add the
// instruction size *MINUS ONE*, and that gives us the EIP we should be looking
// at now, so we don't need to use the stack to push this parameter.
//
// We actually only care about the *current instruction* EIP in one location,
// so although it may be slightly more expensive to re-calculate the EIP one
// more time, this way we don't redefine ALL opcode handlers to have 3 parameters,
// which would force stack usage in all other scenarios.
//
#define KiVdmSetVdmEFlags(x) InterlockedOr((PLONG)KiNtVdmState, (x))
#define KiVdmClearVdmEFlags(x) InterlockedAnd((PLONG)KiNtVdmState, ~(x))
#define KiCallVdmHandler(x) KiVdmOpcode##x(TrapFrame, Flags)
#define KiCallVdmPrefixHandler(x) KiVdmOpcodePrefix(TrapFrame, Flags | x)
#define KiVdmUnhandledOpcode(x) \
    BOOLEAN \
    FASTCALL \
    KiVdmOpcode##x(IN PKTRAP_FRAME TrapFrame, \
                   IN ULONG Flags) \
    { \
        /* Not yet handled */ \
        UNIMPLEMENTED_DBGBREAK(); \
        return FALSE; \
    }
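
//
// Illustrative sketch only (guarded out, not used by the kernel): shows how a
// hypothetical handler could recover the instruction size and the EIP of the
// instruction being emulated from the combined Flags parameter, following the
// reasoning in the comment above. KiVdmOpcodeExample is an invented name.
//
#if 0
BOOLEAN
FASTCALL
KiVdmOpcodeExample(IN PKTRAP_FRAME TrapFrame,
                   IN ULONG Flags)
{
    ULONG InstructionSize, CurrentEip;

    /* The smallest prefix flag is 0x100, so the low byte holds the size */
    InstructionSize = Flags & 0xFF;

    /* Per the comment above: add the instruction size minus one to the trap
       frame EIP to get the EIP that should be looked at now */
    CurrentEip = TrapFrame->Eip + InstructionSize - 1;

    /* A real handler would decode the operands here, honoring any prefixes */
    if (Flags & PFX_FLAG_OPER32)
    {
        /* An operand-size override prefix was seen */
    }

    return (CurrentEip != 0);
}
#endif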

C_ASSERT(NPX_FRAME_LENGTH == sizeof(FX_SAVE_AREA));

//
// Local parameters
//
typedef struct _KV86_FRAME
{
    PVOID ThreadStack;
    PVOID ThreadTeb;
    PVOID PcrTeb;
} KV86_FRAME, *PKV86_FRAME;

//
// Virtual Stack Frame
//
typedef struct _KV8086_STACK_FRAME
{
    KTRAP_FRAME TrapFrame;
    FX_SAVE_AREA NpxArea;
    KV86_FRAME V86Frame;
} KV8086_STACK_FRAME, *PKV8086_STACK_FRAME;

//
// Large Pages Support
//
typedef struct _LARGE_IDENTITY_MAP
{
    PHARDWARE_PTE TopLevelDirectory;
    ULONG Cr3;
    ULONG_PTR StartAddress;
    ULONG PagesCount;
    PVOID PagesList[30];
} LARGE_IDENTITY_MAP, *PLARGE_IDENTITY_MAP;

/* Disable interrupts and return whether they were enabled before */
FORCEINLINE
BOOLEAN
KeDisableInterrupts(VOID)
{
    ULONG Flags;
    BOOLEAN Return;

    /* Get EFLAGS and check if the interrupt bit is set */
    Flags = __readeflags();
    Return = (Flags & EFLAGS_INTERRUPT_MASK) ? TRUE : FALSE;

    /* Disable interrupts */
    _disable();
    return Return;
}

/* Restore previous interrupt state */
FORCEINLINE
VOID
KeRestoreInterrupts(BOOLEAN WereEnabled)
{
    if (WereEnabled) _enable();
}
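
//
// Illustrative usage sketch only (guarded out, hypothetical function name):
// the typical pattern is to save the previous interrupt state, do the work,
// and then restore it.
//
#if 0
FORCEINLINE
VOID
KiInterruptStateUsageExample(VOID)
{
    BOOLEAN WereEnabled;

    /* Disable interrupts, remembering whether they were enabled before */
    WereEnabled = KeDisableInterrupts();

    /* ... touch state that must not be interrupted ... */

    /* Re-enable interrupts only if they were enabled on entry */
    KeRestoreInterrupts(WereEnabled);
}
#endif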

//
// Registers an interrupt handler with an IDT vector
//
FORCEINLINE
VOID
KeRegisterInterruptHandler(IN ULONG Vector,
                           IN PVOID Handler)
{
    UCHAR Entry;
    ULONG_PTR Address;
    PKIPCR Pcr = (PKIPCR)KeGetPcr();

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);
    Address = PtrToUlong(Handler);

    //
    // Now set the data
    //
    Pcr->IDT[Entry].ExtendedOffset = (USHORT)(Address >> 16);
    Pcr->IDT[Entry].Offset = (USHORT)Address;
}

//
// Returns the registered interrupt handler for a given IDT vector
//
FORCEINLINE
PVOID
KeQueryInterruptHandler(IN ULONG Vector)
{
    PKIPCR Pcr = (PKIPCR)KeGetPcr();
    UCHAR Entry;

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);

    //
    // Read the entry from the IDT
    //
    return (PVOID)(((Pcr->IDT[Entry].ExtendedOffset << 16) & 0xFFFF0000) |
                   (Pcr->IDT[Entry].Offset & 0xFFFF));
}

//
// Invalidates the TLB entry for a specified address
//
FORCEINLINE
VOID
KeInvalidateTlbEntry(IN PVOID Address)
{
    /* Invalidate the TLB entry for this address */
    __invlpg(Address);
}

FORCEINLINE
VOID
KeFlushProcessTb(VOID)
{
    /* Flush the TLB by resetting CR3 */
    __writecr3(__readcr3());
}

FORCEINLINE
VOID
KeSweepICache(IN PVOID BaseAddress,
              IN SIZE_T FlushSize)
{
    //
    // Always sweep the whole cache
    //
    UNREFERENCED_PARAMETER(BaseAddress);
    UNREFERENCED_PARAMETER(FlushSize);
    __wbinvd();
}

FORCEINLINE
PRKTHREAD
KeGetCurrentThread(VOID)
{
    /* Return the current thread */
    return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
}

FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
#ifndef CONFIG_SMP
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
        Ke386FnInit();
    }
#else
    /* Nothing to do */
#endif
}

FORCEINLINE
VOID
Ke386SetGdtEntryBase(PKGDTENTRY GdtEntry, PVOID BaseAddress)
{
    GdtEntry->BaseLow = (USHORT)((ULONG_PTR)BaseAddress & 0xFFFF);
    GdtEntry->HighWord.Bytes.BaseMid = (UCHAR)((ULONG_PTR)BaseAddress >> 16);
    GdtEntry->HighWord.Bytes.BaseHi = (UCHAR)((ULONG_PTR)BaseAddress >> 24);
}

FORCEINLINE
VOID
KiSetTebBase(PKPCR Pcr, PVOID TebAddress)
{
    Pcr->NtTib.Self = TebAddress;
    Ke386SetGdtEntryBase(&Pcr->GDT[KGDT_R3_TEB / sizeof(KGDTENTRY)], TebAddress);
}

VOID
FASTCALL
Ki386InitializeTss(
    IN PKTSS Tss,
    IN PKIDTENTRY Idt,
    IN PKGDTENTRY Gdt
);

VOID
NTAPI
KiSetCR0Bits(VOID);

VOID
NTAPI
KiGetCacheInformation(VOID);

BOOLEAN
NTAPI
KiIsNpxPresent(
    VOID
);

BOOLEAN
NTAPI
KiIsNpxErrataPresent(
    VOID
);

VOID
NTAPI
KiSetProcessorType(VOID);

ULONG
NTAPI
KiGetFeatureBits(VOID);

VOID
NTAPI
KiThreadStartup(VOID);

NTSTATUS
NTAPI
Ke386GetGdtEntryThread(
    IN PKTHREAD Thread,
    IN ULONG Offset,
    IN PKGDTENTRY Descriptor
);

VOID
NTAPI
KiFlushNPXState(
    IN FLOATING_SAVE_AREA *SaveArea
);

VOID
NTAPI
Ki386AdjustEsp0(
    IN PKTRAP_FRAME TrapFrame
);

VOID
NTAPI
Ki386SetupAndExitToV86Mode(
    OUT PTEB VdmTeb
);

VOID
NTAPI
KeI386VdmInitialize(
    VOID
);

ULONG_PTR
NTAPI
Ki386EnableGlobalPage(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableTargetLargePage(
    IN ULONG_PTR Context
);

BOOLEAN
NTAPI
Ki386CreateIdentityMap(
    IN PLARGE_IDENTITY_MAP IdentityMap,
    IN PVOID StartPtr,
    IN ULONG Length
);

VOID
NTAPI
Ki386FreeIdentityMap(
    IN PLARGE_IDENTITY_MAP IdentityMap
);

VOID
NTAPI
Ki386EnableCurrentLargePage(
    IN ULONG_PTR StartAddress,
    IN ULONG Cr3
);

VOID
NTAPI
KiI386PentiumLockErrataFixup(
    VOID
);

VOID
NTAPI
KiInitializePAT(
    VOID
);

VOID
NTAPI
KiInitializeMTRR(
    IN BOOLEAN FinalCpu
);

VOID
NTAPI
KiAmdK6InitializeMTRR(
    VOID
);

VOID
NTAPI
KiRestoreFastSyscallReturnState(
    VOID
);

ULONG_PTR
NTAPI
Ki386EnableDE(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableFxsr(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableXMMIExceptions(
    IN ULONG_PTR Context
);

BOOLEAN
NTAPI
VdmDispatchBop(
    IN PKTRAP_FRAME TrapFrame
);

BOOLEAN
FASTCALL
KiVdmOpcodePrefix(
    IN PKTRAP_FRAME TrapFrame,
    IN ULONG Flags
);

BOOLEAN
FASTCALL
Ki386HandleOpcodeV86(
    IN PKTRAP_FRAME TrapFrame
);

DECLSPEC_NORETURN
VOID
FASTCALL
KiEoiHelper(
    IN PKTRAP_FRAME TrapFrame
);

VOID
FASTCALL
Ki386BiosCallReturnAddress(
    IN PKTRAP_FRAME TrapFrame
);

ULONG_PTR
FASTCALL
KiExitV86Mode(
    IN PKTRAP_FRAME TrapFrame
);

DECLSPEC_NORETURN
VOID
NTAPI
KiDispatchExceptionFromTrapFrame(
    IN NTSTATUS Code,
    IN ULONG Flags,
    IN ULONG_PTR Address,
    IN ULONG ParameterCount,
    IN ULONG_PTR Parameter1,
    IN ULONG_PTR Parameter2,
    IN ULONG_PTR Parameter3,
    IN PKTRAP_FRAME TrapFrame
);

NTSTATUS
NTAPI
KiConvertToGuiThread(
    VOID
);

//
// Global x86 only Kernel data
//
extern PVOID Ki386IopmSaveArea;
extern ULONG KeI386EFlagsAndMaskV86;
extern ULONG KeI386EFlagsOrMaskV86;
extern BOOLEAN KeI386VirtualIntExtensions;
extern KIDTENTRY KiIdt[MAXIMUM_IDTVECTOR+1];
extern KDESCRIPTOR KiIdtDescriptor;
extern BOOLEAN KiI386PentiumLockErrataPresent;
extern ULONG KeI386NpxPresent;
extern ULONG KeI386XMMIPresent;
extern ULONG KeI386FxsrPresent;
extern ULONG KiMXCsrMask;
extern ULONG KeI386CpuType;
extern ULONG KeI386CpuStep;
extern ULONG KiFastSystemCallDisable;
extern UCHAR KiDebugRegisterTrapOffsets[9];
extern UCHAR KiDebugRegisterContextOffsets[9];
extern DECLSPEC_NORETURN VOID __cdecl KiTrap02(VOID);
extern VOID __cdecl KiTrap08(VOID);
extern VOID __cdecl KiTrap13(VOID);
extern VOID __cdecl KiFastCallEntry(VOID);
extern VOID NTAPI ExpInterlockedPopEntrySListFault(VOID);
extern VOID NTAPI ExpInterlockedPopEntrySListResume(VOID);
extern VOID __cdecl CopyParams(VOID);
extern VOID __cdecl ReadBatch(VOID);
extern CHAR KiSystemCallExitBranch[];
extern CHAR KiSystemCallExit[];
extern CHAR KiSystemCallExit2[];

//
// Trap Macros
//
#include "trap_x.h"

//
// Returns a thread's FPU save area
//
FORCEINLINE
PFX_SAVE_AREA
KiGetThreadNpxArea(IN PKTHREAD Thread)
{
    ASSERT((ULONG_PTR)Thread->InitialStack % 16 == 0);
    return (PFX_SAVE_AREA)((ULONG_PTR)Thread->InitialStack - sizeof(FX_SAVE_AREA));
}

//
// Sanitizes a selector
//
FORCEINLINE
ULONG
Ke386SanitizeSeg(IN ULONG Cs,
                 IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and force CPL 0 if so.
    // Otherwise, force CPL 3.
    //
    return ((Mode == KernelMode) ?
            (Cs & (0xFFFF & ~RPL_MASK)) :
            (RPL_MASK | (Cs & 0xFFFF)));
}

//
// Sanitizes EFLAGS
//
FORCEINLINE
ULONG
Ke386SanitizeFlags(IN ULONG Eflags,
                   IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and sanitize EFLAGS if so.
    // Otherwise, also force interrupt mask on.
    //
    return ((Mode == KernelMode) ?
            (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
            (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
}

//
// Sanitizes a Debug Register
//
FORCEINLINE
PVOID
Ke386SanitizeDr(IN PVOID DrAddress,
                IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and return the address directly if so.
    // Otherwise, make sure it's not inside the kernel-mode address space.
    // If it is, then clear the address.
    //
    return ((Mode == KernelMode) ? DrAddress :
            (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
}

//
// Exception with no arguments
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiDispatchException0Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with no arguments */
    KiDispatchExceptionFromTrapFrame(Code, 0, Address, 0, 0, 0, 0, TrapFrame);
}

//
// Exception with one argument
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiDispatchException1Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with one argument */
    KiDispatchExceptionFromTrapFrame(Code, 0, Address, 1, P1, 0, 0, TrapFrame);
}

//
// Exception with two arguments
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiDispatchException2Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN ULONG P2,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with two arguments */
    KiDispatchExceptionFromTrapFrame(Code, 0, Address, 2, P1, P2, 0, TrapFrame);
}

//
// Performs a system call
//

/*
 * This sequence does a RtlCopyMemory(Stack - StackBytes, Arguments, StackBytes)
 * and then calls the function associated with the system call.
 *
 * It's done in assembly for two reasons: we need to muck with the stack,
 * and the call itself restores the stack back for us. The only way to do
 * this in C is to write manual C handlers for every possible number of args on
 * the stack, and then have the handler issue a call by pointer. This is
 * wasteful since it'll basically push the values twice and require another
 * level of call indirection.
 *
 * The ARM port currently uses that C approach, but it should probably be
 * changed later to work like this as well.
 *
 */
#ifdef __GNUC__
FORCEINLINE
NTSTATUS
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    NTSTATUS Result;

    __asm__ __volatile__
    (
        "subl %1, %%esp\n\t"
        "movl %%esp, %%edi\n\t"
        "movl %2, %%esi\n\t"
        "shrl $2, %1\n\t"
        "rep movsd\n\t"
        "call *%3\n\t"
        "movl %%eax, %0"
        : "=r"(Result)
        : "c"(StackBytes),
          "d"(Arguments),
          "r"(Handler)
        : "%esp", "%esi", "%edi"
    );
    return Result;
}
#elif defined(_MSC_VER)
FORCEINLINE
NTSTATUS
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    __asm
    {
        mov ecx, StackBytes
        mov esi, Arguments
        mov eax, Handler
        sub esp, ecx
        mov edi, esp
        shr ecx, 2
        rep movsd
        call eax
    }
    /* Return with result in EAX */
}
#else
#error Unknown Compiler
#endif
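
//
// Illustrative usage sketch only (guarded out, hypothetical names): a system
// call dispatcher would pass the service routine, the captured user-mode
// argument pointer, and the number of in-memory argument bytes for that
// service. ServiceRoutine, UserArguments and ArgumentBytes are invented for
// illustration; the real caller is the kernel's system call dispatch path.
//
#if 0
FORCEINLINE
NTSTATUS
KiSystemCallTrampolineUsageExample(IN PVOID ServiceRoutine,
                                   IN PVOID UserArguments,
                                   IN ULONG ArgumentBytes)
{
    /* Copy ArgumentBytes of arguments onto the kernel stack and invoke the
       service routine; the trampoline returns the result from EAX */
    return KiSystemCallTrampoline(ServiceRoutine, UserArguments, ArgumentBytes);
}
#endif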


//
// Checks for pending APCs
//
FORCEINLINE
VOID
KiCheckForApcDelivery(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Check for V8086 or user-mode trap */
    if ((TrapFrame->EFlags & EFLAGS_V86_MASK) || (KiUserTrap(TrapFrame)))
    {
        /* Get the thread */
        Thread = KeGetCurrentThread();
        while (TRUE)
        {
            /* Turn off the alerted state for kernel mode */
            Thread->Alerted[KernelMode] = FALSE;

            /* Are there pending user APCs? */
            if (!Thread->ApcState.UserApcPending) break;

            /* Raise to APC level and enable interrupts */
            OldIrql = KfRaiseIrql(APC_LEVEL);
            _enable();

            /* Deliver APCs */
            KiDeliverApc(UserMode, NULL, TrapFrame);

            /* Restore IRQL and disable interrupts once again */
            KfLowerIrql(OldIrql);
            _disable();
        }
    }
}

//
// Switches from boot loader to initial kernel stack
//
FORCEINLINE
VOID
KiSwitchToBootStack(IN ULONG_PTR InitialStack)
{
    VOID NTAPI KiSystemStartupBootStack(VOID);

    /* We have to switch to a new stack before continuing kernel initialization */
#ifdef __GNUC__
    __asm__
    (
        "movl %0, %%esp\n\t"
        "subl %1, %%esp\n\t"
        "pushl %2\n\t"
        "jmp _KiSystemStartupBootStack@0"
        :
        : "c"(InitialStack),
          "i"(NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH),
          "i"(CR0_EM | CR0_TS | CR0_MP),
          "p"(KiSystemStartupBootStack)
        : "%esp"
    );
#elif defined(_MSC_VER)
    __asm
    {
        mov esp, InitialStack
        sub esp, (NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH)
        push (CR0_EM | CR0_TS | CR0_MP)
        jmp KiSystemStartupBootStack
    }
#else
#error Unknown Compiler
#endif
}

//
// Emits the iret instruction for C code
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiIret(VOID)
{
#if defined(__GNUC__)
    __asm__ __volatile__
    (
        "iret"
    );
#elif defined(_MSC_VER)
    __asm
    {
        iretd
    }
#else
#error Unsupported compiler
#endif
    UNREACHABLE;
}

//
// Normally this is done by the HAL, but on x86 as an optimization, the kernel
// initiates the end by calling back into the HAL and exiting the trap here.
//
FORCEINLINE
VOID
KiEndInterrupt(IN KIRQL Irql,
               IN PKTRAP_FRAME TrapFrame)
{
    /* Disable interrupts and end the interrupt */
    _disable();
    HalEndSystemInterrupt(Irql, TrapFrame);

    /* Exit the interrupt */
    KiEoiHelper(TrapFrame);
}

//
// PERF Code
//
FORCEINLINE
VOID
Ki386PerfEnd(VOID)
{
    extern ULONGLONG BootCyclesEnd, BootCycles;
    BootCyclesEnd = __rdtsc();
    DbgPrint("Boot took %I64u cycles!\n", BootCyclesEnd - BootCycles);
    DbgPrint("Interrupts: %u System Calls: %u Context Switches: %u\n",
             KeGetCurrentPrcb()->InterruptCount,
             KeGetCurrentPrcb()->KeSystemCalls,
             KeGetContextSwitches(KeGetCurrentPrcb()));
}

FORCEINLINE
PULONG
KiGetUserModeStackAddress(void)
{
    return &(KeGetCurrentThread()->TrapFrame->HardwareEsp);
}

#endif