/* reactos/ntoskrnl/include/internal/i386/ke.h */
#pragma once

#ifndef __ASM__

#include "intrin_i.h"
#include "v86m.h"

//
// Thread Dispatcher Header DebugActive Mask
//
#define DR_MASK(x) (1 << (x))
#define DR_REG_MASK 0x4F

#define IMAGE_FILE_MACHINE_ARCHITECTURE IMAGE_FILE_MACHINE_I386

//
// INT3 is 1 byte long
//
#define KD_BREAKPOINT_TYPE UCHAR
#define KD_BREAKPOINT_SIZE sizeof(UCHAR)
#define KD_BREAKPOINT_VALUE 0xCC

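//
// Illustrative sketch (not part of the original header; Address is a
// hypothetical pointer to the patched instruction): a debugger plants a
// software breakpoint by saving the original byte and writing the INT3
// opcode in its place, using the definitions above.
//
//     KD_BREAKPOINT_TYPE Original = *(KD_BREAKPOINT_TYPE*)Address;
//     *(KD_BREAKPOINT_TYPE*)Address = KD_BREAKPOINT_VALUE; /* 0xCC = INT3 */
//
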
//
// Macros for getting and setting special purpose registers in portable code
//
#define KeGetContextPc(Context) \
    ((Context)->Eip)

#define KeSetContextPc(Context, ProgramCounter) \
    ((Context)->Eip = (ProgramCounter))

#define KeGetTrapFramePc(TrapFrame) \
    ((TrapFrame)->Eip)

#define KiGetLinkedTrapFrame(x) \
    (PKTRAP_FRAME)((x)->Edx)

#define KeGetContextReturnRegister(Context) \
    ((Context)->Eax)

#define KeSetContextReturnRegister(Context, ReturnValue) \
    ((Context)->Eax = (ReturnValue))

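//
// Illustrative usage (not part of the original header; Continuation is a
// hypothetical target routine): redirecting a captured context so it resumes
// elsewhere and reports STATUS_SUCCESS.
//
//     KeSetContextPc(Context, (ULONG_PTR)Continuation);
//     KeSetContextReturnRegister(Context, STATUS_SUCCESS);
//
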
//
// Macros to get the trap and exception frames from a thread's stack
//
#define KeGetTrapFrame(Thread) \
    (PKTRAP_FRAME)((ULONG_PTR)((Thread)->InitialStack) - \
                   sizeof(KTRAP_FRAME) - \
                   sizeof(FX_SAVE_AREA))

#define KeGetExceptionFrame(Thread) \
    NULL

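//
// Layout sketch derived from the macro above and from KiGetThreadNpxArea
// further down (illustrative only, not part of the original header): the NPX
// save area sits at the very top of the thread's initial stack, with the base
// trap frame immediately below it.
//
//     InitialStack
//       -> FX_SAVE_AREA   (KiGetThreadNpxArea(Thread))
//       -> KTRAP_FRAME    (KeGetTrapFrame(Thread))
//       -> remaining kernel stack
//
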
//
// Macro to get context switches from the PRCB
// All architectures but x86 have it in the PRCB's KeContextSwitches
//
#define KeGetContextSwitches(Prcb) \
    CONTAINING_RECORD(Prcb, KIPCR, PrcbData)->ContextSwitches

//
// Returns the Interrupt State from a Trap Frame.
// ON = TRUE, OFF = FALSE
//
#define KeGetTrapFrameInterruptState(TrapFrame) \
    BooleanFlagOn((TrapFrame)->EFlags, EFLAGS_INTERRUPT_MASK)

//
// Flags for exiting a trap
//
#define KTE_SKIP_PM_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipPreviousMode = TRUE } }).Bits)
#define KTE_SKIP_SEG_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipSegments = TRUE } }).Bits)
#define KTE_SKIP_VOL_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipVolatiles = TRUE } }).Bits)

typedef union _KTRAP_EXIT_SKIP_BITS
{
    struct
    {
        UCHAR SkipPreviousMode:1;
        UCHAR SkipSegments:1;
        UCHAR SkipVolatiles:1;
        UCHAR Reserved:5;
    };
    UCHAR Bits;
} KTRAP_EXIT_SKIP_BITS, *PKTRAP_EXIT_SKIP_BITS;

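//
// Illustrative usage (not part of the original header): the skip bits can be
// combined when a trap exit does not need to restore certain state, e.g.
//
//     UCHAR SkipBits = KTE_SKIP_SEG_BIT | KTE_SKIP_VOL_BIT;
//
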

//
// Flags used by the VDM/V8086 emulation engine for determining instruction prefixes
//
#define PFX_FLAG_ES 0x00000100
#define PFX_FLAG_CS 0x00000200
#define PFX_FLAG_SS 0x00000400
#define PFX_FLAG_DS 0x00000800
#define PFX_FLAG_FS 0x00001000
#define PFX_FLAG_GS 0x00002000
#define PFX_FLAG_OPER32 0x00004000
#define PFX_FLAG_ADDR32 0x00008000
#define PFX_FLAG_LOCK 0x00010000
#define PFX_FLAG_REPNE 0x00020000
#define PFX_FLAG_REP 0x00040000

//
// VDM Helper Macros
//
// All VDM/V8086 opcode emulators have the same FASTCALL function definition.
// We need to keep 2 parameters while the original ASM implementation uses 4:
// TrapFrame, PrefixFlags, Eip, InstructionSize.
//
// We pass the trap frame and the prefix flags in our two parameters.
//
// Since the smallest prefix flag is 0x100, the low byte is free, giving us
// room for a count of up to 0xFF. So we OR the instruction size into the
// prefix flags.
//
// We also always have access to EIP from the trap frame, and if we want the
// *current instruction* EIP, we simply add the instruction size *MINUS ONE*;
// that gives us the EIP we should be looking at now, so we don't need to use
// the stack to push this parameter.
//
// We actually only care about the *current instruction* EIP in one location,
// so although it may be slightly more expensive to re-calculate the EIP one
// more time, this way we don't redefine ALL opcode handlers to take 3
// parameters, which would force stack usage in all other scenarios.
//
#define KiVdmSetVdmEFlags(x) InterlockedOr((PLONG)KiNtVdmState, (x));
#define KiVdmClearVdmEFlags(x) InterlockedAnd((PLONG)KiNtVdmState, ~(x))
#define KiCallVdmHandler(x) KiVdmOpcode##x(TrapFrame, Flags)
#define KiCallVdmPrefixHandler(x) KiVdmOpcodePrefix(TrapFrame, Flags | x)
#define KiVdmUnhandledOpcode(x) \
    BOOLEAN \
    FASTCALL \
    KiVdmOpcode##x(IN PKTRAP_FRAME TrapFrame, \
                   IN ULONG Flags) \
    { \
        /* Not yet handled */ \
        UNIMPLEMENTED; \
        while (TRUE); \
        return TRUE; \
    }

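//
// Illustrative sketch (not part of the original header), following the comment
// above: because the smallest prefix flag is 0x100, the low byte of Flags
// carries the running instruction-size count, and the current-instruction EIP
// can be recomputed from the trap frame when needed.
//
//     ULONG InstructionSize = Flags & 0xFF;
//     ULONG_PTR CurrentEip = TrapFrame->Eip + InstructionSize - 1;
//
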
C_ASSERT(NPX_FRAME_LENGTH == sizeof(FX_SAVE_AREA));

//
// Local parameters
//
typedef struct _KV86_FRAME
{
    PVOID ThreadStack;
    PVOID ThreadTeb;
    PVOID PcrTeb;
} KV86_FRAME, *PKV86_FRAME;

//
// Virtual Stack Frame
//
typedef struct _KV8086_STACK_FRAME
{
    KTRAP_FRAME TrapFrame;
    FX_SAVE_AREA NpxArea;
    KV86_FRAME V86Frame;
} KV8086_STACK_FRAME, *PKV8086_STACK_FRAME;

//
// Registers an interrupt handler with an IDT vector
//
FORCEINLINE
VOID
KeRegisterInterruptHandler(IN ULONG Vector,
                           IN PVOID Handler)
{
    UCHAR Entry;
    ULONG_PTR Address;
    PKIPCR Pcr = (PKIPCR)KeGetPcr();

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);
    Address = PtrToUlong(Handler);

    //
    // Now set the data
    //
    Pcr->IDT[Entry].ExtendedOffset = (USHORT)(Address >> 16);
    Pcr->IDT[Entry].Offset = (USHORT)Address;
}

//
// Returns the registered interrupt handler for a given IDT vector
//
FORCEINLINE
PVOID
KeQueryInterruptHandler(IN ULONG Vector)
{
    PKIPCR Pcr = (PKIPCR)KeGetPcr();
    UCHAR Entry;

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);

    //
    // Read the entry from the IDT
    //
    return (PVOID)(((Pcr->IDT[Entry].ExtendedOffset << 16) & 0xFFFF0000) |
                   (Pcr->IDT[Entry].Offset & 0xFFFF));
}

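//
// Illustrative usage (not part of the original header; MyInterruptHandler is a
// hypothetical routine): hook a vector and remember the previous handler so it
// can be restored later.
//
//     PVOID OldHandler = KeQueryInterruptHandler(Vector);
//     KeRegisterInterruptHandler(Vector, MyInterruptHandler);
//     /* ... */
//     KeRegisterInterruptHandler(Vector, OldHandler);
//
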
//
// Invalidates the TLB entry for a specified address
//
FORCEINLINE
VOID
KeInvalidateTlbEntry(IN PVOID Address)
{
    /* Invalidate the TLB entry for this address */
    __invlpg(Address);
}

FORCEINLINE
VOID
KeFlushProcessTb(VOID)
{
    /* Flush the TLB by resetting CR3 */
    __writecr3(__readcr3());
}

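//
// Illustrative usage (not part of the original header; MappedAddress is a
// hypothetical pointer): invalidate a single page translation after editing
// its PTE, or flush the TLB at once by reloading CR3.
//
//     KeInvalidateTlbEntry(MappedAddress); /* one page */
//     KeFlushProcessTb();                  /* CR3 reload */
//
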
FORCEINLINE
PRKTHREAD
KeGetCurrentThread(VOID)
{
    /* Return the current thread */
    return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
}

FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
#ifndef CONFIG_SMP
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
        Ke386FnInit();
    }
#else
    /* Nothing to do */
#endif
}

VOID
FASTCALL
Ki386InitializeTss(
    IN PKTSS Tss,
    IN PKIDTENTRY Idt,
    IN PKGDTENTRY Gdt
);

VOID
NTAPI
KiSetCR0Bits(VOID);

VOID
NTAPI
KiGetCacheInformation(VOID);

BOOLEAN
NTAPI
KiIsNpxPresent(
    VOID
);

BOOLEAN
NTAPI
KiIsNpxErrataPresent(
    VOID
);

VOID
NTAPI
KiSetProcessorType(VOID);

ULONG
NTAPI
KiGetFeatureBits(VOID);

VOID
NTAPI
KiThreadStartup(VOID);

NTSTATUS
NTAPI
Ke386GetGdtEntryThread(
    IN PKTHREAD Thread,
    IN ULONG Offset,
    IN PKGDTENTRY Descriptor
);

VOID
NTAPI
KiFlushNPXState(
    IN FLOATING_SAVE_AREA *SaveArea
);

VOID
NTAPI
Ki386AdjustEsp0(
    IN PKTRAP_FRAME TrapFrame
);

VOID
NTAPI
Ki386SetupAndExitToV86Mode(
    OUT PTEB VdmTeb
);

VOID
NTAPI
KeI386VdmInitialize(
    VOID
);

ULONG_PTR
NTAPI
Ki386EnableGlobalPage(
    IN volatile ULONG_PTR Context
);

VOID
NTAPI
KiI386PentiumLockErrataFixup(
    VOID
);

VOID
NTAPI
KiInitializePAT(
    VOID
);

VOID
NTAPI
KiInitializeMTRR(
    IN BOOLEAN FinalCpu
);

VOID
NTAPI
KiAmdK6InitializeMTRR(
    VOID
);

VOID
NTAPI
KiRestoreFastSyscallReturnState(
    VOID
);

ULONG_PTR
NTAPI
Ki386EnableDE(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableFxsr(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableXMMIExceptions(
    IN ULONG_PTR Context
);

BOOLEAN
NTAPI
VdmDispatchBop(
    IN PKTRAP_FRAME TrapFrame
);

BOOLEAN
FASTCALL
KiVdmOpcodePrefix(
    IN PKTRAP_FRAME TrapFrame,
    IN ULONG Flags
);

BOOLEAN
FASTCALL
Ki386HandleOpcodeV86(
    IN PKTRAP_FRAME TrapFrame
);

DECLSPEC_NORETURN
VOID
FASTCALL
KiEoiHelper(
    IN PKTRAP_FRAME TrapFrame
);

VOID
FASTCALL
Ki386BiosCallReturnAddress(
    IN PKTRAP_FRAME TrapFrame
);

ULONG_PTR
FASTCALL
KiExitV86Mode(
    IN PKTRAP_FRAME TrapFrame
);

DECLSPEC_NORETURN
VOID
NTAPI
KiDispatchExceptionFromTrapFrame(
    IN NTSTATUS Code,
    IN ULONG_PTR Address,
    IN ULONG ParameterCount,
    IN ULONG_PTR Parameter1,
    IN ULONG_PTR Parameter2,
    IN ULONG_PTR Parameter3,
    IN PKTRAP_FRAME TrapFrame
);

//
// Global x86 only Kernel data
//
extern PVOID Ki386IopmSaveArea;
extern ULONG KeI386EFlagsAndMaskV86;
extern ULONG KeI386EFlagsOrMaskV86;
extern BOOLEAN KeI386VirtualIntExtensions;
extern KIDTENTRY KiIdt[MAXIMUM_IDTVECTOR];
extern KDESCRIPTOR KiIdtDescriptor;
extern BOOLEAN KiI386PentiumLockErrataPresent;
extern ULONG KeI386NpxPresent;
extern ULONG KeI386XMMIPresent;
extern ULONG KeI386FxsrPresent;
extern ULONG KiMXCsrMask;
extern ULONG KeI386CpuType;
extern ULONG KeI386CpuStep;
extern ULONG Ke386CacheAlignment;
extern ULONG KiFastSystemCallDisable;
extern UCHAR KiDebugRegisterTrapOffsets[9];
extern UCHAR KiDebugRegisterContextOffsets[9];
extern VOID __cdecl KiTrap02(VOID);
extern VOID __cdecl KiTrap08(VOID);
extern VOID __cdecl KiTrap13(VOID);
extern VOID __cdecl KiFastCallEntry(VOID);
extern VOID NTAPI ExpInterlockedPopEntrySListFault(VOID);
extern VOID __cdecl CopyParams(VOID);
extern VOID __cdecl ReadBatch(VOID);
extern VOID __cdecl FrRestore(VOID);
extern CHAR KiSystemCallExitBranch[];
extern CHAR KiSystemCallExit[];
extern CHAR KiSystemCallExit2[];

//
// Trap Macros
//
#include "../trap_x.h"

//
// Returns a thread's FPU save area
//
PFX_SAVE_AREA
FORCEINLINE
KiGetThreadNpxArea(IN PKTHREAD Thread)
{
    return (PFX_SAVE_AREA)((ULONG_PTR)Thread->InitialStack - sizeof(FX_SAVE_AREA));
}

//
// Sanitizes a selector
//
FORCEINLINE
ULONG
Ke386SanitizeSeg(IN ULONG Cs,
                 IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and force CPL 0 if so.
    // Otherwise, force CPL 3.
    //
    return ((Mode == KernelMode) ?
            (Cs & (0xFFFF & ~RPL_MASK)) :
            (RPL_MASK | (Cs & 0xFFFF)));
}

//
// Sanitizes EFLAGS
//
FORCEINLINE
ULONG
Ke386SanitizeFlags(IN ULONG Eflags,
                   IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and sanitize EFLAGS if so.
    // Otherwise, also force interrupt mask on.
    //
    return ((Mode == KernelMode) ?
            (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
            (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
}

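//
// Illustrative usage (not part of the original header; PreviousMode is a
// hypothetical KPROCESSOR_MODE variable): sanitizing the selector and flags
// taken from a caller-supplied CONTEXT before applying them.
//
//     Context->SegCs = Ke386SanitizeSeg(Context->SegCs, PreviousMode);
//     Context->EFlags = Ke386SanitizeFlags(Context->EFlags, PreviousMode);
//
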
//
// Gets a DR register from a CONTEXT structure
//
FORCEINLINE
PVOID
KiDrFromContext(IN ULONG Dr,
                IN PCONTEXT Context)
{
    return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
}

//
// Gets a DR register from a KTRAP_FRAME structure
//
FORCEINLINE
PVOID*
KiDrFromTrapFrame(IN ULONG Dr,
                  IN PKTRAP_FRAME TrapFrame)
{
    return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
}

//
// Sanitizes a Debug Register
//
FORCEINLINE
PVOID
Ke386SanitizeDr(IN PVOID DrAddress,
                IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and return the address directly if so.
    // Otherwise, make sure it's not inside the kernel-mode address space.
    // If it is, then clear the address.
    //
    return ((Mode == KernelMode) ? DrAddress :
            (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
}

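//
// Illustrative sketch (not part of the original header; PreviousMode is a
// hypothetical KPROCESSOR_MODE variable): copying DR0 from a caller-supplied
// CONTEXT into a trap frame, sanitized so user mode cannot aim it at kernel
// addresses.
//
//     *KiDrFromTrapFrame(0, TrapFrame) =
//         Ke386SanitizeDr(KiDrFromContext(0, Context), PreviousMode);
//
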
//
// Exception with no arguments
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException0Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with no arguments */
    KiDispatchExceptionFromTrapFrame(Code, Address, 0, 0, 0, 0, TrapFrame);
}

//
// Exception with one argument
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException1Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with one argument */
    KiDispatchExceptionFromTrapFrame(Code, Address, 1, P1, 0, 0, TrapFrame);
}

580
581 //
582 // Exception with two arguments
583 //
584 VOID
585 FORCEINLINE
586 DECLSPEC_NORETURN
587 KiDispatchException2Args(IN NTSTATUS Code,
588 IN ULONG_PTR Address,
589 IN ULONG P1,
590 IN ULONG P2,
591 IN PKTRAP_FRAME TrapFrame)
592 {
593 /* Helper for exceptions with no arguments */
594 KiDispatchExceptionFromTrapFrame(Code, Address, 2, P1, P2, 0, TrapFrame);
595 }
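//
// Illustrative usage (not part of the original header; StoreInstruction and
// FaultAddress are hypothetical values from a page-fault handler): raising an
// access violation with its customary two parameters.
//
//     KiDispatchException2Args(STATUS_ACCESS_VIOLATION,
//                              TrapFrame->Eip,
//                              StoreInstruction,
//                              FaultAddress,
//                              TrapFrame);
//
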
596
597 //
598 // Performs a system call
599 //
600 NTSTATUS
601 FORCEINLINE
602 KiSystemCallTrampoline(IN PVOID Handler,
603 IN PVOID Arguments,
604 IN ULONG StackBytes)
605 {
606 NTSTATUS Result;
607
608 /*
609 * This sequence does a RtlCopyMemory(Stack - StackBytes, Arguments, StackBytes)
610 * and then calls the function associated with the system call.
611 *
612 * It's done in assembly for two reasons: we need to muck with the stack,
613 * and the call itself restores the stack back for us. The only way to do
614 * this in C is to do manual C handlers for every possible number of args on
615 * the stack, and then have the handler issue a call by pointer. This is
616 * wasteful since it'll basically push the values twice and require another
617 * level of call indirection.
618 *
619 * The ARM kernel currently does this, but it should probably be changed
620 * later to function like this as well.
621 *
622 */
623 #ifdef __GNUC__
624 __asm__ __volatile__
625 (
626 "subl %1, %%esp\n"
627 "movl %%esp, %%edi\n"
628 "movl %2, %%esi\n"
629 "shrl $2, %1\n"
630 "rep movsd\n"
631 "call *%3\n"
632 "movl %%eax, %0\n"
633 : "=r"(Result)
634 : "c"(StackBytes),
635 "d"(Arguments),
636 "r"(Handler)
637 : "%esp", "%esi", "%edi"
638 );
639 #elif defined(_MSC_VER)
640 __asm
641 {
642 mov ecx, StackBytes
643 mov edx, Arguments
644 sub esp, ecx
645 mov edi, esp
646 mov esi, edx
647 shr ecx, 2
648 rep movsd
649 call Handler
650 mov Result, eax
651 }
652 #else
653 #error Unknown Compiler
654 #endif
655
656 return Result;
657 }
658
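//
// C-level sketch of what the assembly in KiSystemCallTrampoline above performs
// (illustrative pseudocode only, not part of the original header; the real
// code must stay in assembly because it adjusts ESP directly and the call
// consumes the copied arguments straight off the stack):
//
//     Stack = Esp - StackBytes;                     /* subl %1, %%esp */
//     RtlCopyMemory(Stack, Arguments, StackBytes);  /* rep movsd      */
//     Result = call Handler;                        /* call *%3       */
//
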
//
// Checks for pending APCs
//
VOID
FORCEINLINE
KiCheckForApcDelivery(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Check for V8086 or user-mode trap */
    if ((TrapFrame->EFlags & EFLAGS_V86_MASK) || (KiUserTrap(TrapFrame)))
    {
        /* Get the thread */
        Thread = KeGetCurrentThread();
        while (TRUE)
        {
            /* Turn off the alerted state for kernel mode */
            Thread->Alerted[KernelMode] = FALSE;

            /* Are there pending user APCs? */
            if (!Thread->ApcState.UserApcPending) break;

            /* Raise to APC level and enable interrupts */
            OldIrql = KfRaiseIrql(APC_LEVEL);
            _enable();

            /* Deliver APCs */
            KiDeliverApc(UserMode, NULL, TrapFrame);

            /* Restore IRQL and disable interrupts once again */
            KfLowerIrql(OldIrql);
            _disable();
        }
    }
}

//
// Converts a base thread to a GUI thread
//
NTSTATUS
FORCEINLINE
KiConvertToGuiThread(VOID)
{
    NTSTATUS Result;
    PVOID StackFrame;

    /*
     * Converting to a GUI thread safely updates ESP in-place as well as the
     * current Thread->TrapFrame and EBP when KeSwitchKernelStack is called.
     *
     * However, PsConvertToGuiThread "helpfully" restores EBP to the original
     * caller's value, since it is considered a nonvolatile register. As such,
     * as soon as we're back after the conversion and we try to store the result
     * which will probably be in some stack variable (EBP-based), we'll crash as
     * we are touching the de-allocated non-expanded stack.
     *
     * Thus we need a way to update our EBP before EBP is touched, and the only
     * way to guarantee this is to do the call itself in assembly, use the EAX
     * register to store the result, fixup EBP, and then let the C code continue
     * on its merry way.
     *
     */
#ifdef __GNUC__
    __asm__ __volatile__
    (
        "movl %%ebp, %1\n"
        "subl %%esp, %1\n"
        "call _PsConvertToGuiThread@0\n"
        "addl %%esp, %1\n"
        "movl %1, %%ebp\n"
        "movl %%eax, %0\n"
        : "=r"(Result), "=r"(StackFrame)
        :
        : "%esp", "%ecx", "%edx", "memory"
    );
#elif defined(_MSC_VER)
    NTSTATUS NTAPI PsConvertToGuiThread(VOID);
    __asm
    {
        mov StackFrame, ebp
        sub StackFrame, esp
        call PsConvertToGuiThread
        add StackFrame, esp
        mov ebp, StackFrame
        mov Result, eax
    }
#else
#error Unknown Compiler
#endif
    return Result;
}

//
// Switches from boot loader to initial kernel stack
//
VOID
FORCEINLINE
KiSwitchToBootStack(IN ULONG_PTR InitialStack)
{
    /* We have to switch to a new stack before continuing kernel initialization */
#ifdef __GNUC__
    __asm__
    (
        "movl %0, %%esp\n"
        "subl %1, %%esp\n"
        "pushl %2\n"
        "jmp _KiSystemStartupBootStack@0\n"
        :
        : "c"(InitialStack),
          "i"(NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH),
          "i"(CR0_EM | CR0_TS | CR0_MP)
        : "%esp"
    );
#elif defined(_MSC_VER)
    VOID NTAPI KiSystemStartupBootStack(VOID);
    __asm
    {
        mov ecx, InitialStack
        mov esp, ecx
        sub esp, (NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH)
        push (CR0_EM | CR0_TS | CR0_MP)
        jmp KiSystemStartupBootStack
    }
#else
#error Unknown Compiler
#endif
}

//
// Normally this is done by the HAL, but on x86 as an optimization, the kernel
// initiates the end by calling back into the HAL and exiting the trap here.
//
VOID
FORCEINLINE
KiEndInterrupt(IN KIRQL Irql,
               IN PKTRAP_FRAME TrapFrame)
{
    /* Disable interrupts and end the interrupt */
    _disable();
    HalEndSystemInterrupt(Irql, TrapFrame);

    /* Exit the interrupt */
    KiEoiHelper(TrapFrame);
}

//
// PERF Code
//
VOID
FORCEINLINE
Ki386PerfEnd(VOID)
{
    extern ULONGLONG BootCyclesEnd, BootCycles;
    BootCyclesEnd = __rdtsc();
    DbgPrint("Boot took %I64d cycles!\n", BootCyclesEnd - BootCycles);
    DbgPrint("Interrupts: %d System Calls: %d Context Switches: %d\n",
             KeGetCurrentPrcb()->InterruptCount,
             KeGetCurrentPrcb()->KeSystemCalls,
             KeGetContextSwitches(KeGetCurrentPrcb()));
}

#endif