- Sync with trunk r58248 to bring the latest changes from Amine (headers) and others...
[reactos.git] / ntoskrnl / include / internal / i386 / ke.h
1 #pragma once
2
3 #ifndef __ASM__
4
5 #include "intrin_i.h"
6
//
// Thread Dispatcher Header DebugActive Mask
//
// DR_MASK turns a debug-register index into its bit within the dispatcher
// header's DebugActive byte. DR_REG_MASK (0x4F = bits 0-3 plus bit 6) is the
// set of bits corresponding to real debug registers -- presumably DR0-DR3
// plus one extra state bit; TODO confirm against the DebugActive field layout.
//
#define DR_MASK(x) (1 << (x))
#define DR_REG_MASK 0x4F

//
// INT3 is 1 byte long
//
// The kernel debugger plants 0xCC (the one-byte INT3 opcode) as its software
// breakpoint, so the saved original instruction is a single UCHAR.
//
#define KD_BREAKPOINT_TYPE UCHAR
#define KD_BREAKPOINT_SIZE sizeof(UCHAR)
#define KD_BREAKPOINT_VALUE 0xCC
19
//
// Macros for getting and setting special purpose registers in portable code
//
// On x86 the program counter is EIP and the return-value register is EAX;
// these wrappers let machine-independent code manipulate a CONTEXT or a
// KTRAP_FRAME without naming the registers directly.
//
#define KeGetContextPc(Context) \
    ((Context)->Eip)

#define KeSetContextPc(Context, ProgramCounter) \
    ((Context)->Eip = (ProgramCounter))

#define KeGetTrapFramePc(TrapFrame) \
    ((TrapFrame)->Eip)

/* NOTE(review): Edx appears to carry the previous (linked) trap frame
   pointer at system-call entry -- confirm against the syscall entry stubs */
#define KiGetLinkedTrapFrame(x) \
    (PKTRAP_FRAME)((x)->Edx)

#define KeGetContextReturnRegister(Context) \
    ((Context)->Eax)

#define KeSetContextReturnRegister(Context, ReturnValue) \
    ((Context)->Eax = (ReturnValue))

//
// Macro to get trap and exception frame from a thread stack
//
// The trap frame sits just below the FPU save area at the top of the
// thread's initial kernel stack. x86 has no separate exception frame,
// hence KeGetExceptionFrame is simply NULL.
//
#define KeGetTrapFrame(Thread) \
    (PKTRAP_FRAME)((ULONG_PTR)((Thread)->InitialStack) - \
                   sizeof(KTRAP_FRAME) - \
                   sizeof(FX_SAVE_AREA))

#define KeGetExceptionFrame(Thread) \
    NULL
51
//
// Macro to get context switches from the PRCB
// All architectures but x86 have it in the PRCB's KeContextSwitches
//
// On x86 the counter lives in the KIPCR that embeds the PRCB, so recover
// the containing PCR from the PRCB pointer and read it from there.
//
#define KeGetContextSwitches(Prcb) \
    CONTAINING_RECORD(Prcb, KIPCR, PrcbData)->ContextSwitches

//
// Macro to get the second level cache size field name which differs between
// CISC and RISC architectures, as the former has unified I/D cache
//
#define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelCacheSize

//
// Returns the Interrupt State from a Trap Frame.
// ON = TRUE, OFF = FALSE
//
#define KeGetTrapFrameInterruptState(TrapFrame) \
        BooleanFlagOn((TrapFrame)->EFlags, EFLAGS_INTERRUPT_MASK)

//
// Flags for exiting a trap
//
// Each KTE_*_BIT is the Bits byte of a KTRAP_EXIT_SKIP_BITS union with
// exactly one skip flag set; trap-exit code can OR them together to select
// which pieces of state the exit path may skip restoring.
//
#define KTE_SKIP_PM_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipPreviousMode = TRUE } }).Bits)
#define KTE_SKIP_SEG_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipSegments = TRUE } }).Bits)
#define KTE_SKIP_VOL_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipVolatiles = TRUE } }).Bits)

/* Bit-field view of the trap-exit skip flags, overlaid on a single byte */
typedef union _KTRAP_EXIT_SKIP_BITS
{
    struct
    {
        UCHAR SkipPreviousMode:1;   /* Skip restoring the previous mode */
        UCHAR SkipSegments:1;       /* Skip restoring segment registers */
        UCHAR SkipVolatiles:1;      /* Skip restoring volatile registers */
        UCHAR Reserved:5;
    };
    UCHAR Bits;                     /* All flags viewed as one byte */
} KTRAP_EXIT_SKIP_BITS, *PKTRAP_EXIT_SKIP_BITS;
90
91
//
// Flags used by the VDM/V8086 emulation engine for determining instruction prefixes
//
// The lowest flag is 0x100 on purpose: the low byte of the flags word is
// kept free so the instruction length can be OR'ed in alongside the prefix
// flags (see the VDM helper commentary further down in this header).
//
#define PFX_FLAG_ES 0x00000100
#define PFX_FLAG_CS 0x00000200
#define PFX_FLAG_SS 0x00000400
#define PFX_FLAG_DS 0x00000800
#define PFX_FLAG_FS 0x00001000
#define PFX_FLAG_GS 0x00002000
#define PFX_FLAG_OPER32 0x00004000
#define PFX_FLAG_ADDR32 0x00008000
#define PFX_FLAG_LOCK 0x00010000
#define PFX_FLAG_REPNE 0x00020000
#define PFX_FLAG_REP 0x00040000
106
//
// VDM Helper Macros
//
// All VDM/V8086 opcode emulators have the same FASTCALL function definition.
// We need to keep 2 parameters while the original ASM implementation uses 4:
// TrapFrame, PrefixFlags, Eip, InstructionSize;
//
// We pass the trap frame, and prefix flags, in our two parameters.
//
// We then realize that since the smallest prefix flag is 0x100, this gives us
// a count of up to 0xFF. So we OR in the instruction size with the prefix flags
//
// We further realize that we always have access to EIP from the trap frame, and
// that if we want the *current instruction* EIP, we simply have to add the
// instruction size *MINUS ONE*, and that gives us the EIP we should be looking
// at now, so we don't need to use the stack to push this parameter.
//
// We actually only care about the *current instruction* EIP in one location,
// so although it may be slightly more expensive to re-calculate the EIP one
// more time, this way we don't redefine ALL opcode handlers to have 3 parameters,
// which would be forcing stack usage in all other scenarios.
//
/* NOTE(review): KiVdmSetVdmEFlags expands with a trailing semicolon while
   KiVdmClearVdmEFlags does not -- inconsistent, and the extra ';' would
   break use inside an unbraced if/else. Fixing it requires auditing the
   call sites for reliance on the macro-supplied semicolon. */
#define KiVdmSetVdmEFlags(x) InterlockedOr((PLONG)KiNtVdmState, (x));
#define KiVdmClearVdmEFlags(x) InterlockedAnd((PLONG)KiNtVdmState, ~(x))
/* Dispatch to the emulation handler for opcode x, forwarding the flags */
#define KiCallVdmHandler(x) KiVdmOpcode##x(TrapFrame, Flags)
/* Handle a prefix byte by re-entering the prefix handler with flag x merged */
#define KiCallVdmPrefixHandler(x) KiVdmOpcodePrefix(TrapFrame, Flags | x)
/* Stamps out a stub handler for an opcode that is not emulated yet */
#define KiVdmUnhandledOpcode(x) \
    BOOLEAN \
    FASTCALL \
    KiVdmOpcode##x(IN PKTRAP_FRAME TrapFrame, \
                   IN ULONG Flags) \
    { \
        /* Not yet handled */ \
        UNIMPLEMENTED_DBGBREAK(); \
        return FALSE; \
    }

/* The NPX frame length assumed by assembly must match the C structure */
C_ASSERT(NPX_FRAME_LENGTH == sizeof(FX_SAVE_AREA));
145
//
// Local parameters
//
// Per-thread values stashed while executing in V86 mode -- presumably saved
// by the V86 entry path and restored on exit; TODO confirm against the
// Ki386SetupAndExitToV86Mode implementation.
//
typedef struct _KV86_FRAME
{
    PVOID ThreadStack;  /* Thread's kernel stack */
    PVOID ThreadTeb;    /* Thread's TEB pointer */
    PVOID PcrTeb;       /* TEB pointer held in the PCR */
} KV86_FRAME, *PKV86_FRAME;

//
// Virtual Stack Frame
//
// Layout of the stack frame built for entering V86 mode: the trap frame,
// then the FPU (NPX) save area, then the saved per-thread values above.
//
typedef struct _KV8086_STACK_FRAME
{
    KTRAP_FRAME TrapFrame;
    FX_SAVE_AREA NpxArea;
    KV86_FRAME V86Frame;
} KV8086_STACK_FRAME, *PKV8086_STACK_FRAME;
165
166 /* Diable interrupts and return whether they were enabled before */
167 FORCEINLINE
168 BOOLEAN
169 KeDisableInterrupts(VOID)
170 {
171 ULONG Flags;
172 BOOLEAN Return;
173
174 /* Get EFLAGS and check if the interrupt bit is set */
175 Flags = __readeflags();
176 Return = (Flags & EFLAGS_INTERRUPT_MASK) ? TRUE: FALSE;
177
178 /* Disable interrupts */
179 _disable();
180 return Return;
181 }
182
183 /* Restore previous interrupt state */
184 FORCEINLINE
185 VOID
186 KeRestoreInterrupts(BOOLEAN WereEnabled)
187 {
188 if (WereEnabled) _enable();
189 }
190
//
// Registers an interrupt handler with an IDT vector
//
FORCEINLINE
VOID
KeRegisterInterruptHandler(IN ULONG Vector,
                           IN PVOID Handler)
{
    UCHAR Entry;
    ULONG_PTR Address;
    PKIPCR Pcr = (PKIPCR)KeGetPcr();

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);
    Address = PtrToUlong(Handler);

    //
    // Now set the data
    //
    // An i386 interrupt gate stores the handler address as two 16-bit
    // halves: write the high word (ExtendedOffset) and the low word
    // (Offset) of this CPU's IDT slot. All other gate fields are left
    // untouched.
    //
    Pcr->IDT[Entry].ExtendedOffset = (USHORT)(Address >> 16);
    Pcr->IDT[Entry].Offset = (USHORT)Address;
}
215
216 //
217 // Returns the registered interrupt handler for a given IDT vector
218 //
219 FORCEINLINE
220 PVOID
221 KeQueryInterruptHandler(IN ULONG Vector)
222 {
223 PKIPCR Pcr = (PKIPCR)KeGetPcr();
224 UCHAR Entry;
225
226 //
227 // Get the entry from the HAL
228 //
229 Entry = HalVectorToIDTEntry(Vector);
230
231 //
232 // Read the entry from the IDT
233 //
234 return (PVOID)(((Pcr->IDT[Entry].ExtendedOffset << 16) & 0xFFFF0000) |
235 (Pcr->IDT[Entry].Offset & 0xFFFF));
236 }
237
//
// Invalidates the TLB entry for a specified address
//
FORCEINLINE
VOID
KeInvalidateTlbEntry(IN PVOID Address)
{
    /* Invalidate the TLB entry for this address (INVLPG intrinsic) */
    __invlpg(Address);
}
248
FORCEINLINE
VOID
KeFlushProcessTb(VOID)
{
    /* Flush the TLB by resetting CR3.
       NOTE(review): a CR3 reload does not flush entries marked global when
       CR4.PGE is enabled -- confirm callers don't rely on global pages
       being flushed here. */
    __writecr3(__readcr3());
}
256
FORCEINLINE
PRKTHREAD
KeGetCurrentThread(VOID)
{
    /* Return the current thread from this processor's PRCB (embedded in the PCR) */
    return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
}
264
//
// Thread rundown: releases per-processor FPU (NPX) ownership held by a
// thread that is going away. On SMP builds this is handled elsewhere.
//
FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
#ifndef CONFIG_SMP
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it and reset the FPU (Ke386FnInit presumably issues
           FNINIT -- confirm) so no stale state lingers */
        KeGetCurrentPrcb()->NpxThread = NULL;
        Ke386FnInit();
    }
#else
    /* Nothing to do */
#endif
}
281
282 VOID
283 FASTCALL
284 Ki386InitializeTss(
285 IN PKTSS Tss,
286 IN PKIDTENTRY Idt,
287 IN PKGDTENTRY Gdt
288 );
289
290 VOID
291 NTAPI
292 KiSetCR0Bits(VOID);
293
294 VOID
295 NTAPI
296 KiGetCacheInformation(VOID);
297
298 BOOLEAN
299 NTAPI
300 KiIsNpxPresent(
301 VOID
302 );
303
304 BOOLEAN
305 NTAPI
306 KiIsNpxErrataPresent(
307 VOID
308 );
309
310 VOID
311 NTAPI
312 KiSetProcessorType(VOID);
313
314 ULONG
315 NTAPI
316 KiGetFeatureBits(VOID);
317
318 VOID
319 NTAPI
320 KiThreadStartup(VOID);
321
322 NTSTATUS
323 NTAPI
324 Ke386GetGdtEntryThread(
325 IN PKTHREAD Thread,
326 IN ULONG Offset,
327 IN PKGDTENTRY Descriptor
328 );
329
330 VOID
331 NTAPI
332 KiFlushNPXState(
333 IN FLOATING_SAVE_AREA *SaveArea
334 );
335
336 VOID
337 NTAPI
338 Ki386AdjustEsp0(
339 IN PKTRAP_FRAME TrapFrame
340 );
341
342 VOID
343 NTAPI
344 Ki386SetupAndExitToV86Mode(
345 OUT PTEB VdmTeb
346 );
347
348 VOID
349 NTAPI
350 KeI386VdmInitialize(
351 VOID
352 );
353
354 ULONG_PTR
355 NTAPI
356 Ki386EnableGlobalPage(
357 IN volatile ULONG_PTR Context
358 );
359
360 VOID
361 NTAPI
362 KiI386PentiumLockErrataFixup(
363 VOID
364 );
365
366 VOID
367 NTAPI
368 KiInitializePAT(
369 VOID
370 );
371
372 VOID
373 NTAPI
374 KiInitializeMTRR(
375 IN BOOLEAN FinalCpu
376 );
377
378 VOID
379 NTAPI
380 KiAmdK6InitializeMTRR(
381 VOID
382 );
383
384 VOID
385 NTAPI
386 KiRestoreFastSyscallReturnState(
387 VOID
388 );
389
390 ULONG_PTR
391 NTAPI
392 Ki386EnableDE(
393 IN ULONG_PTR Context
394 );
395
396 ULONG_PTR
397 NTAPI
398 Ki386EnableFxsr(
399 IN ULONG_PTR Context
400 );
401
402 ULONG_PTR
403 NTAPI
404 Ki386EnableXMMIExceptions(
405 IN ULONG_PTR Context
406 );
407
408 BOOLEAN
409 NTAPI
410 VdmDispatchBop(
411 IN PKTRAP_FRAME TrapFrame
412 );
413
414 BOOLEAN
415 FASTCALL
416 KiVdmOpcodePrefix(
417 IN PKTRAP_FRAME TrapFrame,
418 IN ULONG Flags
419 );
420
421 BOOLEAN
422 FASTCALL
423 Ki386HandleOpcodeV86(
424 IN PKTRAP_FRAME TrapFrame
425 );
426
427 DECLSPEC_NORETURN
428 VOID
429 FASTCALL
430 KiEoiHelper(
431 IN PKTRAP_FRAME TrapFrame
432 );
433
434 VOID
435 FASTCALL
436 Ki386BiosCallReturnAddress(
437 IN PKTRAP_FRAME TrapFrame
438 );
439
440 ULONG_PTR
441 FASTCALL
442 KiExitV86Mode(
443 IN PKTRAP_FRAME TrapFrame
444 );
445
446 DECLSPEC_NORETURN
447 VOID
448 NTAPI
449 KiDispatchExceptionFromTrapFrame(
450 IN NTSTATUS Code,
451 IN ULONG_PTR Address,
452 IN ULONG ParameterCount,
453 IN ULONG_PTR Parameter1,
454 IN ULONG_PTR Parameter2,
455 IN ULONG_PTR Parameter3,
456 IN PKTRAP_FRAME TrapFrame
457 );
458
459 //
460 // Global x86 only Kernel data
461 //
462 extern PVOID Ki386IopmSaveArea;
463 extern ULONG KeI386EFlagsAndMaskV86;
464 extern ULONG KeI386EFlagsOrMaskV86;
465 extern BOOLEAN KeI386VirtualIntExtensions;
466 extern KIDTENTRY KiIdt[MAXIMUM_IDTVECTOR+1];
467 extern KDESCRIPTOR KiIdtDescriptor;
468 extern BOOLEAN KiI386PentiumLockErrataPresent;
469 extern ULONG KeI386NpxPresent;
470 extern ULONG KeI386XMMIPresent;
471 extern ULONG KeI386FxsrPresent;
472 extern ULONG KiMXCsrMask;
473 extern ULONG KeI386CpuType;
474 extern ULONG KeI386CpuStep;
475 extern ULONG Ke386CacheAlignment;
476 extern ULONG KiFastSystemCallDisable;
477 extern UCHAR KiDebugRegisterTrapOffsets[9];
478 extern UCHAR KiDebugRegisterContextOffsets[9];
479 extern DECLSPEC_NORETURN VOID __cdecl KiTrap02(VOID);
480 extern VOID __cdecl KiTrap08(VOID);
481 extern VOID __cdecl KiTrap13(VOID);
482 extern VOID __cdecl KiFastCallEntry(VOID);
483 extern VOID NTAPI ExpInterlockedPopEntrySListFault(VOID);
484 extern VOID NTAPI ExpInterlockedPopEntrySListResume(VOID);
485 extern VOID __cdecl CopyParams(VOID);
486 extern VOID __cdecl ReadBatch(VOID);
487 extern VOID __cdecl FrRestore(VOID);
488 extern CHAR KiSystemCallExitBranch[];
489 extern CHAR KiSystemCallExit[];
490 extern CHAR KiSystemCallExit2[];
491
492 //
493 // Trap Macros
494 //
495 #include "trap_x.h"
496
497 //
498 // Returns a thread's FPU save area
499 //
500 PFX_SAVE_AREA
501 FORCEINLINE
502 KiGetThreadNpxArea(IN PKTHREAD Thread)
503 {
504 return (PFX_SAVE_AREA)((ULONG_PTR)Thread->InitialStack - sizeof(FX_SAVE_AREA));
505 }
506
507 //
508 // Sanitizes a selector
509 //
510 FORCEINLINE
511 ULONG
512 Ke386SanitizeSeg(IN ULONG Cs,
513 IN KPROCESSOR_MODE Mode)
514 {
515 //
516 // Check if we're in kernel-mode, and force CPL 0 if so.
517 // Otherwise, force CPL 3.
518 //
519 return ((Mode == KernelMode) ?
520 (Cs & (0xFFFF & ~RPL_MASK)) :
521 (RPL_MASK | (Cs & 0xFFFF)));
522 }
523
524 //
525 // Sanitizes EFLAGS
526 //
527 FORCEINLINE
528 ULONG
529 Ke386SanitizeFlags(IN ULONG Eflags,
530 IN KPROCESSOR_MODE Mode)
531 {
532 //
533 // Check if we're in kernel-mode, and sanitize EFLAGS if so.
534 // Otherwise, also force interrupt mask on.
535 //
536 return ((Mode == KernelMode) ?
537 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
538 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
539 }
540
541 //
542 // Sanitizes a Debug Register
543 //
544 FORCEINLINE
545 PVOID
546 Ke386SanitizeDr(IN PVOID DrAddress,
547 IN KPROCESSOR_MODE Mode)
548 {
549 //
550 // Check if we're in kernel-mode, and return the address directly if so.
551 // Otherwise, make sure it's not inside the kernel-mode address space.
552 // If it is, then clear the address.
553 //
554 return ((Mode == KernelMode) ? DrAddress :
555 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
556 }
557
558 //
559 // Exception with no arguments
560 //
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException0Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with no arguments; does not return */
    KiDispatchExceptionFromTrapFrame(Code, Address, 0, 0, 0, 0, TrapFrame);
}
571
572 //
573 // Exception with one argument
574 //
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException1Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with one argument; does not return */
    KiDispatchExceptionFromTrapFrame(Code, Address, 1, P1, 0, 0, TrapFrame);
}
586
587 //
588 // Exception with two arguments
589 //
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException2Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN ULONG P2,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with two arguments; does not return */
    KiDispatchExceptionFromTrapFrame(Code, Address, 2, P1, P2, 0, TrapFrame);
}
602
603 //
604 // Performs a system call
605 //
606
607 /*
608 * This sequence does a RtlCopyMemory(Stack - StackBytes, Arguments, StackBytes)
609 * and then calls the function associated with the system call.
610 *
611 * It's done in assembly for two reasons: we need to muck with the stack,
612 * and the call itself restores the stack back for us. The only way to do
613 * this in C is to do manual C handlers for every possible number of args on
614 * the stack, and then have the handler issue a call by pointer. This is
615 * wasteful since it'll basically push the values twice and require another
616 * level of call indirection.
617 *
618 * The ARM kernel currently does this, but it should probably be changed
619 * later to function like this as well.
620 *
621 */
#ifdef __GNUC__
/*
 * Copies StackBytes bytes of Arguments onto the stack, then calls Handler
 * and returns its EAX result. See the block comment above for why this is
 * done in inline assembly rather than C.
 *
 * NOTE(review): the asm modifies operands %1 (ECX, via shrl) although it
 * is declared as a plain input; strictly this should be an in-out ("+")
 * constraint -- confirm against how GCC currently builds this file.
 */
NTSTATUS
FORCEINLINE
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    NTSTATUS Result;

    __asm__ __volatile__
    (
        "subl %1, %%esp\n"      /* Carve out StackBytes of stack space */
        "movl %%esp, %%edi\n"   /* Destination: the new stack area */
        "movl %2, %%esi\n"      /* Source: the caller's argument block */
        "shrl $2, %1\n"         /* Byte count -> dword count */
        "rep movsd\n"           /* Copy the arguments */
        "call *%3\n"            /* Invoke the system call handler */
        "movl %%eax, %0\n"      /* Capture its return value */
        : "=r"(Result)
        : "c"(StackBytes),
          "d"(Arguments),
          "r"(Handler)
        : "%esp", "%esi", "%edi"
    );
    return Result;
}
#elif defined(_MSC_VER)
/*
 * MSVC flavor of the same trampoline: copy the arguments below ESP and
 * call the handler; the call itself pops the copied arguments.
 */
NTSTATUS
FORCEINLINE
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    __asm
    {
        mov ecx, StackBytes
        mov esi, Arguments
        mov eax, Handler
        sub esp, ecx
        mov edi, esp
        shr ecx, 2
        rep movsd
        call eax
    }
    /* Return with result in EAX (MSVC permits falling off the end with
       the return value already in EAX, hence no C return statement) */
}
#else
#error Unknown Compiler
#endif
671
672
673 //
674 // Checks for pending APCs
675 //
VOID
FORCEINLINE
KiCheckForApcDelivery(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Only deliver when returning to V8086 mode or a user-mode trap */
    if ((TrapFrame->EFlags & EFLAGS_V86_MASK) || (KiUserTrap(TrapFrame)))
    {
        /* Get the thread */
        Thread = KeGetCurrentThread();
        while (TRUE)
        {
            /* Turn off the alerted state for kernel mode */
            Thread->Alerted[KernelMode] = FALSE;

            /* Are there pending user APCs? */
            if (!Thread->ApcState.UserApcPending) break;

            /* Raise to APC level and enable interrupts */
            OldIrql = KfRaiseIrql(APC_LEVEL);
            _enable();

            /* Deliver APCs */
            KiDeliverApc(UserMode, NULL, TrapFrame);

            /* Restore IRQL and disable interrupts once again;
               loop in case delivery made more APCs pending */
            KfLowerIrql(OldIrql);
            _disable();
        }
    }
}
709
710 //
711 // Converts a base thread to a GUI thread
712 //
#ifdef __GNUC__
NTSTATUS
FORCEINLINE
KiConvertToGuiThread(VOID)
{
    NTSTATUS Result;
    PVOID StackFrame;

    /*
     * Converting to a GUI thread safely updates ESP in-place as well as the
     * current Thread->TrapFrame and EBP when KeSwitchKernelStack is called.
     *
     * However, PsConvertToGuiThread "helpfully" restores EBP to the original
     * caller's value, since it is considered a nonvolatile register. As such,
     * as soon as we're back after the conversion and we try to store the result
     * which will probably be in some stack variable (EBP-based), we'll crash as
     * we are touching the de-allocated non-expanded stack.
     *
     * Thus we need a way to update our EBP before EBP is touched, and the only
     * way to guarantee this is to do the call itself in assembly, use the EAX
     * register to store the result, fixup EBP, and then let the C code continue
     * on its merry way.
     *
     */
    __asm__ __volatile__
    (
        "movl %%ebp, %1\n\t"                  /* StackFrame = EBP ... */
        "subl %%esp, %1\n\t"                  /* ... minus old ESP (frame offset) */
        "call _PsConvertToGuiThread@0\n\t"    /* May move us to a new stack */
        "addl %%esp, %1\n\t"                  /* Rebase: new ESP + saved offset */
        "movl %1, %%ebp"                      /* Fix up EBP before C code runs */
        : "=a"(Result), "=r"(StackFrame)
        :
        : "%esp", "%ecx", "%edx", "memory"
    );
    return Result;
}
#elif defined(_MSC_VER)
/* No inline-asm equivalent for MSVC; implemented out-of-line */
NTSTATUS
NTAPI
KiConvertToGuiThread(VOID);
#else
#error Unknown Compiler
#endif
757
758 //
759 // Switches from boot loader to initial kernel stack
760 //
VOID
FORCEINLINE
KiSwitchToBootStack(IN ULONG_PTR InitialStack)
{
    /* We have to switch to a new stack before continuing kernel initialization */
#ifdef __GNUC__
    __asm__
    (
        "movl %0, %%esp\n"                   /* Load the new stack pointer */
        "subl %1, %%esp\n"                   /* Reserve NPX frame + trap frame space */
        "pushl %2\n"                         /* Push initial CR0 FPU bits (EM|TS|MP) */
        "jmp _KiSystemStartupBootStack@0\n"  /* Tail-jump; never returns */
        :
        : "c"(InitialStack),
          "i"(NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH),
          "i"(CR0_EM | CR0_TS | CR0_MP)
        : "%esp"
    );
#elif defined(_MSC_VER)
    /* Same sequence in MASM syntax; prototype here since this is the only use */
    VOID NTAPI KiSystemStartupBootStack(VOID);
    __asm
    {
        mov esp, InitialStack
        sub esp, (NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH)
        push (CR0_EM | CR0_TS | CR0_MP)
        jmp KiSystemStartupBootStack
    }
#else
#error Unknown Compiler
#endif
}
792
793 //
794 // Emits the iret instruction for C code
795 //
DECLSPEC_NORETURN
VOID
FORCEINLINE
KiIret(VOID)
{
#if defined(__GNUC__)
    /* In 32-bit mode GAS assembles "iret" with 32-bit operand size,
       matching MSVC's explicit "iretd" below */
    __asm__ __volatile__
    (
        "iret\n"
    );
#elif defined(_MSC_VER)
    __asm
    {
        iretd
    }
#else
#error Unsupported compiler
#endif
    UNREACHABLE;
}
816
817 //
818 // Normally this is done by the HAL, but on x86 as an optimization, the kernel
819 // initiates the end by calling back into the HAL and exiting the trap here.
820 //
VOID
FORCEINLINE
KiEndInterrupt(IN KIRQL Irql,
               IN PKTRAP_FRAME TrapFrame)
{
    /* Disable interrupts and let the HAL finish the interrupt */
    _disable();
    HalEndSystemInterrupt(Irql, TrapFrame);

    /* Exit the trap via the common exit path (KiEoiHelper never returns) */
    KiEoiHelper(TrapFrame);
}
833
834 //
835 // PERF Code
836 //
VOID
FORCEINLINE
Ki386PerfEnd(VOID)
{
    /* Report TSC cycles elapsed since boot, plus basic event counters */
    extern ULONGLONG BootCyclesEnd, BootCycles;
    BootCyclesEnd = __rdtsc();
    DbgPrint("Boot took %I64u cycles!\n", BootCyclesEnd - BootCycles);
    DbgPrint("Interrupts: %u System Calls: %u Context Switches: %u\n",
             KeGetCurrentPrcb()->InterruptCount,
             KeGetCurrentPrcb()->KeSystemCalls,
             KeGetContextSwitches(KeGetCurrentPrcb()));
}
849
850 FORCEINLINE
851 PULONG
852 KiGetUserModeStackAddress(void)
853 {
854 return &(KeGetCurrentThread()->TrapFrame->HardwareEsp);
855 }
856
857 #endif