/* reactos/ntoskrnl/include/internal/i386/ke.h */
#pragma once

#ifndef __ASM__

#include "intrin_i.h"

//
// Thread Dispatcher Header DebugActive Mask
//
#define DR_MASK(x) (1 << (x))
#define DR_REG_MASK 0x4F

//
// INT3 is 1 byte long
//
#define KD_BREAKPOINT_TYPE UCHAR
#define KD_BREAKPOINT_SIZE sizeof(UCHAR)
#define KD_BREAKPOINT_VALUE 0xCC
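
//
// Illustrative sketch only (not part of the original header, names are
// hypothetical, and the block is #if 0'd out): because INT3 is a single byte,
// planting a debugger breakpoint amounts to saving the original byte and
// writing KD_BREAKPOINT_VALUE over it.
//
#if 0
FORCEINLINE
VOID
KdpSketchWriteBreakpoint(IN PVOID Address,
                         OUT KD_BREAKPOINT_TYPE *OldContent)
{
    /* Save the original byte, then plant the INT3 opcode */
    *OldContent = *(KD_BREAKPOINT_TYPE*)Address;
    *(KD_BREAKPOINT_TYPE*)Address = KD_BREAKPOINT_VALUE;
}
#endif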

//
// Macros for getting and setting special purpose registers in portable code
//
#define KeGetContextPc(Context) \
    ((Context)->Eip)

#define KeSetContextPc(Context, ProgramCounter) \
    ((Context)->Eip = (ProgramCounter))

#define KeGetTrapFramePc(TrapFrame) \
    ((TrapFrame)->Eip)

#define KiGetLinkedTrapFrame(x) \
    (PKTRAP_FRAME)((x)->Edx)

#define KeGetContextReturnRegister(Context) \
    ((Context)->Eax)

#define KeSetContextReturnRegister(Context, ReturnValue) \
    ((Context)->Eax = (ReturnValue))

//
// Macro to get trap and exception frame from a thread stack
//
#define KeGetTrapFrame(Thread) \
    (PKTRAP_FRAME)((ULONG_PTR)((Thread)->InitialStack) - \
                   sizeof(KTRAP_FRAME) - \
                   sizeof(FX_SAVE_AREA))
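
//
// Illustrative note (derived from KeGetTrapFrame above and KiGetThreadNpxArea
// further below): at the top of a thread's kernel stack, the FX_SAVE_AREA sits
// directly below InitialStack, and the KTRAP_FRAME directly below that:
//
//   InitialStack                                  (highest address)
//   InitialStack - sizeof(FX_SAVE_AREA)           = KiGetThreadNpxArea(Thread)
//   InitialStack - sizeof(FX_SAVE_AREA)
//                - sizeof(KTRAP_FRAME)            = KeGetTrapFrame(Thread)
//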

#define KeGetExceptionFrame(Thread) \
    NULL

//
// Macro to get context switches from the PRCB
// All architectures but x86 have it in the PRCB's KeContextSwitches
//
#define KeGetContextSwitches(Prcb) \
    CONTAINING_RECORD(Prcb, KIPCR, PrcbData)->ContextSwitches

//
// Macro to get the second level cache size field name which differs between
// CISC and RISC architectures, as the former has unified I/D cache
//
#define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelCacheSize

//
// Returns the Interrupt State from a Trap Frame.
// ON = TRUE, OFF = FALSE
//
#define KeGetTrapFrameInterruptState(TrapFrame) \
    BooleanFlagOn((TrapFrame)->EFlags, EFLAGS_INTERRUPT_MASK)

//
// Flags for exiting a trap
//
#define KTE_SKIP_PM_BIT  (((KTRAP_EXIT_SKIP_BITS) { { .SkipPreviousMode = TRUE } }).Bits)
#define KTE_SKIP_SEG_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipSegments = TRUE } }).Bits)
#define KTE_SKIP_VOL_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipVolatiles = TRUE } }).Bits)

typedef union _KTRAP_EXIT_SKIP_BITS
{
    struct
    {
        UCHAR SkipPreviousMode:1;
        UCHAR SkipSegments:1;
        UCHAR SkipVolatiles:1;
        UCHAR Reserved:5;
    };
    UCHAR Bits;
} KTRAP_EXIT_SKIP_BITS, *PKTRAP_EXIT_SKIP_BITS;
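
//
// Illustrative sketch only (hypothetical, #if 0'd out): the KTE_SKIP_* values
// above are just the union's Bits member with a single flag set, so a trap-exit
// path can combine and decode them like this.
//
#if 0
    UCHAR Skip = KTE_SKIP_PM_BIT | KTE_SKIP_SEG_BIT;
    KTRAP_EXIT_SKIP_BITS SkipBits = { .Bits = Skip };

    if (SkipBits.SkipPreviousMode) { /* don't restore the previous mode */ }
    if (SkipBits.SkipSegments)     { /* don't reload segment registers */ }
    if (SkipBits.SkipVolatiles)    { /* don't restore volatile registers */ }
#endif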


//
// Flags used by the VDM/V8086 emulation engine for determining instruction prefixes
//
#define PFX_FLAG_ES 0x00000100
#define PFX_FLAG_CS 0x00000200
#define PFX_FLAG_SS 0x00000400
#define PFX_FLAG_DS 0x00000800
#define PFX_FLAG_FS 0x00001000
#define PFX_FLAG_GS 0x00002000
#define PFX_FLAG_OPER32 0x00004000
#define PFX_FLAG_ADDR32 0x00008000
#define PFX_FLAG_LOCK 0x00010000
#define PFX_FLAG_REPNE 0x00020000
#define PFX_FLAG_REP 0x00040000

//
// VDM Helper Macros
//
// All VDM/V8086 opcode emulators have the same FASTCALL function definition.
// We need to keep 2 parameters while the original ASM implementation uses 4:
// TrapFrame, PrefixFlags, Eip, InstructionSize;
//
// We pass the trap frame, and prefix flags, in our two parameters.
//
// We then realize that since the smallest prefix flag is 0x100, this gives us
// a count of up to 0xFF. So we OR in the instruction size with the prefix flags
//
// We further realize that we always have access to EIP from the trap frame, and
// that if we want the *current instruction* EIP, we simply have to add the
// instruction size *MINUS ONE*, and that gives us the EIP we should be looking
// at now, so we don't need to use the stack to push this parameter.
//
// We actually only care about the *current instruction* EIP in one location,
// so although it may be slightly more expensive to re-calculate the EIP one
// more time, this way we don't redefine ALL opcode handlers to have 3 parameters,
// which would be forcing stack usage in all other scenarios.
//
#define KiVdmSetVdmEFlags(x) InterlockedOr((PLONG)KiNtVdmState, (x));
#define KiVdmClearVdmEFlags(x) InterlockedAnd((PLONG)KiNtVdmState, ~(x))
#define KiCallVdmHandler(x) KiVdmOpcode##x(TrapFrame, Flags)
#define KiCallVdmPrefixHandler(x) KiVdmOpcodePrefix(TrapFrame, Flags | x)
#define KiVdmUnhandledOpcode(x)                \
    BOOLEAN                                    \
    FASTCALL                                   \
    KiVdmOpcode##x(IN PKTRAP_FRAME TrapFrame,  \
                   IN ULONG Flags)             \
    {                                          \
        /* Not yet handled */                  \
        UNIMPLEMENTED;                         \
        while (TRUE);                          \
        return TRUE;                           \
    }
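
//
// Illustrative sketch only (hypothetical handler, #if 0'd out, following the
// description above): the low byte of Flags carries the instruction size, so a
// handler can recover both the prefix flags and the *current instruction* EIP
// without any extra parameters.
//
#if 0
BOOLEAN
FASTCALL
KiVdmOpcodeSketch(IN PKTRAP_FRAME TrapFrame,
                  IN ULONG Flags)
{
    ULONG InstructionSize = Flags & 0xFF;
    ULONG CurrentEip = TrapFrame->Eip + InstructionSize - 1;

    /* The remaining bits are the prefix flags collected so far */
    if (Flags & PFX_FLAG_OPER32)
    {
        /* A 32-bit operand size prefix was seen */
    }

    DbgPrint("Emulating opcode at %lx\n", CurrentEip);
    return TRUE;
}
#endif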

C_ASSERT(NPX_FRAME_LENGTH == sizeof(FX_SAVE_AREA));

//
// Local parameters
//
typedef struct _KV86_FRAME
{
    PVOID ThreadStack;
    PVOID ThreadTeb;
    PVOID PcrTeb;
} KV86_FRAME, *PKV86_FRAME;

//
// Virtual Stack Frame
//
typedef struct _KV8086_STACK_FRAME
{
    KTRAP_FRAME TrapFrame;
    FX_SAVE_AREA NpxArea;
    KV86_FRAME V86Frame;
} KV8086_STACK_FRAME, *PKV8086_STACK_FRAME;

//
// Registers an interrupt handler with an IDT vector
//
FORCEINLINE
VOID
KeRegisterInterruptHandler(IN ULONG Vector,
                           IN PVOID Handler)
{
    UCHAR Entry;
    ULONG_PTR Address;
    PKIPCR Pcr = (PKIPCR)KeGetPcr();

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);
    Address = PtrToUlong(Handler);

    //
    // Now set the data
    //
    Pcr->IDT[Entry].ExtendedOffset = (USHORT)(Address >> 16);
    Pcr->IDT[Entry].Offset = (USHORT)Address;
}

//
// Returns the registered interrupt handler for a given IDT vector
//
FORCEINLINE
PVOID
KeQueryInterruptHandler(IN ULONG Vector)
{
    PKIPCR Pcr = (PKIPCR)KeGetPcr();
    UCHAR Entry;

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);

    //
    // Read the entry from the IDT
    //
    return (PVOID)(((Pcr->IDT[Entry].ExtendedOffset << 16) & 0xFFFF0000) |
                    (Pcr->IDT[Entry].Offset & 0xFFFF));
}
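
//
// Illustrative sketch only (hypothetical usage, #if 0'd out): registering a
// handler splits its linear address across the low Offset and high
// ExtendedOffset words of the IDT entry, so querying the same vector
// reassembles the original pointer.
//
#if 0
    VOID NTAPI SomeInterruptStub(VOID);       /* hypothetical handler stub */
    ULONG Vector = 0x2E;                      /* hypothetical vector */
    PVOID OldHandler;

    OldHandler = KeQueryInterruptHandler(Vector);
    KeRegisterInterruptHandler(Vector, SomeInterruptStub);
    ASSERT(KeQueryInterruptHandler(Vector) == SomeInterruptStub);
    KeRegisterInterruptHandler(Vector, OldHandler);
#endif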

//
// Invalidates the TLB entry for a specified address
//
FORCEINLINE
VOID
KeInvalidateTlbEntry(IN PVOID Address)
{
    /* Invalidate the TLB entry for this address */
    __invlpg(Address);
}

FORCEINLINE
VOID
KeFlushProcessTb(VOID)
{
    /* Flush the TLB by resetting CR3 */
    __writecr3(__readcr3());
}

FORCEINLINE
PRKTHREAD
KeGetCurrentThread(VOID)
{
    /* Return the current thread */
    return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
}

FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
#ifndef CONFIG_SMP
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
        Ke386FnInit();
    }
#else
    /* Nothing to do */
#endif
}

VOID
FASTCALL
Ki386InitializeTss(
    IN PKTSS Tss,
    IN PKIDTENTRY Idt,
    IN PKGDTENTRY Gdt
);

VOID
NTAPI
KiSetCR0Bits(VOID);

VOID
NTAPI
KiGetCacheInformation(VOID);

BOOLEAN
NTAPI
KiIsNpxPresent(
    VOID
);

BOOLEAN
NTAPI
KiIsNpxErrataPresent(
    VOID
);

VOID
NTAPI
KiSetProcessorType(VOID);

ULONG
NTAPI
KiGetFeatureBits(VOID);

VOID
NTAPI
KiThreadStartup(VOID);

NTSTATUS
NTAPI
Ke386GetGdtEntryThread(
    IN PKTHREAD Thread,
    IN ULONG Offset,
    IN PKGDTENTRY Descriptor
);

VOID
NTAPI
KiFlushNPXState(
    IN FLOATING_SAVE_AREA *SaveArea
);

VOID
NTAPI
Ki386AdjustEsp0(
    IN PKTRAP_FRAME TrapFrame
);

VOID
NTAPI
Ki386SetupAndExitToV86Mode(
    OUT PTEB VdmTeb
);

VOID
NTAPI
KeI386VdmInitialize(
    VOID
);

ULONG_PTR
NTAPI
Ki386EnableGlobalPage(
    IN volatile ULONG_PTR Context
);

VOID
NTAPI
KiI386PentiumLockErrataFixup(
    VOID
);

VOID
NTAPI
KiInitializePAT(
    VOID
);

VOID
NTAPI
KiInitializeMTRR(
    IN BOOLEAN FinalCpu
);

VOID
NTAPI
KiAmdK6InitializeMTRR(
    VOID
);

VOID
NTAPI
KiRestoreFastSyscallReturnState(
    VOID
);

ULONG_PTR
NTAPI
Ki386EnableDE(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableFxsr(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableXMMIExceptions(
    IN ULONG_PTR Context
);

BOOLEAN
NTAPI
VdmDispatchBop(
    IN PKTRAP_FRAME TrapFrame
);

BOOLEAN
FASTCALL
KiVdmOpcodePrefix(
    IN PKTRAP_FRAME TrapFrame,
    IN ULONG Flags
);

BOOLEAN
FASTCALL
Ki386HandleOpcodeV86(
    IN PKTRAP_FRAME TrapFrame
);

DECLSPEC_NORETURN
VOID
FASTCALL
KiEoiHelper(
    IN PKTRAP_FRAME TrapFrame
);

VOID
FASTCALL
Ki386BiosCallReturnAddress(
    IN PKTRAP_FRAME TrapFrame
);

ULONG_PTR
FASTCALL
KiExitV86Mode(
    IN PKTRAP_FRAME TrapFrame
);

DECLSPEC_NORETURN
VOID
NTAPI
KiDispatchExceptionFromTrapFrame(
    IN NTSTATUS Code,
    IN ULONG_PTR Address,
    IN ULONG ParameterCount,
    IN ULONG_PTR Parameter1,
    IN ULONG_PTR Parameter2,
    IN ULONG_PTR Parameter3,
    IN PKTRAP_FRAME TrapFrame
);

//
// Global x86 only Kernel data
//
extern PVOID Ki386IopmSaveArea;
extern ULONG KeI386EFlagsAndMaskV86;
extern ULONG KeI386EFlagsOrMaskV86;
extern BOOLEAN KeI386VirtualIntExtensions;
extern KIDTENTRY KiIdt[MAXIMUM_IDTVECTOR];
extern KDESCRIPTOR KiIdtDescriptor;
extern BOOLEAN KiI386PentiumLockErrataPresent;
extern ULONG KeI386NpxPresent;
extern ULONG KeI386XMMIPresent;
extern ULONG KeI386FxsrPresent;
extern ULONG KiMXCsrMask;
extern ULONG KeI386CpuType;
extern ULONG KeI386CpuStep;
extern ULONG Ke386CacheAlignment;
extern ULONG KiFastSystemCallDisable;
extern UCHAR KiDebugRegisterTrapOffsets[9];
extern UCHAR KiDebugRegisterContextOffsets[9];
extern DECLSPEC_NORETURN VOID __cdecl KiTrap02(VOID);
extern VOID __cdecl KiTrap08(VOID);
extern VOID __cdecl KiTrap13(VOID);
extern VOID __cdecl KiFastCallEntry(VOID);
extern VOID NTAPI ExpInterlockedPopEntrySListFault(VOID);
extern VOID NTAPI ExpInterlockedPopEntrySListResume(VOID);
extern VOID __cdecl CopyParams(VOID);
extern VOID __cdecl ReadBatch(VOID);
extern VOID __cdecl FrRestore(VOID);
extern CHAR KiSystemCallExitBranch[];
extern CHAR KiSystemCallExit[];
extern CHAR KiSystemCallExit2[];

//
// Trap Macros
//
#include "../trap_x.h"

//
// Returns a thread's FPU save area
//
PFX_SAVE_AREA
FORCEINLINE
KiGetThreadNpxArea(IN PKTHREAD Thread)
{
    return (PFX_SAVE_AREA)((ULONG_PTR)Thread->InitialStack - sizeof(FX_SAVE_AREA));
}

//
// Sanitizes a selector
//
FORCEINLINE
ULONG
Ke386SanitizeSeg(IN ULONG Cs,
                 IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and force CPL 0 if so.
    // Otherwise, force CPL 3.
    //
    return ((Mode == KernelMode) ?
            (Cs & (0xFFFF & ~RPL_MASK)) :
            (RPL_MASK | (Cs & 0xFFFF)));
}
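
//
// Illustrative worked values only (derived from the expression above, assuming
// RPL_MASK is 3, #if 0'd out): a user-mode selector of 0x1B keeps its RPL of 3,
// while the same value sanitized for kernel mode has the RPL bits cleared.
//
#if 0
    ASSERT(Ke386SanitizeSeg(0x1B, UserMode)   == 0x1B); /* RPL forced to 3 */
    ASSERT(Ke386SanitizeSeg(0x1B, KernelMode) == 0x18); /* RPL cleared to 0 */
#endif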

//
// Sanitizes EFLAGS
//
FORCEINLINE
ULONG
Ke386SanitizeFlags(IN ULONG Eflags,
                   IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and sanitize EFLAGS if so.
    // Otherwise, also force interrupt mask on.
    //
    return ((Mode == KernelMode) ?
            (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
            (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
}

//
// Gets a DR register from a CONTEXT structure
//
FORCEINLINE
PVOID
KiDrFromContext(IN ULONG Dr,
                IN PCONTEXT Context)
{
    return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
}

//
// Gets a DR register from a KTRAP_FRAME structure
//
FORCEINLINE
PVOID*
KiDrFromTrapFrame(IN ULONG Dr,
                  IN PKTRAP_FRAME TrapFrame)
{
    return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
}

//
// Sanitizes a Debug Register
//
FORCEINLINE
PVOID
Ke386SanitizeDr(IN PVOID DrAddress,
                IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and return the address directly if so.
    // Otherwise, make sure it's not inside the kernel-mode address space.
    // If it is, then clear the address.
    //
    return ((Mode == KernelMode) ? DrAddress :
            (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
}

//
// Exception with no arguments
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException0Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with no arguments */
    KiDispatchExceptionFromTrapFrame(Code, Address, 0, 0, 0, 0, TrapFrame);
}

//
// Exception with one argument
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException1Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with one argument */
    KiDispatchExceptionFromTrapFrame(Code, Address, 1, P1, 0, 0, TrapFrame);
}

//
// Exception with two arguments
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException2Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN ULONG P2,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with two arguments */
    KiDispatchExceptionFromTrapFrame(Code, Address, 2, P1, P2, 0, TrapFrame);
}

//
// Performs a system call
//

/*
 * This sequence does a RtlCopyMemory(Stack - StackBytes, Arguments, StackBytes)
 * and then calls the function associated with the system call.
 *
 * It's done in assembly for two reasons: we need to muck with the stack,
 * and the call itself restores the stack back for us. The only way to do
 * this in C is to do manual C handlers for every possible number of args on
 * the stack, and then have the handler issue a call by pointer. This is
 * wasteful since it'll basically push the values twice and require another
 * level of call indirection.
 *
 * The ARM kernel currently does this, but it should probably be changed
 * later to function like this as well.
 *
 */
#ifdef __GNUC__
NTSTATUS
FORCEINLINE
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    NTSTATUS Result;

    __asm__ __volatile__
    (
        "subl %1, %%esp\n"
        "movl %%esp, %%edi\n"
        "movl %2, %%esi\n"
        "shrl $2, %1\n"
        "rep movsd\n"
        "call *%3\n"
        "movl %%eax, %0\n"
        : "=r"(Result)
        : "c"(StackBytes),
          "d"(Arguments),
          "r"(Handler)
        : "%esp", "%esi", "%edi"
    );
    return Result;
}
#elif defined(_MSC_VER)
NTSTATUS
FORCEINLINE
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    __asm
    {
        mov ecx, StackBytes
        mov esi, Arguments
        mov eax, Handler
        sub esp, ecx
        mov edi, esp
        shr ecx, 2
        rep movsd
        call eax
    }
    /* Return with result in EAX */
}
#else
#error Unknown Compiler
#endif
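
//
// Illustrative sketch only (hypothetical, #if 0'd out): the "manual C handler"
// alternative described in the comment above, shown for a two-argument
// service. The compiler re-pushes the arguments when making the call, which is
// exactly the duplicated work and extra indirection the assembly trampoline
// avoids.
//
#if 0
NTSTATUS
KiSketchCallService2(IN PVOID Handler,
                     IN PULONG Arguments)
{
    typedef NTSTATUS (NTAPI *PSERVICE_2_ARGS)(ULONG, ULONG);

    /* Read the captured arguments and push them again as call parameters */
    return ((PSERVICE_2_ARGS)Handler)(Arguments[0], Arguments[1]);
}
#endif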


//
// Checks for pending APCs
//
VOID
FORCEINLINE
KiCheckForApcDelivery(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Check for V8086 or user-mode trap */
    if ((TrapFrame->EFlags & EFLAGS_V86_MASK) || (KiUserTrap(TrapFrame)))
    {
        /* Get the thread */
        Thread = KeGetCurrentThread();
        while (TRUE)
        {
            /* Turn off the alerted state for kernel mode */
            Thread->Alerted[KernelMode] = FALSE;

            /* Are there pending user APCs? */
            if (!Thread->ApcState.UserApcPending) break;

            /* Raise to APC level and enable interrupts */
            OldIrql = KfRaiseIrql(APC_LEVEL);
            _enable();

            /* Deliver APCs */
            KiDeliverApc(UserMode, NULL, TrapFrame);

            /* Restore IRQL and disable interrupts once again */
            KfLowerIrql(OldIrql);
            _disable();
        }
    }
}

//
// Converts a base thread to a GUI thread
//
#ifdef __GNUC__
NTSTATUS
FORCEINLINE
KiConvertToGuiThread(VOID)
{
    NTSTATUS Result;
    PVOID StackFrame;

    /*
     * Converting to a GUI thread safely updates ESP in-place as well as the
     * current Thread->TrapFrame and EBP when KeSwitchKernelStack is called.
     *
     * However, PsConvertToGuiThread "helpfully" restores EBP to the original
     * caller's value, since it is considered a nonvolatile register. As such,
     * as soon as we're back after the conversion and we try to store the result
     * which will probably be in some stack variable (EBP-based), we'll crash as
     * we are touching the de-allocated non-expanded stack.
     *
     * Thus we need a way to update our EBP before EBP is touched, and the only
     * way to guarantee this is to do the call itself in assembly, use the EAX
     * register to store the result, fixup EBP, and then let the C code continue
     * on its merry way.
     *
     */
    __asm__ __volatile__
    (
        "movl %%ebp, %1\n"
        "subl %%esp, %1\n"
        "call _PsConvertToGuiThread@0\n"
        "addl %%esp, %1\n"
        "movl %1, %%ebp\n"
        "movl %%eax, %0\n"
        : "=r"(Result), "=r"(StackFrame)
        :
        : "%esp", "%ecx", "%edx", "memory"
    );
    return Result;
}
#elif defined(_MSC_VER)
NTSTATUS
NTAPI
KiConvertToGuiThread(VOID);
#else
#error Unknown Compiler
#endif

//
// Switches from boot loader to initial kernel stack
//
VOID
FORCEINLINE
KiSwitchToBootStack(IN ULONG_PTR InitialStack)
{
    /* We have to switch to a new stack before continuing kernel initialization */
#ifdef __GNUC__
    __asm__
    (
        "movl %0, %%esp\n"
        "subl %1, %%esp\n"
        "pushl %2\n"
        "jmp _KiSystemStartupBootStack@0\n"
        :
        : "c"(InitialStack),
          "i"(NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH),
          "i"(CR0_EM | CR0_TS | CR0_MP)
        : "%esp"
    );
#elif defined(_MSC_VER)
    VOID NTAPI KiSystemStartupBootStack(VOID);
    __asm
    {
        mov esp, InitialStack
        sub esp, (NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH)
        push (CR0_EM | CR0_TS | CR0_MP)
        jmp KiSystemStartupBootStack
    }
#else
#error Unknown Compiler
#endif
}

//
// Emits the iret instruction for C code
//
DECLSPEC_NORETURN
VOID
FORCEINLINE
KiIret(VOID)
{
#if defined(__GNUC__)
    __asm__ __volatile__
    (
        "iret\n"
    );
#elif defined(_MSC_VER)
    __asm
    {
        iretd
    }
#else
#error Unsupported compiler
#endif
    UNREACHABLE;
}

//
// Normally this is done by the HAL, but on x86 as an optimization, the kernel
// initiates the end by calling back into the HAL and exiting the trap here.
//
VOID
FORCEINLINE
KiEndInterrupt(IN KIRQL Irql,
               IN PKTRAP_FRAME TrapFrame)
{
    /* Disable interrupts and end the interrupt */
    _disable();
    HalEndSystemInterrupt(Irql, TrapFrame);

    /* Exit the interrupt */
    KiEoiHelper(TrapFrame);
}

//
// PERF Code
//
VOID
FORCEINLINE
Ki386PerfEnd(VOID)
{
    extern ULONGLONG BootCyclesEnd, BootCycles;
    BootCyclesEnd = __rdtsc();
    DbgPrint("Boot took %I64d cycles!\n", BootCyclesEnd - BootCycles);
    DbgPrint("Interrupts: %d System Calls: %d Context Switches: %d\n",
             KeGetCurrentPrcb()->InterruptCount,
             KeGetCurrentPrcb()->KeSystemCalls,
             KeGetContextSwitches(KeGetCurrentPrcb()));
}

FORCEINLINE
PULONG
KiGetUserModeStackAddress(void)
{
    return &(KeGetCurrentThread()->TrapFrame->HardwareEsp);
}

#endif