Sync with trunk r58113.
[reactos.git] / ntoskrnl / include / internal / i386 / ke.h
1 #pragma once
2
3 #ifndef __ASM__
4
5 #include "intrin_i.h"
6
7 //
8 // Thread Dispatcher Header DebugActive Mask
9 //
10 #define DR_MASK(x) (1 << (x))
11 #define DR_REG_MASK 0x4F
12
13 //
14 // INT3 is 1 byte long
15 //
16 #define KD_BREAKPOINT_TYPE UCHAR
17 #define KD_BREAKPOINT_SIZE sizeof(UCHAR)
18 #define KD_BREAKPOINT_VALUE 0xCC
19
20 //
21 // Macros for getting and setting special purpose registers in portable code
22 //
23 #define KeGetContextPc(Context) \
24 ((Context)->Eip)
25
26 #define KeSetContextPc(Context, ProgramCounter) \
27 ((Context)->Eip = (ProgramCounter))
28
29 #define KeGetTrapFramePc(TrapFrame) \
30 ((TrapFrame)->Eip)
31
32 #define KiGetLinkedTrapFrame(x) \
33 (PKTRAP_FRAME)((x)->Edx)
34
35 #define KeGetContextReturnRegister(Context) \
36 ((Context)->Eax)
37
38 #define KeSetContextReturnRegister(Context, ReturnValue) \
39 ((Context)->Eax = (ReturnValue))
40
41 //
42 // Macro to get trap and exception frame from a thread stack
43 //
44 #define KeGetTrapFrame(Thread) \
45 (PKTRAP_FRAME)((ULONG_PTR)((Thread)->InitialStack) - \
46 sizeof(KTRAP_FRAME) - \
47 sizeof(FX_SAVE_AREA))
48
49 #define KeGetExceptionFrame(Thread) \
50 NULL
51
52 //
53 // Macro to get context switches from the PRCB
54 // All architectures but x86 have it in the PRCB's KeContextSwitches
55 //
56 #define KeGetContextSwitches(Prcb) \
57 CONTAINING_RECORD(Prcb, KIPCR, PrcbData)->ContextSwitches
58
59 //
60 // Macro to get the second level cache size field name which differs between
61 // CISC and RISC architectures, as the former has unified I/D cache
62 //
63 #define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelCacheSize
64
65 //
66 // Returns the Interrupt State from a Trap Frame.
67 // ON = TRUE, OFF = FALSE
68 //
69 #define KeGetTrapFrameInterruptState(TrapFrame) \
70 BooleanFlagOn((TrapFrame)->EFlags, EFLAGS_INTERRUPT_MASK)
71
72 //
73 // Flags for exiting a trap
74 //
75 #define KTE_SKIP_PM_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipPreviousMode = TRUE } }).Bits)
76 #define KTE_SKIP_SEG_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipSegments = TRUE } }).Bits)
77 #define KTE_SKIP_VOL_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipVolatiles = TRUE } }).Bits)
78
typedef union _KTRAP_EXIT_SKIP_BITS
{
    struct
    {
        /* Skip restoring the previous mode on trap exit */
        UCHAR SkipPreviousMode:1;
        /* Skip reloading the segment registers on trap exit */
        UCHAR SkipSegments:1;
        /* Skip restoring the volatile registers on trap exit */
        UCHAR SkipVolatiles:1;
        UCHAR Reserved:5;
    };
    /* All flags viewed as a single byte (see the KTE_SKIP_*_BIT macros) */
    UCHAR Bits;
} KTRAP_EXIT_SKIP_BITS, *PKTRAP_EXIT_SKIP_BITS;
90
91
92 //
93 // Flags used by the VDM/V8086 emulation engine for determining instruction prefixes
94 //
95 #define PFX_FLAG_ES 0x00000100
96 #define PFX_FLAG_CS 0x00000200
97 #define PFX_FLAG_SS 0x00000400
98 #define PFX_FLAG_DS 0x00000800
99 #define PFX_FLAG_FS 0x00001000
100 #define PFX_FLAG_GS 0x00002000
101 #define PFX_FLAG_OPER32 0x00004000
102 #define PFX_FLAG_ADDR32 0x00008000
103 #define PFX_FLAG_LOCK 0x00010000
104 #define PFX_FLAG_REPNE 0x00020000
105 #define PFX_FLAG_REP 0x00040000
106
107 //
108 // VDM Helper Macros
109 //
110 // All VDM/V8086 opcode emulators have the same FASTCALL function definition.
111 // We need to keep 2 parameters while the original ASM implementation uses 4:
112 // TrapFrame, PrefixFlags, Eip, InstructionSize;
113 //
114 // We pass the trap frame, and prefix flags, in our two parameters.
115 //
116 // We then realize that since the smallest prefix flag is 0x100, this gives us
117 // a count of up to 0xFF. So we OR in the instruction size with the prefix flags
118 //
119 // We further realize that we always have access to EIP from the trap frame, and
120 // that if we want the *current instruction* EIP, we simply have to add the
121 // instruction size *MINUS ONE*, and that gives us the EIP we should be looking
122 // at now, so we don't need to use the stack to push this parameter.
123 //
124 // We actually only care about the *current instruction* EIP in one location,
125 // so although it may be slightly more expensive to re-calculate the EIP one
126 // more time, this way we don't redefine ALL opcode handlers to have 3 parameters,
127 // which would be forcing stack usage in all other scenarios.
128 //
//
// Atomically set/clear VDM EFlags bits in the shared NT VDM state.
// NOTE(review): KiVdmSetVdmEFlags ends with a stray ';' while
// KiVdmClearVdmEFlags does not -- call sites written as
// `KiVdmSetVdmEFlags(x);` expand to a double semicolon, which would break an
// unbraced if/else body. Confirm all callers before normalizing.
//
#define KiVdmSetVdmEFlags(x)        InterlockedOr((PLONG)KiNtVdmState, (x));
#define KiVdmClearVdmEFlags(x)      InterlockedAnd((PLONG)KiNtVdmState, ~(x))

/* Invoke the emulator for opcode/prefix `x`, forwarding the standard
   TrapFrame/Flags parameters that every handler takes (see comment above) */
#define KiCallVdmHandler(x)         KiVdmOpcode##x(TrapFrame, Flags)
#define KiCallVdmPrefixHandler(x)   KiVdmOpcodePrefix(TrapFrame, Flags | x)

//
// Expands to a stub emulator for a not-yet-implemented V86 opcode:
// complains, asserts, and reports failure to the caller.
//
#define KiVdmUnhandledOpcode(x)                     \
    BOOLEAN                                         \
    FASTCALL                                        \
    KiVdmOpcode##x(IN PKTRAP_FRAME TrapFrame,       \
                   IN ULONG Flags)                  \
    {                                               \
        /* Not yet handled */                       \
        UNIMPLEMENTED;                              \
        ASSERT(FALSE); /* while (TRUE); */          \
        return FALSE;                               \
    }
144
145 C_ASSERT(NPX_FRAME_LENGTH == sizeof(FX_SAVE_AREA));
146
//
// Local parameters saved across a V86-mode transition
// (values restored when leaving V86 mode -- see KiExitV86Mode)
//
typedef struct _KV86_FRAME
{
    PVOID ThreadStack; /* saved thread stack pointer */
    PVOID ThreadTeb;   /* saved thread TEB pointer */
    PVOID PcrTeb;      /* saved PCR TEB value -- presumably NtTib.Self; confirm against v86 code */
} KV86_FRAME, *PKV86_FRAME;
156
//
// Virtual stack frame layout used when entering V8086 mode:
// trap frame, then FPU (NPX) save area, then the saved local parameters.
//
typedef struct _KV8086_STACK_FRAME
{
    KTRAP_FRAME TrapFrame;   /* trap frame for the V86 transition */
    FX_SAVE_AREA NpxArea;    /* FPU state save area */
    KV86_FRAME V86Frame;     /* saved thread/PCR locals */
} KV8086_STACK_FRAME, *PKV8086_STACK_FRAME;
166
167 /* Diable interrupts and return whether they were enabled before */
168 FORCEINLINE
169 BOOLEAN
170 KeDisableInterrupts(VOID)
171 {
172 ULONG Flags;
173 BOOLEAN Return;
174
175 /* Get EFLAGS and check if the interrupt bit is set */
176 Flags = __readeflags();
177 Return = (Flags & EFLAGS_INTERRUPT_MASK) ? TRUE: FALSE;
178
179 /* Disable interrupts */
180 _disable();
181 return Return;
182 }
183
184 /* Restore previous interrupt state */
185 FORCEINLINE
186 VOID
187 KeRestoreInterrupts(BOOLEAN WereEnabled)
188 {
189 if (WereEnabled) _enable();
190 }
191
192 //
193 // Registers an interrupt handler with an IDT vector
194 //
195 FORCEINLINE
196 VOID
197 KeRegisterInterruptHandler(IN ULONG Vector,
198 IN PVOID Handler)
199 {
200 UCHAR Entry;
201 ULONG_PTR Address;
202 PKIPCR Pcr = (PKIPCR)KeGetPcr();
203
204 //
205 // Get the entry from the HAL
206 //
207 Entry = HalVectorToIDTEntry(Vector);
208 Address = PtrToUlong(Handler);
209
210 //
211 // Now set the data
212 //
213 Pcr->IDT[Entry].ExtendedOffset = (USHORT)(Address >> 16);
214 Pcr->IDT[Entry].Offset = (USHORT)Address;
215 }
216
217 //
218 // Returns the registered interrupt handler for a given IDT vector
219 //
220 FORCEINLINE
221 PVOID
222 KeQueryInterruptHandler(IN ULONG Vector)
223 {
224 PKIPCR Pcr = (PKIPCR)KeGetPcr();
225 UCHAR Entry;
226
227 //
228 // Get the entry from the HAL
229 //
230 Entry = HalVectorToIDTEntry(Vector);
231
232 //
233 // Read the entry from the IDT
234 //
235 return (PVOID)(((Pcr->IDT[Entry].ExtendedOffset << 16) & 0xFFFF0000) |
236 (Pcr->IDT[Entry].Offset & 0xFFFF));
237 }
238
239 //
240 // Invalidates the TLB entry for a specified address
241 //
242 FORCEINLINE
243 VOID
244 KeInvalidateTlbEntry(IN PVOID Address)
245 {
246 /* Invalidate the TLB entry for this address */
247 __invlpg(Address);
248 }
249
250 FORCEINLINE
251 VOID
252 KeFlushProcessTb(VOID)
253 {
254 /* Flush the TLB by resetting CR3 */
255 __writecr3(__readcr3());
256 }
257
258 FORCEINLINE
259 PRKTHREAD
260 KeGetCurrentThread(VOID)
261 {
262 /* Return the current thread */
263 return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
264 }
265
//
// Per-CPU cleanup when a thread is run down (terminated)
//
FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
#ifndef CONFIG_SMP
    /* Check if this thread currently owns the NPX (x87 FPU) state */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Drop ownership and reset the FPU (Ke386FnInit presumably issues
           FNINIT -- see intrin_i.h) */
        KeGetCurrentPrcb()->NpxThread = NULL;
        Ke386FnInit();
    }
#else
    /* Nothing to do */
#endif
}
282
283 VOID
284 FASTCALL
285 Ki386InitializeTss(
286 IN PKTSS Tss,
287 IN PKIDTENTRY Idt,
288 IN PKGDTENTRY Gdt
289 );
290
291 VOID
292 NTAPI
293 KiSetCR0Bits(VOID);
294
295 VOID
296 NTAPI
297 KiGetCacheInformation(VOID);
298
299 BOOLEAN
300 NTAPI
301 KiIsNpxPresent(
302 VOID
303 );
304
305 BOOLEAN
306 NTAPI
307 KiIsNpxErrataPresent(
308 VOID
309 );
310
311 VOID
312 NTAPI
313 KiSetProcessorType(VOID);
314
315 ULONG
316 NTAPI
317 KiGetFeatureBits(VOID);
318
319 VOID
320 NTAPI
321 KiThreadStartup(VOID);
322
323 NTSTATUS
324 NTAPI
325 Ke386GetGdtEntryThread(
326 IN PKTHREAD Thread,
327 IN ULONG Offset,
328 IN PKGDTENTRY Descriptor
329 );
330
331 VOID
332 NTAPI
333 KiFlushNPXState(
334 IN FLOATING_SAVE_AREA *SaveArea
335 );
336
337 VOID
338 NTAPI
339 Ki386AdjustEsp0(
340 IN PKTRAP_FRAME TrapFrame
341 );
342
343 VOID
344 NTAPI
345 Ki386SetupAndExitToV86Mode(
346 OUT PTEB VdmTeb
347 );
348
349 VOID
350 NTAPI
351 KeI386VdmInitialize(
352 VOID
353 );
354
355 ULONG_PTR
356 NTAPI
357 Ki386EnableGlobalPage(
358 IN volatile ULONG_PTR Context
359 );
360
361 VOID
362 NTAPI
363 KiI386PentiumLockErrataFixup(
364 VOID
365 );
366
367 VOID
368 NTAPI
369 KiInitializePAT(
370 VOID
371 );
372
373 VOID
374 NTAPI
375 KiInitializeMTRR(
376 IN BOOLEAN FinalCpu
377 );
378
379 VOID
380 NTAPI
381 KiAmdK6InitializeMTRR(
382 VOID
383 );
384
385 VOID
386 NTAPI
387 KiRestoreFastSyscallReturnState(
388 VOID
389 );
390
391 ULONG_PTR
392 NTAPI
393 Ki386EnableDE(
394 IN ULONG_PTR Context
395 );
396
397 ULONG_PTR
398 NTAPI
399 Ki386EnableFxsr(
400 IN ULONG_PTR Context
401 );
402
403 ULONG_PTR
404 NTAPI
405 Ki386EnableXMMIExceptions(
406 IN ULONG_PTR Context
407 );
408
409 BOOLEAN
410 NTAPI
411 VdmDispatchBop(
412 IN PKTRAP_FRAME TrapFrame
413 );
414
415 BOOLEAN
416 FASTCALL
417 KiVdmOpcodePrefix(
418 IN PKTRAP_FRAME TrapFrame,
419 IN ULONG Flags
420 );
421
422 BOOLEAN
423 FASTCALL
424 Ki386HandleOpcodeV86(
425 IN PKTRAP_FRAME TrapFrame
426 );
427
428 DECLSPEC_NORETURN
429 VOID
430 FASTCALL
431 KiEoiHelper(
432 IN PKTRAP_FRAME TrapFrame
433 );
434
435 VOID
436 FASTCALL
437 Ki386BiosCallReturnAddress(
438 IN PKTRAP_FRAME TrapFrame
439 );
440
441 ULONG_PTR
442 FASTCALL
443 KiExitV86Mode(
444 IN PKTRAP_FRAME TrapFrame
445 );
446
447 DECLSPEC_NORETURN
448 VOID
449 NTAPI
450 KiDispatchExceptionFromTrapFrame(
451 IN NTSTATUS Code,
452 IN ULONG_PTR Address,
453 IN ULONG ParameterCount,
454 IN ULONG_PTR Parameter1,
455 IN ULONG_PTR Parameter2,
456 IN ULONG_PTR Parameter3,
457 IN PKTRAP_FRAME TrapFrame
458 );
459
460 //
461 // Global x86 only Kernel data
462 //
463 extern PVOID Ki386IopmSaveArea;
464 extern ULONG KeI386EFlagsAndMaskV86;
465 extern ULONG KeI386EFlagsOrMaskV86;
466 extern BOOLEAN KeI386VirtualIntExtensions;
467 extern KIDTENTRY KiIdt[MAXIMUM_IDTVECTOR+1];
468 extern KDESCRIPTOR KiIdtDescriptor;
469 extern BOOLEAN KiI386PentiumLockErrataPresent;
470 extern ULONG KeI386NpxPresent;
471 extern ULONG KeI386XMMIPresent;
472 extern ULONG KeI386FxsrPresent;
473 extern ULONG KiMXCsrMask;
474 extern ULONG KeI386CpuType;
475 extern ULONG KeI386CpuStep;
476 extern ULONG Ke386CacheAlignment;
477 extern ULONG KiFastSystemCallDisable;
478 extern UCHAR KiDebugRegisterTrapOffsets[9];
479 extern UCHAR KiDebugRegisterContextOffsets[9];
480 extern DECLSPEC_NORETURN VOID __cdecl KiTrap02(VOID);
481 extern VOID __cdecl KiTrap08(VOID);
482 extern VOID __cdecl KiTrap13(VOID);
483 extern VOID __cdecl KiFastCallEntry(VOID);
484 extern VOID NTAPI ExpInterlockedPopEntrySListFault(VOID);
485 extern VOID NTAPI ExpInterlockedPopEntrySListResume(VOID);
486 extern VOID __cdecl CopyParams(VOID);
487 extern VOID __cdecl ReadBatch(VOID);
488 extern VOID __cdecl FrRestore(VOID);
489 extern CHAR KiSystemCallExitBranch[];
490 extern CHAR KiSystemCallExit[];
491 extern CHAR KiSystemCallExit2[];
492
493 //
494 // Trap Macros
495 //
496 #include "trap_x.h"
497
498 //
499 // Returns a thread's FPU save area
500 //
501 PFX_SAVE_AREA
502 FORCEINLINE
503 KiGetThreadNpxArea(IN PKTHREAD Thread)
504 {
505 return (PFX_SAVE_AREA)((ULONG_PTR)Thread->InitialStack - sizeof(FX_SAVE_AREA));
506 }
507
508 //
509 // Sanitizes a selector
510 //
511 FORCEINLINE
512 ULONG
513 Ke386SanitizeSeg(IN ULONG Cs,
514 IN KPROCESSOR_MODE Mode)
515 {
516 //
517 // Check if we're in kernel-mode, and force CPL 0 if so.
518 // Otherwise, force CPL 3.
519 //
520 return ((Mode == KernelMode) ?
521 (Cs & (0xFFFF & ~RPL_MASK)) :
522 (RPL_MASK | (Cs & 0xFFFF)));
523 }
524
525 //
526 // Sanitizes EFLAGS
527 //
528 FORCEINLINE
529 ULONG
530 Ke386SanitizeFlags(IN ULONG Eflags,
531 IN KPROCESSOR_MODE Mode)
532 {
533 //
534 // Check if we're in kernel-mode, and sanitize EFLAGS if so.
535 // Otherwise, also force interrupt mask on.
536 //
537 return ((Mode == KernelMode) ?
538 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
539 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
540 }
541
542 //
543 // Sanitizes a Debug Register
544 //
545 FORCEINLINE
546 PVOID
547 Ke386SanitizeDr(IN PVOID DrAddress,
548 IN KPROCESSOR_MODE Mode)
549 {
550 //
551 // Check if we're in kernel-mode, and return the address directly if so.
552 // Otherwise, make sure it's not inside the kernel-mode address space.
553 // If it is, then clear the address.
554 //
555 return ((Mode == KernelMode) ? DrAddress :
556 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
557 }
558
//
// Exception with no arguments
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException0Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with no arguments; does not return */
    KiDispatchExceptionFromTrapFrame(Code, Address, 0, 0, 0, 0, TrapFrame);
}
572
//
// Exception with one argument
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException1Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with one argument; does not return */
    KiDispatchExceptionFromTrapFrame(Code, Address, 1, P1, 0, 0, TrapFrame);
}
587
//
// Exception with two arguments
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException2Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN ULONG P2,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with two arguments; does not return */
    KiDispatchExceptionFromTrapFrame(Code, Address, 2, P1, P2, 0, TrapFrame);
}
603
604 //
605 // Performs a system call
606 //
607
608 /*
609 * This sequence does a RtlCopyMemory(Stack - StackBytes, Arguments, StackBytes)
610 * and then calls the function associated with the system call.
611 *
612 * It's done in assembly for two reasons: we need to muck with the stack,
613 * and the call itself restores the stack back for us. The only way to do
614 * this in C is to do manual C handlers for every possible number of args on
615 * the stack, and then have the handler issue a call by pointer. This is
616 * wasteful since it'll basically push the values twice and require another
617 * level of call indirection.
618 *
619 * The ARM kernel currently does this, but it should probably be changed
620 * later to function like this as well.
621 *
622 */
#ifdef __GNUC__
/*
 * Copies StackBytes bytes of Arguments onto the stack below ESP and then
 * calls Handler (the system call function). The handler restores the stack
 * on return; the result is taken from EAX.
 */
NTSTATUS
FORCEINLINE
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    NTSTATUS Result;

    /*
     * NOTE(review): the asm modifies ECX (operand %1, via shrl and rep movsd)
     * although it is declared as an input-only operand, and the called
     * handler may clobber registers not listed here. This relies on the
     * compiler's register allocation -- verify against newer GCC versions.
     */
    __asm__ __volatile__
    (
        "subl %1, %%esp\n"      /* make room for the arguments */
        "movl %%esp, %%edi\n"   /* destination: new stack top */
        "movl %2, %%esi\n"      /* source: caller's argument block */
        "shrl $2, %1\n"         /* byte count -> dword count */
        "rep movsd\n"           /* copy the arguments */
        "call *%3\n"            /* invoke the system call handler */
        "movl %%eax, %0\n"      /* capture its return value */
        : "=r"(Result)
        : "c"(StackBytes),
          "d"(Arguments),
          "r"(Handler)
        : "%esp", "%esi", "%edi"
    );
    return Result;
}
#elif defined(_MSC_VER)
/*
 * MSVC version of the trampoline above; same copy-then-call sequence.
 */
NTSTATUS
FORCEINLINE
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    __asm
    {
        mov ecx, StackBytes
        mov esi, Arguments
        mov eax, Handler
        sub esp, ecx        /* make room for the arguments */
        mov edi, esp        /* destination: new stack top */
        shr ecx, 2          /* byte count -> dword count */
        rep movsd           /* copy the arguments */
        call eax            /* invoke the system call handler */
    }
    /* Return with result in EAX (implicit; no C return statement on purpose) */
}
#else
#error Unknown Compiler
#endif
672
673
//
// Delivers any pending user-mode APCs before a trap exits back to user
// or V8086 mode.
// NOTE(review): the enable/disable pairing suggests interrupts are expected
// to be disabled on entry and left disabled on exit -- confirm at call sites.
//
VOID
FORCEINLINE
KiCheckForApcDelivery(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Only relevant when returning to V8086 or user mode */
    if ((TrapFrame->EFlags & EFLAGS_V86_MASK) || (KiUserTrap(TrapFrame)))
    {
        /* Get the thread */
        Thread = KeGetCurrentThread();
        while (TRUE)
        {
            /* Turn off the alerted state for kernel mode */
            Thread->Alerted[KernelMode] = FALSE;

            /* Are there pending user APCs? If not, we're done */
            if (!Thread->ApcState.UserApcPending) break;

            /* Raise to APC level and enable interrupts for the delivery */
            OldIrql = KfRaiseIrql(APC_LEVEL);
            _enable();

            /* Deliver APCs */
            KiDeliverApc(UserMode, NULL, TrapFrame);

            /* Restore IRQL and disable interrupts once again,
               then loop in case delivery queued more APCs */
            KfLowerIrql(OldIrql);
            _disable();
        }
    }
}
710
//
// Converts a base thread to a GUI thread by calling PsConvertToGuiThread,
// fixing up EBP across the kernel stack switch. Returns the conversion status.
//
#ifdef __GNUC__
NTSTATUS
FORCEINLINE
KiConvertToGuiThread(VOID)
{
    NTSTATUS Result;
    PVOID StackFrame;

    /*
     * Converting to a GUI thread safely updates ESP in-place as well as the
     * current Thread->TrapFrame and EBP when KeSwitchKernelStack is called.
     *
     * However, PsConvertToGuiThread "helpfully" restores EBP to the original
     * caller's value, since it is considered a nonvolatile register. As such,
     * as soon as we're back after the conversion and we try to store the result
     * which will probably be in some stack variable (EBP-based), we'll crash as
     * we are touching the de-allocated non-expanded stack.
     *
     * Thus we need a way to update our EBP before EBP is touched, and the only
     * way to guarantee this is to do the call itself in assembly, use the EAX
     * register to store the result, fixup EBP, and then let the C code continue
     * on its merry way.
     *
     */
    __asm__ __volatile__
    (
        "movl %%ebp, %1\n\t"                /* capture EBP... */
        "subl %%esp, %1\n\t"                /* ...as an offset from ESP */
        "call _PsConvertToGuiThread@0\n\t"  /* switch stacks; EAX = status */
        "addl %%esp, %1\n\t"                /* rebase the offset... */
        "movl %1, %%ebp"                    /* ...onto the new ESP */
        : "=a"(Result), "=r"(StackFrame)
        :
        : "%esp", "%ecx", "%edx", "memory"
    );
    return Result;
}
#elif defined(_MSC_VER)
/* MSVC cannot express this fixup inline; implemented in assembly elsewhere */
NTSTATUS
NTAPI
KiConvertToGuiThread(VOID);
#else
#error Unknown Compiler
#endif
758
//
// Switches from the boot loader stack to the initial kernel stack and jumps
// to KiSystemStartupBootStack. Control never returns to the caller.
//
VOID
FORCEINLINE
KiSwitchToBootStack(IN ULONG_PTR InitialStack)
{
    /* We have to switch to a new stack before continuing kernel initialization */
#ifdef __GNUC__
    __asm__
    (
        "movl %0, %%esp\n"  /* switch to the new stack */
        "subl %1, %%esp\n"  /* reserve NPX save area + aligned trap frame */
        "pushl %2\n"        /* initial CR0 NPX flags -- presumably consumed
                               by KiSystemStartupBootStack; confirm there */
        "jmp _KiSystemStartupBootStack@0\n"
        :
        : "c"(InitialStack),
          "i"(NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH),
          "i"(CR0_EM | CR0_TS | CR0_MP)
        : "%esp"
    );
#elif defined(_MSC_VER)
    VOID NTAPI KiSystemStartupBootStack(VOID);
    __asm
    {
        mov esp, InitialStack
        sub esp, (NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH)
        push (CR0_EM | CR0_TS | CR0_MP)
        jmp KiSystemStartupBootStack
    }
#else
#error Unknown Compiler
#endif
}
793
//
// Emits the iret instruction for C code; never returns.
//
DECLSPEC_NORETURN
VOID
FORCEINLINE
KiIret(VOID)
{
#if defined(__GNUC__)
    /* In 32-bit mode GAS assembles "iret" as the 32-bit IRETD */
    __asm__ __volatile__
    (
        "iret\n"
    );
#elif defined(_MSC_VER)
    __asm
    {
        iretd
    }
#else
#error Unsupported compiler
#endif
    UNREACHABLE;
}
817
818 //
819 // Normally this is done by the HAL, but on x86 as an optimization, the kernel
820 // initiates the end by calling back into the HAL and exiting the trap here.
821 //
822 VOID
823 FORCEINLINE
824 KiEndInterrupt(IN KIRQL Irql,
825 IN PKTRAP_FRAME TrapFrame)
826 {
827 /* Disable interrupts and end the interrupt */
828 _disable();
829 HalEndSystemInterrupt(Irql, TrapFrame);
830
831 /* Exit the interrupt */
832 KiEoiHelper(TrapFrame);
833 }
834
835 //
836 // PERF Code
837 //
838 VOID
839 FORCEINLINE
840 Ki386PerfEnd(VOID)
841 {
842 extern ULONGLONG BootCyclesEnd, BootCycles;
843 BootCyclesEnd = __rdtsc();
844 DbgPrint("Boot took %I64u cycles!\n", BootCyclesEnd - BootCycles);
845 DbgPrint("Interrupts: %u System Calls: %u Context Switches: %u\n",
846 KeGetCurrentPrcb()->InterruptCount,
847 KeGetCurrentPrcb()->KeSystemCalls,
848 KeGetContextSwitches(KeGetCurrentPrcb()));
849 }
850
851 FORCEINLINE
852 PULONG
853 KiGetUserModeStackAddress(void)
854 {
855 return &(KeGetCurrentThread()->TrapFrame->HardwareEsp);
856 }
857
858 #endif