#pragma once

#ifndef __ASM__

#include "intrin_i.h"

//
// Thread Dispatcher Header DebugActive Mask
//
#define DR_MASK(x) (1 << (x))
#define DR_REG_MASK 0x4F

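//
// For illustration: DR_MASK builds a single-bit mask, so DR_MASK(0) == 0x01
// and DR_MASK(3) == 0x08, while DR_REG_MASK (0x4F == binary 0100 1111)
// covers bits 0-3 plus bit 6 of the DebugActive mask.
//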
#define IMAGE_FILE_MACHINE_ARCHITECTURE IMAGE_FILE_MACHINE_I386

//
// INT3 is 1 byte long
//
#define KD_BREAKPOINT_TYPE UCHAR
#define KD_BREAKPOINT_SIZE sizeof(UCHAR)
#define KD_BREAKPOINT_VALUE 0xCC

//
// Macros for getting and setting special purpose registers in portable code
//
#define KeGetContextPc(Context) \
    ((Context)->Eip)

#define KeSetContextPc(Context, ProgramCounter) \
    ((Context)->Eip = (ProgramCounter))

#define KeGetTrapFramePc(TrapFrame) \
    ((TrapFrame)->Eip)

#define KiGetLinkedTrapFrame(x) \
    (PKTRAP_FRAME)((x)->Edx)

#define KeGetContextReturnRegister(Context) \
    ((Context)->Eax)

#define KeSetContextReturnRegister(Context, ReturnValue) \
    ((Context)->Eax = (ReturnValue))

//
// Macro to get trap and exception frame from a thread stack
//
#define KeGetTrapFrame(Thread) \
    (PKTRAP_FRAME)((ULONG_PTR)((Thread)->InitialStack) - \
                   sizeof(KTRAP_FRAME) - \
                   sizeof(FX_SAVE_AREA))
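//
// For illustration: the macro above implies the following layout at the top
// of a thread's kernel stack (addresses decreasing downward):
//
//   InitialStack
//     -> FX_SAVE_AREA  (NPX/FPU save area, see KiGetThreadNpxArea below)
//     -> KTRAP_FRAME   (what KeGetTrapFrame returns)
//     -> remaining kernel stack
//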

#define KeGetExceptionFrame(Thread) \
    NULL

//
// Macro to get context switches from the PRCB
// All architectures but x86 have it in the PRCB's KeContextSwitches
//
#define KeGetContextSwitches(Prcb) \
    CONTAINING_RECORD(Prcb, KIPCR, PrcbData)->ContextSwitches

//
// Macro to get the second level cache size field name which differs between
// CISC and RISC architectures, as the former has unified I/D cache
//
#define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelCacheSize

//
// Returns the Interrupt State from a Trap Frame.
// ON = TRUE, OFF = FALSE
//
#define KeGetTrapFrameInterruptState(TrapFrame) \
    BooleanFlagOn((TrapFrame)->EFlags, EFLAGS_INTERRUPT_MASK)

//
// Flags for exiting a trap
//
#define KTE_SKIP_PM_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipPreviousMode = TRUE } }).Bits)
#define KTE_SKIP_SEG_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipSegments = TRUE } }).Bits)
#define KTE_SKIP_VOL_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipVolatiles = TRUE } }).Bits)

typedef union _KTRAP_EXIT_SKIP_BITS
{
    struct
    {
        UCHAR SkipPreviousMode:1;
        UCHAR SkipSegments:1;
        UCHAR SkipVolatiles:1;
        UCHAR Reserved:5;
    };
    UCHAR Bits;
} KTRAP_EXIT_SKIP_BITS, *PKTRAP_EXIT_SKIP_BITS;
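//
// For illustration: because the union overlays the bit-fields with the Bits
// byte, the flags above evaluate to 0x01 (KTE_SKIP_PM_BIT), 0x02
// (KTE_SKIP_SEG_BIT) and 0x04 (KTE_SKIP_VOL_BIT), and they can be OR'ed
// together when exiting a trap.
//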


//
// Flags used by the VDM/V8086 emulation engine for determining instruction prefixes
//
#define PFX_FLAG_ES     0x00000100
#define PFX_FLAG_CS     0x00000200
#define PFX_FLAG_SS     0x00000400
#define PFX_FLAG_DS     0x00000800
#define PFX_FLAG_FS     0x00001000
#define PFX_FLAG_GS     0x00002000
#define PFX_FLAG_OPER32 0x00004000
#define PFX_FLAG_ADDR32 0x00008000
#define PFX_FLAG_LOCK   0x00010000
#define PFX_FLAG_REPNE  0x00020000
#define PFX_FLAG_REP    0x00040000

//
// VDM Helper Macros
//
// All VDM/V8086 opcode emulators have the same FASTCALL function definition.
// We need to keep 2 parameters while the original ASM implementation uses 4:
// TrapFrame, PrefixFlags, Eip, InstructionSize.
//
// We pass the trap frame and prefix flags in our two parameters.
//
// Since the smallest prefix flag is 0x100, the low byte is free to hold a
// count of up to 0xFF, so we OR the instruction size into the prefix flags.
//
// We also always have access to EIP from the trap frame, and if we want the
// *current instruction* EIP we simply add the instruction size *MINUS ONE*;
// that gives us the EIP we should be looking at now, so we don't need to use
// the stack to push this parameter.
//
// We actually only care about the *current instruction* EIP in one location,
// so although it may be slightly more expensive to re-calculate the EIP one
// more time, this way we don't redefine ALL opcode handlers to take 3
// parameters, which would force stack usage in all other scenarios.
//
#define KiVdmSetVdmEFlags(x)      InterlockedOr((PLONG)KiNtVdmState, (x));
#define KiVdmClearVdmEFlags(x)    InterlockedAnd((PLONG)KiNtVdmState, ~(x))
#define KiCallVdmHandler(x)       KiVdmOpcode##x(TrapFrame, Flags)
#define KiCallVdmPrefixHandler(x) KiVdmOpcodePrefix(TrapFrame, Flags | x)
#define KiVdmUnhandledOpcode(x)                   \
    BOOLEAN                                       \
    FASTCALL                                      \
    KiVdmOpcode##x(IN PKTRAP_FRAME TrapFrame,     \
                   IN ULONG Flags)                \
    {                                             \
        /* Not yet handled */                     \
        UNIMPLEMENTED;                            \
        while (TRUE);                             \
        return TRUE;                              \
    }
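//
// For illustration (hypothetical opcode name): KiVdmUnhandledOpcode(HLT)
// expands to a stub "BOOLEAN FASTCALL KiVdmOpcodeHLT(TrapFrame, Flags)" that
// simply asserts unimplemented. Likewise, a caller that has decoded a 2-byte
// instruction with an operand-size prefix would pass
// Flags = PFX_FLAG_OPER32 | 2, keeping the instruction size in the low byte
// as described above.
//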

C_ASSERT(NPX_FRAME_LENGTH == sizeof(FX_SAVE_AREA));

//
// Local parameters
//
typedef struct _KV86_FRAME
{
    PVOID ThreadStack;
    PVOID ThreadTeb;
    PVOID PcrTeb;
} KV86_FRAME, *PKV86_FRAME;

//
// Virtual Stack Frame
//
typedef struct _KV8086_STACK_FRAME
{
    KTRAP_FRAME TrapFrame;
    FX_SAVE_AREA NpxArea;
    KV86_FRAME V86Frame;
} KV8086_STACK_FRAME, *PKV8086_STACK_FRAME;

//
// Registers an interrupt handler with an IDT vector
//
FORCEINLINE
VOID
KeRegisterInterruptHandler(IN ULONG Vector,
                           IN PVOID Handler)
{
    UCHAR Entry;
    ULONG_PTR Address;
    PKIPCR Pcr = (PKIPCR)KeGetPcr();

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);
    Address = PtrToUlong(Handler);

    //
    // Now set the data
    //
    Pcr->IDT[Entry].ExtendedOffset = (USHORT)(Address >> 16);
    Pcr->IDT[Entry].Offset = (USHORT)Address;
}

//
// Returns the registered interrupt handler for a given IDT vector
//
FORCEINLINE
PVOID
KeQueryInterruptHandler(IN ULONG Vector)
{
    PKIPCR Pcr = (PKIPCR)KeGetPcr();
    UCHAR Entry;

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);

    //
    // Read the entry from the IDT
    //
    return (PVOID)(((Pcr->IDT[Entry].ExtendedOffset << 16) & 0xFFFF0000) |
                    (Pcr->IDT[Entry].Offset & 0xFFFF));
}
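//
// For illustration: an i386 IDT gate stores its handler address split into two
// 16-bit halves, so for a handler at 0x80123456 the code above writes
// Offset = 0x3456 and ExtendedOffset = 0x8012, and KeQueryInterruptHandler
// reassembles the same 0x80123456 value.
//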

//
// Invalidates the TLB entry for a specified address
//
FORCEINLINE
VOID
KeInvalidateTlbEntry(IN PVOID Address)
{
    /* Invalidate the TLB entry for this address */
    __invlpg(Address);
}

FORCEINLINE
VOID
KeFlushProcessTb(VOID)
{
    /* Flush the TLB by resetting CR3 */
    __writecr3(__readcr3());
}

FORCEINLINE
PRKTHREAD
KeGetCurrentThread(VOID)
{
    /* Return the current thread */
    return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
}

FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
#ifndef CONFIG_SMP
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
        Ke386FnInit();
    }
#else
    /* Nothing to do */
#endif
}

VOID
FASTCALL
Ki386InitializeTss(
    IN PKTSS Tss,
    IN PKIDTENTRY Idt,
    IN PKGDTENTRY Gdt
);

VOID
NTAPI
KiSetCR0Bits(VOID);

VOID
NTAPI
KiGetCacheInformation(VOID);

BOOLEAN
NTAPI
KiIsNpxPresent(
    VOID
);

BOOLEAN
NTAPI
KiIsNpxErrataPresent(
    VOID
);

VOID
NTAPI
KiSetProcessorType(VOID);

ULONG
NTAPI
KiGetFeatureBits(VOID);

VOID
NTAPI
KiThreadStartup(VOID);

NTSTATUS
NTAPI
Ke386GetGdtEntryThread(
    IN PKTHREAD Thread,
    IN ULONG Offset,
    IN PKGDTENTRY Descriptor
);

VOID
NTAPI
KiFlushNPXState(
    IN FLOATING_SAVE_AREA *SaveArea
);

VOID
NTAPI
Ki386AdjustEsp0(
    IN PKTRAP_FRAME TrapFrame
);

VOID
NTAPI
Ki386SetupAndExitToV86Mode(
    OUT PTEB VdmTeb
);

VOID
NTAPI
KeI386VdmInitialize(
    VOID
);

ULONG_PTR
NTAPI
Ki386EnableGlobalPage(
    IN volatile ULONG_PTR Context
);

VOID
NTAPI
KiI386PentiumLockErrataFixup(
    VOID
);

VOID
NTAPI
KiInitializePAT(
    VOID
);

VOID
NTAPI
KiInitializeMTRR(
    IN BOOLEAN FinalCpu
);

VOID
NTAPI
KiAmdK6InitializeMTRR(
    VOID
);

VOID
NTAPI
KiRestoreFastSyscallReturnState(
    VOID
);

ULONG_PTR
NTAPI
Ki386EnableDE(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableFxsr(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableXMMIExceptions(
    IN ULONG_PTR Context
);

BOOLEAN
NTAPI
VdmDispatchBop(
    IN PKTRAP_FRAME TrapFrame
);

BOOLEAN
FASTCALL
KiVdmOpcodePrefix(
    IN PKTRAP_FRAME TrapFrame,
    IN ULONG Flags
);

BOOLEAN
FASTCALL
Ki386HandleOpcodeV86(
    IN PKTRAP_FRAME TrapFrame
);

DECLSPEC_NORETURN
VOID
FASTCALL
KiEoiHelper(
    IN PKTRAP_FRAME TrapFrame
);

VOID
FASTCALL
Ki386BiosCallReturnAddress(
    IN PKTRAP_FRAME TrapFrame
);

ULONG_PTR
FASTCALL
KiExitV86Mode(
    IN PKTRAP_FRAME TrapFrame
);

DECLSPEC_NORETURN
VOID
NTAPI
KiDispatchExceptionFromTrapFrame(
    IN NTSTATUS Code,
    IN ULONG_PTR Address,
    IN ULONG ParameterCount,
    IN ULONG_PTR Parameter1,
    IN ULONG_PTR Parameter2,
    IN ULONG_PTR Parameter3,
    IN PKTRAP_FRAME TrapFrame
);

//
// Global x86 only Kernel data
//
extern PVOID Ki386IopmSaveArea;
extern ULONG KeI386EFlagsAndMaskV86;
extern ULONG KeI386EFlagsOrMaskV86;
extern BOOLEAN KeI386VirtualIntExtensions;
extern KIDTENTRY KiIdt[MAXIMUM_IDTVECTOR];
extern KDESCRIPTOR KiIdtDescriptor;
extern BOOLEAN KiI386PentiumLockErrataPresent;
extern ULONG KeI386NpxPresent;
extern ULONG KeI386XMMIPresent;
extern ULONG KeI386FxsrPresent;
extern ULONG KiMXCsrMask;
extern ULONG KeI386CpuType;
extern ULONG KeI386CpuStep;
extern ULONG Ke386CacheAlignment;
extern ULONG KiFastSystemCallDisable;
extern UCHAR KiDebugRegisterTrapOffsets[9];
extern UCHAR KiDebugRegisterContextOffsets[9];
extern DECLSPEC_NORETURN VOID __cdecl KiTrap02(VOID);
extern VOID __cdecl KiTrap08(VOID);
extern VOID __cdecl KiTrap13(VOID);
extern VOID __cdecl KiFastCallEntry(VOID);
extern VOID NTAPI ExpInterlockedPopEntrySListFault(VOID);
extern VOID __cdecl CopyParams(VOID);
extern VOID __cdecl ReadBatch(VOID);
extern VOID __cdecl FrRestore(VOID);
extern CHAR KiSystemCallExitBranch[];
extern CHAR KiSystemCallExit[];
extern CHAR KiSystemCallExit2[];

//
// Trap Macros
//
#include "../trap_x.h"

//
// Returns a thread's FPU save area
//
PFX_SAVE_AREA
FORCEINLINE
KiGetThreadNpxArea(IN PKTHREAD Thread)
{
    return (PFX_SAVE_AREA)((ULONG_PTR)Thread->InitialStack - sizeof(FX_SAVE_AREA));
}

//
// Sanitizes a selector
//
FORCEINLINE
ULONG
Ke386SanitizeSeg(IN ULONG Cs,
                 IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and force CPL 0 if so.
    // Otherwise, force CPL 3.
    //
    return ((Mode == KernelMode) ?
            (Cs & (0xFFFF & ~RPL_MASK)) :
            (RPL_MASK | (Cs & 0xFFFF)));
}
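//
// For illustration: with RPL_MASK == 3, a kernel-mode caller passing CS = 0x08
// gets 0x08 back (RPL forced to 0), while a user-mode caller passing the same
// 0x08 gets 0x0B back (RPL forced to 3), so a user-supplied context can never
// smuggle in a ring-0 selector.
//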

//
// Sanitizes EFLAGS
//
FORCEINLINE
ULONG
Ke386SanitizeFlags(IN ULONG Eflags,
                   IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and sanitize EFLAGS if so.
    // Otherwise, also force interrupt mask on.
    //
    return ((Mode == KernelMode) ?
            (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
            (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
}

//
// Gets a DR register from a CONTEXT structure
//
FORCEINLINE
PVOID
KiDrFromContext(IN ULONG Dr,
                IN PCONTEXT Context)
{
    return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
}

//
// Gets a DR register from a KTRAP_FRAME structure
//
FORCEINLINE
PVOID*
KiDrFromTrapFrame(IN ULONG Dr,
                  IN PKTRAP_FRAME TrapFrame)
{
    return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
}

//
// Sanitizes a Debug Register
//
FORCEINLINE
PVOID
Ke386SanitizeDr(IN PVOID DrAddress,
                IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and return the address directly if so.
    // Otherwise, make sure it's not inside the kernel-mode address space.
    // If it is, then clear the address.
    //
    return ((Mode == KernelMode) ? DrAddress :
            (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
}
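//
// For illustration (assuming the offset tables are indexed by debug-register
// number): KiDrFromContext(0, Context) reads the CONTEXT slot that the
// KiDebugRegisterContextOffsets table maps to DR0, and Ke386SanitizeDr rejects
// breakpoint addresses above MM_HIGHEST_USER_ADDRESS for user-mode callers,
// e.g. a user-supplied DR0 pointing into kernel space is replaced with 0.
//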

//
// Exception with no arguments
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException0Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with no arguments */
    KiDispatchExceptionFromTrapFrame(Code, Address, 0, 0, 0, 0, TrapFrame);
}

//
// Exception with one argument
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException1Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with one argument */
    KiDispatchExceptionFromTrapFrame(Code, Address, 1, P1, 0, 0, TrapFrame);
}

//
// Exception with two arguments
//
VOID
FORCEINLINE
DECLSPEC_NORETURN
KiDispatchException2Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN ULONG P2,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with two arguments */
    KiDispatchExceptionFromTrapFrame(Code, Address, 2, P1, P2, 0, TrapFrame);
}

//
// Performs a system call
//
NTSTATUS
FORCEINLINE
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    NTSTATUS Result;

    /*
     * This sequence does a RtlCopyMemory(Stack - StackBytes, Arguments, StackBytes)
     * and then calls the function associated with the system call.
     *
     * It's done in assembly for two reasons: we need to muck with the stack,
     * and the call itself restores the stack back for us. The only way to do
     * this in C is to do manual C handlers for every possible number of args on
     * the stack, and then have the handler issue a call by pointer. This is
     * wasteful since it'll basically push the values twice and require another
     * level of call indirection.
     *
     * The ARM kernel currently does this, but it should probably be changed
     * later to function like this as well.
     *
     */
#ifdef __GNUC__
    __asm__ __volatile__
    (
        "subl %1, %%esp\n"
        "movl %%esp, %%edi\n"
        "movl %2, %%esi\n"
        "shrl $2, %1\n"
        "rep movsd\n"
        "call *%3\n"
        "movl %%eax, %0\n"
        : "=r"(Result)
        : "c"(StackBytes),
          "d"(Arguments),
          "r"(Handler)
        : "%esp", "%esi", "%edi"
    );
#elif defined(_MSC_VER)
    __asm
    {
        mov ecx, StackBytes
        mov edx, Arguments
        sub esp, ecx
        mov edi, esp
        mov esi, edx
        shr ecx, 2
        rep movsd
        call Handler
        mov Result, eax
    }
#else
#error Unknown Compiler
#endif

    return Result;
}
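//
// For illustration (hypothetical values): for a service that takes three
// ULONG parameters, the dispatcher would invoke this as
// KiSystemCallTrampoline(Handler, UserArguments, 12); the assembly above then
// reserves 12 bytes on the kernel stack, copies the three DWORDs there with
// "rep movsd", and calls the handler, which cleans its own arguments off the
// stack on return.
//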

//
// Checks for pending APCs
//
VOID
FORCEINLINE
KiCheckForApcDelivery(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Check for V8086 or user-mode trap */
    if ((TrapFrame->EFlags & EFLAGS_V86_MASK) || (KiUserTrap(TrapFrame)))
    {
        /* Get the thread */
        Thread = KeGetCurrentThread();
        while (TRUE)
        {
            /* Turn off the alerted state for kernel mode */
            Thread->Alerted[KernelMode] = FALSE;

            /* Are there pending user APCs? */
            if (!Thread->ApcState.UserApcPending) break;

            /* Raise to APC level and enable interrupts */
            OldIrql = KfRaiseIrql(APC_LEVEL);
            _enable();

            /* Deliver APCs */
            KiDeliverApc(UserMode, NULL, TrapFrame);

            /* Restore IRQL and disable interrupts once again */
            KfLowerIrql(OldIrql);
            _disable();
        }
    }
}

//
// Converts a base thread to a GUI thread
//
NTSTATUS
FORCEINLINE
KiConvertToGuiThread(VOID)
{
    NTSTATUS Result;
    PVOID StackFrame;

    /*
     * Converting to a GUI thread safely updates ESP in-place as well as the
     * current Thread->TrapFrame and EBP when KeSwitchKernelStack is called.
     *
     * However, PsConvertToGuiThread "helpfully" restores EBP to the original
     * caller's value, since it is considered a nonvolatile register. As such,
     * as soon as we're back after the conversion and we try to store the result
     * which will probably be in some stack variable (EBP-based), we'll crash as
     * we are touching the de-allocated non-expanded stack.
     *
     * Thus we need a way to update our EBP before EBP is touched, and the only
     * way to guarantee this is to do the call itself in assembly, use the EAX
     * register to store the result, fixup EBP, and then let the C code continue
     * on its merry way.
     *
     */
#ifdef __GNUC__
    __asm__ __volatile__
    (
        "movl %%ebp, %1\n"
        "subl %%esp, %1\n"
        "call _PsConvertToGuiThread@0\n"
        "addl %%esp, %1\n"
        "movl %1, %%ebp\n"
        "movl %%eax, %0\n"
        : "=r"(Result), "=r"(StackFrame)
        :
        : "%esp", "%ecx", "%edx", "memory"
    );
#elif defined(_MSC_VER)
    NTSTATUS NTAPI PsConvertToGuiThread(VOID);
    __asm
    {
        mov StackFrame, ebp
        sub StackFrame, esp
        call PsConvertToGuiThread
        add StackFrame, esp
        mov ebp, StackFrame
        mov Result, eax
    }
#else
#error Unknown Compiler
#endif
    return Result;
}

//
// Switches from boot loader to initial kernel stack
//
VOID
FORCEINLINE
KiSwitchToBootStack(IN ULONG_PTR InitialStack)
{
    /* We have to switch to a new stack before continuing kernel initialization */
#ifdef __GNUC__
    __asm__
    (
        "movl %0, %%esp\n"
        "subl %1, %%esp\n"
        "pushl %2\n"
        "jmp _KiSystemStartupBootStack@0\n"
        :
        : "c"(InitialStack),
          "i"(NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH),
          "i"(CR0_EM | CR0_TS | CR0_MP)
        : "%esp"
    );
#elif defined(_MSC_VER)
    VOID NTAPI KiSystemStartupBootStack(VOID);
    __asm
    {
        mov esp, InitialStack
        sub esp, (NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH)
        push (CR0_EM | CR0_TS | CR0_MP)
        jmp KiSystemStartupBootStack
    }
#else
#error Unknown Compiler
#endif
}

//
// Emits the iret instruction for C code
//
DECLSPEC_NORETURN
VOID
FORCEINLINE
KiIret(VOID)
{
#if defined(__GNUC__)
    __asm__ __volatile__
    (
        "iret\n"
    );
#elif defined(_MSC_VER)
    __asm
    {
        iret
    }
#else
#error Unsupported compiler
#endif
    UNREACHABLE;
}

//
// Normally the HAL finishes an interrupt, but on x86, as an optimization, the
// kernel ends it by calling back into the HAL and then exiting the trap here.
//
VOID
FORCEINLINE
KiEndInterrupt(IN KIRQL Irql,
               IN PKTRAP_FRAME TrapFrame)
{
    /* Disable interrupts and end the interrupt */
    _disable();
    HalEndSystemInterrupt(Irql, TrapFrame);

    /* Exit the interrupt */
    KiEoiHelper(TrapFrame);
}

//
// PERF Code
//
VOID
FORCEINLINE
Ki386PerfEnd(VOID)
{
    extern ULONGLONG BootCyclesEnd, BootCycles;
    BootCyclesEnd = __rdtsc();
    DbgPrint("Boot took %I64d cycles!\n", BootCyclesEnd - BootCycles);
    DbgPrint("Interrupts: %d System Calls: %d Context Switches: %d\n",
             KeGetCurrentPrcb()->InterruptCount,
             KeGetCurrentPrcb()->KeSystemCalls,
             KeGetContextSwitches(KeGetCurrentPrcb()));
}

#endif