* Sync up to trunk HEAD (r62975).
[reactos.git] / ntoskrnl / include / internal / i386 / ke.h
#pragma once

#ifndef __ASM__

#include "intrin_i.h"

//
// Thread Dispatcher Header DebugActive Mask
//
// DR_MASK converts a debug-register index into its bit in the mask.
// DR_REG_MASK = 0x4F selects bits 0-3 and 6 -- presumably DR0-DR3 plus
// DR6 status; verify against the DebugActive consumers.
//
#define DR_MASK(x) (1 << (x))
#define DR_REG_MASK 0x4F

//
// INT3 is 1 byte long
//
// 0xCC is the x86 single-byte INT3 breakpoint opcode used by KD.
//
#define KD_BREAKPOINT_TYPE UCHAR
#define KD_BREAKPOINT_SIZE sizeof(UCHAR)
#define KD_BREAKPOINT_VALUE 0xCC
19
//
// Macros for getting and setting special purpose registers in portable code
//
// On x86 the program counter is EIP and the return-value register is EAX.
//
#define KeGetContextPc(Context) \
    ((Context)->Eip)

#define KeSetContextPc(Context, ProgramCounter) \
    ((Context)->Eip = (ProgramCounter))

#define KeGetTrapFramePc(TrapFrame) \
    ((TrapFrame)->Eip)

// NOTE(review): the previous trap frame appears to be carried in EDX of
// the given frame -- confirm against the trap entry assembly.
#define KiGetLinkedTrapFrame(x) \
    (PKTRAP_FRAME)((x)->Edx)

#define KeGetContextReturnRegister(Context) \
    ((Context)->Eax)

#define KeSetContextReturnRegister(Context, ReturnValue) \
    ((Context)->Eax = (ReturnValue))

//
// Macro to get trap and exception frame from a thread stack
//
// The trap frame lies below the FX_SAVE_AREA at the top of the thread's
// initial kernel stack (see also KiGetThreadNpxArea below).
//
#define KeGetTrapFrame(Thread) \
    (PKTRAP_FRAME)((ULONG_PTR)((Thread)->InitialStack) - \
                   sizeof(KTRAP_FRAME) - \
                   sizeof(FX_SAVE_AREA))

// x86 keeps no separate exception frame
#define KeGetExceptionFrame(Thread) \
    NULL
51
//
// Macro to get context switches from the PRCB
// All architectures but x86 have it in the PRCB's KeContextSwitches
// (on x86 the counter lives in the enclosing KIPCR instead).
//
#define KeGetContextSwitches(Prcb) \
    CONTAINING_RECORD(Prcb, KIPCR, PrcbData)->ContextSwitches

//
// Macro to get the second level cache size field name which differs between
// CISC and RISC architectures, as the former has unified I/D cache
//
#define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelCacheSize

//
// Returns the Interrupt State from a Trap Frame.
// ON = TRUE, OFF = FALSE
//
#define KeGetTrapFrameInterruptState(TrapFrame) \
        BooleanFlagOn((TrapFrame)->EFlags, EFLAGS_INTERRUPT_MASK)

//
// Flags for exiting a trap
//
// Each KTE_SKIP_*_BIT extracts the raw byte value of one flag from the
// KTRAP_EXIT_SKIP_BITS union (defined below) via a compound literal.
//
#define KTE_SKIP_PM_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipPreviousMode = TRUE } }).Bits)
#define KTE_SKIP_SEG_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipSegments = TRUE } }).Bits)
#define KTE_SKIP_VOL_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipVolatiles = TRUE } }).Bits)
78
//
// Per-flag bit layout for the trap-exit skip flags; Bits aliases all
// flags as a single byte.
//
typedef union _KTRAP_EXIT_SKIP_BITS
{
    struct
    {
        UCHAR SkipPreviousMode:1;   // skip previous-mode handling on exit
        UCHAR SkipSegments:1;       // skip segment register restore
        UCHAR SkipVolatiles:1;      // skip volatile register restore
        UCHAR Reserved:5;
    };
    UCHAR Bits;                     // all flags viewed as one byte
} KTRAP_EXIT_SKIP_BITS, *PKTRAP_EXIT_SKIP_BITS;
90
91
//
// Flags used by the VDM/V8086 emulation engine for determining instruction prefixes
//
// The smallest flag is 0x100 so the low byte remains free to carry the
// instruction size (see the helper-macro commentary below).
//
#define PFX_FLAG_ES 0x00000100
#define PFX_FLAG_CS 0x00000200
#define PFX_FLAG_SS 0x00000400
#define PFX_FLAG_DS 0x00000800
#define PFX_FLAG_FS 0x00001000
#define PFX_FLAG_GS 0x00002000
#define PFX_FLAG_OPER32 0x00004000
#define PFX_FLAG_ADDR32 0x00008000
#define PFX_FLAG_LOCK 0x00010000
#define PFX_FLAG_REPNE 0x00020000
#define PFX_FLAG_REP 0x00040000

//
// VDM Helper Macros
//
// All VDM/V8086 opcode emulators have the same FASTCALL function definition.
// We need to keep 2 parameters while the original ASM implementation uses 4:
// TrapFrame, PrefixFlags, Eip, InstructionSize;
//
// We pass the trap frame, and prefix flags, in our two parameters.
//
// We then realize that since the smallest prefix flag is 0x100, this gives us
// a count of up to 0xFF. So we OR in the instruction size with the prefix flags
//
// We further realize that we always have access to EIP from the trap frame, and
// that if we want the *current instruction* EIP, we simply have to add the
// instruction size *MINUS ONE*, and that gives us the EIP we should be looking
// at now, so we don't need to use the stack to push this parameter.
//
// We actually only care about the *current instruction* EIP in one location,
// so although it may be slightly more expensive to re-calculate the EIP one
// more time, this way we don't redefine ALL opcode handlers to have 3 parameters,
// which would be forcing stack usage in all other scenarios.
//
// NOTE(review): KiVdmSetVdmEFlags carries a trailing semicolon inside the
// macro body while KiVdmClearVdmEFlags does not -- existing call sites
// compensate, so it is left as-is here.
//
#define KiVdmSetVdmEFlags(x) InterlockedOr((PLONG)KiNtVdmState, (x));
#define KiVdmClearVdmEFlags(x) InterlockedAnd((PLONG)KiNtVdmState, ~(x))
#define KiCallVdmHandler(x) KiVdmOpcode##x(TrapFrame, Flags)
#define KiCallVdmPrefixHandler(x) KiVdmOpcodePrefix(TrapFrame, Flags | x)
// Stamps out a stub handler that breaks into the debugger for opcodes
// that are not emulated yet.
#define KiVdmUnhandledOpcode(x) \
    BOOLEAN \
    FASTCALL \
    KiVdmOpcode##x(IN PKTRAP_FRAME TrapFrame, \
                   IN ULONG Flags) \
    { \
        /* Not yet handled */ \
        UNIMPLEMENTED_DBGBREAK(); \
        return FALSE; \
    }

/* The layout assumptions above require these two sizes to agree */
C_ASSERT(NPX_FRAME_LENGTH == sizeof(FX_SAVE_AREA));
145
//
// Local parameters
//
// Saved pointers for V86-mode entry/exit -- presumably captured so the
// kernel stack and TEB linkage can be restored on exit; verify against
// Ki386SetupAndExitToV86Mode / KiExitV86Mode.
//
typedef struct _KV86_FRAME
{
    PVOID ThreadStack;  // saved thread stack pointer
    PVOID ThreadTeb;    // saved thread TEB pointer
    PVOID PcrTeb;       // saved PCR TEB (NtTib.Self) value
} KV86_FRAME, *PKV86_FRAME;
155
//
// Virtual Stack Frame
//
// Stack layout used while running in V8086 mode: trap frame, then the
// FPU save area, then the saved local parameters above.
//
typedef struct _KV8086_STACK_FRAME
{
    KTRAP_FRAME TrapFrame;      // register state for the V86 transition
    FX_SAVE_AREA NpxArea;       // FPU/NPX save area
    KV86_FRAME V86Frame;        // saved stack/TEB pointers
} KV8086_STACK_FRAME, *PKV8086_STACK_FRAME;
165
//
// Large Pages Support
//
// Bookkeeping for a temporary identity mapping built while enabling
// large pages (see Ki386CreateIdentityMap/Ki386FreeIdentityMap below).
//
typedef struct _LARGE_IDENTITY_MAP
{
    PHARDWARE_PTE TopLevelDirectory;    // page directory of the identity map
    ULONG Cr3;                          // CR3 value selecting that directory
    ULONG_PTR StartAddress;             // base address being identity-mapped
    ULONG PagesCount;                   // number of entries used in PagesList
    PVOID PagesList[30];                // pages allocated for the map, for later freeing
} LARGE_IDENTITY_MAP, *PLARGE_IDENTITY_MAP;
177
178 /* Diable interrupts and return whether they were enabled before */
179 FORCEINLINE
180 BOOLEAN
181 KeDisableInterrupts(VOID)
182 {
183 ULONG Flags;
184 BOOLEAN Return;
185
186 /* Get EFLAGS and check if the interrupt bit is set */
187 Flags = __readeflags();
188 Return = (Flags & EFLAGS_INTERRUPT_MASK) ? TRUE: FALSE;
189
190 /* Disable interrupts */
191 _disable();
192 return Return;
193 }
194
195 /* Restore previous interrupt state */
196 FORCEINLINE
197 VOID
198 KeRestoreInterrupts(BOOLEAN WereEnabled)
199 {
200 if (WereEnabled) _enable();
201 }
202
//
// Registers an interrupt handler with an IDT vector
//
// Writes the handler address into this processor's IDT entry for the
// given vector. The 32-bit address is split across the IDT gate's
// Offset (low 16 bits) and ExtendedOffset (high 16 bits) fields.
//
FORCEINLINE
VOID
KeRegisterInterruptHandler(IN ULONG Vector,
                           IN PVOID Handler)
{
    UCHAR Entry;
    ULONG_PTR Address;
    PKIPCR Pcr = (PKIPCR)KeGetPcr();

    //
    // Get the entry from the HAL
    // (the HAL owns the vector -> IDT slot translation)
    //
    Entry = HalVectorToIDTEntry(Vector);
    Address = PtrToUlong(Handler);

    //
    // Now set the data
    //
    Pcr->IDT[Entry].ExtendedOffset = (USHORT)(Address >> 16);
    Pcr->IDT[Entry].Offset = (USHORT)Address;
}
227
228 //
229 // Returns the registered interrupt handler for a given IDT vector
230 //
231 FORCEINLINE
232 PVOID
233 KeQueryInterruptHandler(IN ULONG Vector)
234 {
235 PKIPCR Pcr = (PKIPCR)KeGetPcr();
236 UCHAR Entry;
237
238 //
239 // Get the entry from the HAL
240 //
241 Entry = HalVectorToIDTEntry(Vector);
242
243 //
244 // Read the entry from the IDT
245 //
246 return (PVOID)(((Pcr->IDT[Entry].ExtendedOffset << 16) & 0xFFFF0000) |
247 (Pcr->IDT[Entry].Offset & 0xFFFF));
248 }
249
250 //
251 // Invalidates the TLB entry for a specified address
252 //
253 FORCEINLINE
254 VOID
255 KeInvalidateTlbEntry(IN PVOID Address)
256 {
257 /* Invalidate the TLB entry for this address */
258 __invlpg(Address);
259 }
260
261 FORCEINLINE
262 VOID
263 KeFlushProcessTb(VOID)
264 {
265 /* Flush the TLB by resetting CR3 */
266 __writecr3(__readcr3());
267 }
268
269 FORCEINLINE
270 PRKTHREAD
271 KeGetCurrentThread(VOID)
272 {
273 /* Return the current thread */
274 return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
275 }
276
277 FORCEINLINE
278 VOID
279 KiRundownThread(IN PKTHREAD Thread)
280 {
281 #ifndef CONFIG_SMP
282 /* Check if this is the NPX Thread */
283 if (KeGetCurrentPrcb()->NpxThread == Thread)
284 {
285 /* Clear it */
286 KeGetCurrentPrcb()->NpxThread = NULL;
287 Ke386FnInit();
288 }
289 #else
290 /* Nothing to do */
291 #endif
292 }
293
294 FORCEINLINE
295 VOID
296 Ke386SetGdtEntryBase(PKGDTENTRY GdtEntry, PVOID BaseAddress)
297 {
298 GdtEntry->BaseLow = (USHORT)((ULONG_PTR)BaseAddress & 0xFFFF);
299 GdtEntry->HighWord.Bytes.BaseMid = (UCHAR)((ULONG_PTR)BaseAddress >> 16);
300 GdtEntry->HighWord.Bytes.BaseHi = (UCHAR)((ULONG_PTR)BaseAddress >> 24);
301 }
302
303 FORCEINLINE
304 VOID
305 KiSetTebBase(PKPCR Pcr, PVOID TebAddress)
306 {
307 Pcr->NtTib.Self = TebAddress;
308 Ke386SetGdtEntryBase(&Pcr->GDT[KGDT_R3_TEB / sizeof(KGDTENTRY)], TebAddress);
309 }
310
/* Builds the TSS and patches the IDT/GDT for it */
VOID
FASTCALL
Ki386InitializeTss(
    IN PKTSS Tss,
    IN PKIDTENTRY Idt,
    IN PKGDTENTRY Gdt
);

/* CPU feature / identification setup, run during processor init */
VOID
NTAPI
KiSetCR0Bits(VOID);

VOID
NTAPI
KiGetCacheInformation(VOID);

BOOLEAN
NTAPI
KiIsNpxPresent(
    VOID
);

BOOLEAN
NTAPI
KiIsNpxErrataPresent(
    VOID
);

VOID
NTAPI
KiSetProcessorType(VOID);

ULONG
NTAPI
KiGetFeatureBits(VOID);

/* First code executed by a newly created thread */
VOID
NTAPI
KiThreadStartup(VOID);

/* NOTE(review): Descriptor is annotated IN but looks like an output
   buffer for the copied GDT entry -- confirm against the definition */
NTSTATUS
NTAPI
Ke386GetGdtEntryThread(
    IN PKTHREAD Thread,
    IN ULONG Offset,
    IN PKGDTENTRY Descriptor
);

VOID
NTAPI
KiFlushNPXState(
    IN FLOATING_SAVE_AREA *SaveArea
);

VOID
NTAPI
Ki386AdjustEsp0(
    IN PKTRAP_FRAME TrapFrame
);

/* VDM / V8086 support */
VOID
NTAPI
Ki386SetupAndExitToV86Mode(
    OUT PTEB VdmTeb
);

VOID
NTAPI
KeI386VdmInitialize(
    VOID
);

/* Paging feature enablement; these run with a context cookie on each CPU */
ULONG_PTR
NTAPI
Ki386EnableGlobalPage(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableTargetLargePage(
    IN ULONG_PTR Context
);

/* Temporary identity-map management used while enabling large pages */
BOOLEAN
NTAPI
Ki386CreateIdentityMap(
    IN PLARGE_IDENTITY_MAP IdentityMap,
    IN PVOID StartPtr,
    IN ULONG Length
);

VOID
NTAPI
Ki386FreeIdentityMap(
    IN PLARGE_IDENTITY_MAP IdentityMap
);

VOID
NTAPI
Ki386EnableCurrentLargePage(
    IN ULONG_PTR StartAddress,
    IN ULONG Cr3
);

/* Errata and MSR setup */
VOID
NTAPI
KiI386PentiumLockErrataFixup(
    VOID
);

VOID
NTAPI
KiInitializePAT(
    VOID
);

VOID
NTAPI
KiInitializeMTRR(
    IN BOOLEAN FinalCpu
);

VOID
NTAPI
KiAmdK6InitializeMTRR(
    VOID
);

VOID
NTAPI
KiRestoreFastSyscallReturnState(
    VOID
);

ULONG_PTR
NTAPI
Ki386EnableDE(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableFxsr(
    IN ULONG_PTR Context
);

ULONG_PTR
NTAPI
Ki386EnableXMMIExceptions(
    IN ULONG_PTR Context
);

/* VDM opcode dispatch (see VDM helper macros above) */
BOOLEAN
NTAPI
VdmDispatchBop(
    IN PKTRAP_FRAME TrapFrame
);

BOOLEAN
FASTCALL
KiVdmOpcodePrefix(
    IN PKTRAP_FRAME TrapFrame,
    IN ULONG Flags
);

BOOLEAN
FASTCALL
Ki386HandleOpcodeV86(
    IN PKTRAP_FRAME TrapFrame
);

/* Trap exit helpers (implemented in assembly / trap handling code) */
DECLSPEC_NORETURN
VOID
FASTCALL
KiEoiHelper(
    IN PKTRAP_FRAME TrapFrame
);

VOID
FASTCALL
Ki386BiosCallReturnAddress(
    IN PKTRAP_FRAME TrapFrame
);

ULONG_PTR
FASTCALL
KiExitV86Mode(
    IN PKTRAP_FRAME TrapFrame
);

/* Raises an exception described by up to three parameters on behalf of
   the faulting trap frame; does not return */
DECLSPEC_NORETURN
VOID
NTAPI
KiDispatchExceptionFromTrapFrame(
    IN NTSTATUS Code,
    IN ULONG_PTR Address,
    IN ULONG ParameterCount,
    IN ULONG_PTR Parameter1,
    IN ULONG_PTR Parameter2,
    IN ULONG_PTR Parameter3,
    IN PKTRAP_FRAME TrapFrame
);
514
//
// Global x86 only Kernel data
//
extern PVOID Ki386IopmSaveArea;
extern ULONG KeI386EFlagsAndMaskV86;
extern ULONG KeI386EFlagsOrMaskV86;
extern BOOLEAN KeI386VirtualIntExtensions;
extern KIDTENTRY KiIdt[MAXIMUM_IDTVECTOR+1];
extern KDESCRIPTOR KiIdtDescriptor;
extern BOOLEAN KiI386PentiumLockErrataPresent;
/* CPU feature presence flags and identification, filled in at boot */
extern ULONG KeI386NpxPresent;
extern ULONG KeI386XMMIPresent;
extern ULONG KeI386FxsrPresent;
extern ULONG KiMXCsrMask;
extern ULONG KeI386CpuType;
extern ULONG KeI386CpuStep;
extern ULONG Ke386CacheAlignment;
extern ULONG KiFastSystemCallDisable;
extern UCHAR KiDebugRegisterTrapOffsets[9];
extern UCHAR KiDebugRegisterContextOffsets[9];
/* Assembly entry points and code labels referenced from C */
extern DECLSPEC_NORETURN VOID __cdecl KiTrap02(VOID);
extern VOID __cdecl KiTrap08(VOID);
extern VOID __cdecl KiTrap13(VOID);
extern VOID __cdecl KiFastCallEntry(VOID);
extern VOID NTAPI ExpInterlockedPopEntrySListFault(VOID);
extern VOID NTAPI ExpInterlockedPopEntrySListResume(VOID);
extern VOID __cdecl CopyParams(VOID);
extern VOID __cdecl ReadBatch(VOID);
extern VOID __cdecl FrRestore(VOID);
/* CHAR[] labels mark code addresses, not character data */
extern CHAR KiSystemCallExitBranch[];
extern CHAR KiSystemCallExit[];
extern CHAR KiSystemCallExit2[];
547
548 //
549 // Trap Macros
550 //
551 #include "trap_x.h"
552
553 //
554 // Returns a thread's FPU save area
555 //
556 FORCEINLINE
557 PFX_SAVE_AREA
558 KiGetThreadNpxArea(IN PKTHREAD Thread)
559 {
560 ASSERT((ULONG_PTR)Thread->InitialStack % 16 == 0);
561 return (PFX_SAVE_AREA)((ULONG_PTR)Thread->InitialStack - sizeof(FX_SAVE_AREA));
562 }
563
564 //
565 // Sanitizes a selector
566 //
567 FORCEINLINE
568 ULONG
569 Ke386SanitizeSeg(IN ULONG Cs,
570 IN KPROCESSOR_MODE Mode)
571 {
572 //
573 // Check if we're in kernel-mode, and force CPL 0 if so.
574 // Otherwise, force CPL 3.
575 //
576 return ((Mode == KernelMode) ?
577 (Cs & (0xFFFF & ~RPL_MASK)) :
578 (RPL_MASK | (Cs & 0xFFFF)));
579 }
580
581 //
582 // Sanitizes EFLAGS
583 //
584 FORCEINLINE
585 ULONG
586 Ke386SanitizeFlags(IN ULONG Eflags,
587 IN KPROCESSOR_MODE Mode)
588 {
589 //
590 // Check if we're in kernel-mode, and sanitize EFLAGS if so.
591 // Otherwise, also force interrupt mask on.
592 //
593 return ((Mode == KernelMode) ?
594 (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
595 (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
596 }
597
598 //
599 // Sanitizes a Debug Register
600 //
601 FORCEINLINE
602 PVOID
603 Ke386SanitizeDr(IN PVOID DrAddress,
604 IN KPROCESSOR_MODE Mode)
605 {
606 //
607 // Check if we're in kernel-mode, and return the address directly if so.
608 // Otherwise, make sure it's not inside the kernel-mode address space.
609 // If it is, then clear the address.
610 //
611 return ((Mode == KernelMode) ? DrAddress :
612 (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
613 }
614
//
// Exception with no arguments
//
// Thin convenience wrapper over KiDispatchExceptionFromTrapFrame;
// does not return.
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiDispatchException0Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with no arguments */
    KiDispatchExceptionFromTrapFrame(Code, Address, 0, 0, 0, 0, TrapFrame);
}
628
//
// Exception with one argument
//
// Thin convenience wrapper over KiDispatchExceptionFromTrapFrame;
// does not return.
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiDispatchException1Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with one argument */
    KiDispatchExceptionFromTrapFrame(Code, Address, 1, P1, 0, 0, TrapFrame);
}
643
//
// Exception with two arguments
//
// Thin convenience wrapper over KiDispatchExceptionFromTrapFrame;
// does not return.
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiDispatchException2Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN ULONG P2,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with two arguments */
    KiDispatchExceptionFromTrapFrame(Code, Address, 2, P1, P2, 0, TrapFrame);
}
659
//
// Performs a system call
//

/*
 * This sequence does a RtlCopyMemory(Stack - StackBytes, Arguments, StackBytes)
 * and then calls the function associated with the system call.
 *
 * It's done in assembly for two reasons: we need to muck with the stack,
 * and the call itself restores the stack back for us. The only way to do
 * this in C is to do manual C handlers for every possible number of args on
 * the stack, and then have the handler issue a call by pointer. This is
 * wasteful since it'll basically push the values twice and require another
 * level of call indirection.
 *
 * The ARM kernel currently does this, but it should probably be changed
 * later to function like this as well.
 *
 */
#ifdef __GNUC__
FORCEINLINE
NTSTATUS
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    NTSTATUS Result;

    /*
     * Open space on the stack, REP MOVSD the argument bytes into it
     * (StackBytes is assumed to be a multiple of 4 -- the SHR by 2
     * discards any remainder), then call the handler, which cleans up
     * the copied arguments itself. The result is taken from EAX.
     */
    __asm__ __volatile__
    (
        "subl %1, %%esp\n"
        "movl %%esp, %%edi\n"
        "movl %2, %%esi\n"
        "shrl $2, %1\n"
        "rep movsd\n"
        "call *%3\n"
        "movl %%eax, %0\n"
        : "=r"(Result)
        : "c"(StackBytes),
          "d"(Arguments),
          "r"(Handler)
        : "%esp", "%esi", "%edi"
    );
    return Result;
}
#elif defined(_MSC_VER)
FORCEINLINE
NTSTATUS
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    /* Same copy-and-call sequence as the GCC version above */
    __asm
    {
        mov ecx, StackBytes
        mov esi, Arguments
        mov eax, Handler
        sub esp, ecx
        mov edi, esp
        shr ecx, 2
        rep movsd
        call eax
    }
    /* Return with result in EAX */
    /* NOTE(review): intentionally no C return statement -- MSVC inline
       asm leaves the handler's EAX as the function's return value */
}
#else
#error Unknown Compiler
#endif
728
//
// Checks for pending APCs
//
// On a trap that interrupted user mode (or V8086 mode), delivers any
// pending user-mode APCs before returning. Delivery happens at
// APC_LEVEL with interrupts enabled; the loop re-checks because APC
// delivery can queue further APCs.
//
FORCEINLINE
VOID
KiCheckForApcDelivery(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Check for V8086 or user-mode trap */
    if ((TrapFrame->EFlags & EFLAGS_V86_MASK) || (KiUserTrap(TrapFrame)))
    {
        /* Get the thread */
        Thread = KeGetCurrentThread();
        while (TRUE)
        {
            /* Turn off the alerted state for kernel mode */
            Thread->Alerted[KernelMode] = FALSE;

            /* Are there pending user APCs? */
            if (!Thread->ApcState.UserApcPending) break;

            /* Raise to APC level and enable interrupts */
            OldIrql = KfRaiseIrql(APC_LEVEL);
            _enable();

            /* Deliver APCs */
            KiDeliverApc(UserMode, NULL, TrapFrame);

            /* Restore IRQL and disable interrupts once again */
            KfLowerIrql(OldIrql);
            _disable();
        }
    }
}
766
//
// Converts a base thread to a GUI thread
//
// Returns the NTSTATUS of PsConvertToGuiThread. Implemented in assembly
// for GCC so EBP can be re-based onto the new kernel stack before any
// EBP-relative access happens (see the detailed comment below).
//
#ifdef __GNUC__
FORCEINLINE
NTSTATUS
KiConvertToGuiThread(VOID)
{
    NTSTATUS Result;
    PVOID StackFrame;

    /*
     * Converting to a GUI thread safely updates ESP in-place as well as the
     * current Thread->TrapFrame and EBP when KeSwitchKernelStack is called.
     *
     * However, PsConvertToGuiThread "helpfully" restores EBP to the original
     * caller's value, since it is considered a nonvolatile register. As such,
     * as soon as we're back after the conversion and we try to store the result
     * which will probably be in some stack variable (EBP-based), we'll crash as
     * we are touching the de-allocated non-expanded stack.
     *
     * Thus we need a way to update our EBP before EBP is touched, and the only
     * way to guarantee this is to do the call itself in assembly, use the EAX
     * register to store the result, fixup EBP, and then let the C code continue
     * on its merry way.
     *
     */
    __asm__ __volatile__
    (
        /* Save the EBP-to-ESP distance, call, then rebuild EBP from the
           (possibly relocated) new ESP plus that same distance */
        "movl %%ebp, %1\n\t"
        "subl %%esp, %1\n\t"
        "call _PsConvertToGuiThread@0\n\t"
        "addl %%esp, %1\n\t"
        "movl %1, %%ebp"
        : "=a"(Result), "=r"(StackFrame)
        :
        : "%esp", "%ecx", "%edx", "memory"
    );
    return Result;
}
#elif defined(_MSC_VER)
/* MSVC builds implement this out-of-line in assembly */
NTSTATUS
NTAPI
KiConvertToGuiThread(VOID);
#else
#error Unknown Compiler
#endif
814
//
// Switches from boot loader to initial kernel stack
//
// Loads ESP with the new stack top (leaving room for the NPX area and a
// trap frame), pushes the initial CR0 FPU bits as the argument, and
// jumps -- never returns to the caller.
//
FORCEINLINE
VOID
KiSwitchToBootStack(IN ULONG_PTR InitialStack)
{
    /* We have to switch to a new stack before continuing kernel initialization */
#ifdef __GNUC__
    __asm__
    (
        "movl %0, %%esp\n"
        "subl %1, %%esp\n"
        "pushl %2\n"
        "jmp _KiSystemStartupBootStack@0\n"
        :
        : "c"(InitialStack),
          "i"(NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH),
          "i"(CR0_EM | CR0_TS | CR0_MP)
        : "%esp"
    );
#elif defined(_MSC_VER)
    VOID NTAPI KiSystemStartupBootStack(VOID);
    __asm
    {
        mov esp, InitialStack
        sub esp, (NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH)
        push (CR0_EM | CR0_TS | CR0_MP)
        jmp KiSystemStartupBootStack
    }
#else
#error Unknown Compiler
#endif
}
849
//
// Emits the iret instruction for C code
//
// In 32-bit mode both spellings assemble to the 32-bit IRETD; control
// transfers to the frame on the stack and never returns here.
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiIret(VOID)
{
#if defined(__GNUC__)
    __asm__ __volatile__
    (
        "iret\n"
    );
#elif defined(_MSC_VER)
    __asm
    {
        iretd
    }
#else
#error Unsupported compiler
#endif
    UNREACHABLE;
}
873
//
// Normally this is done by the HAL, but on x86 as an optimization, the kernel
// initiates the end by calling back into the HAL and exiting the trap here.
//
// Does not return: KiEoiHelper exits through the trap frame.
//
FORCEINLINE
VOID
KiEndInterrupt(IN KIRQL Irql,
               IN PKTRAP_FRAME TrapFrame)
{
    /* Disable interrupts and end the interrupt
       (interrupts must be off before the HAL lowers IRQL) */
    _disable();
    HalEndSystemInterrupt(Irql, TrapFrame);

    /* Exit the interrupt */
    KiEoiHelper(TrapFrame);
}
890
//
// PERF Code
//
// Prints boot timing (TSC cycles between BootCycles and now) plus basic
// per-processor counters from the current PRCB.
//
FORCEINLINE
VOID
Ki386PerfEnd(VOID)
{
    /* BootCycles is captured early in boot; defined elsewhere */
    extern ULONGLONG BootCyclesEnd, BootCycles;
    BootCyclesEnd = __rdtsc();
    DbgPrint("Boot took %I64u cycles!\n", BootCyclesEnd - BootCycles);
    DbgPrint("Interrupts: %u System Calls: %u Context Switches: %u\n",
             KeGetCurrentPrcb()->InterruptCount,
             KeGetCurrentPrcb()->KeSystemCalls,
             KeGetContextSwitches(KeGetCurrentPrcb()));
}
906
907 FORCEINLINE
908 PULONG
909 KiGetUserModeStackAddress(void)
910 {
911 return &(KeGetCurrentThread()->TrapFrame->HardwareEsp);
912 }
913
914 #endif