- Add the other CPU_* codes (got them from a PDB)
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/ke/i386/cpu.c
5 * PURPOSE: Routines for CPU-level support
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 /* INCLUDES *****************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 /* GLOBALS *******************************************************************/
16
17 /* The Boot TSS */
18 KTSS KiBootTss;
19
20 /* The TSS to use for Double Fault Traps (INT 0x8) */
21 UCHAR KiDoubleFaultTSS[KTSS_IO_MAPS];
22
23 /* The TSS to use for NMI Fault Traps (INT 0x2) */
24 UCHAR KiNMITSS[KTSS_IO_MAPS];
25
26 /* The Boot GDT */
27 KGDTENTRY KiBootGdt[256] =
28 {
29 {0x0000, 0x0000, {{0x00, 0x00, 0x00, 0x00}}}, /* KGDT_NULL */
30 {0xffff, 0x0000, {{0x00, 0x9b, 0xcf, 0x00}}}, /* KGDT_R0_CODE */
31 {0xffff, 0x0000, {{0x00, 0x93, 0xcf, 0x00}}}, /* KGDT_R0_DATA */
32 {0xffff, 0x0000, {{0x00, 0xfb, 0xcf, 0x00}}}, /* KGDT_R3_CODE */
33 {0xffff, 0x0000, {{0x00, 0xf3, 0xcf, 0x00}}}, /* KGDT_R3_DATA*/
34 {0x0000, 0x0000, {{0x00, 0x00, 0x00, 0x00}}}, /* KGDT_TSS */
35 {0x0001, 0xf000, {{0xdf, 0x93, 0xc0, 0xff}}}, /* KGDT_R0_PCR */
36 {0x0fff, 0x0000, {{0x00, 0xf3, 0x40, 0x00}}}, /* KGDT_R3_TEB */
37 {0x0000, 0x0000, {{0x00, 0x00, 0x00, 0x00}}}, /* KGDT_UNUSED */
38 {0x0000, 0x0000, {{0x00, 0x00, 0x00, 0x00}}}, /* KGDT_LDT */
39 {0x0000, 0x0000, {{0x00, 0x00, 0x00, 0x00}}}, /* KGDT_DF_TSS */
40 {0x0000, 0x0000, {{0x00, 0x00, 0x00, 0x00}}} /* KGDT_NMI_TSS */
41 };
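/*
 * For reference, each initializer above is {LimitLow, BaseLow, {{BaseMid,
 * AccessByte, Flags/LimitHi, BaseHi}}}. KGDT_R0_CODE, for example, decodes
 * as base 0x00000000 with a 0xFFFFF-page limit (a 4GB flat segment, since
 * 0xcf selects 4KB granularity and a 32-bit default size) and access byte
 * 0x9b, i.e. a present, DPL-0, execute/read code segment; the ring-3
 * descriptors only differ in their DPL bits (0xfb/0xf3).
 */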
42
43 /* GDT Descriptor */
44 KDESCRIPTOR KiGdtDescriptor = {0, sizeof(KiBootGdt) - 1, (ULONG)KiBootGdt};
45
46 /* CPU Features and Flags */
47 ULONG KeI386CpuType;
48 ULONG KeI386CpuStep;
49 ULONG KeProcessorArchitecture;
50 ULONG KeProcessorLevel;
51 ULONG KeProcessorRevision;
52 ULONG KeFeatureBits;
53 ULONG KiFastSystemCallDisable = 1;
54 ULONG KeI386NpxPresent = 0;
55 ULONG KiMXCsrMask = 0;
56 ULONG MxcsrFeatureMask = 0;
57 ULONG KeI386XMMIPresent = 0;
58 ULONG KeI386FxsrPresent = 0;
59 ULONG KeI386MachineType;
60 ULONG Ke386Pae = FALSE;
61 ULONG Ke386NoExecute = FALSE;
62 ULONG KeLargestCacheLine = 0x40;
63 ULONG KeDcacheFlushCount = 0;
64 ULONG KeIcacheFlushCount = 0;
65 ULONG KiDmaIoCoherency = 0;
66 CHAR KeNumberProcessors;
67 KAFFINITY KeActiveProcessors = 1;
68 BOOLEAN KiI386PentiumLockErrataPresent;
69 BOOLEAN KiSMTProcessorsPresent;
70
71 /* Freeze data */
72 KIRQL KiOldIrql;
73 ULONG KiFreezeFlag;
74
75 /* Flush data */
76 volatile LONG KiTbFlushTimeStamp;
77
78 /* CPU Signatures */
79 static const CHAR CmpIntelID[] = "GenuineIntel";
80 static const CHAR CmpAmdID[] = "AuthenticAMD";
81 static const CHAR CmpCyrixID[] = "CyrixInstead";
82 static const CHAR CmpTransmetaID[] = "GenuineTMx86";
83 static const CHAR CmpCentaurID[] = "CentaurHauls";
84 static const CHAR CmpRiseID[] = "RiseRiseRise";
85
86 /* SUPPORT ROUTINES FOR MSVC COMPATIBILITY ***********************************/
87
88 VOID
89 NTAPI
90 CPUID(OUT ULONG CpuInfo[4],
91 IN ULONG InfoType)
92 {
93 Ki386Cpuid(InfoType, &CpuInfo[0], &CpuInfo[1], &CpuInfo[2], &CpuInfo[3]);
94 }
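/*
 * Note: the four CpuInfo slots are filled in EAX, EBX, ECX, EDX order; the
 * rest of this file relies on that layout (Reg[1] for the APIC ID and the
 * logical processor count, Reg[2] for the AMD L2 descriptor, Reg[3] for the
 * standard feature flags).
 */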
95
96 VOID
97 WRMSR(IN ULONG Register,
98 IN LONGLONG Value)
99 {
100 LARGE_INTEGER LargeVal;
101 LargeVal.QuadPart = Value;
102 Ke386Wrmsr(Register, LargeVal.HighPart, LargeVal.LowPart);
103 }
104
105 LONGLONG
106 RDMSR(IN ULONG Register)
107 {
108 LARGE_INTEGER LargeVal = {{0}};
109 Ke386Rdmsr(Register, LargeVal.HighPart, LargeVal.LowPart);
110 return LargeVal.QuadPart;
111 }
112
113 /* FUNCTIONS *****************************************************************/
114
115 VOID
116 NTAPI
117 KiSetProcessorType(VOID)
118 {
119 ULONG EFlags = 0, NewEFlags;
120 ULONG Reg[4];
121 ULONG Stepping, Type;
122
123 /* Start by assuming no CPUID data */
124 KeGetCurrentPrcb()->CpuID = 0;
125
126 /* Save EFlags */
127 Ke386SaveFlags(EFlags);
128
129 /* XOR out the ID bit and update EFlags */
130 NewEFlags = EFlags ^ EFLAGS_ID;
131 Ke386RestoreFlags(NewEFlags);
132
133 /* Get them back and see if they were modified */
134 Ke386SaveFlags(NewEFlags);
135 if (NewEFlags != EFlags)
136 {
137 /* The modification worked, so CPUID exists. Set the ID Bit again. */
138 EFlags |= EFLAGS_ID;
139 Ke386RestoreFlags(EFlags);
140
141 /* Perform CPUID 0 to see if CPUID 1 is supported */
142 CPUID(Reg, 0);
143 if (Reg[0] > 0)
144 {
145 /* Do CPUID 1 now */
146 CPUID(Reg, 1);
147
148 /*
149  * Get the Stepping and Type. The Stepping combines the
150  * Model and the Step, while the Type holds the Family field
151  * of the signature; the processor-type bits are ignored.
152  *
153  * For the stepping, we convert this: zzzzzzxy into this: x0y
154  */
155 Stepping = Reg[0] & 0xF0;
156 Stepping <<= 4;
157 Stepping += (Reg[0] & 0xFF);
158 Stepping &= 0xF0F;
159 Type = Reg[0] & 0xF00;
160 Type >>= 8;
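            /*
             * As a worked example, a signature of 0x0683 in EAX (Family 6,
             * Model 8, Stepping 3, used here purely for illustration) gives
             * Stepping = 0x0803 and Type = 6.
             */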
161
162 /* Save them in the PRCB */
163 KeGetCurrentPrcb()->CpuID = TRUE;
164 KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
165 KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;
166 }
167 else
168 {
169 DPRINT1("CPUID Support lacking\n");
170 }
171 }
172 else
173 {
174 DPRINT1("CPUID Support lacking\n");
175 }
176
177 /* Restore EFLAGS */
178 Ke386RestoreFlags(EFlags);
179 }
180
181 ULONG
182 NTAPI
183 KiGetCpuVendor(VOID)
184 {
185 PKPRCB Prcb = KeGetCurrentPrcb();
186 ULONG Vendor[5];
187 ULONG Temp;
188
189 /* Assume no Vendor ID and fail if no CPUID Support. */
190 Prcb->VendorString[0] = 0;
191 if (!Prcb->CpuID) return 0;
192
193 /* Get the Vendor ID and null-terminate it */
194 CPUID(Vendor, 0);
195 Vendor[4] = 0;
196
197 /* Re-arrange vendor string */
198 Temp = Vendor[2];
199 Vendor[2] = Vendor[3];
200 Vendor[3] = Temp;
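    /*
     * CPUID leaf 0 returns the vendor string in EBX, EDX, ECX order, so the
     * swap above puts Vendor[1..3] into reading order; an Intel part, for
     * example, returns EBX="Genu", EDX="ineI", ECX="ntel" = "GenuineIntel".
     */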
201
202 /* Copy it to the PRCB and null-terminate it again */
203 RtlCopyMemory(Prcb->VendorString,
204 &Vendor[1],
205 sizeof(Prcb->VendorString) - sizeof(CHAR));
206 Prcb->VendorString[sizeof(Prcb->VendorString) - sizeof(CHAR)] = ANSI_NULL;
207
208 /* Now check the CPU Type */
209 if (!strcmp(Prcb->VendorString, CmpIntelID))
210 {
211 return CPU_INTEL;
212 }
213 else if (!strcmp(Prcb->VendorString, CmpAmdID))
214 {
215 return CPU_AMD;
216 }
217 else if (!strcmp(Prcb->VendorString, CmpCyrixID))
218 {
219 DPRINT1("Cyrix CPU support not fully tested!\n");
220 return CPU_CYRIX;
221 }
222 else if (!strcmp(Prcb->VendorString, CmpTransmetaID))
223 {
224 DPRINT1("Transmeta CPU support not fully tested!\n");
225 return CPU_TRANSMETA;
226 }
227 else if (!strcmp(Prcb->VendorString, CmpCentaurID))
228 {
229 DPRINT1("Centaur CPU support not fully tested!\n");
230 return CPU_CENTAUR;
231 }
232 else if (!strcmp(Prcb->VendorString, CmpRiseID))
233 {
234 DPRINT1("Rise CPU support not fully tested!\n");
235 return CPU_RISE;
236 }
237
238 /* Invalid CPU */
239 return 0;
240 }
241
242 ULONG
243 NTAPI
244 KiGetFeatureBits(VOID)
245 {
246 PKPRCB Prcb = KeGetCurrentPrcb();
247 ULONG Vendor;
248 ULONG FeatureBits = KF_WORKING_PTE;
249 ULONG Reg[4];
250 BOOLEAN ExtendedCPUID = TRUE;
251 ULONG CpuFeatures = 0;
252
253 /* Get the Vendor ID */
254 Vendor = KiGetCpuVendor();
255
256 /* Make sure we got a valid vendor ID at least. */
257 if (!Vendor) return FeatureBits;
258
259 /* Get the CPUID Info. Features are in Reg[3]. */
260 CPUID(Reg, 1);
261
262 /* Set the initial APIC ID */
263 Prcb->InitialApicId = (UCHAR)(Reg[1] >> 24);
264
265 switch (Vendor)
266 {
267 /* Intel CPUs */
268 case CPU_INTEL:
269 /* Check if it's a P6 */
270 if (Prcb->CpuType == 6)
271 {
272 /* Perform the special sequence to get the MicroCode Signature */
273 WRMSR(0x8B, 0);
274 CPUID(Reg, 1);
275 Prcb->UpdateSignature.QuadPart = RDMSR(0x8B);
276 }
277 else if (Prcb->CpuType == 5)
278 {
279 /* On P5, enable workaround for the LOCK errata. */
280 KiI386PentiumLockErrataPresent = TRUE;
281 }
282
283 /* Check for broken P6 with bad SMP PTE implementation */
284 if (((Reg[0] & 0x0FF0) == 0x0610 && (Reg[0] & 0x000F) <= 0x9) ||
285 ((Reg[0] & 0x0FF0) == 0x0630 && (Reg[0] & 0x000F) <= 0x4))
286 {
287 /* Clear the working-PTE feature bit. */
288 FeatureBits &= ~KF_WORKING_PTE;
289 }
290
291 /* Check if the CPU is too old to support SYSENTER */
292 if ((Prcb->CpuType < 6) ||
293 ((Prcb->CpuType == 6) && (Prcb->CpuStep < 0x0303)))
294 {
295 /* Disable it */
296 Reg[3] &= ~0x800;
297 }
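            /*
             * Intel documents that early P6 parts (Model < 3, or Model 3
             * with Stepping < 3) can set the SEP flag even though SYSENTER
             * is not usable, hence the signature check above.
             */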
298
299 /* Set the current features */
300 CpuFeatures = Reg[3];
301
302 break;
303
304 /* AMD CPUs */
305 case CPU_AMD:
306
307 /* Check if this is a K5 or K6. (family 5) */
308 if ((Reg[0] & 0x0F00) == 0x0500)
309 {
310 /* Get the Model Number */
311 switch (Reg[0] & 0x00F0)
312 {
313 /* Model 1: K5 - 5k86 (initial models) */
314 case 0x0010:
315
316 /* Steppings above 3 support PGE; skip the fall-through for them */
317 if ((Reg[0] & 0x000F) > 0x03) break;
318
319 /* Model 0: K5 - SSA5 */
320 case 0x0000:
321
322 /* Model 0 doesn't support PGE at all. */
323 Reg[3] &= ~0x2000;
324 break;
325
326 /* Model 8: K6-2 */
327 case 0x0080:
328
329 /* K6-2, Step 8 and over have support for MTRR. */
330 if ((Reg[0] & 0x000F) >= 0x8) FeatureBits |= KF_AMDK6MTRR;
331 break;
332
333 /* Model 9: K6-III
334 Model D: K6-2+, K6-III+ */
335 case 0x0090:
336 case 0x00D0:
337
338 FeatureBits |= KF_AMDK6MTRR;
339 break;
340 }
341 }
342 else if ((Reg[0] & 0x0F00) < 0x0500)
343 {
344 /* Families below 5 don't support PGE, PSE or CMOV at all */
345 Reg[3] &= ~(0x08 | 0x2000 | 0x8000);
346
347 /* They also don't support advanced CPUID functions. */
348 ExtendedCPUID = FALSE;
349 }
350
351 /* Set the current features */
352 CpuFeatures = Reg[3];
353
354 break;
355
356 /* Cyrix CPUs */
357 case CPU_CYRIX:
358 break;
359
360 /* Transmeta CPUs */
361 case CPU_TRANSMETA:
362 /* Enable CMPXCHG8B if the signature (family.model.stepping) is at least 5.4.2 */
363 if ((Reg[0] & 0x0FFF) >= 0x0542)
364 {
365 WRMSR(0x80860004, RDMSR(0x80860004) | 0x0100);
366 FeatureBits |= KF_CMPXCHG8B;
367 }
368
369 break;
370
371 /* Centaur, IDT, Rise and VIA CPUs */
372 case CPU_CENTAUR:
373 case CPU_RISE:
374 /* These CPUs don't report the presence of CMPXCHG8B through CPUID.
375 However, this feature exists and operates properly without any additional steps. */
376 FeatureBits |= KF_CMPXCHG8B;
377
378 break;
379 }
380
381 /* Convert all CPUID Feature bits into our format */
382 if (CpuFeatures & 0x00000002) FeatureBits |= KF_V86_VIS | KF_CR4;
383 if (CpuFeatures & 0x00000008) FeatureBits |= KF_LARGE_PAGE | KF_CR4;
384 if (CpuFeatures & 0x00000010) FeatureBits |= KF_RDTSC;
385 if (CpuFeatures & 0x00000100) FeatureBits |= KF_CMPXCHG8B;
386 if (CpuFeatures & 0x00000800) FeatureBits |= KF_FAST_SYSCALL;
387 if (CpuFeatures & 0x00001000) FeatureBits |= KF_MTRR;
388 if (CpuFeatures & 0x00002000) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;
389 if (CpuFeatures & 0x00008000) FeatureBits |= KF_CMOV;
390 if (CpuFeatures & 0x00010000) FeatureBits |= KF_PAT;
391 if (CpuFeatures & 0x00200000) FeatureBits |= KF_DTS;
392 if (CpuFeatures & 0x00800000) FeatureBits |= KF_MMX;
393 if (CpuFeatures & 0x01000000) FeatureBits |= KF_FXSR;
394 if (CpuFeatures & 0x02000000) FeatureBits |= KF_XMMI;
395 if (CpuFeatures & 0x04000000) FeatureBits |= KF_XMMI64;
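    /*
     * The masks above are the standard CPUID.01h:EDX feature flags: bit 1
     * VME, bit 3 PSE, bit 4 TSC, bit 8 CX8, bit 11 SEP, bit 12 MTRR, bit 13
     * PGE, bit 15 CMOV, bit 16 PAT, bit 21 DS, bit 23 MMX, bit 24 FXSR,
     * bit 25 SSE and bit 26 SSE2.
     */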
396
397 /* Check if the CPU has hyper-threading */
398 if (CpuFeatures & 0x10000000)
399 {
400 /* Set the number of logical CPUs */
401 Prcb->LogicalProcessorsPerPhysicalProcessor = (UCHAR)(Reg[1] >> 16);
402 if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
403 {
404 /* SMT (hyper-threading) is active */
405 KiSMTProcessorsPresent = TRUE;
406 }
407 }
408 else
409 {
410 /* Only one logical processor per physical processor */
411 Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
412 }
413
414 /* Check if CPUID 0x80000000 is supported */
415 if (ExtendedCPUID)
416 {
417 /* Do the call */
418 CPUID(Reg, 0x80000000);
419 if ((Reg[0] & 0xffffff00) == 0x80000000)
420 {
421 /* Check if CPUID 0x80000001 is supported */
422 if (Reg[0] >= 0x80000001)
423 {
424 /* Check which extended features are available. */
425 CPUID(Reg, 0x80000001);
426
427 /* Check if NX-bit is supported */
428 if (Reg[3] & 0x00100000) FeatureBits |= KF_NX_BIT;
429
430 /* Now handle the vendor-specific extended features */
431 switch (Vendor)
432 {
433 case CPU_AMD:
434 case CPU_CENTAUR:
435 if (Reg[3] & 0x80000000) FeatureBits |= KF_3DNOW;
436 break;
437 }
438 }
439 }
440 }
441
442 /* Return the Feature Bits */
443 return FeatureBits;
444 }
445
446 VOID
447 NTAPI
448 KiGetCacheInformation(VOID)
449 {
450 PKIPCR Pcr = (PKIPCR)KeGetPcr();
451 ULONG Vendor;
452 ULONG Data[4];
453 ULONG CacheRequests = 0, i;
454 ULONG CurrentRegister;
455 UCHAR RegisterByte;
456 BOOLEAN FirstPass = TRUE;
457
458 /* Set default L2 size */
459 Pcr->SecondLevelCacheSize = 0;
460
461 /* Get the Vendor ID and make sure we support CPUID */
462 Vendor = KiGetCpuVendor();
463 if (!Vendor) return;
464
465 /* Check the Vendor ID */
466 switch (Vendor)
467 {
468 /* Handle Intel case */
469 case CPU_INTEL:
470
471 /* Check if we support CPUID 2 */
472 CPUID(Data, 0);
473 if (Data[0] >= 2)
474 {
475 /* We need to loop for the number of times CPUID will tell us to */
476 do
477 {
478 /* Do the CPUID call */
479 CPUID(Data, 2);
480
481 /* Check if it was the first call */
482 if (FirstPass)
483 {
484 /*
485 * The number of times to loop is the first byte. Read
486 * it and then destroy it so we don't get confused.
487 */
488 CacheRequests = Data[0] & 0xFF;
489 Data[0] &= 0xFFFFFF00;
490
491 /* Don't go over this again */
492 FirstPass = FALSE;
493 }
494
495 /* Loop all 4 registers */
496 for (i = 0; i < 4; i++)
497 {
498 /* Get the current register */
499 CurrentRegister = Data[i];
500
501 /*
502 * If the upper bit is set, then this register should
503 * be skipped.
504 */
505 if (CurrentRegister & 0x80000000) continue;
506
507 /* Keep looping for every byte inside this register */
508 while (CurrentRegister)
509 {
510 /* Read a byte, skip a byte. */
511 RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
512 CurrentRegister >>= 8;
513 if (!RegisterByte) continue;
514
515 /*
516 * Valid values are from 0x40 (0 bytes) to 0x49
517 * (32MB), or from 0x80 to 0x89 (same size but
518 * 8-way associative).
519 */
520 if (((RegisterByte > 0x40) &&
521 (RegisterByte <= 0x49)) ||
522 ((RegisterByte > 0x80) &&
523 (RegisterByte <= 0x89)))
524 {
525 /* Mask out only the first nibble */
526 RegisterByte &= 0x0F;
527
528 /* Set the L2 Cache Size */
529 Pcr->SecondLevelCacheSize = 0x10000 <<
530 RegisterByte;
531 }
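                            /*
                             * For example, descriptor 0x43 (512KB L2)
                             * yields 0x10000 << 3 = 0x80000 bytes.
                             */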
532 }
533 }
534 } while (--CacheRequests);
535 }
536 break;
537
538 case CPU_AMD:
539
540 /* Check if we support CPUID 0x80000006 */
541 CPUID(Data, 0x80000000);
542 if (Data[0] >= 0x80000006)
543 {
544 /* Get 2nd level cache and tlb size */
545 CPUID(Data, 0x80000006);
546
547 /* Set the L2 Cache Size */
548 Pcr->SecondLevelCacheSize = (Data[2] & 0xFFFF0000) >> 6;
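                /*
                 * ECX[31:16] of leaf 0x80000006 is the L2 size in KB, and
                 * the expression above equals ((ECX >> 16) << 10), i.e. a
                 * KB-to-bytes conversion; a 256KB L2 (ECX[31:16] = 0x0100)
                 * yields 0x40000.
                 */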
549 }
550 break;
551 }
552 }
553
554 VOID
555 NTAPI
556 KiSetCR0Bits(VOID)
557 {
558 ULONG Cr0;
559
560 /* Save current CR0 */
561 Cr0 = __readcr0();
562
563 /* If this is a 486 or better, enable Write-Protection */
564 if (KeGetCurrentPrcb()->CpuType > 3) Cr0 |= CR0_WP;
565
566 /* Set new Cr0 */
567 __writecr0(Cr0);
568 }
569
570 VOID
571 NTAPI
572 KiInitializeTSS2(IN PKTSS Tss,
573 IN PKGDTENTRY TssEntry OPTIONAL)
574 {
575 PUCHAR p;
576
577 /* Make sure the GDT Entry is valid */
578 if (TssEntry)
579 {
580 /* Set the Limit */
581 TssEntry->LimitLow = sizeof(KTSS) - 1;
582 TssEntry->HighWord.Bits.LimitHi = 0;
583 }
584
585 /* Now fill the I/O Map with 1s, denying access to all ports */
586 RtlFillMemory(Tss->IoMaps[0].IoMap, 8192, -1);
587
588 /* Initialize Interrupt Direction Maps */
589 p = (PUCHAR)(Tss->IoMaps[0].DirectionMap);
590 RtlZeroMemory(p, 32);
591
592 /* Add DPMI support for interrupts */
593 p[0] = 4;
594 p[3] = 0x18;
595 p[4] = 0x18;
596
597 /* Initialize the default Interrupt Direction Map */
598 p = Tss->IntDirectionMap;
599 RtlZeroMemory(Tss->IntDirectionMap, 32);
600
601 /* Add DPMI support */
602 p[0] = 4;
603 p[3] = 0x18;
604 p[4] = 0x18;
605 }
606
607 VOID
608 NTAPI
609 KiInitializeTSS(IN PKTSS Tss)
610 {
611 /* Set an invalid map base */
612 Tss->IoMapBase = KiComputeIopmOffset(IO_ACCESS_MAP_NONE);
613
614 /* Disable traps during Task Switches */
615 Tss->Flags = 0;
616
617 /* Set LDT and Ring 0 SS */
618 Tss->LDT = 0;
619 Tss->Ss0 = KGDT_R0_DATA;
620 }
621
622 VOID
623 FASTCALL
624 Ki386InitializeTss(IN PKTSS Tss,
625 IN PKIDTENTRY Idt,
626 IN PKGDTENTRY Gdt)
627 {
628 PKGDTENTRY TssEntry, TaskGateEntry;
629
630 /* Initialize the boot TSS. */
631 TssEntry = &Gdt[KGDT_TSS / sizeof(KGDTENTRY)];
632 TssEntry->HighWord.Bits.Type = I386_TSS;
633 TssEntry->HighWord.Bits.Pres = 1;
634 TssEntry->HighWord.Bits.Dpl = 0;
635 KiInitializeTSS2(Tss, TssEntry);
636 KiInitializeTSS(Tss);
637
638 /* Load the task register */
639 Ke386SetTr(KGDT_TSS);
640
641 /* Setup the Task Gate for Double Fault Traps */
642 TaskGateEntry = (PKGDTENTRY)&Idt[8];
643 TaskGateEntry->HighWord.Bits.Type = I386_TASK_GATE;
644 TaskGateEntry->HighWord.Bits.Pres = 1;
645 TaskGateEntry->HighWord.Bits.Dpl = 0;
646 ((PKIDTENTRY)TaskGateEntry)->Selector = KGDT_DF_TSS;
647
648 /* Initialize the TSS used for handling double faults. */
649 Tss = (PKTSS)KiDoubleFaultTSS;
650 KiInitializeTSS(Tss);
651 Tss->CR3 = __readcr3();
652 Tss->Esp0 = PtrToUlong(KiDoubleFaultStack);
653 Tss->Eip = PtrToUlong(KiTrap8);
654 Tss->Cs = KGDT_R0_CODE;
655 Tss->Fs = KGDT_R0_PCR;
656 Tss->Ss = Ke386GetSs();
657 Tss->Es = KGDT_R3_DATA | RPL_MASK;
658 Tss->Ds = KGDT_R3_DATA | RPL_MASK;
659
660 /* Setup the Double Trap TSS entry in the GDT */
661 TssEntry = &Gdt[KGDT_DF_TSS / sizeof(KGDTENTRY)];
662 TssEntry->HighWord.Bits.Type = I386_TSS;
663 TssEntry->HighWord.Bits.Pres = 1;
664 TssEntry->HighWord.Bits.Dpl = 0;
665 TssEntry->BaseLow = (USHORT)((ULONG_PTR)Tss & 0xFFFF);
666 TssEntry->HighWord.Bytes.BaseMid = (UCHAR)((ULONG_PTR)Tss >> 16);
667 TssEntry->HighWord.Bytes.BaseHi = (UCHAR)((ULONG_PTR)Tss >> 24);
668 TssEntry->LimitLow = KTSS_IO_MAPS;
669
670 /* Now setup the NMI Task Gate */
671 TaskGateEntry = (PKGDTENTRY)&Idt[2];
672 TaskGateEntry->HighWord.Bits.Type = I386_TASK_GATE;
673 TaskGateEntry->HighWord.Bits.Pres = 1;
674 TaskGateEntry->HighWord.Bits.Dpl = 0;
675 ((PKIDTENTRY)TaskGateEntry)->Selector = KGDT_NMI_TSS;
676
677 /* Initialize the actual TSS */
678 Tss = (PKTSS)KiNMITSS;
679 KiInitializeTSS(Tss);
680 Tss->CR3 = __readcr3();
681 Tss->Esp0 = PtrToUlong(KiDoubleFaultStack);
682 Tss->Eip = PtrToUlong(KiTrap2);
683 Tss->Cs = KGDT_R0_CODE;
684 Tss->Fs = KGDT_R0_PCR;
685 Tss->Ss = Ke386GetSs();
686 Tss->Es = KGDT_R3_DATA | RPL_MASK;
687 Tss->Ds = KGDT_R3_DATA | RPL_MASK;
688
689 /* And its associated TSS Entry */
690 TssEntry = &Gdt[KGDT_NMI_TSS / sizeof(KGDTENTRY)];
691 TssEntry->HighWord.Bits.Type = I386_TSS;
692 TssEntry->HighWord.Bits.Pres = 1;
693 TssEntry->HighWord.Bits.Dpl = 0;
694 TssEntry->BaseLow = (USHORT)((ULONG_PTR)Tss & 0xFFFF);
695 TssEntry->HighWord.Bytes.BaseMid = (UCHAR)((ULONG_PTR)Tss >> 16);
696 TssEntry->HighWord.Bytes.BaseHi = (UCHAR)((ULONG_PTR)Tss >> 24);
697 TssEntry->LimitLow = KTSS_IO_MAPS;
698 }
699
700 VOID
701 NTAPI
702 KeFlushCurrentTb(VOID)
703 {
704 /* Flush the TLB by resetting CR3 */
705 __writecr3(__readcr3());
706 }
707
708 VOID
709 NTAPI
710 KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
711 {
712 /* Restore the CR registers */
713 __writecr0(ProcessorState->SpecialRegisters.Cr0);
714 Ke386SetCr2(ProcessorState->SpecialRegisters.Cr2);
715 __writecr3(ProcessorState->SpecialRegisters.Cr3);
716 if (KeFeatureBits & KF_CR4) __writecr4(ProcessorState->SpecialRegisters.Cr4);
717
718 //
719 // Restore the DR registers
720 //
721 Ke386SetDr0(ProcessorState->SpecialRegisters.KernelDr0);
722 Ke386SetDr1(ProcessorState->SpecialRegisters.KernelDr1);
723 Ke386SetDr2(ProcessorState->SpecialRegisters.KernelDr2);
724 Ke386SetDr3(ProcessorState->SpecialRegisters.KernelDr3);
725 Ke386SetDr6(ProcessorState->SpecialRegisters.KernelDr6);
726 Ke386SetDr7(ProcessorState->SpecialRegisters.KernelDr7);
727
728 //
729 // Restore GDT, IDT, LDT and TSS
730 //
731 Ke386SetGlobalDescriptorTable(*(PKDESCRIPTOR)&ProcessorState->SpecialRegisters.Gdtr.Limit);
732 Ke386SetInterruptDescriptorTable(*(PKDESCRIPTOR)&ProcessorState->SpecialRegisters.Idtr.Limit);
733 Ke386SetTr(ProcessorState->SpecialRegisters.Tr);
734 Ke386SetLocalDescriptorTable(ProcessorState->SpecialRegisters.Ldtr);
735 }
736
737 VOID
738 NTAPI
739 KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
740 {
741 /* Save the CR registers */
742 ProcessorState->SpecialRegisters.Cr0 = __readcr0();
743 ProcessorState->SpecialRegisters.Cr2 = __readcr2();
744 ProcessorState->SpecialRegisters.Cr3 = __readcr3();
745 ProcessorState->SpecialRegisters.Cr4 = (KeFeatureBits & KF_CR4) ?
746 __readcr4() : 0;
747
748 /* Save the DR registers */
749 ProcessorState->SpecialRegisters.KernelDr0 = Ke386GetDr0();
750 ProcessorState->SpecialRegisters.KernelDr1 = Ke386GetDr1();
751 ProcessorState->SpecialRegisters.KernelDr2 = Ke386GetDr2();
752 ProcessorState->SpecialRegisters.KernelDr3 = Ke386GetDr3();
753 ProcessorState->SpecialRegisters.KernelDr6 = Ke386GetDr6();
754 ProcessorState->SpecialRegisters.KernelDr7 = Ke386GetDr7();
755 Ke386SetDr7(0);
756
757 /* Save GDT, IDT, LDT and TSS */
758 Ke386GetGlobalDescriptorTable(*(PKDESCRIPTOR)&ProcessorState->SpecialRegisters.Gdtr.Limit);
759 Ke386GetInterruptDescriptorTable(*(PKDESCRIPTOR)&ProcessorState->SpecialRegisters.Idtr.Limit);
760 Ke386GetTr(ProcessorState->SpecialRegisters.Tr);
761 Ke386GetLocalDescriptorTable(ProcessorState->SpecialRegisters.Ldtr);
762 }
763
764 VOID
765 NTAPI
766 KiInitializeMachineType(VOID)
767 {
768 /* Set the Machine Type we got from NTLDR */
769 KeI386MachineType = KeLoaderBlock->u.I386.MachineType & 0x000FF;
770 }
771
772 ULONG_PTR
773 NTAPI
774 KiLoadFastSyscallMachineSpecificRegisters(IN ULONG_PTR Context)
775 {
776 /* Set CS and ESP */
777 Ke386Wrmsr(0x174, KGDT_R0_CODE, 0);
778 Ke386Wrmsr(0x175, (ULONG)KeGetCurrentPrcb()->DpcStack, 0);
779
780 /* Set EIP (SYSENTER_EIP) */
781 Ke386Wrmsr(0x176, (ULONG)KiFastCallEntry, 0);
782 return 0;
783 }
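/*
 * MSRs 0x174, 0x175 and 0x176 are IA32_SYSENTER_CS, IA32_SYSENTER_ESP and
 * IA32_SYSENTER_EIP: on SYSENTER the CPU loads CS/SS from the first, ESP
 * from the second and EIP from the third, landing in KiFastCallEntry on the
 * DPC stack. A minimal user-mode caller (a sketch only, not the actual
 * ntdll stub) would look like:
 *
 *     mov eax, ServiceNumber   ; system service index
 *     mov edx, esp             ; lets the kernel locate the user stack
 *     sysenter
 */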
784
785 VOID
786 NTAPI
787 KiRestoreFastSyscallReturnState(VOID)
788 {
789 /* FIXME: NT has support for SYSCALL, IA64-SYSENTER, etc. */
790
791 /* Check if the CPU Supports fast system call */
792 if (KeFeatureBits & KF_FAST_SYSCALL)
793 {
794 /* Do an IPI to enable it */
795 KeIpiGenericCall(KiLoadFastSyscallMachineSpecificRegisters, 0);
796 }
797 }
798
799 ULONG_PTR
800 NTAPI
801 Ki386EnableDE(IN ULONG_PTR Context)
802 {
803 /* Enable DE */
804 __writecr4(__readcr4() | CR4_DE);
805 return 0;
806 }
807
808 ULONG_PTR
809 NTAPI
810 Ki386EnableFxsr(IN ULONG_PTR Context)
811 {
812 /* Enable FXSR */
813 __writecr4(__readcr4() | CR4_FXSR);
814 return 0;
815 }
816
817 ULONG_PTR
818 NTAPI
819 Ki386EnableXMMIExceptions(IN ULONG_PTR Context)
820 {
821 PKIDTENTRY IdtEntry;
822
823 /* Get the IDT Entry for Interrupt 19 */
824 IdtEntry = &((PKIPCR)KeGetPcr())->IDT[19];
825
826 /* Set it up */
827 IdtEntry->Selector = KGDT_R0_CODE;
828 IdtEntry->Offset = ((ULONG_PTR)KiTrap19 & 0xFFFF);
829 IdtEntry->ExtendedOffset = ((ULONG_PTR)KiTrap19 >> 16) & 0xFFFF;
830 ((PKIDT_ACCESS)&IdtEntry->Access)->Dpl = 0;
831 ((PKIDT_ACCESS)&IdtEntry->Access)->Present = 1;
832 ((PKIDT_ACCESS)&IdtEntry->Access)->SegmentType = I386_INTERRUPT_GATE;
833
834 /* Enable XMMI exceptions */
835 __writecr4(__readcr4() | CR4_XMMEXCPT);
836 return 0;
837 }
838
839 VOID
840 NTAPI
841 KiI386PentiumLockErrataFixup(VOID)
842 {
843 KDESCRIPTOR IdtDescriptor = {0};
844 PKIDTENTRY NewIdt, NewIdt2;
845
846 /* Allocate memory for a new IDT */
847 NewIdt = ExAllocatePool(NonPagedPool, 2 * PAGE_SIZE);
848
849 /* Put everything after the first 7 entries on a new page */
850 NewIdt2 = (PVOID)((ULONG_PTR)NewIdt + PAGE_SIZE - (7 * sizeof(KIDTENTRY)));
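    /*
     * Resulting layout: the first 7 IDT entries (7 * 8 = 56 bytes) sit at
     * the very end of the first page of the allocation, and entry 7 onwards
     * starts exactly at the second page, so only vectors 0-6 are affected
     * when the first page's protection is tightened below.
     */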
851
852 /* Disable interrupts */
853 _disable();
854
855 /* Get the current IDT and copy it */
856 Ke386GetInterruptDescriptorTable(*(PKDESCRIPTOR)&IdtDescriptor.Limit);
857 RtlCopyMemory(NewIdt2,
858 (PVOID)IdtDescriptor.Base,
859 IdtDescriptor.Limit + 1);
860 IdtDescriptor.Base = (ULONG)NewIdt2;
861
862 /* Set the new IDT */
863 Ke386SetInterruptDescriptorTable(*(PKDESCRIPTOR)&IdtDescriptor.Limit);
864 ((PKIPCR)KeGetPcr())->IDT = NewIdt2;
865
866 /* Restore interrupts */
867 _enable();
868
869 /* Set the first 7 entries as read-only to produce a fault */
870 MmSetPageProtect(NULL, NewIdt, PAGE_READONLY);
871 }
872
873 BOOLEAN
874 NTAPI
875 KeFreezeExecution(IN PKTRAP_FRAME TrapFrame,
876 IN PKEXCEPTION_FRAME ExceptionFrame)
877 {
878 ULONG Flags = 0;
879
880 /* Disable interrupts and get previous state */
881 Ke386SaveFlags(Flags);
882 //Flags = __getcallerseflags();
883 _disable();
884
885 /* Save freeze flag */
886 KiFreezeFlag = 4;
887
888 /* Save the old IRQL */
889 KiOldIrql = KeGetCurrentIrql();
890
891 /* Return whether interrupts were enabled */
892 return (Flags & EFLAGS_INTERRUPT_MASK) ? TRUE: FALSE;
893 }
894
895 VOID
896 NTAPI
897 KeThawExecution(IN BOOLEAN Enable)
898 {
899 /* Cleanup CPU caches */
900 KeFlushCurrentTb();
901
902 /* Re-enable interrupts */
903 if (Enable) _enable();
904 }
905
906 BOOLEAN
907 NTAPI
908 KeInvalidateAllCaches(VOID)
909 {
910 /* Only supported on Pentium Pro and higher */
911 if (KeI386CpuType < 6) return FALSE;
912
913 /* Invalidate all caches */
914 __wbinvd();
915 return TRUE;
916 }
917
918 VOID
919 FASTCALL
920 KeZeroPages(IN PVOID Address,
921 IN ULONG Size)
922 {
923 /* Not using XMMI in this routine */
924 RtlZeroMemory(Address, Size);
925 }
926
927 /* PUBLIC FUNCTIONS **********************************************************/
928
929 /*
930 * @implemented
931 */
932 NTSTATUS
933 NTAPI
934 KeSaveFloatingPointState(OUT PKFLOATING_SAVE Save)
935 {
936 PFNSAVE_FORMAT FpState;
937 ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
938 DPRINT1("%s is not really implemented\n", __FUNCTION__);
939
940 /* check if we are doing software emulation */
941 if (!KeI386NpxPresent) return STATUS_ILLEGAL_FLOAT_CONTEXT;
942
943 FpState = ExAllocatePool(NonPagedPool, sizeof (FNSAVE_FORMAT));
944 if (!FpState) return STATUS_INSUFFICIENT_RESOURCES;
945
946 *((PVOID *) Save) = FpState;
947 #ifdef __GNUC__
948 asm volatile("fnsave %0\n\t" : "=m" (*FpState));
949 #else
950 __asm
951 {
952 fnsave [FpState]
953 };
954 #endif
955
956 KeGetCurrentThread()->DispatcherHeader.NpxIrql = KeGetCurrentIrql();
957 return STATUS_SUCCESS;
958 }
959
960 /*
961 * @implemented
962 */
963 NTSTATUS
964 NTAPI
965 KeRestoreFloatingPointState(IN PKFLOATING_SAVE Save)
966 {
967 PFNSAVE_FORMAT FpState = *((PVOID *) Save);
968 ASSERT(KeGetCurrentThread()->DispatcherHeader.NpxIrql == KeGetCurrentIrql());
969 DPRINT1("%s is not really implemented\n", __FUNCTION__);
970
971 #ifdef __GNUC__
972 asm volatile("fnclex\n\t");
973 asm volatile("fnclex\n\t");
974 asm volatile("frstor %0\n\t" : : "m" (*FpState));
974 #else
975 __asm
976 {
977 fnclex
978 frstor [FpState]
979 };
980 #endif
981
982 ExFreePool(FpState);
983 return STATUS_SUCCESS;
984 }
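/*
 * Typical usage from a driver (a sketch, with error handling abbreviated):
 *
 *     KFLOATING_SAVE FloatSave;
 *     if (NT_SUCCESS(KeSaveFloatingPointState(&FloatSave)))
 *     {
 *         // ... use x87/MMX/SSE instructions here ...
 *         KeRestoreFloatingPointState(&FloatSave);
 *     }
 */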
985
986 /*
987 * @implemented
988 */
989 ULONG
990 NTAPI
991 KeGetRecommendedSharedDataAlignment(VOID)
992 {
993 /* Return the global variable */
994 return KeLargestCacheLine;
995 }
996
997 /*
998 * @implemented
999 */
1000 VOID
1001 NTAPI
1002 KeFlushEntireTb(IN BOOLEAN Invalid,
1003 IN BOOLEAN AllProcessors)
1004 {
1005 KIRQL OldIrql;
1006
1007 /* Raise the IRQL for the TB Flush */
1008 OldIrql = KeRaiseIrqlToSynchLevel();
1009
1010 #ifdef CONFIG_SMP
1011 /* FIXME: Support IPI Flush */
1012 #error Not yet implemented!
1013 #endif
1014
1015 /* Flush the TB for the Current CPU */
1016 KeFlushCurrentTb();
1017
1018 /* Return to Original IRQL */
1019 KeLowerIrql(OldIrql);
1020 }
1021
1022 /*
1023 * @implemented
1024 */
1025 VOID
1026 NTAPI
1027 KeSetDmaIoCoherency(IN ULONG Coherency)
1028 {
1029 /* Save the coherency globally */
1030 KiDmaIoCoherency = Coherency;
1031 }
1032
1033 /*
1034 * @implemented
1035 */
1036 KAFFINITY
1037 NTAPI
1038 KeQueryActiveProcessors(VOID)
1039 {
1040 PAGED_CODE();
1041
1042 /* Simply return the active processor affinity mask */
1043 return KeActiveProcessors;
1044 }
1045
1046 /*
1047 * @implemented
1048 */
1049 VOID
1050 __cdecl
1051 KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
1052 {
1053 /* Capture the context */
1054 RtlCaptureContext(&State->ContextFrame);
1055
1056 /* Capture the control state */
1057 KiSaveProcessorControlState(State);
1058 }