b8d9853e6a81e4a256473933fe8870c843d353ae
[reactos.git] / reactos / ntoskrnl / ke / i386 / cpu.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/ke/i386/cpu.c
5 * PURPOSE: Routines for CPU-level support
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 */
8
9 /* INCLUDES *****************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 /* GLOBALS *******************************************************************/
16
/* Backing storage for the TSS used by Double Fault Traps (INT 0x9) */
UCHAR KiDoubleFaultTSS[KTSS_IO_MAPS];

/* Backing storage for the TSS used by NMI Fault Traps (INT 0x2) */
UCHAR KiNMITSS[KTSS_IO_MAPS];

/* CPU Features and Flags (filled in during processor detection at boot) */
ULONG KeI386CpuType;
ULONG KeI386CpuStep;
ULONG KeProcessorArchitecture;
ULONG KeProcessorLevel;
ULONG KeProcessorRevision;
ULONG KeFeatureBits;
ULONG KiFastSystemCallDisable;
ULONG KeI386NpxPresent = 0;          /* Nonzero when an x87 FPU is present */
ULONG KiMXCsrMask = 0;
ULONG MxcsrFeatureMask = 0;
ULONG KeI386XMMIPresent = 0;         /* SSE support */
ULONG KeI386FxsrPresent = 0;         /* FXSAVE/FXRSTOR support */
ULONG KeI386MachineType;
ULONG Ke386Pae = FALSE;
ULONG Ke386NoExecute = FALSE;
ULONG KeLargestCacheLine = 0x40;     /* Default cache line: 64 bytes */
ULONG KeDcacheFlushCount = 0;
ULONG KeIcacheFlushCount = 0;
ULONG KiDmaIoCoherency = 0;
CHAR KeNumberProcessors;
KAFFINITY KeActiveProcessors = 1;
BOOLEAN KiI386PentiumLockErrataPresent;
BOOLEAN KiSMTProcessorsPresent;

/* The distance between SYSEXIT and IRETD return modes */
UCHAR KiSystemCallExitAdjust;

/* The offset that was applied -- either 0 or the value above */
UCHAR KiSystemCallExitAdjusted;

/* Whether the adjustment was already done once */
BOOLEAN KiFastCallCopyDoneOnce;

/* TB flush timestamp, incremented on each full TLB flush */
volatile LONG KiTbFlushTimeStamp;

/* CPUID leaf-0 vendor signatures used to identify the CPU manufacturer */
static const CHAR CmpIntelID[]     = "GenuineIntel";
static const CHAR CmpAmdID[]       = "AuthenticAMD";
static const CHAR CmpCyrixID[]     = "CyrixInstead";
static const CHAR CmpTransmetaID[] = "GenuineTMx86";
static const CHAR CmpCentaurID[]   = "CentaurHauls";
static const CHAR CmpRiseID[]      = "RiseRiseRise";
67
68 /* SUPPORT ROUTINES FOR MSVC COMPATIBILITY ***********************************/
69
70 VOID
71 NTAPI
72 CPUID(IN ULONG InfoType,
73 OUT PULONG CpuInfoEax,
74 OUT PULONG CpuInfoEbx,
75 OUT PULONG CpuInfoEcx,
76 OUT PULONG CpuInfoEdx)
77 {
78 ULONG CpuInfo[4];
79
80 /* Perform the CPUID Operation */
81 __cpuid((int*)CpuInfo, InfoType);
82
83 /* Return the results */
84 *CpuInfoEax = CpuInfo[0];
85 *CpuInfoEbx = CpuInfo[1];
86 *CpuInfoEcx = CpuInfo[2];
87 *CpuInfoEdx = CpuInfo[3];
88 }
89
/* Writes a 64-bit value to a Model-Specific Register (wrapper for MSVC compatibility) */
VOID
NTAPI
WRMSR(IN ULONG Register,
      IN LONGLONG Value)
{
    /* Write to the MSR */
    __writemsr(Register, Value);
}
98
/* Reads a 64-bit value from a Model-Specific Register (wrapper for MSVC compatibility) */
LONGLONG
FASTCALL
RDMSR(IN ULONG Register)
{
    /* Read from the MSR */
    return __readmsr(Register);
}
106
107 /* FUNCTIONS *****************************************************************/
108
/*
 * Detects CPUID support by toggling the EFLAGS.ID bit and, if available,
 * stores the CPU Type and Stepping of the current processor in the PRCB.
 */
VOID
NTAPI
KiSetProcessorType(VOID)
{
    ULONG EFlags, NewEFlags;
    ULONG Reg, Dummy;
    ULONG Stepping, Type;

    /* Start by assuming no CPUID data */
    KeGetCurrentPrcb()->CpuID = 0;

    /* Save EFlags */
    EFlags = __readeflags();

    /* XOR out the ID bit and update EFlags */
    NewEFlags = EFlags ^ EFLAGS_ID;
    __writeeflags(NewEFlags);

    /* Get them back and see if they were modified */
    NewEFlags = __readeflags();
    if (NewEFlags != EFlags)
    {
        /* The modification worked, so CPUID exists. Set the ID Bit again. */
        EFlags |= EFLAGS_ID;
        __writeeflags(EFlags);

        /* Perform CPUID 0 to see if CPUID 1 is supported */
        CPUID(0, &Reg, &Dummy, &Dummy, &Dummy);
        if (Reg > 0)
        {
            /* Do CPUID 1 now */
            CPUID(1, &Reg, &Dummy, &Dummy, &Dummy);

            /*
             * Get the Stepping and Type. The stepping contains both the
             * Model and the Step, while the Type contains the returned Type.
             * We ignore the family.
             *
             * For the stepping, we convert this: zzzzzzxy into this: x0y
             */
            Stepping = Reg & 0xF0;        /* Isolate the model nibble */
            Stepping <<= 4;               /* Shift model into the x00 position */
            Stepping += (Reg & 0xFF);     /* Add the low byte back in */
            Stepping &= 0xF0F;            /* Keep only model (x) and step (y) */
            Type = Reg & 0xF00;           /* Family nibble... */
            Type >>= 8;                   /* ...shifted down to the low nibble */

            /* Save them in the PRCB */
            KeGetCurrentPrcb()->CpuID = TRUE;
            KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
            KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;
        }
        else
        {
            DPRINT1("CPUID Support lacking\n");
        }
    }
    else
    {
        DPRINT1("CPUID Support lacking\n");
    }

    /* Restore EFLAGS */
    __writeeflags(EFlags);
}
174
/*
 * Reads the 12-character CPUID vendor string into the PRCB and returns
 * the matching CPU_* vendor constant, or 0 for unknown/no-CPUID CPUs.
 */
ULONG
NTAPI
KiGetCpuVendor(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    ULONG Vendor[5];
    ULONG Temp;

    /* Assume no Vendor ID and fail if no CPUID Support. */
    Prcb->VendorString[0] = 0;
    if (!Prcb->CpuID) return 0;

    /* Get the Vendor ID and null-terminate it */
    CPUID(0, &Vendor[0], &Vendor[1], &Vendor[2], &Vendor[3]);
    Vendor[4] = 0;

    /*
     * Re-arrange vendor string: CPUID leaf 0 returns the string in the
     * order EBX, EDX, ECX, so swap the ECX and EDX dwords to make the
     * twelve bytes at &Vendor[1] read contiguously.
     */
    Temp = Vendor[2];
    Vendor[2] = Vendor[3];
    Vendor[3] = Temp;

    /* Copy it to the PRCB and null-terminate it again */
    RtlCopyMemory(Prcb->VendorString,
                  &Vendor[1],
                  sizeof(Prcb->VendorString) - sizeof(CHAR));
    Prcb->VendorString[sizeof(Prcb->VendorString) - sizeof(CHAR)] = ANSI_NULL;

    /* Now check the CPU Type */
    if (!strcmp(Prcb->VendorString, CmpIntelID))
    {
        return CPU_INTEL;
    }
    else if (!strcmp(Prcb->VendorString, CmpAmdID))
    {
        return CPU_AMD;
    }
    else if (!strcmp(Prcb->VendorString, CmpCyrixID))
    {
        DPRINT1("Cyrix CPU support not fully tested!\n");
        return CPU_CYRIX;
    }
    else if (!strcmp(Prcb->VendorString, CmpTransmetaID))
    {
        DPRINT1("Transmeta CPU support not fully tested!\n");
        return CPU_TRANSMETA;
    }
    else if (!strcmp(Prcb->VendorString, CmpCentaurID))
    {
        DPRINT1("Centaur CPU support not fully tested!\n");
        return CPU_CENTAUR;
    }
    else if (!strcmp(Prcb->VendorString, CmpRiseID))
    {
        DPRINT1("Rise CPU support not fully tested!\n");
        return CPU_RISE;
    }

    /* Invalid CPU */
    return 0;
}
235
/*
 * Queries CPUID (and vendor-specific MSRs) to build the kernel's KF_*
 * feature-bit mask for the current processor, applying per-vendor errata
 * and quirks along the way. Also records the initial APIC ID and the
 * logical-processor count in the PRCB.
 */
ULONG
NTAPI
KiGetFeatureBits(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    ULONG Vendor;
    ULONG FeatureBits = KF_WORKING_PTE;
    ULONG Reg[4], Dummy;
    BOOLEAN ExtendedCPUID = TRUE;
    ULONG CpuFeatures = 0;

    /* Get the Vendor ID */
    Vendor = KiGetCpuVendor();

    /* Make sure we got a valid vendor ID at least. */
    if (!Vendor) return FeatureBits;

    /* Get the CPUID Info. Features are in Reg[3] (EDX). */
    CPUID(1, &Reg[0], &Reg[1], &Dummy, &Reg[3]);

    /* Set the initial APIC ID (EBX bits 31-24) */
    Prcb->InitialApicId = (UCHAR)(Reg[1] >> 24);

    switch (Vendor)
    {
        /* Intel CPUs */
        case CPU_INTEL:

            /* Check if it's a P6 */
            if (Prcb->CpuType == 6)
            {
                /* Perform the special sequence to get the MicroCode Signature:
                   clear IA32_BIOS_SIGN_ID (MSR 0x8B), execute CPUID 1, then
                   read the signature back from the same MSR. */
                WRMSR(0x8B, 0);
                CPUID(1, &Dummy, &Dummy, &Dummy, &Dummy);
                Prcb->UpdateSignature.QuadPart = RDMSR(0x8B);
            }
            else if (Prcb->CpuType == 5)
            {
                /* On P5, enable workaround for the LOCK errata. */
                KiI386PentiumLockErrataPresent = TRUE;
            }

            /* Check for broken P6 with bad SMP PTE implementation
               (family 6, model 1 stepping <= 9, or model 3 stepping <= 4) */
            if (((Reg[0] & 0x0FF0) == 0x0610 && (Reg[0] & 0x000F) <= 0x9) ||
                ((Reg[0] & 0x0FF0) == 0x0630 && (Reg[0] & 0x000F) <= 0x4))
            {
                /* Remove support for correct PTE support. */
                FeatureBits &= ~KF_WORKING_PTE;
            }

            /* Check if the CPU is too old to support SYSENTER */
            if ((Prcb->CpuType < 6) ||
                ((Prcb->CpuType == 6) && (Prcb->CpuStep < 0x0303)))
            {
                /* Disable it (clear the SEP bit, EDX bit 11) */
                Reg[3] &= ~0x800;
            }

            /* Set the current features */
            CpuFeatures = Reg[3];

            break;

        /* AMD CPUs */
        case CPU_AMD:

            /* Check if this is a K5 or K6. (family 5) */
            if ((Reg[0] & 0x0F00) == 0x0500)
            {
                /* Get the Model Number */
                switch (Reg[0] & 0x00F0)
                {
                    /* Model 1: K5 - 5k86 (initial models) */
                    case 0x0010:

                        /* Check if this is Step 0 or 1. They don't support PGE */
                        if ((Reg[0] & 0x000F) > 0x03) break;

                        /* Intentional fallthrough into Model 0 handling */

                    /* Model 0: K5 - SSA5 */
                    case 0x0000:

                        /* Model 0 doesn't support PGE at all (clear EDX bit 13). */
                        Reg[3] &= ~0x2000;
                        break;

                    /* Model 8: K6-2 */
                    case 0x0080:

                        /* K6-2, Step 8 and over have support for MTRR. */
                        if ((Reg[0] & 0x000F) >= 0x8) FeatureBits |= KF_AMDK6MTRR;
                        break;

                    /* Model 9: K6-III
                       Model D: K6-2+, K6-III+ */
                    case 0x0090:
                    case 0x00D0:

                        FeatureBits |= KF_AMDK6MTRR;
                        break;
                }
            }
            else if((Reg[0] & 0x0F00) < 0x0500)
            {
                /* Families below 5 don't support PGE, PSE or CMOV at all */
                Reg[3] &= ~(0x08 | 0x2000 | 0x8000);

                /* They also don't support advanced CPUID functions. */
                ExtendedCPUID = FALSE;
            }

            /* Set the current features */
            CpuFeatures = Reg[3];

            break;

        /* Cyrix CPUs */
        case CPU_CYRIX:

            /* FIXME: CMPXCGH8B */

            break;

        /* Transmeta CPUs */
        case CPU_TRANSMETA:

            /* Enable CMPXCHG8B if the family (>= 5), model and stepping (>= 4.2) support it */
            if ((Reg[0] & 0x0FFF) >= 0x0542)
            {
                /* Set the CMPXCHG8B-enable bit in the Transmeta feature MSR */
                WRMSR(0x80860004, RDMSR(0x80860004) | 0x0100);
                FeatureBits |= KF_CMPXCHG8B;
            }

            break;

        /* Centaur, IDT, Rise and VIA CPUs */
        case CPU_CENTAUR:
        case CPU_RISE:

            /* These CPUs don't report the presence of CMPXCHG8B through CPUID.
               However, this feature exists and operates properly without any additional steps. */
            FeatureBits |= KF_CMPXCHG8B;

            break;
    }

    /* Convert all CPUID Feature bits (leaf 1, EDX) into our KF_* format */
    if (CpuFeatures & 0x00000002) FeatureBits |= KF_V86_VIS | KF_CR4;       /* VME */
    if (CpuFeatures & 0x00000008) FeatureBits |= KF_LARGE_PAGE | KF_CR4;    /* PSE */
    if (CpuFeatures & 0x00000010) FeatureBits |= KF_RDTSC;                  /* TSC */
    if (CpuFeatures & 0x00000100) FeatureBits |= KF_CMPXCHG8B;              /* CX8 */
    if (CpuFeatures & 0x00000800) FeatureBits |= KF_FAST_SYSCALL;           /* SEP */
    if (CpuFeatures & 0x00001000) FeatureBits |= KF_MTRR;                   /* MTRR */
    if (CpuFeatures & 0x00002000) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;   /* PGE */
    if (CpuFeatures & 0x00008000) FeatureBits |= KF_CMOV;                   /* CMOV */
    if (CpuFeatures & 0x00010000) FeatureBits |= KF_PAT;                    /* PAT */
    if (CpuFeatures & 0x00200000) FeatureBits |= KF_DTS;                    /* DS */
    if (CpuFeatures & 0x00800000) FeatureBits |= KF_MMX;                    /* MMX */
    if (CpuFeatures & 0x01000000) FeatureBits |= KF_FXSR;                   /* FXSR */
    if (CpuFeatures & 0x02000000) FeatureBits |= KF_XMMI;                   /* SSE */
    if (CpuFeatures & 0x04000000) FeatureBits |= KF_XMMI64;                 /* SSE2 */

    /* Check if the CPU has hyper-threading (HTT, EDX bit 28) */
    if (CpuFeatures & 0x10000000)
    {
        /* Set the number of logical CPUs (EBX bits 23-16) */
        Prcb->LogicalProcessorsPerPhysicalProcessor = (UCHAR)(Reg[1] >> 16);
        if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
        {
            /* We're on dual-core */
            KiSMTProcessorsPresent = TRUE;
        }
    }
    else
    {
        /* We only have a single CPU */
        Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
    }

    /* Check if CPUID 0x80000000 is supported */
    if (ExtendedCPUID)
    {
        /* Do the call */
        CPUID(0x80000000, &Reg[0], &Dummy, &Dummy, &Dummy);
        if ((Reg[0] & 0xffffff00) == 0x80000000)
        {
            /* Check if CPUID 0x80000001 is supported */
            if (Reg[0] >= 0x80000001)
            {
                /* Check which extended features are available. */
                CPUID(0x80000001, &Dummy, &Dummy, &Dummy, &Reg[3]);

                /* Check if NX-bit is supported (extended EDX bit 20) */
                if (Reg[3] & 0x00100000) FeatureBits |= KF_NX_BIT;

                /* Now handle each features for each CPU Vendor */
                switch (Vendor)
                {
                    case CPU_AMD:
                    case CPU_CENTAUR:
                        /* 3DNow! (extended EDX bit 31) */
                        if (Reg[3] & 0x80000000) FeatureBits |= KF_3DNOW;
                        break;
                }
            }
        }
    }

    /* Return the Feature Bits */
    return FeatureBits;
}
445
/*
 * Determines the L2 cache size of the current processor and stores it in
 * the PCR (Pcr->SecondLevelCacheSize, in bytes). Uses CPUID leaf 2 cache
 * descriptors on Intel and CPUID 0x80000006 on AMD; other vendors are
 * currently left at the default of 0.
 */
VOID
NTAPI
KiGetCacheInformation(VOID)
{
    PKIPCR Pcr = (PKIPCR)KeGetPcr();
    ULONG Vendor;
    ULONG Data[4], Dummy;
    ULONG CacheRequests = 0, i;
    ULONG CurrentRegister;
    UCHAR RegisterByte;
    BOOLEAN FirstPass = TRUE;

    /* Set default L2 size */
    Pcr->SecondLevelCacheSize = 0;

    /* Get the Vendor ID and make sure we support CPUID */
    Vendor = KiGetCpuVendor();
    if (!Vendor) return;

    /* Check the Vendor ID */
    switch (Vendor)
    {
        /* Handle Intel case */
        case CPU_INTEL:

            /*Check if we support CPUID 2 */
            CPUID(0, &Data[0], &Dummy, &Dummy, &Dummy);
            if (Data[0] >= 2)
            {
                /* We need to loop for the number of times CPUID will tell us to */
                do
                {
                    /* Do the CPUID call */
                    CPUID(2, &Data[0], &Data[1], &Data[2], &Data[3]);

                    /* Check if it was the first call */
                    if (FirstPass)
                    {
                        /*
                         * The number of times to loop is the first byte. Read
                         * it and then destroy it so we don't get confused.
                         */
                        CacheRequests = Data[0] & 0xFF;
                        Data[0] &= 0xFFFFFF00;

                        /* Don't go over this again */
                        FirstPass = FALSE;
                    }

                    /* Loop all 4 registers */
                    for (i = 0; i < 4; i++)
                    {
                        /* Get the current register */
                        CurrentRegister = Data[i];

                        /*
                         * If the upper bit is set, then this register should
                         * be skipped (it does not hold cache descriptors).
                         */
                        if (CurrentRegister & 0x80000000) continue;

                        /* Keep looping for every byte inside this register */
                        while (CurrentRegister)
                        {
                            /* Read a byte, skip a byte. */
                            RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
                            CurrentRegister >>= 8;
                            if (!RegisterByte) continue;

                            /*
                             * Valid values are from 0x40 (0 bytes) to 0x49
                             * (32MB), or from 0x80 to 0x89 (same size but
                             * 8-way associative.
                             */
                            if (((RegisterByte > 0x40) &&
                                 (RegisterByte <= 0x49)) ||
                                ((RegisterByte > 0x80) &&
                                 (RegisterByte <= 0x89)))
                            {
                                /* Mask out only the first nibble */
                                RegisterByte &= 0x0F;

                                /* Set the L2 Cache Size: 0x10000 << n
                                   maps descriptor n to 128KB, 256KB, ... */
                                Pcr->SecondLevelCacheSize = 0x10000 <<
                                                            RegisterByte;
                            }
                        }
                    }
                } while (--CacheRequests);
            }
            break;

        case CPU_AMD:

            /* Check if we support CPUID 0x80000006 */
            CPUID(0x80000000, &Data[0], &Dummy, &Dummy, &Dummy);
            if (Data[0] >= 6)
            {
                /* Get 2nd level cache and tlb size */
                CPUID(0x80000006, &Dummy, &Dummy, &Data[2], &Dummy);

                /* Set the L2 Cache Size: ECX[31:16] is the size in KB, so
                   ">> 16" (dword) then "<< 10" (KB to bytes) = ">> 6" */
                Pcr->SecondLevelCacheSize = (Data[2] & 0xFFFF0000) >> 6;
            }
            break;

        case CPU_CYRIX:
        case CPU_TRANSMETA:
        case CPU_CENTAUR:
        case CPU_RISE:

            /* FIXME */
            break;
    }
}
561
562 VOID
563 NTAPI
564 KiSetCR0Bits(VOID)
565 {
566 ULONG Cr0;
567
568 /* Save current CR0 */
569 Cr0 = __readcr0();
570
571 /* If this is a 486, enable Write-Protection */
572 if (KeGetCurrentPrcb()->CpuType > 3) Cr0 |= CR0_WP;
573
574 /* Set new Cr0 */
575 __writecr0(Cr0);
576 }
577
/*
 * Initializes the I/O permission bitmap and interrupt direction maps of a
 * TSS, and (optionally) sets the limit of its GDT entry. The I/O map is
 * filled with 0xFF so that all port access traps by default.
 */
VOID
NTAPI
KiInitializeTSS2(IN PKTSS Tss,
                 IN PKGDTENTRY TssEntry OPTIONAL)
{
    PUCHAR p;

    /* Make sure the GDT Entry is valid */
    if (TssEntry)
    {
        /* Set the Limit */
        TssEntry->LimitLow = sizeof(KTSS) - 1;
        TssEntry->HighWord.Bits.LimitHi = 0;
    }

    /* Now clear the I/O Map (all ones = all ports denied) */
    ASSERT(IOPM_COUNT == 1);
    RtlFillMemory(Tss->IoMaps[0].IoMap, IOPM_FULL_SIZE, 0xFF);

    /* Initialize Interrupt Direction Maps */
    p = (PUCHAR)(Tss->IoMaps[0].DirectionMap);
    RtlZeroMemory(p, IOPM_DIRECTION_MAP_SIZE);

    /* Add DPMI support for interrupts
       (magic bytes inherited from the original implementation; exact
       meaning of the 4 / 0x18 values is DPMI-specific) */
    p[0] = 4;
    p[3] = 0x18;
    p[4] = 0x18;

    /* Initialize the default Interrupt Direction Map */
    p = Tss->IntDirectionMap;
    RtlZeroMemory(Tss->IntDirectionMap, IOPM_DIRECTION_MAP_SIZE);

    /* Add DPMI support (same magic values as above) */
    p[0] = 4;
    p[3] = 0x18;
    p[4] = 0x18;
}
615
616 VOID
617 NTAPI
618 KiInitializeTSS(IN PKTSS Tss)
619 {
620 /* Set an invalid map base */
621 Tss->IoMapBase = KiComputeIopmOffset(IO_ACCESS_MAP_NONE);
622
623 /* Disable traps during Task Switches */
624 Tss->Flags = 0;
625
626 /* Set LDT and Ring 0 SS */
627 Tss->LDT = 0;
628 Tss->Ss0 = KGDT_R0_DATA;
629 }
630
/*
 * Sets up the boot processor's task state segments: the main boot TSS
 * (loaded into TR), plus the dedicated task-gate TSSes for Double Fault
 * (IDT entry 8) and NMI (IDT entry 2), including their GDT descriptors.
 */
VOID
FASTCALL
Ki386InitializeTss(IN PKTSS Tss,
                   IN PKIDTENTRY Idt,
                   IN PKGDTENTRY Gdt)
{
    PKGDTENTRY TssEntry, TaskGateEntry;

    /* Initialize the boot TSS. */
    TssEntry = &Gdt[KGDT_TSS / sizeof(KGDTENTRY)];
    TssEntry->HighWord.Bits.Type = I386_TSS;
    TssEntry->HighWord.Bits.Pres = 1;
    TssEntry->HighWord.Bits.Dpl = 0;
    KiInitializeTSS2(Tss, TssEntry);
    KiInitializeTSS(Tss);

    /* Load the task register */
    Ke386SetTr(KGDT_TSS);

    /* Setup the Task Gate for Double Fault Traps (IDT vector 8) */
    TaskGateEntry = (PKGDTENTRY)&Idt[8];
    TaskGateEntry->HighWord.Bits.Type = I386_TASK_GATE;
    TaskGateEntry->HighWord.Bits.Pres = 1;
    TaskGateEntry->HighWord.Bits.Dpl = 0;
    ((PKIDTENTRY)TaskGateEntry)->Selector = KGDT_DF_TSS;

    /* Initialize the TSS used for handling double faults:
       it reuses the current CR3 and runs KiTrap08 on its own stack. */
    Tss = (PKTSS)KiDoubleFaultTSS;
    KiInitializeTSS(Tss);
    Tss->CR3 = __readcr3();
    Tss->Esp0 = KiDoubleFaultStack;
    Tss->Esp = KiDoubleFaultStack;
    Tss->Eip = PtrToUlong(KiTrap08);
    Tss->Cs = KGDT_R0_CODE;
    Tss->Fs = KGDT_R0_PCR;
    Tss->Ss = Ke386GetSs();
    Tss->Es = KGDT_R3_DATA | RPL_MASK;
    Tss->Ds = KGDT_R3_DATA | RPL_MASK;

    /* Setup the Double Trap TSS entry in the GDT */
    TssEntry = &Gdt[KGDT_DF_TSS / sizeof(KGDTENTRY)];
    TssEntry->HighWord.Bits.Type = I386_TSS;
    TssEntry->HighWord.Bits.Pres = 1;
    TssEntry->HighWord.Bits.Dpl = 0;
    TssEntry->BaseLow = (USHORT)((ULONG_PTR)Tss & 0xFFFF);
    TssEntry->HighWord.Bytes.BaseMid = (UCHAR)((ULONG_PTR)Tss >> 16);
    TssEntry->HighWord.Bytes.BaseHi = (UCHAR)((ULONG_PTR)Tss >> 24);
    TssEntry->LimitLow = KTSS_IO_MAPS;

    /* Now setup the NMI Task Gate (IDT vector 2) */
    TaskGateEntry = (PKGDTENTRY)&Idt[2];
    TaskGateEntry->HighWord.Bits.Type = I386_TASK_GATE;
    TaskGateEntry->HighWord.Bits.Pres = 1;
    TaskGateEntry->HighWord.Bits.Dpl = 0;
    ((PKIDTENTRY)TaskGateEntry)->Selector = KGDT_NMI_TSS;

    /* Initialize the actual TSS; NMI shares the double-fault stack
       and dispatches to KiTrap02. */
    Tss = (PKTSS)KiNMITSS;
    KiInitializeTSS(Tss);
    Tss->CR3 = __readcr3();
    Tss->Esp0 = KiDoubleFaultStack;
    Tss->Esp = KiDoubleFaultStack;
    Tss->Eip = PtrToUlong(KiTrap02);
    Tss->Cs = KGDT_R0_CODE;
    Tss->Fs = KGDT_R0_PCR;
    Tss->Ss = Ke386GetSs();
    Tss->Es = KGDT_R3_DATA | RPL_MASK;
    Tss->Ds = KGDT_R3_DATA | RPL_MASK;

    /* And its associated TSS Entry */
    TssEntry = &Gdt[KGDT_NMI_TSS / sizeof(KGDTENTRY)];
    TssEntry->HighWord.Bits.Type = I386_TSS;
    TssEntry->HighWord.Bits.Pres = 1;
    TssEntry->HighWord.Bits.Dpl = 0;
    TssEntry->BaseLow = (USHORT)((ULONG_PTR)Tss & 0xFFFF);
    TssEntry->HighWord.Bytes.BaseMid = (UCHAR)((ULONG_PTR)Tss >> 16);
    TssEntry->HighWord.Bytes.BaseHi = (UCHAR)((ULONG_PTR)Tss >> 24);
    TssEntry->LimitLow = KTSS_IO_MAPS;
}
710
711 VOID
712 NTAPI
713 KeFlushCurrentTb(VOID)
714 {
715 /* Flush the TLB by resetting CR3 */
716 __writecr3(__readcr3());
717 }
718
/*
 * Restores the processor's special registers (CRx, DRx, GDT, IDT, TR, LDT)
 * from a previously saved KPROCESSOR_STATE (see KiSaveProcessorControlState).
 */
VOID
NTAPI
KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
{
    PKGDTENTRY TssEntry;

    //
    // Restore the CR registers (CR4 only if the CPU has one)
    //
    __writecr0(ProcessorState->SpecialRegisters.Cr0);
    Ke386SetCr2(ProcessorState->SpecialRegisters.Cr2);
    __writecr3(ProcessorState->SpecialRegisters.Cr3);
    if (KeFeatureBits & KF_CR4) __writecr4(ProcessorState->SpecialRegisters.Cr4);

    //
    // Restore the DR registers
    //
    __writedr(0, ProcessorState->SpecialRegisters.KernelDr0);
    __writedr(1, ProcessorState->SpecialRegisters.KernelDr1);
    __writedr(2, ProcessorState->SpecialRegisters.KernelDr2);
    __writedr(3, ProcessorState->SpecialRegisters.KernelDr3);
    __writedr(6, ProcessorState->SpecialRegisters.KernelDr6);
    __writedr(7, ProcessorState->SpecialRegisters.KernelDr7);

    //
    // Restore GDT and IDT
    //
    Ke386SetGlobalDescriptorTable(&ProcessorState->SpecialRegisters.Gdtr.Limit);
    __lidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

    //
    // Clear the busy flag in the TSS descriptor so LTR doesn't fault
    // if we reload the same selector that is already marked busy
    //
    TssEntry = (PKGDTENTRY)(ProcessorState->SpecialRegisters.Gdtr.Base +
                            ProcessorState->SpecialRegisters.Tr);
    TssEntry->HighWord.Bytes.Flags1 &= ~0x2;

    //
    // Restore TSS and LDT
    //
    Ke386SetTr(ProcessorState->SpecialRegisters.Tr);
    Ke386SetLocalDescriptorTable(ProcessorState->SpecialRegisters.Ldtr);
}
762
/*
 * Saves the processor's special registers (CRx, DRx, GDT, IDT, TR, LDT)
 * into the given KPROCESSOR_STATE. DR7 is cleared afterwards so no stale
 * hardware breakpoints remain active.
 */
VOID
NTAPI
KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
{
    /* Save the CR registers (CR4 only exists with KF_CR4) */
    ProcessorState->SpecialRegisters.Cr0 = __readcr0();
    ProcessorState->SpecialRegisters.Cr2 = __readcr2();
    ProcessorState->SpecialRegisters.Cr3 = __readcr3();
    ProcessorState->SpecialRegisters.Cr4 = (KeFeatureBits & KF_CR4) ?
                                           __readcr4() : 0;

    /* Save the DR registers */
    ProcessorState->SpecialRegisters.KernelDr0 = __readdr(0);
    ProcessorState->SpecialRegisters.KernelDr1 = __readdr(1);
    ProcessorState->SpecialRegisters.KernelDr2 = __readdr(2);
    ProcessorState->SpecialRegisters.KernelDr3 = __readdr(3);
    ProcessorState->SpecialRegisters.KernelDr6 = __readdr(6);
    ProcessorState->SpecialRegisters.KernelDr7 = __readdr(7);
    __writedr(7, 0);

    /* Save GDT, IDT, LDT and TSS */
    Ke386GetGlobalDescriptorTable(&ProcessorState->SpecialRegisters.Gdtr.Limit);
    __sidt(&ProcessorState->SpecialRegisters.Idtr.Limit);
    ProcessorState->SpecialRegisters.Tr = Ke386GetTr();
    ProcessorState->SpecialRegisters.Ldtr = Ke386GetLocalDescriptorTable();
}
789
/* Records the machine type reported by the boot loader (low byte only) */
VOID
NTAPI
KiInitializeMachineType(VOID)
{
    /* Set the Machine Type we got from NTLDR */
    KeI386MachineType = KeLoaderBlock->u.I386.MachineType & 0x000FF;
}
797
/*
 * IPI worker: programs the SYSENTER MSRs on the current processor so that
 * SYSENTER lands in KiFastCallEntry with a kernel CS and stack.
 */
ULONG_PTR
NTAPI
KiLoadFastSyscallMachineSpecificRegisters(IN ULONG_PTR Context)
{
    /* Set IA32_SYSENTER_CS (0x174) and IA32_SYSENTER_ESP (0x175) */
    WRMSR(0x174, KGDT_R0_CODE);
    WRMSR(0x175, (ULONG_PTR)KeGetCurrentPrcb()->DpcStack);

    /* Set IA32_SYSENTER_EIP (0x176) to the fast system call entry point */
    WRMSR(0x176, (ULONG_PTR)KiFastCallEntry);
    return 0;
}
810
/*
 * Undoes the SYSEXIT code patch: restores the original short-jump
 * displacement in KiSystemCallExitBranch so the IRETD exit path is used.
 */
VOID
NTAPI
KiDisableFastSyscallReturn(VOID)
{
    /* Was it applied? */
    if (KiSystemCallExitAdjusted)
    {
        /* Restore the original displacement byte of the Jcc SHORT */
        KiSystemCallExitBranch[1] = KiSystemCallExitBranch[1] - KiSystemCallExitAdjusted;

        /* It's not adjusted anymore */
        KiSystemCallExitAdjusted = FALSE;
    }
}
825
/*
 * Patches the system-call exit branch so that it jumps to the SYSEXIT
 * return path instead of IRETD, by shrinking the displacement byte of
 * the Jcc SHORT at KiSystemCallExitBranch by KiSystemCallExitAdjust.
 */
VOID
NTAPI
KiEnableFastSyscallReturn(VOID)
{
    /* Check if the patch has already been done */
    if ((KiSystemCallExitAdjusted == KiSystemCallExitAdjust) &&
        (KiFastCallCopyDoneOnce))
    {
        return;
    }

    /* Make sure the offset is within the distance of a Jxx SHORT */
    if ((KiSystemCallExitBranch[1] - KiSystemCallExitAdjust) < 0x80)
    {
        /* Remove any existing code patch */
        KiDisableFastSyscallReturn();

        /* We should have a JNZ there (opcode 0x75) */
        ASSERT(KiSystemCallExitBranch[0] == 0x75);

        /* Do the patch: shrink the branch displacement */
        KiSystemCallExitAdjusted = KiSystemCallExitAdjust;
        KiSystemCallExitBranch[1] -= KiSystemCallExitAdjusted;

        /* Remember that we've done it */
        KiFastCallCopyDoneOnce = TRUE;
    }
    else
    {
        /* This shouldn't happen unless we've messed the macros up */
        DPRINT1("Your compiled kernel is broken!\n");
        DbgBreakPoint();
    }
}
860
/*
 * Decides whether fast system calls (SYSENTER/SYSEXIT) are used: computes
 * the exit-path adjustment, programs the SYSENTER MSRs on all processors
 * via IPI, and applies (or removes) the SYSEXIT code patch.
 */
VOID
NTAPI
KiRestoreFastSyscallReturnState(VOID)
{
    /* Check if the CPU Supports fast system call */
    if (KeFeatureBits & KF_FAST_SYSCALL)
    {
        /* Check if it has been disabled (e.g. by registry setting) */
        if (!KiFastSystemCallDisable)
        {
            /* KiSystemCallExit2 should come BEFORE KiSystemCallExit */
            ASSERT(KiSystemCallExit2 < KiSystemCallExit);

            /* It's enabled, so we'll have to do a code patch */
            KiSystemCallExitAdjust = KiSystemCallExit - KiSystemCallExit2;
        }
        else
        {
            /* Disable fast system call */
            KeFeatureBits &= ~KF_FAST_SYSCALL;
        }
    }

    /* Now check if all CPUs support fast system call, and the registry allows it */
    if (KeFeatureBits & KF_FAST_SYSCALL)
    {
        /* Do an IPI to enable it (programs the SYSENTER MSRs everywhere) */
        KeIpiGenericCall(KiLoadFastSyscallMachineSpecificRegisters, 0);
    }

    /* Perform the code patch that is required */
    KiEnableFastSyscallReturn();
}
894
895 ULONG_PTR
896 NTAPI
897 Ki386EnableDE(IN ULONG_PTR Context)
898 {
899 /* Enable DE */
900 __writecr4(__readcr4() | CR4_DE);
901 return 0;
902 }
903
904 ULONG_PTR
905 NTAPI
906 Ki386EnableFxsr(IN ULONG_PTR Context)
907 {
908 /* Enable FXSR */
909 __writecr4(__readcr4() | CR4_FXSR);
910 return 0;
911 }
912
/*
 * IPI worker: installs KiTrap13 as the handler for the SIMD floating-point
 * exception (vector 0x13) and enables CR4.OSXMMEXCPT on this processor.
 */
ULONG_PTR
NTAPI
Ki386EnableXMMIExceptions(IN ULONG_PTR Context)
{
    PKIDTENTRY IdtEntry;

    /* Get the IDT Entry for Interrupt 0x13 */
    IdtEntry = &((PKIPCR)KeGetPcr())->IDT[0x13];

    /* Set it up as a ring-0 interrupt gate pointing at KiTrap13 */
    IdtEntry->Selector = KGDT_R0_CODE;
    IdtEntry->Offset = ((ULONG_PTR)KiTrap13 & 0xFFFF);
    IdtEntry->ExtendedOffset = ((ULONG_PTR)KiTrap13 >> 16) & 0xFFFF;
    ((PKIDT_ACCESS)&IdtEntry->Access)->Dpl = 0;
    ((PKIDT_ACCESS)&IdtEntry->Access)->Present = 1;
    ((PKIDT_ACCESS)&IdtEntry->Access)->SegmentType = I386_INTERRUPT_GATE;

    /* Enable XMMI exceptions */
    __writecr4(__readcr4() | CR4_XMMEXCPT);
    return 0;
}
934
/*
 * Workaround for the Pentium LOCK (CMPXCHG8B / "F00F") errata: moves the
 * IDT so that its first 7 entries sit at the end of a page that is then
 * marked read-only, causing a catchable fault instead of a CPU hang.
 */
VOID
NTAPI
KiI386PentiumLockErrataFixup(VOID)
{
    KDESCRIPTOR IdtDescriptor;
    PKIDTENTRY NewIdt, NewIdt2;

    /* Allocate memory for a new IDT */
    /* NOTE(review): the ExAllocatePool result is used unchecked; a failed
       allocation here would fault during boot -- confirm acceptable. */
    NewIdt = ExAllocatePool(NonPagedPool, 2 * PAGE_SIZE);

    /* Put everything after the first 7 entries on a new page */
    NewIdt2 = (PVOID)((ULONG_PTR)NewIdt + PAGE_SIZE - (7 * sizeof(KIDTENTRY)));

    /* Disable interrupts */
    _disable();

    /* Get the current IDT and copy it */
    __sidt(&IdtDescriptor.Limit);
    RtlCopyMemory(NewIdt2,
                  (PVOID)IdtDescriptor.Base,
                  IdtDescriptor.Limit + 1);
    IdtDescriptor.Base = (ULONG)NewIdt2;

    /* Set the new IDT */
    __lidt(&IdtDescriptor.Limit);
    ((PKIPCR)KeGetPcr())->IDT = NewIdt2;

    /* Restore interrupts */
    _enable();

    /* Set the first 7 entries as read-only to produce a fault */
    MmSetPageProtect(NULL, NewIdt, PAGE_READONLY);
}
968
969 BOOLEAN
970 NTAPI
971 KeDisableInterrupts(VOID)
972 {
973 ULONG Flags;
974 BOOLEAN Return;
975
976 /* Get EFLAGS and check if the interrupt bit is set */
977 Flags = __readeflags();
978 Return = (Flags & EFLAGS_INTERRUPT_MASK) ? TRUE: FALSE;
979
980 /* Disable interrupts */
981 _disable();
982 return Return;
983 }
984
985 BOOLEAN
986 NTAPI
987 KeInvalidateAllCaches(VOID)
988 {
989 /* Only supported on Pentium Pro and higher */
990 if (KeI386CpuType < 6) return FALSE;
991
992 /* Invalidate all caches */
993 __wbinvd();
994 return TRUE;
995 }
996
/* Zero-fills a page-aligned region; plain memory clear, no XMMI fast path */
VOID
FASTCALL
KeZeroPages(IN PVOID Address,
            IN ULONG Size)
{
    /* Not using XMMI in this routine */
    RtlZeroMemory(Address, Size);
}
1005
/*
 * Captures the full processor state (context from the trap frame plus
 * control/special registers) into the current PRCB's ProcessorState.
 */
VOID
NTAPI
KiSaveProcessorState(IN PKTRAP_FRAME TrapFrame,
                     IN PKEXCEPTION_FRAME ExceptionFrame)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    //
    // Save full context (integer, control and debug registers)
    //
    Prcb->ProcessorState.ContextFrame.ContextFlags = CONTEXT_FULL |
                                                     CONTEXT_DEBUG_REGISTERS;
    KeTrapFrameToContext(TrapFrame, NULL, &Prcb->ProcessorState.ContextFrame);

    //
    // Save control registers
    //
    KiSaveProcessorControlState(&Prcb->ProcessorState);
}
1025
/*
 * Probes for an x87 FPU by executing FNINIT/FNSTSW and checking that the
 * status word reads back as zero. Configures CR0 accordingly and returns
 * TRUE if an FPU is present.
 */
BOOLEAN
NTAPI
KiIsNpxPresent(VOID)
{
    ULONG Cr0;
    USHORT Magic;

    /* Set magic */
    Magic = 0xFFFF;

    /* Read CR0 and mask out FPU flags */
    Cr0 = __readcr0() & ~(CR0_MP | CR0_TS | CR0_EM | CR0_ET);

    /* Initialize the FPU and store its status word into Magic */
    asm volatile ("fninit;" "fnstsw %0" : "+m"(Magic));

    /* Magic should now be cleared */
    if (Magic & 0xFF)
    {
        /* You don't have an FPU -- enable emulation for now */
        __writecr0(Cr0 | CR0_EM | CR0_TS);
        return FALSE;
    }

    /* You have an FPU, enable it */
    Cr0 |= CR0_ET;

    /* Enable INT 16 on 486 and higher */
    /* NOTE(review): the check is CpuType >= 3, which also matches 386-class
       CPUs -- confirm whether this should be >= 4 to match the comment. */
    if (KeGetCurrentPrcb()->CpuType >= 3) Cr0 |= CR0_NE;

    /* Set FPU state (EM|TS kept set here; presumably for lazy FPU loading
       via the trap handler -- TODO confirm) */
    __writecr0(Cr0 | CR0_EM | CR0_TS);
    return TRUE;
}
1060
/*
 * Detects the Pentium FDIV errata by performing a multiply/divide with the
 * known-bad magic operands and checking whether the result round-trips.
 * Returns TRUE if the errata is present.
 */
BOOLEAN
NTAPI
KiIsNpxErrataPresent(VOID)
{
    BOOLEAN ErrataPresent;
    ULONG Cr0;
    volatile double Value1, Value2;

    /* Disable interrupts */
    _disable();

    /* Read CR0 and remove FPU flags so x87 instructions won't trap */
    Cr0 = __readcr0();
    __writecr0(Cr0 & ~(CR0_MP | CR0_TS | CR0_EM));

    /* Initialize FPU state */
    asm volatile ("fninit");

    /* Multiply the magic values and divide, we should get the result back
       exactly on a correct FPU; the flawed FDIV unit returns a different
       value for this operand pair */
    Value1 = 4195835.0;
    Value2 = 3145727.0;
    ErrataPresent = (Value1 * Value2 / 3145727.0) != 4195835.0;

    /* Restore CR0 */
    __writecr0(Cr0);

    /* Enable interrupts */
    _enable();

    /* Return if there's an errata */
    return ErrataPresent;
}
1093
/*
 * Flushes the current NPX (FPU) state: saves the owning thread's FPU
 * context to its NPX save area (or to SaveArea if given), marks the state
 * as not loaded, and updates CR0 for lazy FPU restore. Interrupts are
 * disabled for the duration of the state manipulation.
 */
NTAPI
VOID
KiFlushNPXState(IN PFLOATING_SAVE_AREA SaveArea)
{
    ULONG EFlags, Cr0;
    PKTHREAD Thread, NpxThread;
    PFX_SAVE_AREA FxSaveArea;

    /* Save volatiles and disable interrupts */
    EFlags = __readeflags();
    _disable();

    /* Save the PCR and get the current thread */
    Thread = KeGetCurrentThread();

    /* Check if we're already loaded */
    if (Thread->NpxState != NPX_STATE_LOADED)
    {
        /* If there's nothing to load, quit */
        /* NOTE(review): this early return skips the __writeeflags restore
           below, leaving interrupts disabled -- confirm callers expect this. */
        if (!SaveArea) return;

        /* Need FXSR support for this */
        ASSERT(KeI386FxsrPresent == TRUE);

        /* Check for sane CR0 */
        Cr0 = __readcr0();
        if (Cr0 & (CR0_MP | CR0_TS | CR0_EM))
        {
            /* Mask out FPU flags so the FX instructions below don't trap */
            __writecr0(Cr0 & ~(CR0_MP | CR0_TS | CR0_EM));
        }

        /* Get the NPX thread and check its FPU state */
        NpxThread = KeGetCurrentPrcb()->NpxThread;
        if ((NpxThread) && (NpxThread->NpxState == NPX_STATE_LOADED))
        {
            /* Get the FX frame and store the state there */
            FxSaveArea = KiGetThreadNpxArea(NpxThread);
            Ke386FxSave(FxSaveArea);

            /* NPX thread has lost its state */
            NpxThread->NpxState = NPX_STATE_NOT_LOADED;
        }

        /* Now load NPX state from the NPX area */
        FxSaveArea = KiGetThreadNpxArea(Thread);
        Ke386FxStore(FxSaveArea);
    }
    else
    {
        /* Check for sane CR0 */
        Cr0 = __readcr0();
        if (Cr0 & (CR0_MP | CR0_TS | CR0_EM))
        {
            /* Mask out FPU flags so the FX instructions below don't trap */
            __writecr0(Cr0 & ~(CR0_MP | CR0_TS | CR0_EM));
        }

        /* Get FX frame */
        FxSaveArea = KiGetThreadNpxArea(Thread);
        Thread->NpxState = NPX_STATE_NOT_LOADED;

        /* Save state if supported by CPU */
        if (KeI386FxsrPresent) Ke386FxSave(FxSaveArea);
    }

    /* Now save the FN state wherever it was requested */
    if (SaveArea) Ke386FnSave(SaveArea);

    /* Clear NPX thread: no thread owns the FPU anymore */
    KeGetCurrentPrcb()->NpxThread = NULL;

    /* Add the CR0 from the NPX frame (re-arms the lazy-load trap) */
    Cr0 |= NPX_STATE_NOT_LOADED;
    Cr0 |= FxSaveArea->Cr0NpxState;
    __writecr0(Cr0);

    /* Restore interrupt state */
    __writeeflags(EFlags);
}
1174
1175 /* PUBLIC FUNCTIONS **********************************************************/
1176
1177 /*
1178 * @implemented
1179 */
/*
 * @implemented
 *
 * Handles a coprocessor error by latching CR0_TS into the thread's NPX
 * save area and into CR0, so the next FPU instruction traps.
 */
VOID
NTAPI
KiCoprocessorError(VOID)
{
    PFX_SAVE_AREA NpxArea;

    /* Get the FPU area of the current thread */
    NpxArea = KiGetThreadNpxArea(KeGetCurrentThread());

    /* Set CR0_TS both in the saved state and in the live register */
    NpxArea->Cr0NpxState = CR0_TS;
    __writecr0(__readcr0() | CR0_TS);
}
1193
1194 /*
1195 * @implemented
1196 */
/*
 * Saves the current floating-point state into pool-allocated storage whose
 * pointer is stashed inside the caller's KFLOATING_SAVE. Must be paired
 * with KeRestoreFloatingPointState, which frees the allocation.
 */
NTSTATUS
NTAPI
KeSaveFloatingPointState(OUT PKFLOATING_SAVE Save)
{
    PFNSAVE_FORMAT FpState;
    ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
    DPRINT1("%s is not really implemented\n", __FUNCTION__);

    /* check if we are doing software emulation */
    if (!KeI386NpxPresent) return STATUS_ILLEGAL_FLOAT_CONTEXT;

    FpState = ExAllocatePool(NonPagedPool, sizeof (FNSAVE_FORMAT));
    if (!FpState) return STATUS_INSUFFICIENT_RESOURCES;

    /* Smuggle the allocation pointer through the opaque save buffer */
    *((PVOID *) Save) = FpState;
#ifdef __GNUC__
    asm volatile("fnsave %0\n\t" : "=m" (*FpState));
#else
    __asm
    {
        fnsave [FpState]
    };
#endif

    /* Remember the IRQL at which the state was saved */
    KeGetCurrentThread()->DispatcherHeader.NpxIrql = KeGetCurrentIrql();
    return STATUS_SUCCESS;
}
1224
1225 /*
1226 * @implemented
1227 */
/*
 * Restores the floating-point state saved by KeSaveFloatingPointState and
 * frees the pool allocation that held it. Must run at the same IRQL at
 * which the state was saved.
 */
NTSTATUS
NTAPI
KeRestoreFloatingPointState(IN PKFLOATING_SAVE Save)
{
    PFNSAVE_FORMAT FpState = *((PVOID *) Save);
    ASSERT(KeGetCurrentThread()->DispatcherHeader.NpxIrql == KeGetCurrentIrql());
    DPRINT1("%s is not really implemented\n", __FUNCTION__);

#ifdef __GNUC__
    asm volatile("fnclex\n\t");
    /* NOTE(review): frstor READS *FpState, but the "=m" constraint declares
       it write-only; an "m" input constraint looks correct -- verify. */
    asm volatile("frstor %0\n\t" : "=m" (*FpState));
#else
    __asm
    {
        fnclex
        frstor [FpState]
    };
#endif

    /* Release the save buffer allocated by KeSaveFloatingPointState */
    ExFreePool(FpState);
    return STATUS_SUCCESS;
}
1250
1251 /*
1252 * @implemented
1253 */
/*
 * @implemented
 *
 * Returns the recommended alignment (largest cache line size, in bytes)
 * for data shared between processors.
 */
ULONG
NTAPI
KeGetRecommendedSharedDataAlignment(VOID)
{
    /* Return the global variable */
    return KeLargestCacheLine;
}
1261
/*
 * IPI handler for KeFlushEntireTb: acknowledges the packet, then flushes
 * the TLB on the processor that received the IPI.
 */
VOID
NTAPI
KiFlushTargetEntireTb(IN PKIPI_CONTEXT PacketContext,
                      IN PVOID Ignored1,
                      IN PVOID Ignored2,
                      IN PVOID Ignored3)
{
    /* Signal this packet as done */
    KiIpiSignalPacketDone(PacketContext);

    /* Flush the TB for the Current CPU */
    KeFlushCurrentTb();
}
1275
1276 /*
1277 * @implemented
1278 */
/*
 * @implemented
 *
 * Flushes the entire translation buffer on the current processor and, on
 * SMP builds, IPIs all other active processors to do the same. Runs at
 * SYNCH_LEVEL and bumps KiTbFlushTimeStamp when done.
 */
VOID
NTAPI
KeFlushEntireTb(IN BOOLEAN Invalid,
                IN BOOLEAN AllProcessors)
{
    KIRQL OldIrql;
#ifdef CONFIG_SMP
    KAFFINITY TargetAffinity;
    PKPRCB Prcb = KeGetCurrentPrcb();
#endif

    /* Raise the IRQL for the TB Flush */
    OldIrql = KeRaiseIrqlToSynchLevel();

#ifdef CONFIG_SMP
    /* FIXME: Use KiTbFlushTimeStamp to synchronize TB flush */

    /* Get the current processor affinity, and exclude ourselves */
    TargetAffinity = KeActiveProcessors;
    TargetAffinity &= ~Prcb->SetMember;

    /* Make sure this is MP */
    if (TargetAffinity)
    {
        /* Send an IPI TB flush to the other processors */
        KiIpiSendPacket(TargetAffinity,
                        KiFlushTargetEntireTb,
                        NULL,
                        0,
                        NULL);
    }
#endif

    /* Flush the TB for the Current CPU, and update the flush stamp */
    KeFlushCurrentTb();

#ifdef CONFIG_SMP
    /* If this is MP, wait for the other processors to finish */
    if (TargetAffinity)
    {
        /* Sanity check */
        ASSERT(Prcb == (volatile PKPRCB)KeGetCurrentPrcb());

        /* FIXME: TODO */
        ASSERTMSG("Not yet implemented\n", FALSE);
    }
#endif

    /* Update the flush stamp and return to original IRQL */
    InterlockedExchangeAdd(&KiTbFlushTimeStamp, 1);
    KeLowerIrql(OldIrql);
}
1331
1332 /*
1333 * @implemented
1334 */
/*
 * @implemented
 *
 * Records the platform's DMA I/O coherency requirements in the global
 * KiDmaIoCoherency variable.
 */
VOID
NTAPI
KeSetDmaIoCoherency(IN ULONG Coherency)
{
    /* Save the coherency globally */
    KiDmaIoCoherency = Coherency;
}
1342
1343 /*
1344 * @implemented
1345 */
/*
 * @implemented
 *
 * Returns the affinity mask of all currently active processors.
 * Callable at PASSIVE_LEVEL only (paged code).
 */
KAFFINITY
NTAPI
KeQueryActiveProcessors(VOID)
{
    PAGED_CODE();

    /* Simply return the number of active processors */
    return KeActiveProcessors;
}
1355
1356 /*
1357 * @implemented
1358 */
/*
 * @implemented
 *
 * Captures the full processor state (register context plus control state)
 * into the given KPROCESSOR_STATE, for use by hibernation.
 */
VOID
__cdecl
KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
{
    /* Capture the context */
    RtlCaptureContext(&State->ContextFrame);

    /* Capture the control state */
    KiSaveProcessorControlState(State);
}