[NTOSKRNL] Handle some more KeFeatureFlags in amd64/cpu.c and set RtlpUse16ByteSLists
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/ke/amd64/cpu.c
5 * PURPOSE: Routines for CPU-level support
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 * Timo Kreuzer (timo.kreuzer@reactos.org)
8 */
9
10 /* INCLUDES *****************************************************************/
11
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15
16 /* FIXME: Local EFLAGS defines not used anywhere else */
17 #define EFLAGS_IOPL 0x3000
18 #define EFLAGS_NF 0x4000
19 #define EFLAGS_RF 0x10000
20 #define EFLAGS_ID 0x200000
21
22 /* GLOBALS *******************************************************************/
23
24 /* The Boot TSS */
25 KTSS64 KiBootTss;
26
27 /* CPU Features and Flags */
28 ULONG KeI386CpuType;
29 ULONG KeI386CpuStep;
30 ULONG KeI386MachineType;
31 ULONG KeI386NpxPresent = 1;
32 ULONG KeLargestCacheLine = 0x40;
33 ULONG KiDmaIoCoherency = 0;
34 BOOLEAN KiSMTProcessorsPresent;
35
36 /* Freeze data */
37 KIRQL KiOldIrql;
38 ULONG KiFreezeFlag;
39
40 /* Flush data */
41 volatile LONG KiTbFlushTimeStamp;
42
43 /* CPU Signatures */
44 static const CHAR CmpIntelID[] = "GenuineIntel";
45 static const CHAR CmpAmdID[] = "AuthenticAMD";
46 static const CHAR CmpCyrixID[] = "CyrixInstead";
47 static const CHAR CmpTransmetaID[] = "GenuineTMx86";
48 static const CHAR CmpCentaurID[] = "CentaurHauls";
49 static const CHAR CmpRiseID[] = "RiseRiseRise";
50
51 /* SUPPORT ROUTINES FOR MSVC COMPATIBILITY ***********************************/
52
53 VOID
54 NTAPI
55 CPUID(IN ULONG InfoType,
56 OUT PULONG CpuInfoEax,
57 OUT PULONG CpuInfoEbx,
58 OUT PULONG CpuInfoEcx,
59 OUT PULONG CpuInfoEdx)
60 {
61 ULONG CpuInfo[4];
62
63 /* Perform the CPUID Operation */
64 __cpuid((int*)CpuInfo, InfoType);
65
66 /* Return the results */
67 *CpuInfoEax = CpuInfo[0];
68 *CpuInfoEbx = CpuInfo[1];
69 *CpuInfoEcx = CpuInfo[2];
70 *CpuInfoEdx = CpuInfo[3];
71 }
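/*
 * Editor's note: a minimal, hypothetical usage sketch of the CPUID wrapper
 * above, querying the highest supported basic leaf; the variable names are
 * illustrative only:
 *
 *     ULONG MaxBasicLeaf, RegEbx, RegEcx, RegEdx;
 *     CPUID(0, &MaxBasicLeaf, &RegEbx, &RegEcx, &RegEdx);
 */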
72
73 /* FUNCTIONS *****************************************************************/
74
75 VOID
76 NTAPI
77 KiSetProcessorType(VOID)
78 {
79 ULONG64 EFlags;
80 INT Reg[4];
81 ULONG Stepping, Type;
82
83 /* Start by assuming no CPUID data */
84 KeGetCurrentPrcb()->CpuID = 0;
85
86 /* Save EFlags */
87 EFlags = __readeflags();
88
89 /* Do CPUID 1 now */
90 __cpuid(Reg, 1);
91
92 /*
93 * Get the Stepping and Type. The stepping contains both the
94 * Model and the Step, while the Type is taken from the Family field.
95 * We ignore the CPUID processor-type bits (13:12).
96 *
97 * For the stepping, we convert this: zzzzzzxy into this: x0y
98 */
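/*
 * Editor's note (worked example): for CPUID.1 EAX = 0x000206A7 the low byte
 * is 0xA7 (model 0xA, stepping 7), so the code below computes
 * 0xA0 << 4 = 0xA00, plus 0xA7 gives 0xAA7, masked with 0xF0F yields
 * CpuStep = 0xA07; Type (the family field) becomes 6.
 */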
99 Stepping = Reg[0] & 0xF0;
100 Stepping <<= 4;
101 Stepping += (Reg[0] & 0xFF);
102 Stepping &= 0xF0F;
103 Type = Reg[0] & 0xF00;
104 Type >>= 8;
105
106 /* Save them in the PRCB */
107 KeGetCurrentPrcb()->CpuID = TRUE;
108 KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
109 KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;
110
111 /* Restore EFLAGS */
112 __writeeflags(EFlags);
113 }
114
115 ULONG
116 NTAPI
117 KiGetCpuVendor(VOID)
118 {
119 PKPRCB Prcb = KeGetCurrentPrcb();
120 INT Vendor[5];
121
122 /* Get the Vendor ID */
123 __cpuid(Vendor, 0);
124
125 /* Copy it to the PRCB and null-terminate it */
126 *(ULONG*)&Prcb->VendorString[0] = Vendor[1]; // ebx
127 *(ULONG*)&Prcb->VendorString[4] = Vendor[3]; // edx
128 *(ULONG*)&Prcb->VendorString[8] = Vendor[2]; // ecx
129 *(ULONG*)&Prcb->VendorString[12] = 0;
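/*
 * Editor's note (worked example): CPUID leaf 0 returns the 12-byte vendor
 * string in EBX, EDX, ECX order; on an Intel CPU the registers hold "Genu",
 * "ineI" and "ntel", which the three copies above assemble into
 * "GenuineIntel".
 */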
130
131 /* Now check the CPU Type */
132 if (!strcmp((PCHAR)Prcb->VendorString, CmpIntelID))
133 {
134 return CPU_INTEL;
135 }
136 else if (!strcmp((PCHAR)Prcb->VendorString, CmpAmdID))
137 {
138 return CPU_AMD;
139 }
140 else if (!strcmp((PCHAR)Prcb->VendorString, CmpCyrixID))
141 {
142 DPRINT1("Cyrix CPUs not fully supported\n");
143 return 0;
144 }
145 else if (!strcmp((PCHAR)Prcb->VendorString, CmpTransmetaID))
146 {
147 DPRINT1("Transmeta CPUs not fully supported\n");
148 return 0;
149 }
150 else if (!strcmp((PCHAR)Prcb->VendorString, CmpCentaurID))
151 {
152 DPRINT1("VIA CPUs not fully supported\n");
153 return 0;
154 }
155 else if (!strcmp((PCHAR)Prcb->VendorString, CmpRiseID))
156 {
157 DPRINT1("Rise CPUs not fully supported\n");
158 return 0;
159 }
160
161 /* Invalid CPU */
162 return 0;
163 }
164
165 ULONG
166 NTAPI
167 KiGetFeatureBits(VOID)
168 {
169 PKPRCB Prcb = KeGetCurrentPrcb();
170 ULONG Vendor;
171 ULONG FeatureBits = KF_WORKING_PTE;
172 INT Reg[4];
173 ULONG CpuFeatures = 0;
174
175 /* Get the Vendor ID */
176 Vendor = KiGetCpuVendor();
177
178 /* Make sure we got a valid vendor ID at least. */
179 if (!Vendor) return FeatureBits;
180
181 /* Get the CPUID Info. Features are in Reg[3]. */
182 __cpuid(Reg, 1);
183
184 /* Set the initial APIC ID */
185 Prcb->InitialApicId = (UCHAR)(Reg[1] >> 24);
186
187 /* Set the current features */
188 CpuFeatures = Reg[3];
189
190 /* Convert all CPUID Feature bits into our format */
191 if (CpuFeatures & 0x00000002) FeatureBits |= KF_V86_VIS | KF_CR4;  // VME
192 if (CpuFeatures & 0x00000008) FeatureBits |= KF_LARGE_PAGE | KF_CR4; // PSE
193 if (CpuFeatures & 0x00000010) FeatureBits |= KF_RDTSC;  // TSC
194 if (CpuFeatures & 0x00000100) FeatureBits |= KF_CMPXCHG8B;  // CX8
195 if (CpuFeatures & 0x00000800) FeatureBits |= KF_FAST_SYSCALL;  // SEP
196 if (CpuFeatures & 0x00001000) FeatureBits |= KF_MTRR;  // MTRR
197 if (CpuFeatures & 0x00002000) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;  // PGE
198 if (CpuFeatures & 0x00008000) FeatureBits |= KF_CMOV;  // CMOV
199 if (CpuFeatures & 0x00010000) FeatureBits |= KF_PAT;  // PAT
200 if (CpuFeatures & 0x00200000) FeatureBits |= KF_DTS;  // DS (Debug Store)
201 if (CpuFeatures & 0x00800000) FeatureBits |= KF_MMX;  // MMX
202 if (CpuFeatures & 0x01000000) FeatureBits |= KF_FXSR;  // FXSR
203 if (CpuFeatures & 0x02000000) FeatureBits |= KF_XMMI;  // SSE
204 if (CpuFeatures & 0x04000000) FeatureBits |= KF_XMMI64;  // SSE2
205
206 if (Reg[2] & 0x00000001) FeatureBits |= KF_SSE3;  // SSE3
207 //if (Reg[2] & 0x00000008) FeatureBits |= KF_MONITOR;  // MONITOR/MWAIT
208 //if (Reg[2] & 0x00000200) FeatureBits |= KF_SSE3SUP;  // SSSE3
209 if (Reg[2] & 0x00002000) FeatureBits |= KF_CMPXCHG16B;  // CMPXCHG16B
210 //if (Reg[2] & 0x00080000) FeatureBits |= KF_SSE41;  // SSE4.1
211 //if (Reg[2] & 0x00800000) FeatureBits |= KF_POPCNT;  // POPCNT
212 if (Reg[2] & 0x04000000) FeatureBits |= KF_XSTATE;  // XSAVE
213
214 /* Check if the CPU has hyper-threading */
215 if (CpuFeatures & 0x10000000)
216 {
217 /* Set the number of logical CPUs */
218 Prcb->LogicalProcessorsPerPhysicalProcessor = (UCHAR)(Reg[1] >> 16);
219 if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
220 {
221 /* Multiple logical processors per physical package (SMT) */
222 KiSMTProcessorsPresent = TRUE;
223 }
224 }
225 else
226 {
227 /* We only have a single CPU */
228 Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
229 }
230
231 /* Check extended cpuid features */
232 __cpuid(Reg, 0x80000000);
233 if ((Reg[0] & 0xffffff00) == 0x80000000)
234 {
235 /* Check if CPUID 0x80000001 is supported */
236 if (Reg[0] >= 0x80000001)
237 {
238 /* Check which extended features are available. */
239 __cpuid(Reg, 0x80000001);
240
241 /* Check if NX-bit is supported */
242 if (Reg[3] & 0x00100000) FeatureBits |= KF_NX_BIT;
243
244 /* Now handle vendor-specific features */
245 switch (Vendor)
246 {
247 case CPU_AMD:
248 if (Reg[3] & 0x80000000) FeatureBits |= KF_3DNOW;
249 break;
250 }
251 }
252 }
253
254 /* Return the Feature Bits */
255 return FeatureBits;
256 }
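/*
 * Editor's note: the commit title also mentions setting RtlpUse16ByteSLists,
 * which does not happen in this file. A minimal sketch of how the
 * KF_CMPXCHG16B bit collected above could drive it, assuming the flag is
 * consumed during kernel initialization (e.g. in KiInitializeKernel):
 *
 *     KeFeatureBits = KiGetFeatureBits();
 *
 *     // 16-byte interlocked SLists need CMPXCHG16B support on amd64
 *     if (KeFeatureBits & KF_CMPXCHG16B)
 *     {
 *         RtlpUse16ByteSLists = TRUE;
 *     }
 */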
257
258 VOID
259 NTAPI
260 KiGetCacheInformation(VOID)
261 {
262 PKIPCR Pcr = (PKIPCR)KeGetPcr();
263 ULONG Vendor;
264 INT Data[4];
265 ULONG CacheRequests = 0, i;
266 ULONG CurrentRegister;
267 UCHAR RegisterByte;
268 BOOLEAN FirstPass = TRUE;
269
270 /* Set default L2 size */
271 Pcr->SecondLevelCacheSize = 0;
272
273 /* Get the Vendor ID and make sure we support CPUID */
274 Vendor = KiGetCpuVendor();
275 if (!Vendor) return;
276
277 /* Check the Vendor ID */
278 switch (Vendor)
279 {
280 /* Handle Intel case */
281 case CPU_INTEL:
282
283 /* Check if we support CPUID 2 */
284 __cpuid(Data, 0);
285 if (Data[0] >= 2)
286 {
287 /* We need to loop for the number of times CPUID will tell us to */
288 do
289 {
290 /* Do the CPUID call */
291 __cpuid(Data, 2);
292
293 /* Check if it was the first call */
294 if (FirstPass)
295 {
296 /*
297 * The number of times to loop is the first byte. Read
298 * it and then destroy it so we don't get confused.
299 */
300 CacheRequests = Data[0] & 0xFF;
301 Data[0] &= 0xFFFFFF00;
302
303 /* Don't go over this again */
304 FirstPass = FALSE;
305 }
306
307 /* Loop all 4 registers */
308 for (i = 0; i < 4; i++)
309 {
310 /* Get the current register */
311 CurrentRegister = Data[i];
312
313 /*
314 * If the upper bit is set, then this register should
315 * be skipped.
316 */
317 if (CurrentRegister & 0x80000000) continue;
318
319 /* Keep looping for every byte inside this register */
320 while (CurrentRegister)
321 {
322 /* Read a byte, skip a byte. */
323 RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
324 CurrentRegister >>= 8;
325 if (!RegisterByte) continue;
326
327 /*
328 * Valid values are from 0x40 (0 bytes) to 0x49
329 * (32MB), or from 0x80 to 0x89 (same size but
330 * 8-way associative).
331 */
332 if (((RegisterByte > 0x40) &&
333 (RegisterByte <= 0x49)) ||
334 ((RegisterByte > 0x80) &&
335 (RegisterByte <= 0x89)))
336 {
337 /* Mask out only the first nibble */
338 RegisterByte &= 0x0F;
339
340 /* Set the L2 Cache Size */
341 Pcr->SecondLevelCacheSize = 0x10000 <<
342 RegisterByte;
343 }
344 }
345 }
346 } while (--CacheRequests);
347 }
348 break;
349
350 case CPU_AMD:
351
352 /* Check if we support CPUID 0x80000006 */
353 __cpuid(Data, 0x80000000);
354 if (Data[0] >= 0x80000006)
355 {
356 /* Get 2nd level cache and tlb size */
357 __cpuid(Data, 0x80000006);
358
359 /* Set the L2 Cache Size: ECX[31:16] holds the size in KB, so a net shift right by 6 (>> 16, then << 10) converts it to bytes */
360 Pcr->SecondLevelCacheSize = (Data[2] & 0xFFFF0000) >> 6;
361 }
362 break;
363 }
364 }
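/*
 * Editor's note (worked example): with the Intel descriptor parsing above, a
 * CPUID(2) descriptor byte of 0x43 keeps its low nibble 0x3, giving
 * SecondLevelCacheSize = 0x10000 << 3 = 0x80000 (512 KB). On the AMD path,
 * ECX = 0x02004140 from CPUID 0x80000006 means an L2 of 0x0200 KB, and
 * (ECX & 0xFFFF0000) >> 6 likewise yields 0x80000 bytes.
 */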
365
366 VOID
367 NTAPI
368 KeFlushCurrentTb(VOID)
369 {
370 /* Flush the TLB by resetting CR3 */
371 __writecr3(__readcr3());
372 }
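/*
 * Editor's note: reloading CR3 does not invalidate TLB entries for global
 * pages. A sketch of a full flush, assuming a CR4_PGE constant for CR4 bit 7
 * (0x80) and that briefly clearing it is acceptable at this point:
 *
 *     ULONG64 Cr4 = __readcr4();
 *     __writecr4(Cr4 & ~CR4_PGE); // clearing PGE flushes global TLB entries
 *     __writecr4(Cr4);            // restore PGE
 */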
373
374 VOID
375 NTAPI
376 KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
377 {
378 /* Restore the CR registers */
379 __writecr0(ProcessorState->SpecialRegisters.Cr0);
380 // __writecr2(ProcessorState->SpecialRegisters.Cr2);
381 __writecr3(ProcessorState->SpecialRegisters.Cr3);
382 __writecr4(ProcessorState->SpecialRegisters.Cr4);
383 __writecr8(ProcessorState->SpecialRegisters.Cr8);
384
385 /* Restore the DR registers */
386 __writedr(0, ProcessorState->SpecialRegisters.KernelDr0);
387 __writedr(1, ProcessorState->SpecialRegisters.KernelDr1);
388 __writedr(2, ProcessorState->SpecialRegisters.KernelDr2);
389 __writedr(3, ProcessorState->SpecialRegisters.KernelDr3);
390 __writedr(6, ProcessorState->SpecialRegisters.KernelDr6);
391 __writedr(7, ProcessorState->SpecialRegisters.KernelDr7);
392
393 /* Restore GDT, IDT, LDT and TSS */
394 __lgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
395 // __lldt(&ProcessorState->SpecialRegisters.Ldtr);
396 // __ltr(&ProcessorState->SpecialRegisters.Tr);
397 __lidt(&ProcessorState->SpecialRegisters.Idtr.Limit);
398
399 // __ldmxcsr(&ProcessorState->SpecialRegisters.MxCsr); // FIXME
400 // ProcessorState->SpecialRegisters.DebugControl
401 // ProcessorState->SpecialRegisters.LastBranchToRip
402 // ProcessorState->SpecialRegisters.LastBranchFromRip
403 // ProcessorState->SpecialRegisters.LastExceptionToRip
404 // ProcessorState->SpecialRegisters.LastExceptionFromRip
405
406 /* Restore MSRs */
407 __writemsr(X86_MSR_GSBASE, ProcessorState->SpecialRegisters.MsrGsBase);
408 __writemsr(X86_MSR_KERNEL_GSBASE, ProcessorState->SpecialRegisters.MsrGsSwap);
409 __writemsr(X86_MSR_STAR, ProcessorState->SpecialRegisters.MsrStar);
410 __writemsr(X86_MSR_LSTAR, ProcessorState->SpecialRegisters.MsrLStar);
411 __writemsr(X86_MSR_CSTAR, ProcessorState->SpecialRegisters.MsrCStar);
412 __writemsr(X86_MSR_SFMASK, ProcessorState->SpecialRegisters.MsrSyscallMask);
413
414 }
415
416 VOID
417 NTAPI
418 KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
419 {
420 /* Save the CR registers */
421 ProcessorState->SpecialRegisters.Cr0 = __readcr0();
422 ProcessorState->SpecialRegisters.Cr2 = __readcr2();
423 ProcessorState->SpecialRegisters.Cr3 = __readcr3();
424 ProcessorState->SpecialRegisters.Cr4 = __readcr4();
425 ProcessorState->SpecialRegisters.Cr8 = __readcr8();
426
427 /* Save the DR registers */
428 ProcessorState->SpecialRegisters.KernelDr0 = __readdr(0);
429 ProcessorState->SpecialRegisters.KernelDr1 = __readdr(1);
430 ProcessorState->SpecialRegisters.KernelDr2 = __readdr(2);
431 ProcessorState->SpecialRegisters.KernelDr3 = __readdr(3);
432 ProcessorState->SpecialRegisters.KernelDr6 = __readdr(6);
433 ProcessorState->SpecialRegisters.KernelDr7 = __readdr(7);
434
435 /* Save GDT, IDT, LDT and TSS */
436 __sgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
437 __sldt(&ProcessorState->SpecialRegisters.Ldtr);
438 __str(&ProcessorState->SpecialRegisters.Tr);
439 __sidt(&ProcessorState->SpecialRegisters.Idtr.Limit);
440
441 // __stmxcsr(&ProcessorState->SpecialRegisters.MxCsr);
442 // ProcessorState->SpecialRegisters.DebugControl =
443 // ProcessorState->SpecialRegisters.LastBranchToRip =
444 // ProcessorState->SpecialRegisters.LastBranchFromRip =
445 // ProcessorState->SpecialRegisters.LastExceptionToRip =
446 // ProcessorState->SpecialRegisters.LastExceptionFromRip =
447
448 /* Save MSRs */
449 ProcessorState->SpecialRegisters.MsrGsBase = __readmsr(X86_MSR_GSBASE);
450 ProcessorState->SpecialRegisters.MsrGsSwap = __readmsr(X86_MSR_KERNEL_GSBASE);
451 ProcessorState->SpecialRegisters.MsrStar = __readmsr(X86_MSR_STAR);
452 ProcessorState->SpecialRegisters.MsrLStar = __readmsr(X86_MSR_LSTAR);
453 ProcessorState->SpecialRegisters.MsrCStar = __readmsr(X86_MSR_CSTAR);
454 ProcessorState->SpecialRegisters.MsrSyscallMask = __readmsr(X86_MSR_SFMASK);
455 }
456
457 VOID
458 NTAPI
459 KeFlushEntireTb(IN BOOLEAN Invalid,
460 IN BOOLEAN AllProcessors)
461 {
462 KIRQL OldIrql;
463
464 // FIXME: Half-implemented - only the current processor's TB is flushed; no IPI is sent to other processors
465 /* Raise the IRQL for the TB Flush */
466 OldIrql = KeRaiseIrqlToSynchLevel();
467
468 /* Flush the TB for the current CPU */
469 KeFlushCurrentTb();
470
471 /* Update the flush stamp and return to original IRQL */
472 InterlockedExchangeAdd(&KiTbFlushTimeStamp, 1);
473 KeLowerIrql(OldIrql);
474
475 }
476
477 KAFFINITY
478 NTAPI
479 KeQueryActiveProcessors(VOID)
480 {
481 PAGED_CODE();
482
483 /* Simply return the number of active processors */
484 return KeActiveProcessors;
485 }
486
487 NTSTATUS
488 NTAPI
489 KxSaveFloatingPointState(OUT PKFLOATING_SAVE FloatingState)
490 {
491 UNREFERENCED_PARAMETER(FloatingState);
492 return STATUS_SUCCESS;
493 }
494
495 NTSTATUS
496 NTAPI
497 KxRestoreFloatingPointState(IN PKFLOATING_SAVE FloatingState)
498 {
499 UNREFERENCED_PARAMETER(FloatingState);
500 return STATUS_SUCCESS;
501 }
502
503 BOOLEAN
504 NTAPI
505 KeInvalidateAllCaches(VOID)
506 {
507 /* Invalidate all caches */
508 __wbinvd();
509 return TRUE;
510 }
511
512 /*
513 * @implemented
514 */
515 ULONG
516 NTAPI
517 KeGetRecommendedSharedDataAlignment(VOID)
518 {
519 /* Return the global variable */
520 return KeLargestCacheLine;
521 }
522
523 /*
524 * @implemented
525 */
526 VOID
527 __cdecl
528 KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
529 {
530 /* Capture the context */
531 RtlCaptureContext(&State->ContextFrame);
532
533 /* Capture the control state */
534 KiSaveProcessorControlState(State);
535 }
536
537 /*
538 * @implemented
539 */
540 VOID
541 NTAPI
542 KeSetDmaIoCoherency(IN ULONG Coherency)
543 {
544 /* Save the coherency globally */
545 KiDmaIoCoherency = Coherency;
546 }