/* [gitweb export] reactos.git / reactos/ntoskrnl/ke/amd64/cpu.c */
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/ke/amd64/cpu.c
5 * PURPOSE: Routines for CPU-level support
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 * Timo Kreuzer (timo.kreuzer@reactos.org)
8 */
9
10 /* INCLUDES *****************************************************************/
11
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15
16 /* FIXME: Local EFLAGS defines not used anywhere else */
17 #define EFLAGS_IOPL 0x3000
18 #define EFLAGS_NF 0x4000
19 #define EFLAGS_RF 0x10000
20 #define EFLAGS_ID 0x200000
21
/* GLOBALS *******************************************************************/

/* The Boot TSS */
KTSS64 KiBootTss;

/* CPU Features and Flags.
   NOTE(review): the KeI386* names are kept on amd64 — presumably for
   source compatibility with the i386 kernel; confirm before renaming. */
ULONG KeI386CpuType;
ULONG KeI386CpuStep;
ULONG KeI386MachineType;
ULONG KeI386NpxPresent = 1;      /* nonzero: FPU assumed present by default */
ULONG KeLargestCacheLine = 0x40; /* default cache-line size (64 bytes);
                                    returned by KeGetRecommendedSharedDataAlignment */
ULONG KiDmaIoCoherency = 0;      /* written by KeSetDmaIoCoherency */
BOOLEAN KiSMTProcessorsPresent;  /* set TRUE by KiGetFeatureBits when CPUID
                                    reports >1 logical processor per package */

/* Freeze data */
KIRQL KiOldIrql;
ULONG KiFreezeFlag;

/* Flush data: incremented by KeFlushEntireTb on every full TB flush */
volatile LONG KiTbFlushTimeStamp;

/* CPU vendor signatures as returned by CPUID leaf 0 (EBX:EDX:ECX) */
static const CHAR CmpIntelID[]     = "GenuineIntel";
static const CHAR CmpAmdID[]       = "AuthenticAMD";
static const CHAR CmpCyrixID[]     = "CyrixInstead";
static const CHAR CmpTransmetaID[] = "GenuineTMx86";
static const CHAR CmpCentaurID[]   = "CentaurHauls";
static const CHAR CmpRiseID[]      = "RiseRiseRise";
50
51 /* SUPPORT ROUTINES FOR MSVC COMPATIBILITY ***********************************/
52
53 VOID
54 NTAPI
55 CPUID(IN ULONG InfoType,
56 OUT PULONG CpuInfoEax,
57 OUT PULONG CpuInfoEbx,
58 OUT PULONG CpuInfoEcx,
59 OUT PULONG CpuInfoEdx)
60 {
61 ULONG CpuInfo[4];
62
63 /* Perform the CPUID Operation */
64 __cpuid((int*)CpuInfo, InfoType);
65
66 /* Return the results */
67 *CpuInfoEax = CpuInfo[0];
68 *CpuInfoEbx = CpuInfo[1];
69 *CpuInfoEcx = CpuInfo[2];
70 *CpuInfoEdx = CpuInfo[3];
71 }
72
73 /* FUNCTIONS *****************************************************************/
74
75 VOID
76 NTAPI
77 KiSetProcessorType(VOID)
78 {
79 ULONG64 EFlags;
80 INT Reg[4];
81 ULONG Stepping, Type;
82
83 /* Start by assuming no CPUID data */
84 KeGetCurrentPrcb()->CpuID = 0;
85
86 /* Save EFlags */
87 EFlags = __readeflags();
88
89 /* Do CPUID 1 now */
90 __cpuid(Reg, 1);
91
92 /*
93 * Get the Stepping and Type. The stepping contains both the
94 * Model and the Step, while the Type contains the returned Type.
95 * We ignore the family.
96 *
97 * For the stepping, we convert this: zzzzzzxy into this: x0y
98 */
99 Stepping = Reg[0] & 0xF0;
100 Stepping <<= 4;
101 Stepping += (Reg[0] & 0xFF);
102 Stepping &= 0xF0F;
103 Type = Reg[0] & 0xF00;
104 Type >>= 8;
105
106 /* Save them in the PRCB */
107 KeGetCurrentPrcb()->CpuID = TRUE;
108 KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
109 KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;
110
111 /* Restore EFLAGS */
112 __writeeflags(EFlags);
113 }
114
115 ULONG
116 NTAPI
117 KiGetCpuVendor(VOID)
118 {
119 PKPRCB Prcb = KeGetCurrentPrcb();
120 INT Vendor[5];
121
122 /* Get the Vendor ID and null-terminate it */
123 __cpuid(Vendor, 0);
124
125 /* Copy it to the PRCB and null-terminate it */
126 *(ULONG*)&Prcb->VendorString[0] = Vendor[1]; // ebx
127 *(ULONG*)&Prcb->VendorString[4] = Vendor[3]; // edx
128 *(ULONG*)&Prcb->VendorString[8] = Vendor[2]; // ecx
129 *(ULONG*)&Prcb->VendorString[12] = 0;
130
131 /* Now check the CPU Type */
132 if (!strcmp((PCHAR)Prcb->VendorString, CmpIntelID))
133 {
134 return CPU_INTEL;
135 }
136 else if (!strcmp((PCHAR)Prcb->VendorString, CmpAmdID))
137 {
138 return CPU_AMD;
139 }
140 else if (!strcmp((PCHAR)Prcb->VendorString, CmpCyrixID))
141 {
142 DPRINT1("Cyrix CPUs not fully supported\n");
143 return 0;
144 }
145 else if (!strcmp((PCHAR)Prcb->VendorString, CmpTransmetaID))
146 {
147 DPRINT1("Transmeta CPUs not fully supported\n");
148 return 0;
149 }
150 else if (!strcmp((PCHAR)Prcb->VendorString, CmpCentaurID))
151 {
152 DPRINT1("VIA CPUs not fully supported\n");
153 return 0;
154 }
155 else if (!strcmp((PCHAR)Prcb->VendorString, CmpRiseID))
156 {
157 DPRINT1("Rise CPUs not fully supported\n");
158 return 0;
159 }
160
161 /* Invalid CPU */
162 return 0;
163 }
164
165 ULONG
166 NTAPI
167 KiGetFeatureBits(VOID)
168 {
169 PKPRCB Prcb = KeGetCurrentPrcb();
170 ULONG Vendor;
171 ULONG FeatureBits = KF_WORKING_PTE;
172 INT Reg[4];
173 ULONG CpuFeatures = 0;
174
175 /* Get the Vendor ID */
176 Vendor = KiGetCpuVendor();
177
178 /* Make sure we got a valid vendor ID at least. */
179 if (!Vendor) return FeatureBits;
180
181 /* Get the CPUID Info. Features are in Reg[3]. */
182 __cpuid(Reg, 1);
183
184 /* Set the initial APIC ID */
185 Prcb->InitialApicId = (UCHAR)(Reg[1] >> 24);
186
187 /* Set the current features */
188 CpuFeatures = Reg[3];
189
190 /* Convert all CPUID Feature bits into our format */
191 if (CpuFeatures & 0x00000002) FeatureBits |= KF_V86_VIS | KF_CR4;
192 if (CpuFeatures & 0x00000008) FeatureBits |= KF_LARGE_PAGE | KF_CR4;
193 if (CpuFeatures & 0x00000010) FeatureBits |= KF_RDTSC;
194 if (CpuFeatures & 0x00000100) FeatureBits |= KF_CMPXCHG8B;
195 if (CpuFeatures & 0x00000800) FeatureBits |= KF_FAST_SYSCALL;
196 if (CpuFeatures & 0x00001000) FeatureBits |= KF_MTRR;
197 if (CpuFeatures & 0x00002000) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;
198 if (CpuFeatures & 0x00008000) FeatureBits |= KF_CMOV;
199 if (CpuFeatures & 0x00010000) FeatureBits |= KF_PAT;
200 if (CpuFeatures & 0x00200000) FeatureBits |= KF_DTS;
201 if (CpuFeatures & 0x00800000) FeatureBits |= KF_MMX;
202 if (CpuFeatures & 0x01000000) FeatureBits |= KF_FXSR;
203 if (CpuFeatures & 0x02000000) FeatureBits |= KF_XMMI;
204 if (CpuFeatures & 0x04000000) FeatureBits |= KF_XMMI64;
205
206 #if 0
207 if (Reg[2] & 0x00000001) FeatureBits |= KF_SSE3NEW;
208 if (Reg[2] & 0x00000008) FeatureBits |= KF_MONITOR;
209 if (Reg[2] & 0x00000200) FeatureBits |= KF_SSE3SUP;
210 if (Reg[2] & 0x00002000) FeatureBits |= KF_CMPXCHG16B;
211 if (Reg[2] & 0x00080000) FeatureBits |= KF_SSE41;
212 if (Reg[2] & 0x00800000) FeatureBits |= KF_POPCNT;
213 #endif
214
215 /* Check if the CPU has hyper-threading */
216 if (CpuFeatures & 0x10000000)
217 {
218 /* Set the number of logical CPUs */
219 Prcb->LogicalProcessorsPerPhysicalProcessor = (UCHAR)(Reg[1] >> 16);
220 if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
221 {
222 /* We're on dual-core */
223 KiSMTProcessorsPresent = TRUE;
224 }
225 }
226 else
227 {
228 /* We only have a single CPU */
229 Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
230 }
231
232 /* Check extended cpuid features */
233 __cpuid(Reg, 0x80000000);
234 if ((Reg[0] & 0xffffff00) == 0x80000000)
235 {
236 /* Check if CPUID 0x80000001 is supported */
237 if (Reg[0] >= 0x80000001)
238 {
239 /* Check which extended features are available. */
240 __cpuid(Reg, 0x80000001);
241
242 /* Check if NX-bit is supported */
243 if (Reg[3] & 0x00100000) FeatureBits |= KF_NX_BIT;
244
245 /* Now handle each features for each CPU Vendor */
246 switch (Vendor)
247 {
248 case CPU_AMD:
249 if (Reg[3] & 0x80000000) FeatureBits |= KF_3DNOW;
250 break;
251 }
252 }
253 }
254
255 /* Return the Feature Bits */
256 return FeatureBits;
257 }
258
259 VOID
260 NTAPI
261 KiGetCacheInformation(VOID)
262 {
263 PKIPCR Pcr = (PKIPCR)KeGetPcr();
264 ULONG Vendor;
265 INT Data[4];
266 ULONG CacheRequests = 0, i;
267 ULONG CurrentRegister;
268 UCHAR RegisterByte;
269 BOOLEAN FirstPass = TRUE;
270
271 /* Set default L2 size */
272 Pcr->SecondLevelCacheSize = 0;
273
274 /* Get the Vendor ID and make sure we support CPUID */
275 Vendor = KiGetCpuVendor();
276 if (!Vendor) return;
277
278 /* Check the Vendor ID */
279 switch (Vendor)
280 {
281 /* Handle Intel case */
282 case CPU_INTEL:
283
284 /*Check if we support CPUID 2 */
285 __cpuid(Data, 0);
286 if (Data[0] >= 2)
287 {
288 /* We need to loop for the number of times CPUID will tell us to */
289 do
290 {
291 /* Do the CPUID call */
292 __cpuid(Data, 2);
293
294 /* Check if it was the first call */
295 if (FirstPass)
296 {
297 /*
298 * The number of times to loop is the first byte. Read
299 * it and then destroy it so we don't get confused.
300 */
301 CacheRequests = Data[0] & 0xFF;
302 Data[0] &= 0xFFFFFF00;
303
304 /* Don't go over this again */
305 FirstPass = FALSE;
306 }
307
308 /* Loop all 4 registers */
309 for (i = 0; i < 4; i++)
310 {
311 /* Get the current register */
312 CurrentRegister = Data[i];
313
314 /*
315 * If the upper bit is set, then this register should
316 * be skipped.
317 */
318 if (CurrentRegister & 0x80000000) continue;
319
320 /* Keep looping for every byte inside this register */
321 while (CurrentRegister)
322 {
323 /* Read a byte, skip a byte. */
324 RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
325 CurrentRegister >>= 8;
326 if (!RegisterByte) continue;
327
328 /*
329 * Valid values are from 0x40 (0 bytes) to 0x49
330 * (32MB), or from 0x80 to 0x89 (same size but
331 * 8-way associative.
332 */
333 if (((RegisterByte > 0x40) &&
334 (RegisterByte <= 0x49)) ||
335 ((RegisterByte > 0x80) &&
336 (RegisterByte <= 0x89)))
337 {
338 /* Mask out only the first nibble */
339 RegisterByte &= 0x0F;
340
341 /* Set the L2 Cache Size */
342 Pcr->SecondLevelCacheSize = 0x10000 <<
343 RegisterByte;
344 }
345 }
346 }
347 } while (--CacheRequests);
348 }
349 break;
350
351 case CPU_AMD:
352
353 /* Check if we support CPUID 0x80000006 */
354 __cpuid(Data, 0x80000000);
355 if (Data[0] >= 6)
356 {
357 /* Get 2nd level cache and tlb size */
358 __cpuid(Data, 0x80000006);
359
360 /* Set the L2 Cache Size */
361 Pcr->SecondLevelCacheSize = (Data[2] & 0xFFFF0000) >> 6;
362 }
363 break;
364 }
365 }
366
367 VOID
368 NTAPI
369 KeFlushCurrentTb(VOID)
370 {
371 /* Flush the TLB by resetting CR3 */
372 __writecr3(__readcr3());
373 }
374
/* Restores the special (non-context-frame) processor state previously
   captured by KiSaveProcessorControlState: control registers, debug
   registers, descriptor tables and the syscall/GS MSRs. */
VOID
NTAPI
KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
{
    /* Restore the CR registers.
       NOTE(review): CR2 restore is commented out — presumably intentional,
       as CR2 only latches the last page-fault address; confirm. */
    __writecr0(ProcessorState->SpecialRegisters.Cr0);
//    __writecr2(ProcessorState->SpecialRegisters.Cr2);
    __writecr3(ProcessorState->SpecialRegisters.Cr3);
    __writecr4(ProcessorState->SpecialRegisters.Cr4);
    __writecr8(ProcessorState->SpecialRegisters.Cr8);

    /* Restore the DR registers (DR0-DR3 breakpoints, DR6 status, DR7 control) */
    __writedr(0, ProcessorState->SpecialRegisters.KernelDr0);
    __writedr(1, ProcessorState->SpecialRegisters.KernelDr1);
    __writedr(2, ProcessorState->SpecialRegisters.KernelDr2);
    __writedr(3, ProcessorState->SpecialRegisters.KernelDr3);
    __writedr(6, ProcessorState->SpecialRegisters.KernelDr6);
    __writedr(7, ProcessorState->SpecialRegisters.KernelDr7);

    /* Restore GDT and IDT from the saved pseudo-descriptors.
       LDT and TR reloads are currently disabled (see commented lines). */
    __lgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
//    __lldt(&ProcessorState->SpecialRegisters.Ldtr);
//    __ltr(&ProcessorState->SpecialRegisters.Tr);
    __lidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

    /* Not restored yet (FIXME): MXCSR, DebugControl and the last
       branch/exception MSR shadows below. */
//    __ldmxcsr(&ProcessorState->SpecialRegisters.MxCsr); // FIXME
//    ProcessorState->SpecialRegisters.DebugControl
//    ProcessorState->SpecialRegisters.LastBranchToRip
//    ProcessorState->SpecialRegisters.LastBranchFromRip
//    ProcessorState->SpecialRegisters.LastExceptionToRip
//    ProcessorState->SpecialRegisters.LastExceptionFromRip

    /* Restore the GS base / syscall MSRs */
    __writemsr(X86_MSR_GSBASE, ProcessorState->SpecialRegisters.MsrGsBase);
    __writemsr(X86_MSR_KERNEL_GSBASE, ProcessorState->SpecialRegisters.MsrGsSwap);
    __writemsr(X86_MSR_STAR, ProcessorState->SpecialRegisters.MsrStar);
    __writemsr(X86_MSR_LSTAR, ProcessorState->SpecialRegisters.MsrLStar);
    __writemsr(X86_MSR_CSTAR, ProcessorState->SpecialRegisters.MsrCStar);
    __writemsr(X86_MSR_SFMASK, ProcessorState->SpecialRegisters.MsrSyscallMask);

}
416
/* Captures the special (non-context-frame) processor state into
   ProcessorState->SpecialRegisters: control registers, debug registers,
   descriptor tables and the syscall/GS MSRs. Counterpart of
   KiRestoreProcessorControlState. */
VOID
NTAPI
KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
{
    /* Save the CR registers */
    ProcessorState->SpecialRegisters.Cr0 = __readcr0();
    ProcessorState->SpecialRegisters.Cr2 = __readcr2();
    ProcessorState->SpecialRegisters.Cr3 = __readcr3();
    ProcessorState->SpecialRegisters.Cr4 = __readcr4();
    ProcessorState->SpecialRegisters.Cr8 = __readcr8();

    /* Save the DR registers (DR0-DR3 breakpoints, DR6 status, DR7 control) */
    ProcessorState->SpecialRegisters.KernelDr0 = __readdr(0);
    ProcessorState->SpecialRegisters.KernelDr1 = __readdr(1);
    ProcessorState->SpecialRegisters.KernelDr2 = __readdr(2);
    ProcessorState->SpecialRegisters.KernelDr3 = __readdr(3);
    ProcessorState->SpecialRegisters.KernelDr6 = __readdr(6);
    ProcessorState->SpecialRegisters.KernelDr7 = __readdr(7);

    /* Save GDT, IDT, LDT and TSS (the latter two via their selectors) */
    __sgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
    __sldt(&ProcessorState->SpecialRegisters.Ldtr);
    __str(&ProcessorState->SpecialRegisters.Tr);
    __sidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

    /* Not captured yet: MXCSR, DebugControl and the last
       branch/exception MSR shadows below. */
//    __stmxcsr(&ProcessorState->SpecialRegisters.MxCsr);
//    ProcessorState->SpecialRegisters.DebugControl =
//    ProcessorState->SpecialRegisters.LastBranchToRip =
//    ProcessorState->SpecialRegisters.LastBranchFromRip =
//    ProcessorState->SpecialRegisters.LastExceptionToRip =
//    ProcessorState->SpecialRegisters.LastExceptionFromRip =

    /* Save the GS base / syscall MSRs */
    ProcessorState->SpecialRegisters.MsrGsBase = __readmsr(X86_MSR_GSBASE);
    ProcessorState->SpecialRegisters.MsrGsSwap = __readmsr(X86_MSR_KERNEL_GSBASE);
    ProcessorState->SpecialRegisters.MsrStar = __readmsr(X86_MSR_STAR);
    ProcessorState->SpecialRegisters.MsrLStar = __readmsr(X86_MSR_LSTAR);
    ProcessorState->SpecialRegisters.MsrCStar = __readmsr(X86_MSR_CSTAR);
    ProcessorState->SpecialRegisters.MsrSyscallMask = __readmsr(X86_MSR_SFMASK);
}
457
458 VOID
459 NTAPI
460 KeFlushEntireTb(IN BOOLEAN Invalid,
461 IN BOOLEAN AllProcessors)
462 {
463 KIRQL OldIrql;
464
465 // FIXME: halfplemented
466 /* Raise the IRQL for the TB Flush */
467 OldIrql = KeRaiseIrqlToSynchLevel();
468
469 /* Flush the TB for the Current CPU, and update the flush stamp */
470 KeFlushCurrentTb();
471
472 /* Update the flush stamp and return to original IRQL */
473 InterlockedExchangeAdd(&KiTbFlushTimeStamp, 1);
474 KeLowerIrql(OldIrql);
475
476 }
477
478 KAFFINITY
479 NTAPI
480 KeQueryActiveProcessors(VOID)
481 {
482 PAGED_CODE();
483
484 /* Simply return the number of active processors */
485 return KeActiveProcessors;
486 }
487
488 NTSTATUS
489 NTAPI
490 KxSaveFloatingPointState(OUT PKFLOATING_SAVE FloatingState)
491 {
492 UNREFERENCED_PARAMETER(FloatingState);
493 return STATUS_SUCCESS;
494 }
495
496 NTSTATUS
497 NTAPI
498 KxRestoreFloatingPointState(IN PKFLOATING_SAVE FloatingState)
499 {
500 UNREFERENCED_PARAMETER(FloatingState);
501 return STATUS_SUCCESS;
502 }
503
504 BOOLEAN
505 NTAPI
506 KeInvalidateAllCaches(VOID)
507 {
508 /* Invalidate all caches */
509 __wbinvd();
510 return TRUE;
511 }
512
513 /*
514 * @implemented
515 */
516 ULONG
517 NTAPI
518 KeGetRecommendedSharedDataAlignment(VOID)
519 {
520 /* Return the global variable */
521 return KeLargestCacheLine;
522 }
523
/*
 * @implemented
 */
/* Saves the full processor state (register context frame plus the special
   control/debug/MSR state) into the caller-supplied KPROCESSOR_STATE. */
VOID
__cdecl
KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
{
    /* Capture the caller's register context first */
    RtlCaptureContext(&State->ContextFrame);

    /* Then capture the control state (CRs, DRs, GDT/IDT, MSRs) */
    KiSaveProcessorControlState(State);
}
537
538 /*
539 * @implemented
540 */
541 VOID
542 NTAPI
543 KeSetDmaIoCoherency(IN ULONG Coherency)
544 {
545 /* Save the coherency globally */
546 KiDmaIoCoherency = Coherency;
547 }