[STORPORT] Fix x64 build
[reactos.git] / ntoskrnl / ke / amd64 / cpu.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/ke/amd64/cpu.c
5 * PURPOSE: Routines for CPU-level support
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 * Timo Kreuzer (timo.kreuzer@reactos.org)
8 */
9
10 /* INCLUDES *****************************************************************/
11
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15
/* FIXME: Local EFLAGS defines not used anywhere else */
#define EFLAGS_IOPL 0x3000
#define EFLAGS_NF 0x4000
#define EFLAGS_RF 0x10000
#define EFLAGS_ID 0x200000

/* GLOBALS *******************************************************************/

/* The Boot TSS */
KTSS64 KiBootTss;

/* CPU Features and Flags */
ULONG KeI386CpuType;            /* CPU family (filled in elsewhere) */
ULONG KeI386CpuStep;            /* CPU model/stepping (filled in elsewhere) */
ULONG KeI386MachineType;
ULONG KeI386NpxPresent = 1;     /* FPU is always present on amd64 */
ULONG KeLargestCacheLine = 0x40; /* default cache-line size: 64 bytes */
ULONG KiDmaIoCoherency = 0;     /* set via KeSetDmaIoCoherency() below */
BOOLEAN KiSMTProcessorsPresent; /* set by KiGetFeatureBits() when SMT is detected */

/* Freeze data */
KIRQL KiOldIrql;
ULONG KiFreezeFlag;

/* Flush data: incremented by KeFlushEntireTb() on every global TB flush */
volatile LONG KiTbFlushTimeStamp;

/* CPU Signatures: 12-byte vendor strings returned by CPUID leaf 0 */
static const CHAR CmpIntelID[] = "GenuineIntel";
static const CHAR CmpAmdID[] = "AuthenticAMD";
static const CHAR CmpCyrixID[] = "CyrixInstead";
static const CHAR CmpTransmetaID[] = "GenuineTMx86";
static const CHAR CmpCentaurID[] = "CentaurHauls";
static const CHAR CmpRiseID[] = "RiseRiseRise";
50
51 /* FUNCTIONS *****************************************************************/
52
53 VOID
54 NTAPI
55 KiSetProcessorType(VOID)
56 {
57 ULONG64 EFlags;
58 CPU_INFO CpuInfo;
59 ULONG Stepping, Type;
60
61 /* Start by assuming no CPUID data */
62 KeGetCurrentPrcb()->CpuID = 0;
63
64 /* Save EFlags */
65 EFlags = __readeflags();
66
67 /* Do CPUID 1 now */
68 KiCpuId(&CpuInfo, 1);
69
70 /*
71 * Get the Stepping and Type. The stepping contains both the
72 * Model and the Step, while the Type contains the returned Type.
73 * We ignore the family.
74 *
75 * For the stepping, we convert this: zzzzzzxy into this: x0y
76 */
77 Stepping = CpuInfo.Eax & 0xF0;
78 Stepping <<= 4;
79 Stepping += (CpuInfo.Eax & 0xFF);
80 Stepping &= 0xF0F;
81 Type = CpuInfo.Eax & 0xF00;
82 Type >>= 8;
83
84 /* Save them in the PRCB */
85 KeGetCurrentPrcb()->CpuID = TRUE;
86 KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
87 KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;
88
89 /* Restore EFLAGS */
90 __writeeflags(EFlags);
91 }
92
93 ULONG
94 NTAPI
95 KiGetCpuVendor(VOID)
96 {
97 PKPRCB Prcb = KeGetCurrentPrcb();
98 CPU_INFO CpuInfo;
99
100 /* Get the Vendor ID and null-terminate it */
101 KiCpuId(&CpuInfo, 0);
102
103 /* Copy it to the PRCB and null-terminate it */
104 *(ULONG*)&Prcb->VendorString[0] = CpuInfo.Ebx;
105 *(ULONG*)&Prcb->VendorString[4] = CpuInfo.Edx;
106 *(ULONG*)&Prcb->VendorString[8] = CpuInfo.Ecx;
107 Prcb->VendorString[12] = 0;
108
109 /* Now check the CPU Type */
110 if (!strcmp((PCHAR)Prcb->VendorString, CmpIntelID))
111 {
112 return CPU_INTEL;
113 }
114 else if (!strcmp((PCHAR)Prcb->VendorString, CmpAmdID))
115 {
116 return CPU_AMD;
117 }
118 else if (!strcmp((PCHAR)Prcb->VendorString, CmpCentaurID))
119 {
120 DPRINT1("VIA CPUs not fully supported\n");
121 return CPU_VIA;
122 }
123 else if (!strcmp((PCHAR)Prcb->VendorString, CmpRiseID))
124 {
125 DPRINT1("Rise CPUs not fully supported\n");
126 return 0;
127 }
128
129 /* Invalid CPU */
130 return CPU_UNKNOWN;
131 }
132
133 ULONG
134 NTAPI
135 KiGetFeatureBits(VOID)
136 {
137 PKPRCB Prcb = KeGetCurrentPrcb();
138 ULONG Vendor;
139 ULONG FeatureBits = KF_WORKING_PTE;
140 CPU_INFO CpuInfo;
141
142 /* Get the Vendor ID */
143 Vendor = KiGetCpuVendor();
144
145 /* Make sure we got a valid vendor ID at least. */
146 if (!Vendor) return FeatureBits;
147
148 /* Get the CPUID Info. */
149 KiCpuId(&CpuInfo, 1);
150
151 /* Set the initial APIC ID */
152 Prcb->InitialApicId = (UCHAR)(CpuInfo.Ebx >> 24);
153
154 /* Convert all CPUID Feature bits into our format */
155 if (CpuInfo.Edx & 0x00000002) FeatureBits |= KF_V86_VIS | KF_CR4;
156 if (CpuInfo.Edx & 0x00000008) FeatureBits |= KF_LARGE_PAGE | KF_CR4;
157 if (CpuInfo.Edx & 0x00000010) FeatureBits |= KF_RDTSC;
158 if (CpuInfo.Edx & 0x00000100) FeatureBits |= KF_CMPXCHG8B;
159 if (CpuInfo.Edx & 0x00000800) FeatureBits |= KF_FAST_SYSCALL;
160 if (CpuInfo.Edx & 0x00001000) FeatureBits |= KF_MTRR;
161 if (CpuInfo.Edx & 0x00002000) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;
162 if (CpuInfo.Edx & 0x00008000) FeatureBits |= KF_CMOV;
163 if (CpuInfo.Edx & 0x00010000) FeatureBits |= KF_PAT;
164 if (CpuInfo.Edx & 0x00200000) FeatureBits |= KF_DTS;
165 if (CpuInfo.Edx & 0x00800000) FeatureBits |= KF_MMX;
166 if (CpuInfo.Edx & 0x01000000) FeatureBits |= KF_FXSR;
167 if (CpuInfo.Edx & 0x02000000) FeatureBits |= KF_XMMI;
168 if (CpuInfo.Edx & 0x04000000) FeatureBits |= KF_XMMI64;
169
170 if (CpuInfo.Ecx & 0x00000001) FeatureBits |= KF_SSE3;
171 //if (CpuInfo.Ecx & 0x00000008) FeatureBits |= KF_MONITOR;
172 //if (CpuInfo.Ecx & 0x00000200) FeatureBits |= KF_SSE3SUP;
173 if (CpuInfo.Ecx & 0x00002000) FeatureBits |= KF_CMPXCHG16B;
174 //if (CpuInfo.Ecx & 0x00080000) FeatureBits |= KF_SSE41;
175 //if (CpuInfo.Ecx & 0x00800000) FeatureBits |= KF_POPCNT;
176 if (CpuInfo.Ecx & 0x04000000) FeatureBits |= KF_XSTATE;
177
178 /* Check if the CPU has hyper-threading */
179 if (CpuInfo.Ecx & 0x10000000)
180 {
181 /* Set the number of logical CPUs */
182 Prcb->LogicalProcessorsPerPhysicalProcessor = (UCHAR)(CpuInfo.Ebx >> 16);
183 if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
184 {
185 /* We're on dual-core */
186 KiSMTProcessorsPresent = TRUE;
187 }
188 }
189 else
190 {
191 /* We only have a single CPU */
192 Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
193 }
194
195 /* Check extended cpuid features */
196 KiCpuId(&CpuInfo, 0x80000000);
197 if ((CpuInfo.Eax & 0xffffff00) == 0x80000000)
198 {
199 /* Check if CPUID 0x80000001 is supported */
200 if (CpuInfo.Eax >= 0x80000001)
201 {
202 /* Check which extended features are available. */
203 KiCpuId(&CpuInfo, 0x80000001);
204
205 /* Check if NX-bit is supported */
206 if (CpuInfo.Edx & 0x00100000) FeatureBits |= KF_NX_BIT;
207
208 /* Now handle each features for each CPU Vendor */
209 switch (Vendor)
210 {
211 case CPU_AMD:
212 if (CpuInfo.Edx & 0x80000000) FeatureBits |= KF_3DNOW;
213 break;
214 }
215 }
216 }
217
218 /* Return the Feature Bits */
219 return FeatureBits;
220 }
221
222 VOID
223 NTAPI
224 KiGetCacheInformation(VOID)
225 {
226 PKIPCR Pcr = (PKIPCR)KeGetPcr();
227 ULONG Vendor;
228 ULONG CacheRequests = 0, i;
229 ULONG CurrentRegister;
230 UCHAR RegisterByte;
231 BOOLEAN FirstPass = TRUE;
232 CPU_INFO CpuInfo;
233
234 /* Set default L2 size */
235 Pcr->SecondLevelCacheSize = 0;
236
237 /* Get the Vendor ID and make sure we support CPUID */
238 Vendor = KiGetCpuVendor();
239 if (!Vendor) return;
240
241 /* Check the Vendor ID */
242 switch (Vendor)
243 {
244 /* Handle Intel case */
245 case CPU_INTEL:
246
247 /*Check if we support CPUID 2 */
248 KiCpuId(&CpuInfo, 0);
249 if (CpuInfo.Eax >= 2)
250 {
251 /* We need to loop for the number of times CPUID will tell us to */
252 do
253 {
254 /* Do the CPUID call */
255 KiCpuId(&CpuInfo, 2);
256
257 /* Check if it was the first call */
258 if (FirstPass)
259 {
260 /*
261 * The number of times to loop is the first byte. Read
262 * it and then destroy it so we don't get confused.
263 */
264 CacheRequests = CpuInfo.Eax & 0xFF;
265 CpuInfo.Eax &= 0xFFFFFF00;
266
267 /* Don't go over this again */
268 FirstPass = FALSE;
269 }
270
271 /* Loop all 4 registers */
272 for (i = 0; i < 4; i++)
273 {
274 /* Get the current register */
275 CurrentRegister = CpuInfo.AsUINT32[i];
276
277 /*
278 * If the upper bit is set, then this register should
279 * be skipped.
280 */
281 if (CurrentRegister & 0x80000000) continue;
282
283 /* Keep looping for every byte inside this register */
284 while (CurrentRegister)
285 {
286 /* Read a byte, skip a byte. */
287 RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
288 CurrentRegister >>= 8;
289 if (!RegisterByte) continue;
290
291 /*
292 * Valid values are from 0x40 (0 bytes) to 0x49
293 * (32MB), or from 0x80 to 0x89 (same size but
294 * 8-way associative.
295 */
296 if (((RegisterByte > 0x40) &&
297 (RegisterByte <= 0x49)) ||
298 ((RegisterByte > 0x80) &&
299 (RegisterByte <= 0x89)))
300 {
301 /* Mask out only the first nibble */
302 RegisterByte &= 0x0F;
303
304 /* Set the L2 Cache Size */
305 Pcr->SecondLevelCacheSize = 0x10000 <<
306 RegisterByte;
307 }
308 }
309 }
310 } while (--CacheRequests);
311 }
312 break;
313
314 case CPU_AMD:
315
316 /* Check if we support CPUID 0x80000006 */
317 KiCpuId(&CpuInfo, 0x80000000);
318 if (CpuInfo.Eax >= 6)
319 {
320 /* Get 2nd level cache and tlb size */
321 KiCpuId(&CpuInfo, 0x80000006);
322
323 /* Set the L2 Cache Size */
324 Pcr->SecondLevelCacheSize = (CpuInfo.Ecx & 0xFFFF0000) >> 6;
325 }
326 break;
327 }
328 }
329
330 VOID
331 NTAPI
332 KeFlushCurrentTb(VOID)
333 {
334 /* Flush the TLB by resetting CR3 */
335 __writecr3(__readcr3());
336 }
337
/*
 * Restores the processor control state previously captured by
 * KiSaveProcessorControlState: control registers, kernel debug
 * registers, GDT/IDT and the syscall/GS-base MSRs.
 *
 * NOTE: the restore order is deliberate (CR3 before CR4, descriptor
 * tables before MSRs); do not reorder without care. LDTR/TR and the
 * MXCSR/last-branch fields are not restored yet (see the commented
 * lines below).
 */
VOID
NTAPI
KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
{
    /* Restore the CR registers (CR2 is the fault address — not restored) */
    __writecr0(ProcessorState->SpecialRegisters.Cr0);
//    __writecr2(ProcessorState->SpecialRegisters.Cr2);
    __writecr3(ProcessorState->SpecialRegisters.Cr3);
    __writecr4(ProcessorState->SpecialRegisters.Cr4);
    __writecr8(ProcessorState->SpecialRegisters.Cr8);

    /* Restore the DR registers (DR4/DR5 are aliases and skipped) */
    __writedr(0, ProcessorState->SpecialRegisters.KernelDr0);
    __writedr(1, ProcessorState->SpecialRegisters.KernelDr1);
    __writedr(2, ProcessorState->SpecialRegisters.KernelDr2);
    __writedr(3, ProcessorState->SpecialRegisters.KernelDr3);
    __writedr(6, ProcessorState->SpecialRegisters.KernelDr6);
    __writedr(7, ProcessorState->SpecialRegisters.KernelDr7);

    /* Restore GDT, IDT, LDT and TSS */
    __lgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
//    __lldt(&ProcessorState->SpecialRegisters.Ldtr);
//    __ltr(&ProcessorState->SpecialRegisters.Tr);
    __lidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

//    __ldmxcsr(&ProcessorState->SpecialRegisters.MxCsr); // FIXME
//    ProcessorState->SpecialRegisters.DebugControl
//    ProcessorState->SpecialRegisters.LastBranchToRip
//    ProcessorState->SpecialRegisters.LastBranchFromRip
//    ProcessorState->SpecialRegisters.LastExceptionToRip
//    ProcessorState->SpecialRegisters.LastExceptionFromRip

    /* Restore MSRs (GS bases and the SYSCALL entry configuration) */
    __writemsr(X86_MSR_GSBASE, ProcessorState->SpecialRegisters.MsrGsBase);
    __writemsr(X86_MSR_KERNEL_GSBASE, ProcessorState->SpecialRegisters.MsrGsSwap);
    __writemsr(X86_MSR_STAR, ProcessorState->SpecialRegisters.MsrStar);
    __writemsr(X86_MSR_LSTAR, ProcessorState->SpecialRegisters.MsrLStar);
    __writemsr(X86_MSR_CSTAR, ProcessorState->SpecialRegisters.MsrCStar);
    __writemsr(X86_MSR_SFMASK, ProcessorState->SpecialRegisters.MsrSyscallMask);

}
379
/*
 * Captures the current processor control state into ProcessorState:
 * control registers, kernel debug registers, descriptor-table
 * registers and the syscall/GS-base MSRs. The counterpart of
 * KiRestoreProcessorControlState. MXCSR and the last-branch/exception
 * MSRs are not captured yet (see the commented lines below).
 */
VOID
NTAPI
KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
{
    /* Save the CR registers */
    ProcessorState->SpecialRegisters.Cr0 = __readcr0();
    ProcessorState->SpecialRegisters.Cr2 = __readcr2();
    ProcessorState->SpecialRegisters.Cr3 = __readcr3();
    ProcessorState->SpecialRegisters.Cr4 = __readcr4();
    ProcessorState->SpecialRegisters.Cr8 = __readcr8();

    /* Save the DR registers (DR4/DR5 are aliases and skipped) */
    ProcessorState->SpecialRegisters.KernelDr0 = __readdr(0);
    ProcessorState->SpecialRegisters.KernelDr1 = __readdr(1);
    ProcessorState->SpecialRegisters.KernelDr2 = __readdr(2);
    ProcessorState->SpecialRegisters.KernelDr3 = __readdr(3);
    ProcessorState->SpecialRegisters.KernelDr6 = __readdr(6);
    ProcessorState->SpecialRegisters.KernelDr7 = __readdr(7);

    /* Save GDT, IDT, LDT and TSS */
    __sgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
    __sldt(&ProcessorState->SpecialRegisters.Ldtr);
    __str(&ProcessorState->SpecialRegisters.Tr);
    __sidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

//    __stmxcsr(&ProcessorState->SpecialRegisters.MxCsr);
//    ProcessorState->SpecialRegisters.DebugControl =
//    ProcessorState->SpecialRegisters.LastBranchToRip =
//    ProcessorState->SpecialRegisters.LastBranchFromRip =
//    ProcessorState->SpecialRegisters.LastExceptionToRip =
//    ProcessorState->SpecialRegisters.LastExceptionFromRip =

    /* Save MSRs (GS bases and the SYSCALL entry configuration) */
    ProcessorState->SpecialRegisters.MsrGsBase = __readmsr(X86_MSR_GSBASE);
    ProcessorState->SpecialRegisters.MsrGsSwap = __readmsr(X86_MSR_KERNEL_GSBASE);
    ProcessorState->SpecialRegisters.MsrStar = __readmsr(X86_MSR_STAR);
    ProcessorState->SpecialRegisters.MsrLStar = __readmsr(X86_MSR_LSTAR);
    ProcessorState->SpecialRegisters.MsrCStar = __readmsr(X86_MSR_CSTAR);
    ProcessorState->SpecialRegisters.MsrSyscallMask = __readmsr(X86_MSR_SFMASK);
}
420
421 VOID
422 NTAPI
423 KeFlushEntireTb(IN BOOLEAN Invalid,
424 IN BOOLEAN AllProcessors)
425 {
426 KIRQL OldIrql;
427
428 // FIXME: halfplemented
429 /* Raise the IRQL for the TB Flush */
430 OldIrql = KeRaiseIrqlToSynchLevel();
431
432 /* Flush the TB for the Current CPU, and update the flush stamp */
433 KeFlushCurrentTb();
434
435 /* Update the flush stamp and return to original IRQL */
436 InterlockedExchangeAdd(&KiTbFlushTimeStamp, 1);
437 KeLowerIrql(OldIrql);
438
439 }
440
441 KAFFINITY
442 NTAPI
443 KeQueryActiveProcessors(VOID)
444 {
445 PAGED_CODE();
446
447 /* Simply return the number of active processors */
448 return KeActiveProcessors;
449 }
450
451 NTSTATUS
452 NTAPI
453 KxSaveFloatingPointState(OUT PKFLOATING_SAVE FloatingState)
454 {
455 UNREFERENCED_PARAMETER(FloatingState);
456 return STATUS_SUCCESS;
457 }
458
459 NTSTATUS
460 NTAPI
461 KxRestoreFloatingPointState(IN PKFLOATING_SAVE FloatingState)
462 {
463 UNREFERENCED_PARAMETER(FloatingState);
464 return STATUS_SUCCESS;
465 }
466
467 BOOLEAN
468 NTAPI
469 KeInvalidateAllCaches(VOID)
470 {
471 /* Invalidate all caches */
472 __wbinvd();
473 return TRUE;
474 }
475
476 /*
477 * @implemented
478 */
479 ULONG
480 NTAPI
481 KeGetRecommendedSharedDataAlignment(VOID)
482 {
483 /* Return the global variable */
484 return KeLargestCacheLine;
485 }
486
/*
 * @implemented
 *
 * Captures the full processor state (register context plus control
 * state) into State for hibernation.
 */
VOID
__cdecl
KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
{
    /* Capture the register context of this exact call site; the call
       must stay first so the captured context reflects our caller */
    RtlCaptureContext(&State->ContextFrame);

    /* Capture the control state (CRx, DRx, GDT/IDT, MSRs) */
    KiSaveProcessorControlState(State);
}
500
501 /*
502 * @implemented
503 */
504 VOID
505 NTAPI
506 KeSetDmaIoCoherency(IN ULONG Coherency)
507 {
508 /* Save the coherency globally */
509 KiDmaIoCoherency = Coherency;
510 }