[NTOS:PS] On x64 don't fail in NtSetInformationProcess with ProcessUserModeIOPL infor...
[reactos.git] / ntoskrnl / ke / amd64 / cpu.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: GPL - See COPYING in the top level directory
4 * FILE: ntoskrnl/ke/amd64/cpu.c
5 * PURPOSE: Routines for CPU-level support
6 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
7 * Timo Kreuzer (timo.kreuzer@reactos.org)
8 */
9
10 /* INCLUDES *****************************************************************/
11
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15
16 /* FIXME: Local EFLAGS defines not used anywhere else */
17 #define EFLAGS_IOPL 0x3000
18 #define EFLAGS_NF 0x4000
19 #define EFLAGS_RF 0x10000
20 #define EFLAGS_ID 0x200000
21
22 /* GLOBALS *******************************************************************/
23
24 /* The Boot TSS */
25 KTSS64 KiBootTss;
26
27 /* CPU Features and Flags */
28 ULONG KeI386CpuType;
29 ULONG KeI386CpuStep;
30 ULONG KeI386MachineType;
31 ULONG KeI386NpxPresent = 1;
32 ULONG KeLargestCacheLine = 0x40;
33 ULONG KiDmaIoCoherency = 0;
34 BOOLEAN KiSMTProcessorsPresent;
35
36 /* Freeze data */
37 KIRQL KiOldIrql;
38 ULONG KiFreezeFlag;
39
40 /* Flush data */
41 volatile LONG KiTbFlushTimeStamp;
42
43 /* CPU Signatures */
44 static const CHAR CmpIntelID[] = "GenuineIntel";
45 static const CHAR CmpAmdID[] = "AuthenticAMD";
46 static const CHAR CmpCyrixID[] = "CyrixInstead";
47 static const CHAR CmpTransmetaID[] = "GenuineTMx86";
48 static const CHAR CmpCentaurID[] = "CentaurHauls";
49 static const CHAR CmpRiseID[] = "RiseRiseRise";
50
51 /* FUNCTIONS *****************************************************************/
52
53 VOID
54 NTAPI
55 KiSetProcessorType(VOID)
56 {
57 CPU_INFO CpuInfo;
58 ULONG Stepping, Type;
59
60 /* Do CPUID 1 now */
61 KiCpuId(&CpuInfo, 1);
62
63 /*
64 * Get the Stepping and Type. The stepping contains both the
65 * Model and the Step, while the Type contains the returned Type.
66 * We ignore the family.
67 *
68 * For the stepping, we convert this: zzzzzzxy into this: x0y
69 */
70 Stepping = CpuInfo.Eax & 0xF0;
71 Stepping <<= 4;
72 Stepping += (CpuInfo.Eax & 0xFF);
73 Stepping &= 0xF0F;
74 Type = CpuInfo.Eax & 0xF00;
75 Type >>= 8;
76
77 /* Save them in the PRCB */
78 KeGetCurrentPrcb()->CpuID = TRUE;
79 KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
80 KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;
81 }
82
83 ULONG
84 NTAPI
85 KiGetCpuVendor(VOID)
86 {
87 PKPRCB Prcb = KeGetCurrentPrcb();
88 CPU_INFO CpuInfo;
89
90 /* Get the Vendor ID and null-terminate it */
91 KiCpuId(&CpuInfo, 0);
92
93 /* Copy it to the PRCB and null-terminate it */
94 *(ULONG*)&Prcb->VendorString[0] = CpuInfo.Ebx;
95 *(ULONG*)&Prcb->VendorString[4] = CpuInfo.Edx;
96 *(ULONG*)&Prcb->VendorString[8] = CpuInfo.Ecx;
97 Prcb->VendorString[12] = 0;
98
99 /* Now check the CPU Type */
100 if (!strcmp((PCHAR)Prcb->VendorString, CmpIntelID))
101 {
102 return CPU_INTEL;
103 }
104 else if (!strcmp((PCHAR)Prcb->VendorString, CmpAmdID))
105 {
106 return CPU_AMD;
107 }
108 else if (!strcmp((PCHAR)Prcb->VendorString, CmpCentaurID))
109 {
110 DPRINT1("VIA CPUs not fully supported\n");
111 return CPU_VIA;
112 }
113 else if (!strcmp((PCHAR)Prcb->VendorString, CmpRiseID))
114 {
115 DPRINT1("Rise CPUs not fully supported\n");
116 return 0;
117 }
118
119 /* Invalid CPU */
120 return CPU_UNKNOWN;
121 }
122
/*
 * Builds the kernel's KF_* feature bit mask for the current processor
 * from CPUID leaves 1 and 0x80000001. As side effects, records the
 * initial APIC ID and the logical-processors-per-package count in the
 * current PRCB. Returns the accumulated KF_* bits (at minimum
 * KF_WORKING_PTE, which is assumed unconditionally).
 */
ULONG
NTAPI
KiGetFeatureBits(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    ULONG Vendor;
    ULONG FeatureBits = KF_WORKING_PTE;
    CPU_INFO CpuInfo;

    /* Get the Vendor ID */
    Vendor = KiGetCpuVendor();

    /* Make sure we got a valid vendor ID at least. */
    if (!Vendor) return FeatureBits;

    /* Get the CPUID Info (leaf 1: signature and feature flags). */
    KiCpuId(&CpuInfo, 1);

    /* Set the initial APIC ID (EBX bits 31:24) */
    Prcb->InitialApicId = (UCHAR)(CpuInfo.Ebx >> 24);

    /* Convert all CPUID Feature bits into our format */
    if (CpuInfo.Edx & X86_FEATURE_VME) FeatureBits |= KF_V86_VIS | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_PSE) FeatureBits |= KF_LARGE_PAGE | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_TSC) FeatureBits |= KF_RDTSC;
    if (CpuInfo.Edx & X86_FEATURE_CX8) FeatureBits |= KF_CMPXCHG8B;
    if (CpuInfo.Edx & X86_FEATURE_SYSCALL) FeatureBits |= KF_FAST_SYSCALL;
    if (CpuInfo.Edx & X86_FEATURE_MTTR) FeatureBits |= KF_MTRR;
    if (CpuInfo.Edx & X86_FEATURE_PGE) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_CMOV) FeatureBits |= KF_CMOV;
    if (CpuInfo.Edx & X86_FEATURE_PAT) FeatureBits |= KF_PAT;
    if (CpuInfo.Edx & X86_FEATURE_DS) FeatureBits |= KF_DTS;
    if (CpuInfo.Edx & X86_FEATURE_MMX) FeatureBits |= KF_MMX;
    if (CpuInfo.Edx & X86_FEATURE_FXSR) FeatureBits |= KF_FXSR;
    if (CpuInfo.Edx & X86_FEATURE_SSE) FeatureBits |= KF_XMMI;
    if (CpuInfo.Edx & X86_FEATURE_SSE2) FeatureBits |= KF_XMMI64;

    if (CpuInfo.Ecx & X86_FEATURE_SSE3) FeatureBits |= KF_SSE3;
    //if (CpuInfo.Ecx & X86_FEATURE_MONITOR) FeatureBits |= KF_MONITOR;
    //if (CpuInfo.Ecx & X86_FEATURE_SSSE3) FeatureBits |= KF_SSE3SUP;
    if (CpuInfo.Ecx & X86_FEATURE_CX16) FeatureBits |= KF_CMPXCHG16B;
    //if (CpuInfo.Ecx & X86_FEATURE_SSE41) FeatureBits |= KF_SSE41;
    //if (CpuInfo.Ecx & X86_FEATURE_POPCNT) FeatureBits |= KF_POPCNT;
    if (CpuInfo.Ecx & X86_FEATURE_XSAVE) FeatureBits |= KF_XSTATE;

    /* Check if the CPU has hyper-threading */
    if (CpuInfo.Edx & X86_FEATURE_HT)
    {
        /* Set the number of logical CPUs (EBX bits 23:16) */
        Prcb->LogicalProcessorsPerPhysicalProcessor = (UCHAR)(CpuInfo.Ebx >> 16);
        if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
        {
            /* We're on dual-core */
            KiSMTProcessorsPresent = TRUE;
        }
    }
    else
    {
        /* We only have a single CPU */
        Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
    }

    /* Check extended cpuid features: EAX of leaf 0x80000000 must be of the
       form 0x8000xxxx for extended leaves to be valid at all */
    KiCpuId(&CpuInfo, 0x80000000);
    if ((CpuInfo.Eax & 0xffffff00) == 0x80000000)
    {
        /* Check if CPUID 0x80000001 is supported */
        if (CpuInfo.Eax >= 0x80000001)
        {
            /* Check which extended features are available. */
            KiCpuId(&CpuInfo, 0x80000001);

            /* Check if NX-bit is supported */
            if (CpuInfo.Edx & X86_FEATURE_NX) FeatureBits |= KF_NX_BIT;

            /* Now handle each features for each CPU Vendor */
            switch (Vendor)
            {
                case CPU_AMD:
                    /* EDX bit 31 of leaf 0x80000001 is AMD 3DNow! */
                    if (CpuInfo.Edx & 0x80000000) FeatureBits |= KF_3DNOW;
                    break;
            }
        }
    }

    /* Return the Feature Bits */
    return FeatureBits;
}
211
212 VOID
213 NTAPI
214 KiGetCacheInformation(VOID)
215 {
216 PKIPCR Pcr = (PKIPCR)KeGetPcr();
217 ULONG Vendor;
218 ULONG CacheRequests = 0, i;
219 ULONG CurrentRegister;
220 UCHAR RegisterByte;
221 BOOLEAN FirstPass = TRUE;
222 CPU_INFO CpuInfo;
223
224 /* Set default L2 size */
225 Pcr->SecondLevelCacheSize = 0;
226
227 /* Get the Vendor ID and make sure we support CPUID */
228 Vendor = KiGetCpuVendor();
229 if (!Vendor) return;
230
231 /* Check the Vendor ID */
232 switch (Vendor)
233 {
234 /* Handle Intel case */
235 case CPU_INTEL:
236
237 /*Check if we support CPUID 2 */
238 KiCpuId(&CpuInfo, 0);
239 if (CpuInfo.Eax >= 2)
240 {
241 /* We need to loop for the number of times CPUID will tell us to */
242 do
243 {
244 /* Do the CPUID call */
245 KiCpuId(&CpuInfo, 2);
246
247 /* Check if it was the first call */
248 if (FirstPass)
249 {
250 /*
251 * The number of times to loop is the first byte. Read
252 * it and then destroy it so we don't get confused.
253 */
254 CacheRequests = CpuInfo.Eax & 0xFF;
255 CpuInfo.Eax &= 0xFFFFFF00;
256
257 /* Don't go over this again */
258 FirstPass = FALSE;
259 }
260
261 /* Loop all 4 registers */
262 for (i = 0; i < 4; i++)
263 {
264 /* Get the current register */
265 CurrentRegister = CpuInfo.AsUINT32[i];
266
267 /*
268 * If the upper bit is set, then this register should
269 * be skipped.
270 */
271 if (CurrentRegister & 0x80000000) continue;
272
273 /* Keep looping for every byte inside this register */
274 while (CurrentRegister)
275 {
276 /* Read a byte, skip a byte. */
277 RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
278 CurrentRegister >>= 8;
279 if (!RegisterByte) continue;
280
281 /*
282 * Valid values are from 0x40 (0 bytes) to 0x49
283 * (32MB), or from 0x80 to 0x89 (same size but
284 * 8-way associative.
285 */
286 if (((RegisterByte > 0x40) &&
287 (RegisterByte <= 0x49)) ||
288 ((RegisterByte > 0x80) &&
289 (RegisterByte <= 0x89)))
290 {
291 /* Mask out only the first nibble */
292 RegisterByte &= 0x0F;
293
294 /* Set the L2 Cache Size */
295 Pcr->SecondLevelCacheSize = 0x10000 <<
296 RegisterByte;
297 }
298 }
299 }
300 } while (--CacheRequests);
301 }
302 break;
303
304 case CPU_AMD:
305
306 /* Check if we support CPUID 0x80000006 */
307 KiCpuId(&CpuInfo, 0x80000000);
308 if (CpuInfo.Eax >= 6)
309 {
310 /* Get 2nd level cache and tlb size */
311 KiCpuId(&CpuInfo, 0x80000006);
312
313 /* Set the L2 Cache Size */
314 Pcr->SecondLevelCacheSize = (CpuInfo.Ecx & 0xFFFF0000) >> 6;
315 }
316 break;
317 }
318 }
319
320 VOID
321 NTAPI
322 KeFlushCurrentTb(VOID)
323 {
324 /* Flush the TLB by resetting CR3 */
325 __writecr3(__readcr3());
326 }
327
/*
 * Restores the special (non-context-frame) processor state captured by
 * KiSaveProcessorControlState: control registers, debug registers,
 * descriptor tables and the syscall/GS MSRs. Note that CR2, LDTR, TR
 * and MxCsr restoration is currently disabled (commented out below).
 */
VOID
NTAPI
KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
{
    /* Restore the CR registers */
    __writecr0(ProcessorState->SpecialRegisters.Cr0);
//    __writecr2(ProcessorState->SpecialRegisters.Cr2);
    __writecr3(ProcessorState->SpecialRegisters.Cr3);
    __writecr4(ProcessorState->SpecialRegisters.Cr4);
    __writecr8(ProcessorState->SpecialRegisters.Cr8);

    /* Restore the DR registers */
    __writedr(0, ProcessorState->SpecialRegisters.KernelDr0);
    __writedr(1, ProcessorState->SpecialRegisters.KernelDr1);
    __writedr(2, ProcessorState->SpecialRegisters.KernelDr2);
    __writedr(3, ProcessorState->SpecialRegisters.KernelDr3);
    __writedr(6, ProcessorState->SpecialRegisters.KernelDr6);
    __writedr(7, ProcessorState->SpecialRegisters.KernelDr7);

    /* Restore GDT, IDT, LDT and TSS */
    __lgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
//    __lldt(&ProcessorState->SpecialRegisters.Ldtr);
//    __ltr(&ProcessorState->SpecialRegisters.Tr);
    __lidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

//    __ldmxcsr(&ProcessorState->SpecialRegisters.MxCsr); // FIXME
//    ProcessorState->SpecialRegisters.DebugControl
//    ProcessorState->SpecialRegisters.LastBranchToRip
//    ProcessorState->SpecialRegisters.LastBranchFromRip
//    ProcessorState->SpecialRegisters.LastExceptionToRip
//    ProcessorState->SpecialRegisters.LastExceptionFromRip

    /* Restore MSRs (GS bases and the SYSCALL entry configuration) */
    __writemsr(X86_MSR_GSBASE, ProcessorState->SpecialRegisters.MsrGsBase);
    __writemsr(X86_MSR_KERNEL_GSBASE, ProcessorState->SpecialRegisters.MsrGsSwap);
    __writemsr(X86_MSR_STAR, ProcessorState->SpecialRegisters.MsrStar);
    __writemsr(X86_MSR_LSTAR, ProcessorState->SpecialRegisters.MsrLStar);
    __writemsr(X86_MSR_CSTAR, ProcessorState->SpecialRegisters.MsrCStar);
    __writemsr(X86_MSR_SFMASK, ProcessorState->SpecialRegisters.MsrSyscallMask);

}
369
/*
 * Captures the special (non-context-frame) processor state into the
 * given KPROCESSOR_STATE: control registers, debug registers,
 * descriptor table registers and the syscall/GS MSRs. MxCsr and the
 * debug-control/last-branch MSRs are not yet saved (see the commented
 * lines below). Counterpart of KiRestoreProcessorControlState.
 */
VOID
NTAPI
KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
{
    /* Save the CR registers */
    ProcessorState->SpecialRegisters.Cr0 = __readcr0();
    ProcessorState->SpecialRegisters.Cr2 = __readcr2();
    ProcessorState->SpecialRegisters.Cr3 = __readcr3();
    ProcessorState->SpecialRegisters.Cr4 = __readcr4();
    ProcessorState->SpecialRegisters.Cr8 = __readcr8();

    /* Save the DR registers */
    ProcessorState->SpecialRegisters.KernelDr0 = __readdr(0);
    ProcessorState->SpecialRegisters.KernelDr1 = __readdr(1);
    ProcessorState->SpecialRegisters.KernelDr2 = __readdr(2);
    ProcessorState->SpecialRegisters.KernelDr3 = __readdr(3);
    ProcessorState->SpecialRegisters.KernelDr6 = __readdr(6);
    ProcessorState->SpecialRegisters.KernelDr7 = __readdr(7);

    /* Save GDT, IDT, LDT and TSS */
    __sgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
    __sldt(&ProcessorState->SpecialRegisters.Ldtr);
    __str(&ProcessorState->SpecialRegisters.Tr);
    __sidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

//    __stmxcsr(&ProcessorState->SpecialRegisters.MxCsr);
//    ProcessorState->SpecialRegisters.DebugControl =
//    ProcessorState->SpecialRegisters.LastBranchToRip =
//    ProcessorState->SpecialRegisters.LastBranchFromRip =
//    ProcessorState->SpecialRegisters.LastExceptionToRip =
//    ProcessorState->SpecialRegisters.LastExceptionFromRip =

    /* Save MSRs (GS bases and the SYSCALL entry configuration) */
    ProcessorState->SpecialRegisters.MsrGsBase = __readmsr(X86_MSR_GSBASE);
    ProcessorState->SpecialRegisters.MsrGsSwap = __readmsr(X86_MSR_KERNEL_GSBASE);
    ProcessorState->SpecialRegisters.MsrStar = __readmsr(X86_MSR_STAR);
    ProcessorState->SpecialRegisters.MsrLStar = __readmsr(X86_MSR_LSTAR);
    ProcessorState->SpecialRegisters.MsrCStar = __readmsr(X86_MSR_CSTAR);
    ProcessorState->SpecialRegisters.MsrSyscallMask = __readmsr(X86_MSR_SFMASK);
}
410
411 VOID
412 NTAPI
413 KeFlushEntireTb(IN BOOLEAN Invalid,
414 IN BOOLEAN AllProcessors)
415 {
416 KIRQL OldIrql;
417
418 // FIXME: halfplemented
419 /* Raise the IRQL for the TB Flush */
420 OldIrql = KeRaiseIrqlToSynchLevel();
421
422 /* Flush the TB for the Current CPU, and update the flush stamp */
423 KeFlushCurrentTb();
424
425 /* Update the flush stamp and return to original IRQL */
426 InterlockedExchangeAdd(&KiTbFlushTimeStamp, 1);
427 KeLowerIrql(OldIrql);
428
429 }
430
431 KAFFINITY
432 NTAPI
433 KeQueryActiveProcessors(VOID)
434 {
435 PAGED_CODE();
436
437 /* Simply return the number of active processors */
438 return KeActiveProcessors;
439 }
440
441 NTSTATUS
442 NTAPI
443 KxSaveFloatingPointState(OUT PKFLOATING_SAVE FloatingState)
444 {
445 UNREFERENCED_PARAMETER(FloatingState);
446 return STATUS_SUCCESS;
447 }
448
449 NTSTATUS
450 NTAPI
451 KxRestoreFloatingPointState(IN PKFLOATING_SAVE FloatingState)
452 {
453 UNREFERENCED_PARAMETER(FloatingState);
454 return STATUS_SUCCESS;
455 }
456
457 BOOLEAN
458 NTAPI
459 KeInvalidateAllCaches(VOID)
460 {
461 /* Invalidate all caches */
462 __wbinvd();
463 return TRUE;
464 }
465
466 /*
467 * @implemented
468 */
469 ULONG
470 NTAPI
471 KeGetRecommendedSharedDataAlignment(VOID)
472 {
473 /* Return the global variable */
474 return KeLargestCacheLine;
475 }
476
477 /*
478 * @implemented
479 */
480 VOID
481 __cdecl
482 KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
483 {
484 /* Capture the context */
485 RtlCaptureContext(&State->ContextFrame);
486
487 /* Capture the control state */
488 KiSaveProcessorControlState(State);
489 }
490
491 /*
492 * @implemented
493 */
494 VOID
495 NTAPI
496 KeSetDmaIoCoherency(IN ULONG Coherency)
497 {
498 /* Save the coherency globally */
499 KiDmaIoCoherency = Coherency;
500 }