- Add the KiGetSecondLevelDCacheSize (for all available architectures) and MiGetPdeOffset macros.
- Remove the conflicting PDE_SIZE definitions (neither of which is used anywhere).
- Fix ValidKernelPde, PointerPte and PointerPde types and correct their use (mminit.c).
- Thanks to the work done in recent commits on the header branch, together with this one, the kernel now builds (but does not yet link) for ARM.
svn path=/branches/header-work/; revision=47258
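As a quick sketch of how the two new macros are meant to be used (the variable names below are invented for illustration; only the macros and MmSystemRangeStart come from the existing code):

    /* Illustration only: arch-neutral PDE indexing and PCR cache-size access */
    ULONG FirstKernelPde = MiGetPdeOffset(MmSystemRangeStart);
    ULONG L2DcacheSize   = KiGetSecondLevelDCacheSize();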
#define KeGetContextSwitches(Prcb) \
(Prcb->KeContextSwitches)
+//
+// Macro to get the second level cache size; the KIPCR field name differs between
+// CISC and RISC architectures, as the former has a unified I/D cache
+//
+#define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelCacheSize
+
#define KeGetExceptionFrame(Thread) \
(PKEXCEPTION_FRAME)((ULONG_PTR)KeGetTrapFrame(Thread) - \
sizeof(KEXCEPTION_FRAME))
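The macro above boils down to a single KIPCR field access, but that field is named differently across the ports (SecondLevelCacheSize on the CISC targets, SecondLevelDcacheSize on the RISC ones, as the hunks below show). A rough sketch of the #ifdef it spares callers from writing; the preprocessor guards here are an assumption for illustration:

    /* Illustration only: what a portable caller would otherwise need */
    ULONG CacheSize;
    #if defined(_M_IX86) || defined(_M_AMD64)
        CacheSize = ((PKIPCR)KeGetPcr())->SecondLevelCacheSize;   /* unified I/D cache */
    #else
        CacheSize = ((PKIPCR)KeGetPcr())->SecondLevelDcacheSize;  /* split caches */
    #endif
    /* With the new macro, the same statement works on every architecture */
    CacheSize = KiGetSecondLevelDCacheSize();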
#define ADDR_TO_PDE_OFFSET(v) ((((ULONG_PTR)(v)) / (512 * PAGE_SIZE)))
#define ADDR_TO_PTE_OFFSET(v) ((((ULONG_PTR)(v)) % (512 * PAGE_SIZE)) / PAGE_SIZE)
+#define MiGetPdeOffset ADDR_TO_PDE_OFFSET
+
#define VAtoPXI(va) ((((ULONG64)va) >> PXI_SHIFT) & 0x1FF)
#define VAtoPPI(va) ((((ULONG64)va) >> PPI_SHIFT) & 0x1FF)
#define VAtoPDI(va) ((((ULONG64)va) >> PDI_SHIFT) & 0x1FF)
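In the 512-entry layout above, each PDE covers 512 * 4 KB = 2 MB of virtual address space, so MiGetPdeOffset amounts to a right shift by 21. A small illustrative check (values invented):

    /* Illustration only */
    C_ASSERT(512 * PAGE_SIZE == (1 << 21));   /* one PDE maps 2 MB on this layout */
    /* MiGetPdeOffset(0x001FFFFF) == 0   -- still inside the first 2 MB region    */
    /* MiGetPdeOffset(0x00200000) == 1   -- first address of the next region      */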
#define KeGetContextSwitches(Prcb) \
CONTAINING_RECORD(Prcb, KIPCR, PrcbData)->ContextSwitches
+//
+// Macro to get the second level cache size; the KIPCR field name differs between
+// CISC and RISC architectures, as the former has a unified I/D cache
+//
+#define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelDcacheSize
+
//
// Returns the Interrupt State from a Trap Frame.
// ON = TRUE, OFF = FALSE
#pragma once
-//
-// Number of bits corresponding to the area that a PDE entry represents (1MB)
-//
#define PDE_SHIFT 20
-#define PDE_SIZE (1 << PDE_SHIFT)
//
// Number of bits corresponding to the area that a coarse page table entry represents (4KB)
#define KeGetContextSwitches(Prcb) \
CONTAINING_RECORD(Prcb, KIPCR, PrcbData)->ContextSwitches
+//
+// Macro to get the second level cache size; the KIPCR field name differs between
+// CISC and RISC architectures, as the former has a unified I/D cache
+//
+#define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelCacheSize
+
//
// Returns the Interrupt State from a Trap Frame.
// ON = TRUE, OFF = FALSE
#define ADDR_TO_PDE_OFFSET(v) ((((ULONG)(v)) / (1024 * PAGE_SIZE)))
#define ADDR_TO_PTE_OFFSET(v) ((((ULONG)(v)) % (1024 * PAGE_SIZE)) / PAGE_SIZE)
+#define MiGetPdeOffset ADDR_TO_PDE_OFFSET
+
/* Easy accessing PFN in PTE */
#define PFN_FROM_PTE(v) ((v)->u.Hard.PageFrameNumber)
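On this 1024-entry (x86) layout each PDE maps 1024 * 4 KB = 4 MB, so MiGetPdeOffset amounts to a right shift by 22. Illustrative values:

    /* Illustration only */
    C_ASSERT(1024 * PAGE_SIZE == (1 << 22));  /* one PDE maps 4 MB on this layout  */
    /* MiGetPdeOffset(0x00400000) == 1                                             */
    /* MiGetPdeOffset(0x80000000) == 512  -- the default x86 kernel-space start    */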
//#define KD_BREAKPOINT_SIZE
//#define KD_BREAKPOINT_VALUE
+//
+// Macro to get the second level cache size; the KIPCR field name differs between
+// CISC and RISC architectures, as the former has a unified I/D cache
+//
+#define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelDcacheSize
+
//
// Macros for getting and setting special purpose registers in portable code
//
#define _1KB (1024)
#define _1MB (1024 * _1KB)
-/* Size of a PDE directory, and size of a page table */
-#define PDE_SIZE (PDE_COUNT * sizeof(MMPDE))
+/* Size of a page table */
#define PT_SIZE (PTE_COUNT * sizeof(MMPTE))
/* Architecture-specific count of PDEs in a directory, and count of PTEs in a PT */
} MMCOLOR_TABLES, *PMMCOLOR_TABLES;
extern MMPTE HyperTemplatePte;
-extern MMPTE ValidKernelPde;
+extern MMPDE ValidKernelPde;
extern MMPTE ValidKernelPte;
extern ULONG MmSizeOfNonPagedPoolInBytes;
//
// Puerile piece of junk-grade carbonized horseshit puss sold to the lowest bidder
//
- ULONG Pde = ADDR_TO_PDE_OFFSET(AddressStart);
- while (Pde <= ADDR_TO_PDE_OFFSET(AddressEnd))
+ ULONG Pde = MiGetPdeOffset(AddressStart);
+ while (Pde <= MiGetPdeOffset(AddressEnd))
{
//
// This is both odious and heinous
//
- extern ULONG MmGlobalKernelPageDirectory[1024];
+ extern ULONG MmGlobalKernelPageDirectory[];
MmGlobalKernelPageDirectory[Pde] = ((PULONG)PDE_BASE)[Pde];
Pde++;
}
if (!MmSecondaryColors)
{
/* Get L2 cache information */
L2Associativity = KeGetPcr()->SecondLevelCacheAssociativity;
/* The number of colors is the number of cache bytes by set/way */
- MmSecondaryColors = KeGetPcr()->SecondLevelCacheSize;
+ MmSecondaryColors = KiGetSecondLevelDCacheSize();
if (L2Associativity) MmSecondaryColors /= L2Associativity;
}
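To make the arithmetic concrete, a purely illustrative example (cache parameters invented; the conversion of this byte count into page colors happens outside the quoted lines):

    /* Illustration only: 512 KB L2 cache, 8-way set-associative      */
    /* L2Associativity   = 8                                          */
    /* MmSecondaryColors = 512 KB / 8 = 64 KB of cache per way        */
    /* => 16 distinct colors for 4 KB pages once converted to pages   */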
/* Yes we do, set it up */
Pfn1 = MI_PFN_TO_PFNENTRY(PageFrameIndex);
Pfn1->u4.PteFrame = StartupPdIndex;
- Pfn1->PteAddress = PointerPde;
+ Pfn1->PteAddress = (PMMPTE)PointerPde;
Pfn1->u2.ShareCount++;
Pfn1->u3.e2.ReferenceCount = 1;
Pfn1->u3.e1.PageLocation = ActiveAndValid;
/* Make it a bogus page to catch errors */
PointerPde = MiAddressToPde(0xFFFFFFFF);
Pfn1->u4.PteFrame = PFN_FROM_PTE(PointerPde);
- Pfn1->PteAddress = PointerPde;
+ Pfn1->PteAddress = (PMMPTE)PointerPde;
Pfn1->u2.ShareCount++;
Pfn1->u3.e2.ReferenceCount = 0xFFF0;
Pfn1->u3.e1.PageLocation = ActiveAndValid;
NTAPI
MiBuildPagedPool(VOID)
{
- PMMPTE PointerPte, PointerPde;
+ PMMPTE PointerPte;
+ PMMPDE PointerPde;
MMPTE TempPte = ValidKernelPte;
+ MMPDE TempPde = ValidKernelPde;
PFN_NUMBER PageFrameIndex;
KIRQL OldIrql;
ULONG Size, BitMapSize;
// Allocate a page and map the first paged pool PDE
//
PageFrameIndex = MmAllocPage(MC_NPPOOL);
- TempPte.u.Hard.PageFrameNumber = PageFrameIndex;
+ TempPde.u.Hard.PageFrameNumber = PageFrameIndex;
ASSERT(PointerPde->u.Hard.Valid == 0);
- ASSERT(TempPte.u.Hard.Valid == 1);
- *PointerPde = TempPte;
+ ASSERT(TempPde.u.Hard.Valid == 1);
+ *PointerPde = TempPde;
//
// Release the PFN database lock
// will be allocated to handle paged pool growth. This is where they'll have
// to start.
//
- MmPagedPoolInfo.NextPdeForPagedPoolExpansion = PointerPde + 1;
+ MmPagedPoolInfo.NextPdeForPagedPoolExpansion = (PMMPTE)(PointerPde + 1);
//
// We keep track of each page via a bit, so check how big the bitmap will