/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/i386/page.c
 * PURPOSE:         Low level memory management manipulation
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */
/* INCLUDES ***************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#include <mm/ARM3/miarm.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, MmInitGlobalKernelPageDirectory)
#endif

#define ADDR_TO_PDE_OFFSET MiAddressToPdeOffset
#define ADDR_TO_PAGE_TABLE(v) (((ULONG)(v)) / (1024 * PAGE_SIZE))

/* GLOBALS *****************************************************************/

#define PA_BIT_PRESENT   (0)
#define PA_BIT_READWRITE (1)
#define PA_BIT_USER      (2)
#define PA_BIT_WT        (3)
#define PA_BIT_CD        (4)
#define PA_BIT_ACCESSED  (5)
#define PA_BIT_DIRTY     (6)
#define PA_BIT_GLOBAL    (8)

#define PA_PRESENT   (1 << PA_BIT_PRESENT)
#define PA_READWRITE (1 << PA_BIT_READWRITE)
#define PA_USER      (1 << PA_BIT_USER)
#define PA_DIRTY     (1 << PA_BIT_DIRTY)
#define PA_WT        (1 << PA_BIT_WT)
#define PA_CD        (1 << PA_BIT_CD)
#define PA_ACCESSED  (1 << PA_BIT_ACCESSED)
#define PA_GLOBAL    (1 << PA_BIT_GLOBAL)

#define IS_HYPERSPACE(v) (((ULONG)(v) >= HYPER_SPACE && (ULONG)(v) <= HYPER_SPACE_END))

#define PTE_TO_PFN(X)  ((X) >> PAGE_SHIFT)
#define PFN_TO_PTE(X)  ((X) << PAGE_SHIFT)

#define PAGE_MASK(x)   ((x)&(~0xfff))
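/* The two tables below are indexed by the 5-bit MM_* protection value:
 * the low three bits select the base protection and the upper two bits
 * select the MM_NOCACHE / MM_DECOMMIT / MM_NOACCESS variant, hence the
 * four groups of eight entries each. */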
const
ULONG_PTR
MmProtectToPteMask[32] =
{
    //
    // These are the base MM_ protection flags
    //
    0,
    PTE_READONLY            | PTE_ENABLE_CACHE,
    PTE_EXECUTE             | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_ENABLE_CACHE,
    PTE_READWRITE           | PTE_ENABLE_CACHE,
    PTE_WRITECOPY           | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_ENABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_ENABLE_CACHE,
    //
    // These OR in the MM_NOCACHE flag
    //
    0,
    PTE_READONLY            | PTE_DISABLE_CACHE,
    PTE_EXECUTE             | PTE_DISABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_DISABLE_CACHE,
    PTE_READWRITE           | PTE_DISABLE_CACHE,
    PTE_WRITECOPY           | PTE_DISABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_DISABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_DISABLE_CACHE,
    //
    // These OR in the MM_DECOMMIT flag, which doesn't seem supported on x86/64/ARM
    //
    0,
    PTE_READONLY            | PTE_ENABLE_CACHE,
    PTE_EXECUTE             | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_ENABLE_CACHE,
    PTE_READWRITE           | PTE_ENABLE_CACHE,
    PTE_WRITECOPY           | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_ENABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_ENABLE_CACHE,
    //
    // These OR in the MM_NOACCESS flag, which seems to enable WriteCombining?
    //
    0,
    PTE_READONLY            | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE             | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_READ        | PTE_WRITECOMBINED_CACHE,
    PTE_READWRITE           | PTE_WRITECOMBINED_CACHE,
    PTE_WRITECOPY           | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_WRITECOMBINED_CACHE,
};
const
ULONG MmProtectToValue[32] =
{
    PAGE_NOACCESS,
    PAGE_READONLY,
    PAGE_EXECUTE,
    PAGE_EXECUTE_READ,
    PAGE_READWRITE,
    PAGE_WRITECOPY,
    PAGE_EXECUTE_READWRITE,
    PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_NOCACHE | PAGE_READONLY,
    PAGE_NOCACHE | PAGE_EXECUTE,
    PAGE_NOCACHE | PAGE_EXECUTE_READ,
    PAGE_NOCACHE | PAGE_READWRITE,
    PAGE_NOCACHE | PAGE_WRITECOPY,
    PAGE_NOCACHE | PAGE_EXECUTE_READWRITE,
    PAGE_NOCACHE | PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_GUARD | PAGE_READONLY,
    PAGE_GUARD | PAGE_EXECUTE,
    PAGE_GUARD | PAGE_EXECUTE_READ,
    PAGE_GUARD | PAGE_READWRITE,
    PAGE_GUARD | PAGE_WRITECOPY,
    PAGE_GUARD | PAGE_EXECUTE_READWRITE,
    PAGE_GUARD | PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_WRITECOMBINE | PAGE_READONLY,
    PAGE_WRITECOMBINE | PAGE_EXECUTE,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_READ,
    PAGE_WRITECOMBINE | PAGE_READWRITE,
    PAGE_WRITECOMBINE | PAGE_WRITECOPY,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_READWRITE,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_WRITECOPY
};
/* FUNCTIONS ***************************************************************/

static BOOLEAN MmUnmapPageTable(PULONG Pt);

static VOID
MiFlushTlb(PULONG Pt, PVOID Address)
{
    if ((Pt && MmUnmapPageTable(Pt)) || Address >= MmSystemRangeStart)
    {
        KeInvalidateTlbEntry(Address);
    }
}

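/* Translate Win32 PAGE_* protection flags into raw x86 PTE attribute
 * bits (PA_*). The user/supervisor, cache-disable and write-through
 * bits are derived from PAGE_SYSTEM, PAGE_NOCACHE and PAGE_WRITETHROUGH
 * respectively. */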
static ULONG
ProtectToPTE(ULONG flProtect)
{
    ULONG Attributes = 0;

    if (flProtect & (PAGE_NOACCESS|PAGE_GUARD))
    {
        Attributes = 0;
    }
    else if (flProtect & PAGE_IS_WRITABLE)
    {
        Attributes = PA_PRESENT | PA_READWRITE;
    }
    else if (flProtect & (PAGE_IS_READABLE | PAGE_IS_EXECUTABLE))
    {
        Attributes = PA_PRESENT;
    }
    else
    {
        DPRINT1("Unknown main protection type.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if (flProtect & PAGE_SYSTEM)
    {
    }
    else
    {
        Attributes = Attributes | PA_USER;
    }
    if (flProtect & PAGE_NOCACHE)
    {
        Attributes = Attributes | PA_CD;
    }
    if (flProtect & PAGE_WRITETHROUGH)
    {
        Attributes = Attributes | PA_WT;
    }
    return (Attributes);
}

NTSTATUS
NTAPI
MiDispatchFault(IN BOOLEAN StoreInstruction,
                IN PVOID Address,
                IN PMMPTE PointerPte,
                IN PMMPTE PointerProtoPte,
                IN BOOLEAN Recursive,
                IN PEPROCESS Process,
                IN PVOID TrapInformation,
                IN PVOID Vad);

VOID
NTAPI
MiFillSystemPageDirectory(IN PVOID Base,
                          IN SIZE_T NumberOfBytes);

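/* Return a pointer to the hardware PTE that maps Address in the given
 * process. For the current process and for kernel addresses the PTE is
 * reached directly through the self-mapping (MiAddressToPte); for another
 * process the page directory and page table are temporarily mapped via
 * hyperspace. If Create is TRUE a missing page table is brought into
 * existence, otherwise NULL is returned when no page table exists. */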
static PULONG
MmGetPageTableForProcess(PEPROCESS Process, PVOID Address, BOOLEAN Create)
{
    PFN_NUMBER Pfn;
    PULONG Pt;
    PMMPDE PointerPde;

    if (Address < MmSystemRangeStart)
    {
        /* We should have a process for user land addresses */
        ASSERT(Process != NULL);

        if(Process != PsGetCurrentProcess())
        {
            PMMPDE PdeBase;
            ULONG PdeOffset = MiGetPdeOffset(Address);

            /* Nobody but page fault should ask for creating the PDE,
             * Which imples that Process is the current one */
            ASSERT(Create == FALSE);

            PdeBase = MmCreateHyperspaceMapping(PTE_TO_PFN(Process->Pcb.DirectoryTableBase[0]));
            if (PdeBase == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            PointerPde = PdeBase + PdeOffset;
            if (PointerPde->u.Hard.Valid == 0)
            {
                MmDeleteHyperspaceMapping(PdeBase);
                return NULL;
            }
            else
            {
                Pfn = PointerPde->u.Hard.PageFrameNumber;
            }
            MmDeleteHyperspaceMapping(PdeBase);
            Pt = MmCreateHyperspaceMapping(Pfn);
            if (Pt == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            return Pt + MiAddressToPteOffset(Address);
        }

        /* This is for our process */
        PointerPde = MiAddressToPde(Address);
        Pt = (PULONG)MiAddressToPte(Address);
        if (PointerPde->u.Hard.Valid == 0)
        {
            NTSTATUS Status;
            if (Create == FALSE)
            {
                return NULL;
            }

            ASSERT(PointerPde->u.Long == 0);

            MI_WRITE_INVALID_PTE(PointerPde, DemandZeroPde);
            Status = MiDispatchFault(TRUE,
                                     Pt,
                                     PointerPde,
                                     NULL,
                                     FALSE,
                                     PsGetCurrentProcess(),
                                     NULL,
                                     NULL);
            DBG_UNREFERENCED_LOCAL_VARIABLE(Status);
            ASSERT(KeAreAllApcsDisabled() == TRUE);
            ASSERT(PointerPde->u.Hard.Valid == 1);
        }
        return (PULONG)MiAddressToPte(Address);
    }

    /* This is for kernel land address */
    ASSERT(Process == NULL);
    PointerPde = MiAddressToPde(Address);
    Pt = (PULONG)MiAddressToPte(Address);
    if (PointerPde->u.Hard.Valid == 0)
    {
        /* Let ARM3 synchronize the PDE */
        if(!MiSynchronizeSystemPde(PointerPde))
        {
            /* PDE (still) not valid, let ARM3 allocate one if asked */
            if (Create == FALSE)
                return NULL;
            MiFillSystemPageDirectory(Address, PAGE_SIZE);
        }
    }
    return Pt;
}

static BOOLEAN MmUnmapPageTable(PULONG Pt)
{
    if (!IS_HYPERSPACE(Pt))
    {
        return TRUE;
    }

    if (Pt)
    {
        MmDeleteHyperspaceMapping((PVOID)PAGE_ROUND_DOWN(Pt));
    }
    return FALSE;
}

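/* Read the raw PTE value for Address in the given process, or return 0
 * if no page table is mapped for it. Any temporary hyperspace mapping
 * obtained for the lookup is released again before returning. */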
static ULONG
MmGetPageEntryForProcess(PEPROCESS Process, PVOID Address)
{
    ULONG Pte;
    PULONG Pt;

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt)
    {
        Pte = *Pt;
        MmUnmapPageTable(Pt);
        return Pte;
    }
    return 0;
}

PFN_NUMBER
NTAPI
MmGetPfnForProcess(PEPROCESS Process,
                   PVOID Address)
{
    ULONG Entry;
    Entry = MmGetPageEntryForProcess(Process, Address);
    if (!(Entry & PA_PRESENT))
    {
        return 0;
    }
    return(PTE_TO_PFN(Entry));
}

VOID
NTAPI
MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address,
                       BOOLEAN* WasDirty, PPFN_NUMBER Page)
/*
 * FUNCTION: Delete a virtual mapping
 */
{
    BOOLEAN WasValid = FALSE;
    PFN_NUMBER Pfn = 0;
    ULONG Pte;
    PULONG Pt;

    DPRINT("MmDeleteVirtualMapping(%p, %p, %p, %p)\n",
           Process, Address, WasDirty, Page);

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        if (WasDirty != NULL)
        {
            *WasDirty = FALSE;
        }
        if (Page != NULL)
        {
            *Page = 0;
        }
        return;
    }

    /*
     * Atomically set the entry to zero and get the old value.
     */
    Pte = InterlockedExchangePte(Pt, 0);

    /* We count a mapping as valid if it's a present page, or it's a nonzero pfn with
     * the swap bit unset, indicating a valid page protected to PAGE_NOACCESS. */
    WasValid = (Pte & PA_PRESENT) || ((Pte >> PAGE_SHIFT) && !(Pte & 0x800));
    if (WasValid)
    {
        /* Flush the TLB since we transitioned this PTE
         * from valid to invalid so any stale translations
         * are removed from the cache */
        MiFlushTlb(Pt, Address);

        if (Address < MmSystemRangeStart)
        {
            /* Remove PDE reference */
            Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
            ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_COUNT);
        }

        Pfn = PTE_TO_PFN(Pte);
    }
    else
    {
        MmUnmapPageTable(Pt);
        Pfn = 0;
    }

    /*
     * Return some information to the caller
     */
    if (WasDirty != NULL)
    {
        *WasDirty = ((Pte & PA_DIRTY) && (Pte & PA_PRESENT)) ? TRUE : FALSE;
    }
    if (Page != NULL)
    {
        *Page = Pfn;
    }
}

VOID
NTAPI
MmGetPageFileMapping(PEPROCESS Process, PVOID Address,
                     SWAPENTRY* SwapEntry)
/*
 * FUNCTION: Get a page file mapping
 */
{
    ULONG Entry = MmGetPageEntryForProcess(Process, Address);
    *SwapEntry = Entry >> 1;
}

VOID
NTAPI
MmDeletePageFileMapping(PEPROCESS Process, PVOID Address,
                        SWAPENTRY* SwapEntry)
/*
 * FUNCTION: Delete a page file mapping
 */
{
    ULONG Pte;
    PULONG Pt;

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        *SwapEntry = 0;
        return;
    }

    /*
     * Atomically set the entry to zero and get the old value.
     */
    Pte = InterlockedExchangePte(Pt, 0);

    if (Address < MmSystemRangeStart)
    {
        /* Remove PDE reference */
        Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
        ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_COUNT);
    }

    /* We don't need to flush here because page file entries
     * are invalid translations, so the processor won't cache them */
    MmUnmapPageTable(Pt);

    if ((Pte & PA_PRESENT) || !(Pte & 0x800))
    {
        DPRINT1("Pte %x (want not 1 and 0x800)\n", Pte);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /*
     * Return some information to the caller
     */
    *SwapEntry = Pte >> 1;
}

BOOLEAN
Mmi386MakeKernelPageTableGlobal(PVOID Address)
{
    PMMPDE PointerPde = MiAddressToPde(Address);
    PMMPTE PointerPte = MiAddressToPte(Address);

    if (PointerPde->u.Hard.Valid == 0)
    {
        if(!MiSynchronizeSystemPde(PointerPde))
            return FALSE;
        return PointerPte->u.Hard.Valid != 0;
    }
    return FALSE;
}

BOOLEAN
NTAPI
MmIsDirtyPage(PEPROCESS Process, PVOID Address)
{
    return MmGetPageEntryForProcess(Process, Address) & PA_DIRTY ? TRUE : FALSE;
}

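/* Clear the dirty bit of the PTE that maps Address. The compare-exchange
 * loop below preserves bits the hardware may set concurrently, and the
 * TLB is only flushed when the page really was dirty, since a cached
 * translation can still carry the old dirty attribute. */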
VOID
NTAPI
MmSetCleanPage(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    ULONG Pte;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmSetCleanPage is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte & ~PA_DIRTY, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    else if (Pte & PA_DIRTY)
    {
        MiFlushTlb(Pt, Address);
    }
    else
    {
        MmUnmapPageTable(Pt);
    }
}

VOID
NTAPI
MmSetDirtyPage(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    ULONG Pte;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmSetDirtyPage is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte | PA_DIRTY, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    else
    {
        /* The processor will never clear this bit itself, therefore
         * we do not need to flush the TLB here when setting it */
        MmUnmapPageTable(Pt);
    }
}

BOOLEAN
NTAPI
MmIsPagePresent(PEPROCESS Process, PVOID Address)
{
    return MmGetPageEntryForProcess(Process, Address) & PA_PRESENT;
}

BOOLEAN
NTAPI
MmIsDisabledPage(PEPROCESS Process, PVOID Address)
{
    ULONG_PTR Entry = MmGetPageEntryForProcess(Process, Address);
    return !(Entry & PA_PRESENT) && !(Entry & 0x800) && (Entry >> PAGE_SHIFT);
}

BOOLEAN
NTAPI
MmIsPageSwapEntry(PEPROCESS Process, PVOID Address)
{
    ULONG Entry;
    Entry = MmGetPageEntryForProcess(Process, Address);
    return !(Entry & PA_PRESENT) && (Entry & 0x800);
}

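/* Store a swap (page file) entry in the PTE of Address. The entry is
 * shifted left by one bit so that the present bit stays clear and the
 * CPU treats the PTE as an invalid translation; this is also why bit 31
 * of the caller's SwapEntry must be free. */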
NTSTATUS
NTAPI
MmCreatePageFileMapping(PEPROCESS Process,
                        PVOID Address,
                        SWAPENTRY SwapEntry)
{
    PULONG Pt;
    ULONG Pte;

    if (Process == NULL && Address < MmSystemRangeStart)
    {
        DPRINT1("No process\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (Process != NULL && Address >= MmSystemRangeStart)
    {
        DPRINT1("Setting kernel address with process context\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if (SwapEntry & (1 << 31))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        /* Nobody should page out an address that hasn't even been mapped */
        /* But we might place a wait entry first, requiring the page table */
        if (SwapEntry != MM_WAIT_ENTRY)
        {
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        Pt = MmGetPageTableForProcess(Process, Address, TRUE);
    }
    Pte = InterlockedExchangePte(Pt, SwapEntry << 1);
    if (Pte != 0)
    {
        KeBugCheckEx(MEMORY_MANAGEMENT, SwapEntry, (ULONG_PTR)Process, (ULONG_PTR)Address, 0);
    }

    if (Address < MmSystemRangeStart)
    {
        /* Add PDE reference */
        Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]++;
        ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] <= PTE_COUNT);
    }

    /* We don't need to flush the TLB here because it
     * only caches valid translations and a zero PTE
     * is not a valid translation */
    MmUnmapPageTable(Pt);

    return(STATUS_SUCCESS);
}

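/* Map PageCount physical pages starting at Address with the requested
 * protection. Unlike MmCreateVirtualMapping below, the pages are not
 * checked for being in use, hence "unsafe". Page tables are fetched (and
 * created on demand) whenever the loop crosses a page directory boundary. */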
NTSTATUS
NTAPI
MmCreateVirtualMappingUnsafe(PEPROCESS Process,
                             PVOID Address,
                             ULONG flProtect,
                             PPFN_NUMBER Pages,
                             ULONG PageCount)
{
    ULONG Attributes;
    PVOID Addr;
    ULONG i;
    ULONG oldPdeOffset, PdeOffset;
    PULONG Pt = NULL;
    ULONG Pte;

    DPRINT("MmCreateVirtualMappingUnsafe(%p, %p, %lu, %p (%x), %lu)\n",
           Process, Address, flProtect, Pages, *Pages, PageCount);

    ASSERT(((ULONG_PTR)Address % PAGE_SIZE) == 0);

    if (Process == NULL)
    {
        if (Address < MmSystemRangeStart)
        {
            DPRINT1("No process\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (PageCount > 0x10000 ||
            (ULONG_PTR) Address / PAGE_SIZE + PageCount > 0x100000)
        {
            DPRINT1("Page count too large\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }
    else
    {
        if (Address >= MmSystemRangeStart)
        {
            DPRINT1("Setting kernel address with process context\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (PageCount > (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE ||
            (ULONG_PTR) Address / PAGE_SIZE + PageCount >
            (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE)
        {
            DPRINT1("Page Count too large\n");
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    Attributes = ProtectToPTE(flProtect);
    if (Address >= MmSystemRangeStart)
    {
        Attributes &= ~PA_USER;
    }
    else
    {
        Attributes |= PA_USER;
    }

    Addr = Address;
    /* MmGetPageTableForProcess should be called on the first run, so
     * let this trigger it */
    oldPdeOffset = ADDR_TO_PDE_OFFSET(Addr) + 1;
    for (i = 0; i < PageCount; i++, Addr = (PVOID)((ULONG_PTR)Addr + PAGE_SIZE))
    {
        if (!(Attributes & PA_PRESENT) && Pages[i] != 0)
        {
            DPRINT1("Setting physical address but not allowing access at address "
                    "0x%p with attributes %x/%x.\n",
                    Addr, Attributes, flProtect);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        PdeOffset = ADDR_TO_PDE_OFFSET(Addr);
        if (oldPdeOffset != PdeOffset)
        {
            if(Pt) MmUnmapPageTable(Pt);
            Pt = MmGetPageTableForProcess(Process, Addr, TRUE);
            if (Pt == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
        }
        else
        {
            Pt++;
        }
        oldPdeOffset = PdeOffset;

        Pte = InterlockedExchangePte(Pt, PFN_TO_PTE(Pages[i]) | Attributes);

        /* There should not be anything valid here */
        if (Pte != 0)
        {
            DPRINT1("Bad PTE %lx at %p for %p + %lu\n", Pte, Pt, Address, i);
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        /* We don't need to flush the TLB here because it only caches valid translations
         * and we're moving this PTE from invalid to valid so it can't be cached right now */

        if (Addr < MmSystemRangeStart)
        {
            /* Add PDE reference */
            Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)]++;
            ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)] <= PTE_COUNT);
        }
    }

    ASSERT(Addr > Address);
    MmUnmapPageTable(Pt);

    return(STATUS_SUCCESS);
}

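/* Checked wrapper around MmCreateVirtualMappingUnsafe: bugchecks if any
 * of the pages to be mapped is not marked as in use. */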
NTSTATUS
NTAPI
MmCreateVirtualMapping(PEPROCESS Process,
                       PVOID Address,
                       ULONG flProtect,
                       PPFN_NUMBER Pages,
                       ULONG PageCount)
{
    ULONG i;

    ASSERT((ULONG_PTR)Address % PAGE_SIZE == 0);
    for (i = 0; i < PageCount; i++)
    {
        if (!MmIsPageInUse(Pages[i]))
        {
            DPRINT1("Page at address %x not in use\n", PFN_TO_PTE(Pages[i]));
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return(MmCreateVirtualMappingUnsafe(Process,
                                        Address,
                                        flProtect,
                                        Pages,
                                        PageCount));
}

ULONG
NTAPI
MmGetPageProtect(PEPROCESS Process, PVOID Address)
{
    ULONG Entry;
    ULONG Protect;

    Entry = MmGetPageEntryForProcess(Process, Address);

    if (!(Entry & PA_PRESENT))
    {
        Protect = PAGE_NOACCESS;
    }
    else
    {
        if (Entry & PA_READWRITE)
        {
            Protect = PAGE_READWRITE;
        }
        else
        {
            Protect = PAGE_EXECUTE_READ;
        }
        if (Entry & PA_CD)
        {
            Protect |= PAGE_NOCACHE;
        }
        if (Entry & PA_WT)
        {
            Protect |= PAGE_WRITETHROUGH;
        }
        if (!(Entry & PA_USER))
        {
            Protect |= PAGE_SYSTEM;
        }
    }
    return(Protect);
}

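/* Change the protection of an existing mapping in place, keeping the
 * accessed and dirty bits of the old PTE. The TLB is only flushed when
 * the old PTE did not already contain every bit of the new attributes. */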
VOID
NTAPI
MmSetPageProtect(PEPROCESS Process, PVOID Address, ULONG flProtect)
{
    ULONG Attributes = 0;
    PULONG Pt;
    ULONG Pte;

    DPRINT("MmSetPageProtect(Process %p Address %p flProtect %x)\n",
           Process, Address, flProtect);

    Attributes = ProtectToPTE(flProtect);

    if (Address >= MmSystemRangeStart)
    {
        Attributes &= ~PA_USER;
    }
    else
    {
        Attributes |= PA_USER;
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    Pte = InterlockedExchangePte(Pt, PAGE_MASK(*Pt) | Attributes | (*Pt & (PA_ACCESSED|PA_DIRTY)));

    // We should be able to bring a page back from PAGE_NOACCESS
    if ((Pte & 0x800) || !(Pte >> PAGE_SHIFT))
    {
        DPRINT1("Invalid Pte %lx\n", Pte);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if((Pte & Attributes) != Attributes)
        MiFlushTlb(Pt, Address);
    else
        MmUnmapPageTable(Pt);
}

VOID
NTAPI
MmInitGlobalKernelPageDirectory(VOID)
{
    /* Nothing to do here */
}