2 * Copyright (C) 1998-2005 ReactOS Team (and the authors from the programmers section)
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 * PROJECT: ReactOS kernel
20 * FILE: ntoskrnl/mm/section/fault.c
21 * PURPOSE: Consolidate fault handlers for sections
37 * Thomas Weidenmueller
38 * Gunnar Andre' Dalsnes
48 I've generally organized fault handling code in newmm as handlers that run
49 under a single lock acquisition, check the state, and either take necessary
50 action atomically, or place a wait entry and return a continuation to the
51 caller. This lends itself to code that has a simple, structured form,
52 doesn't make assumptions about lock taking and breaking, and provides an
53 obvious, graphic separation between code that may block and code that isn't
54 allowed to. This file contains the non-blocking half.
56 In order to request a blocking operation to happen outside locks, place a
57 function pointer in the provided MM_REQUIRED_RESOURCES struct and return
58 STATUS_MORE_PROCESSING_REQUIRED. The function indicated will receive the
59 provided struct and take action outside of any mm related locks and at
60 PASSIVE_LEVEL. The same fault handler will be called again after the
61 blocking operation succeeds. In this way, the fault handler can accumulate
62 state, but will freely work while competing with other threads.
64 Fault handlers in this file should check for an MM_WAIT_ENTRY in a page
65 table they're using and return STATUS_SUCCESS + 1 if it's found. In that
66 case, the caller will wait on the wait entry event until the competing thread
67 is finished, and recall this handler in the current thread.
69 Another thing to note here is that we require mappings to exactly mirror
70 rmaps, so each mapping should be immediately followed by an rmap addition.
74 /* INCLUDES *****************************************************************/
80 #include "../mm/ARM3/miarm.h"
82 #define DPRINTC DPRINT
84 extern KEVENT MmWaitPageEvent
;
85 extern PMMWSL MmWorkingSetList
;
89 Multiple stage handling of a not-present fault in a data section.
91 Required->State is used to accumulate flags that indicate the next action
92 the handler should take.
94 State & 2 is currently used to indicate that the page acquired by a previous
95 callout is a global page to the section and should be placed in the section
98 Note that the primitive tail recursion done here reaches the base case when
/*
 * MmNotPresentFaultCachePage
 *
 * Non-blocking stage of not-present fault handling for a data/cache
 * section page.  Visible outcomes in this code:
 *  - STATUS_SUCCESS when the page is already present (raced with another
 *    thread) or when a resident section entry was mapped into the process;
 *  - STATUS_SUCCESS + 1 when the section entry is an MM_WAIT_ENTRY, asking
 *    the caller to wait on the page event and retry;
 *  - STATUS_MM_RESTART_OPERATION after installing a callout-provided page
 *    (or failing to store it in the section);
 *  - STATUS_MORE_PROCESSING_REQUIRED after posting MiReadFilePage in
 *    Required->DoAcquisition and marking the section entry busy;
 *  - STATUS_ACCESS_VIOLATION otherwise.
 *
 * NOTE(review): this block lost lines in extraction (the embedded original
 * line numbers are non-contiguous; e.g. the Address/Locked parameters and
 * the Entry/Attributes/Consumer/Status/OldIrql declarations are missing,
 * as are most braces).  The code below is preserved verbatim — restore the
 * missing lines from the original before attempting to compile.
 */
105 MmNotPresentFaultCachePage (
106 _In_ PMMSUPPORT AddressSpace
,
107 _In_ MEMORY_AREA
* MemoryArea
,
110 _Inout_ PMM_REQUIRED_RESOURCES Required
)
115 PMM_SECTION_SEGMENT Segment
;
116 LARGE_INTEGER FileOffset
, TotalOffset
;
119 PEPROCESS Process
= MmGetAddressSpaceOwner(AddressSpace
);
122 DPRINT("Not Present: %p %p (%p-%p)\n",
125 MemoryArea
->StartingAddress
,
126 MemoryArea
->EndingAddress
);
129 * There is a window between taking the page fault and locking the
130 * address space when another thread could load the page so we check
133 if (MmIsPagePresent(Process
, Address
))
136 return STATUS_SUCCESS
;
/* Page-align the fault address and derive the offset within the segment
 * (view offset is added below). */
139 PAddress
= MM_ROUND_DOWN(Address
, PAGE_SIZE
);
140 TotalOffset
.QuadPart
= (ULONG_PTR
)PAddress
-
141 (ULONG_PTR
)MemoryArea
->StartingAddress
;
143 Segment
= MemoryArea
->Data
.SectionData
.Segment
;
145 TotalOffset
.QuadPart
+= MemoryArea
->Data
.SectionData
.ViewOffset
.QuadPart
;
146 FileOffset
= TotalOffset
;
148 //Consumer = (Segment->Flags & MM_DATAFILE_SEGMENT) ? MC_CACHE : MC_USER;
151 if (Segment
->FileObject
)
153 DPRINT("FileName %wZ\n", &Segment
->FileObject
->FileName
);
156 DPRINT("Total Offset %08x%08x\n", TotalOffset
.HighPart
, TotalOffset
.LowPart
);
158 /* Lock the segment */
159 MmLockSectionSegment(Segment
);
161 /* Get the entry corresponding to the offset within the section */
162 Entry
= MmGetPageEntrySectionSegment(Segment
, &TotalOffset
);
164 Attributes
= PAGE_READONLY
;
/* A previous blocking callout delivered a page in Required->Page[0];
 * install it either into the section (State & 2) or this address space. */
166 if (Required
->State
&& Required
->Page
[0])
168 DPRINT("Have file and page, set page %x in section @ %x #\n",
170 TotalOffset
.LowPart
);
172 if (Required
->SwapEntry
)
173 MmSetSavedSwapEntryPage(Required
->Page
[0], Required
->SwapEntry
);
175 if (Required
->State
& 2)
177 DPRINT("Set in section @ %x\n", TotalOffset
.LowPart
);
178 Status
= MmSetPageEntrySectionSegment(Segment
,
180 Entry
= MAKE_PFN_SSE(Required
->Page
[0]));
181 if (!NT_SUCCESS(Status
))
183 MmReleasePageMemoryConsumer(MC_CACHE
, Required
->Page
[0]);
185 MmUnlockSectionSegment(Segment
);
186 MiSetPageEvent(Process
, Address
);
187 DPRINT("Status %x\n", Status
);
188 return STATUS_MM_RESTART_OPERATION
;
192 DPRINT("Set %x in address space @ %x\n", Required
->Page
[0], Address
);
193 Status
= MmCreateVirtualMapping(Process
,
198 if (NT_SUCCESS(Status
))
200 MmInsertRmap(Required
->Page
[0], Process
, Address
);
204 /* Drop the reference for our address space ... */
205 MmReleasePageMemoryConsumer(MC_CACHE
, Required
->Page
[0]);
207 MmUnlockSectionSegment(Segment
);
208 DPRINTC("XXX Set Event %x\n", Status
);
209 MiSetPageEvent(Process
, Address
);
210 DPRINT("Status %x\n", Status
);
214 else if (MM_IS_WAIT_PTE(Entry
))
216 // Whenever MM_WAIT_ENTRY is required as a swap entry, we need to
217 // ask the fault handler to wait until we should continue. Rather
218 // than recopy this boilerplate code everywhere, we just ask them
220 MmUnlockSectionSegment(Segment
);
221 return STATUS_SUCCESS
+ 1;
/* The section already holds a resident page: take a reference under the
 * PFN lock and map it read-only into this process. */
225 PFN_NUMBER Page
= PFN_FROM_SSE(Entry
);
226 DPRINT("Take reference to page %x #\n", Page
);
228 if (MiGetPfnEntry(Page
) == NULL
)
230 DPRINT1("Found no PFN entry for page 0x%x in page entry 0x%x (segment: 0x%p, offset: %08x%08x)\n",
234 TotalOffset
.HighPart
,
235 TotalOffset
.LowPart
);
236 KeBugCheck(CACHE_MANAGER
);
239 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
240 MmReferencePage(Page
);
241 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
243 Status
= MmCreateVirtualMapping(Process
, Address
, Attributes
, &Page
, 1);
244 if (NT_SUCCESS(Status
))
246 MmInsertRmap(Page
, Process
, Address
);
248 DPRINT("XXX Set Event %x\n", Status
);
249 MiSetPageEvent(Process
, Address
);
250 MmUnlockSectionSegment(Segment
);
251 DPRINT("Status %x\n", Status
);
256 DPRINT("Get page into section\n");
258 * If the entry is zero (and it can't change because we have
259 * locked the segment) then we need to load the page.
261 //DPRINT1("Read from file %08x %wZ\n", FileOffset.LowPart, &Section->FileObject->FileName);
/* Zero entry: request a blocking file read via Required->DoAcquisition and
 * park a wait entry in the section so competing threads block meanwhile. */
263 Required
->Context
= Segment
->FileObject
;
264 Required
->Consumer
= Consumer
;
265 Required
->FileOffset
= FileOffset
;
266 Required
->Amount
= PAGE_SIZE
;
267 Required
->DoAcquisition
= MiReadFilePage
;
269 MmSetPageEntrySectionSegment(Segment
,
271 MAKE_SWAP_SSE(MM_WAIT_ENTRY
));
273 MmUnlockSectionSegment(Segment
);
274 return STATUS_MORE_PROCESSING_REQUIRED
;
277 return STATUS_ACCESS_VIOLATION
;
282 MiCopyPageToPage(PFN_NUMBER DestPage
, PFN_NUMBER SrcPage
)
286 PVOID TempAddress
, TempSource
;
288 Process
= PsGetCurrentProcess();
289 TempAddress
= MiMapPageInHyperSpace(Process
, DestPage
, &Irql
);
290 if (TempAddress
== NULL
)
292 return STATUS_NO_MEMORY
;
294 TempSource
= MiMapPageInHyperSpace(Process
, SrcPage
, &Irql2
);
296 MiUnmapPageInHyperSpace(Process
, TempAddress
, Irql
);
297 return STATUS_NO_MEMORY
;
300 memcpy(TempAddress
, TempSource
, PAGE_SIZE
);
302 MiUnmapPageInHyperSpace(Process
, TempSource
, Irql2
);
303 MiUnmapPageInHyperSpace(Process
, TempAddress
, Irql
);
304 return STATUS_SUCCESS
;
309 This function is deceptively named, in that it does the actual work of handling
310 access faults on data sections. In the case of the code that's present here,
311 we don't allow cow sections, but we do need this to unset the initial
312 PAGE_READONLY condition of pages faulted into the cache so that we can add
313 a dirty bit in the section page table on the first modification.
315 In the ultimate form of this code, CoW is reenabled.
/*
 * MiCowCacheSectionPage
 *
 * Access-fault (write) handler for cache/data section pages.  In this
 * form CoW sections are not taken down the copy path for non-WriteCopy or
 * shared-image segments: a writable region's page is simply upgraded to
 * PAGE_READWRITE and marked dirty (returning STATUS_SUCCESS), or
 * STATUS_ACCESS_VIOLATION is returned for a non-writable region.  For the
 * copy path, the first pass posts MiGetOnePage as the blocking callout
 * (STATUS_MORE_PROCESSING_REQUIRED) with a wait entry parked in the page
 * table; the second pass copies the old page into the newly acquired one,
 * remaps, fixes rmaps, and returns STATUS_SUCCESS.  STATUS_SUCCESS + 1 is
 * returned when another thread's MM_WAIT_ENTRY is found.
 *
 * NOTE(review): lines were lost in extraction (the embedded original line
 * numbers are non-contiguous; e.g. the Address parameter and the
 * Region/Entry/SwapEntry/Status declarations and several condition heads
 * are missing).  Code below is preserved verbatim — restore from the
 * original before compiling.
 */
321 MiCowCacheSectionPage (
322 _In_ PMMSUPPORT AddressSpace
,
323 _In_ PMEMORY_AREA MemoryArea
,
326 _Inout_ PMM_REQUIRED_RESOURCES Required
)
328 PMM_SECTION_SEGMENT Segment
;
329 PFN_NUMBER NewPage
, OldPage
;
332 LARGE_INTEGER Offset
;
333 PEPROCESS Process
= MmGetAddressSpaceOwner(AddressSpace
);
335 DPRINT("MmAccessFaultSectionView(%x, %x, %x, %x)\n",
341 Segment
= MemoryArea
->Data
.SectionData
.Segment
;
343 /* Lock the segment */
344 MmLockSectionSegment(Segment
);
346 /* Find the offset of the page */
347 PAddress
= MM_ROUND_DOWN(Address
, PAGE_SIZE
);
348 Offset
.QuadPart
= (ULONG_PTR
)PAddress
- (ULONG_PTR
)MemoryArea
->StartingAddress
+
349 MemoryArea
->Data
.SectionData
.ViewOffset
.QuadPart
;
/* Non-copy-on-write segment (or a shared image section): no page copy is
 * needed — either upgrade the protection in place or fault. */
351 if (!Segment
->WriteCopy
/*&&
352 !MemoryArea->Data.SectionData.WriteCopyView*/ ||
353 Segment
->Image
.Characteristics
& IMAGE_SCN_MEM_SHARED
)
356 if (Region
->Protect
== PAGE_READWRITE
||
357 Region
->Protect
== PAGE_EXECUTE_READWRITE
)
361 DPRINTC("setting non-cow page %x %x:%x offset %x (%x) to writable\n",
366 MmGetPfnForProcess(Process
, Address
));
367 if (Segment
->FileObject
)
369 DPRINTC("file %wZ\n", &Segment
->FileObject
->FileName
);
371 Entry
= MmGetPageEntrySectionSegment(Segment
, &Offset
);
372 DPRINT("Entry %x\n", Entry
);
374 !IS_SWAP_FROM_SSE(Entry
) &&
375 PFN_FROM_SSE(Entry
) == MmGetPfnForProcess(Process
, Address
)) {
377 MmSetPageEntrySectionSegment(Segment
,
381 MmSetPageProtect(Process
, PAddress
, PAGE_READWRITE
);
382 MmSetDirtyPage(Process
, PAddress
);
383 MmUnlockSectionSegment(Segment
);
385 return STATUS_SUCCESS
;
390 DPRINT("Not supposed to be writable\n");
391 MmUnlockSectionSegment(Segment
);
392 return STATUS_ACCESS_VIOLATION
;
/* Copy path, first pass: no page acquired yet.  Either defer to a
 * competing thread's wait entry or post a blocking page allocation. */
397 if (!Required
->Page
[0])
400 if (MmIsPageSwapEntry(Process
, Address
))
402 MmGetPageFileMapping(Process
, Address
, &SwapEntry
);
403 MmUnlockSectionSegment(Segment
);
404 if (SwapEntry
== MM_WAIT_ENTRY
)
405 return STATUS_SUCCESS
+ 1; // Wait ... somebody else is getting it right now
407 return STATUS_SUCCESS
; // Nonwait swap entry ... handle elsewhere
409 /* Call out to acquire a page to copy to. We'll be re-called when
410 * the page has been allocated. */
411 Required
->Page
[1] = MmGetPfnForProcess(Process
, Address
);
412 Required
->Consumer
= MC_CACHE
;
413 Required
->Amount
= 1;
414 Required
->File
= __FILE__
;
415 Required
->Line
= __LINE__
;
416 Required
->DoAcquisition
= MiGetOnePage
;
417 MmCreatePageFileMapping(Process
, Address
, MM_WAIT_ENTRY
);
418 MmUnlockSectionSegment(Segment
);
419 return STATUS_MORE_PROCESSING_REQUIRED
;
/* Second pass: a fresh page is in Required->Page[0]; copy the old page
 * into it, remap, and rebalance rmaps/references. */
422 NewPage
= Required
->Page
[0];
423 OldPage
= Required
->Page
[1];
425 DPRINT("Allocated page %x\n", NewPage
);
427 /* Unshare the old page */
428 MmDeleteRmap(OldPage
, Process
, PAddress
);
430 /* Copy the old page */
432 MiCopyPageToPage(NewPage
, OldPage
);
434 /* Set the PTE to point to the new page */
435 Status
= MmCreateVirtualMapping(Process
,
441 if (!NT_SUCCESS(Status
))
443 DPRINT1("MmCreateVirtualMapping failed, not out of memory\n");
445 MmUnlockSectionSegment(Segment
);
449 MmInsertRmap(NewPage
, Process
, PAddress
);
450 MmReleasePageMemoryConsumer(MC_CACHE
, OldPage
);
451 MmUnlockSectionSegment(Segment
);
453 DPRINT("Address 0x%.8X\n", Address
);
454 return STATUS_SUCCESS
;
/* Definition of the page-wait event declared extern near the top of this
 * file; fault handlers wait on it (MiWaitForPageEvent) and signal it
 * (MiSetPageEvent) when an MM_WAIT_ENTRY is resolved. */
457 KEVENT MmWaitPageEvent
;
459 typedef struct _WORK_QUEUE_WITH_CONTEXT
461 WORK_QUEUE_ITEM WorkItem
;
462 PMMSUPPORT AddressSpace
;
463 PMEMORY_AREA MemoryArea
;
464 PMM_REQUIRED_RESOURCES Required
;
467 AcquireResource DoAcquisition
;
468 } WORK_QUEUE_WITH_CONTEXT
, *PWORK_QUEUE_WITH_CONTEXT
;
472 This is the work item used do blocking resource acquisition when a fault
473 handler returns STATUS_MORE_PROCESSING_REQUIRED. It's used to allow resource
474 acquisition to take place on a different stack, and outside of any locks used
475 by fault handling, making recursive fault handling possible when required.
479 _Function_class_(WORKER_THREAD_ROUTINE
)
482 MmpFaultWorker(PVOID Parameter
)
484 PWORK_QUEUE_WITH_CONTEXT WorkItem
= Parameter
;
486 DPRINT("Calling work\n");
487 WorkItem
->Status
= WorkItem
->Required
->DoAcquisition(WorkItem
->AddressSpace
,
488 WorkItem
->MemoryArea
,
490 DPRINT("Status %x\n", WorkItem
->Status
);
491 KeSetEvent(&WorkItem
->Wait
, IO_NO_INCREMENT
, FALSE
);
496 This code separates the action of fault handling into an upper and lower
497 handler to allow the inner handler to optionally be called in work item
498 if the stack is getting too deep. My experiments show that the third
499 recursive page fault taken at PASSIVE_LEVEL must be shunted away to a
500 worker thread. In the ultimate form of this code, the primary fault handler
501 makes this decision by using a thread-local counter to detect a too-deep
502 fault stack and call the inner fault handler in a worker thread if required.
504 Note that faults are taken at passive level and have access to ordinary
505 driver entry points such as those that read and write files, and filesystems
506 should use paged structures whenever possible. This makes recursive faults
507 both a perfectly normal occurrence, and a worthwhile case to handle.
509 The code below will repeatedly call MiCowSectionPage as long as it returns
510 either STATUS_SUCCESS + 1 or STATUS_MORE_PROCESSING_REQUIRED. In the more
511 processing required case, we call out to a blocking resource acquisition
512 function and then recall the fault handler with the shared state represented
513 by the MM_REQUIRED_RESOURCES struct.
515 In the other case, we wait on the wait entry event and recall the handler.
516 Each time the wait entry event is signalled, one thread has removed an
517 MM_WAIT_ENTRY from a page table.
519 In the ultimate form of this code, there is a single system wide fault handler
520 for each of access fault and not present and each memory area contains a
521 function pointer that indicates the active fault handler. Since the mm code
522 in reactos is currently fragmented, I didn't bring this change to trunk.
/*
 * MmpSectionAccessFaultInner
 *
 * Inner access-fault loop: locates the memory area, repeatedly calls
 * MiCowCacheSectionPage with the address space locked, and resolves the
 * handler's three continuation results — STATUS_SUCCESS + 1 (wait on the
 * page event and retry), STATUS_MM_RESTART_OPERATION (clear Resources and
 * retry), and STATUS_MORE_PROCESSING_REQUIRED (run Resources.DoAcquisition
 * outside the lock, on a worker thread via WORK_QUEUE_WITH_CONTEXT when
 * Thread->ActiveFaultCount > 0).  Fails fast with STATUS_UNSUCCESSFUL at
 * DISPATCH_LEVEL or above and STATUS_ACCESS_VIOLATION for user-mode
 * faults above MmSystemRangeStart or for a missing/deleted memory area.
 *
 * NOTE(review): lines were lost in extraction (non-contiguous embedded
 * numbering; e.g. the Address/FromMdl parameters, the Thread/Status
 * declarations, the MiCowCacheSectionPage argument tail, and the final
 * return are missing).  Code below is preserved verbatim.
 */
528 MmpSectionAccessFaultInner(KPROCESSOR_MODE Mode
,
529 PMMSUPPORT AddressSpace
,
534 MEMORY_AREA
* MemoryArea
;
536 BOOLEAN Locked
= FromMdl
;
537 MM_REQUIRED_RESOURCES Resources
= { 0 };
538 WORK_QUEUE_WITH_CONTEXT Context
;
540 RtlZeroMemory(&Context
, sizeof(WORK_QUEUE_WITH_CONTEXT
));
542 DPRINT("MmAccessFault(Mode %d, Address %x)\n", Mode
, Address
);
544 if (KeGetCurrentIrql() >= DISPATCH_LEVEL
)
546 DPRINT1("Page fault at high IRQL was %d\n", KeGetCurrentIrql());
547 return STATUS_UNSUCCESSFUL
;
550 /* Find the memory area for the faulting address */
551 if (Address
>= (ULONG_PTR
)MmSystemRangeStart
)
553 /* Check permissions */
554 if (Mode
!= KernelMode
)
556 DPRINT("MmAccessFault(Mode %d, Address %x)\n", Mode
, Address
);
557 return STATUS_ACCESS_VIOLATION
;
559 AddressSpace
= MmGetKernelAddressSpace();
563 AddressSpace
= &PsGetCurrentProcess()->Vm
;
568 MmLockAddressSpace(AddressSpace
);
573 MemoryArea
= MmLocateMemoryAreaByAddress(AddressSpace
, (PVOID
)Address
);
574 if (MemoryArea
== NULL
||
575 MemoryArea
->DeleteInProgress
)
579 MmUnlockAddressSpace(AddressSpace
);
581 DPRINT("Address: %x\n", Address
);
582 return STATUS_ACCESS_VIOLATION
;
585 DPRINT("Type %x (%x -> %x)\n",
587 MemoryArea
->StartingAddress
,
588 MemoryArea
->EndingAddress
);
/* Retry loop: each pass calls the handler under the lock, then carries
 * out whatever continuation it requested outside the lock. */
590 Resources
.DoAcquisition
= NULL
;
592 // Note: fault handlers are called with address space locked
593 // We return STATUS_MORE_PROCESSING_REQUIRED if anything is needed
594 Status
= MiCowCacheSectionPage(AddressSpace
,
602 MmUnlockAddressSpace(AddressSpace
);
605 if (Status
== STATUS_SUCCESS
+ 1)
608 DPRINT("Waiting for %x\n", Address
);
609 MiWaitForPageEvent(MmGetAddressSpaceOwner(AddressSpace
), Address
);
610 DPRINT("Restarting fault %x\n", Address
);
611 Status
= STATUS_MM_RESTART_OPERATION
;
613 else if (Status
== STATUS_MM_RESTART_OPERATION
)
616 RtlZeroMemory(&Resources
, sizeof(Resources
));
618 else if (Status
== STATUS_MORE_PROCESSING_REQUIRED
)
/* Nested fault: push the blocking acquisition to a worker thread so the
 * kernel stack does not grow with each recursive fault. */
620 if (Thread
->ActiveFaultCount
> 0)
622 DPRINT("Already fault handling ... going to work item (%x)\n",
624 Context
.AddressSpace
= AddressSpace
;
625 Context
.MemoryArea
= MemoryArea
;
626 Context
.Required
= &Resources
;
627 KeInitializeEvent(&Context
.Wait
, NotificationEvent
, FALSE
);
629 ExInitializeWorkItem(&Context
.WorkItem
,
633 DPRINT("Queue work item\n");
634 ExQueueWorkItem(&Context
.WorkItem
, DelayedWorkQueue
);
636 KeWaitForSingleObject(&Context
.Wait
, 0, KernelMode
, FALSE
, NULL
);
637 Status
= Context
.Status
;
638 DPRINT("Status %x\n", Status
);
642 Status
= Resources
.DoAcquisition(AddressSpace
, MemoryArea
, &Resources
);
645 if (NT_SUCCESS(Status
))
647 Status
= STATUS_MM_RESTART_OPERATION
;
653 MmLockAddressSpace(AddressSpace
);
656 while (Status
== STATUS_MM_RESTART_OPERATION
);
658 if (!NT_SUCCESS(Status
) && MemoryArea
->Type
== 1)
660 DPRINT1("Completed page fault handling %x %x\n", Address
, Status
);
661 DPRINT1("Type %x (%x -> %x)\n",
663 MemoryArea
->StartingAddress
,
664 MemoryArea
->EndingAddress
);
669 MmUnlockAddressSpace(AddressSpace
);
677 This is the outer fault handler mentioned in the description of
678 MmpSectionAccessFaultInner. It increments a fault depth count in the current
681 In the ultimate form of this code, the lower fault handler will optionally
682 use the count to keep the kernel stack from overflowing.
/*
 * MmAccessFaultCacheSection
 *
 * Outer access-fault entry point: validates IRQL and kernel/user mode,
 * selects the kernel or current-process address space, then brackets the
 * call to MmpSectionAccessFaultInner with Thread->ActiveFaultCount
 * increment/decrement so the inner handler can detect recursive faults.
 *
 * NOTE(review): lines were lost in extraction (the remaining parameters,
 * the Thread/Status declarations, the inner-call argument tail, and the
 * final return are missing).  Code below is preserved verbatim.
 */
688 MmAccessFaultCacheSection(KPROCESSOR_MODE Mode
,
693 PMMSUPPORT AddressSpace
;
696 DPRINT("MmpAccessFault(Mode %d, Address %x)\n", Mode
, Address
);
698 Thread
= PsGetCurrentThread();
700 if (KeGetCurrentIrql() >= DISPATCH_LEVEL
)
702 DPRINT1("Page fault at high IRQL %d, address %x\n",
705 return STATUS_UNSUCCESSFUL
;
708 /* Find the memory area for the faulting address */
709 if (Address
>= (ULONG_PTR
)MmSystemRangeStart
)
711 /* Check permissions */
712 if (Mode
!= KernelMode
)
714 DPRINT1("Address: %x:%x\n", PsGetCurrentProcess(), Address
);
715 return STATUS_ACCESS_VIOLATION
;
717 AddressSpace
= MmGetKernelAddressSpace();
721 AddressSpace
= &PsGetCurrentProcess()->Vm
;
/* Track fault depth across the inner call so it can shunt blocking work
 * to a worker thread when faults nest. */
724 Thread
->ActiveFaultCount
++;
725 Status
= MmpSectionAccessFaultInner(Mode
,
730 Thread
->ActiveFaultCount
--;
737 As above, this code separates the active part of fault handling from a carrier
738 that can use the thread's active fault count to determine whether a work item
739 is required. Also as above, this function repeatedly calls the active not
740 present fault handler until a clear success or failure is received, using a
741 return of STATUS_MORE_PROCESSING_REQUIRED or STATUS_SUCCESS + 1.
/*
 * MmNotPresentFaultCacheSectionInner
 *
 * Inner not-present fault loop, structurally parallel to
 * MmpSectionAccessFaultInner but driving MmNotPresentFaultCachePage.
 * Resolves STATUS_SUCCESS + 1 by waiting on the page event,
 * STATUS_MM_RESTART_OPERATION by clearing Resources and retrying, and
 * STATUS_MORE_PROCESSING_REQUIRED by running Resources.DoAcquisition
 * outside the lock — via MmpFaultWorker on a work item when
 * Thread->ActiveFaultCount > 2.  Signals the page event
 * (MiSetPageEvent) before returning.
 *
 * NOTE(review): lines were lost in extraction (non-contiguous embedded
 * numbering; e.g. the Address/FromMdl parameters, the Thread declaration,
 * argument tails of the handler and DoAcquisition calls, several braces,
 * and the final return are missing).  Code below is preserved verbatim.
 */
747 MmNotPresentFaultCacheSectionInner(KPROCESSOR_MODE Mode
,
748 PMMSUPPORT AddressSpace
,
753 BOOLEAN Locked
= FromMdl
;
754 PMEMORY_AREA MemoryArea
;
755 MM_REQUIRED_RESOURCES Resources
= { 0 };
756 WORK_QUEUE_WITH_CONTEXT Context
;
757 NTSTATUS Status
= STATUS_SUCCESS
;
759 RtlZeroMemory(&Context
, sizeof(WORK_QUEUE_WITH_CONTEXT
));
763 MmLockAddressSpace(AddressSpace
);
766 /* Call the memory area specific fault handler */
769 MemoryArea
= MmLocateMemoryAreaByAddress(AddressSpace
, (PVOID
)Address
);
770 if (MemoryArea
== NULL
|| MemoryArea
->DeleteInProgress
)
772 Status
= STATUS_ACCESS_VIOLATION
;
775 DPRINT1("Type %x DIP %x\n",
777 MemoryArea
->DeleteInProgress
);
781 DPRINT1("No memory area\n");
783 DPRINT1("Process %x, Address %x\n",
784 MmGetAddressSpaceOwner(AddressSpace
),
789 DPRINTC("Type %x (%x -> %x -> %x) in %x\n",
791 MemoryArea
->StartingAddress
,
793 MemoryArea
->EndingAddress
,
794 PsGetCurrentThread());
/* Retry loop: handler runs under the lock; continuations run outside. */
796 Resources
.DoAcquisition
= NULL
;
798 // Note: fault handlers are called with address space locked
799 // We return STATUS_MORE_PROCESSING_REQUIRED if anything is needed
801 Status
= MmNotPresentFaultCachePage(AddressSpace
,
809 MmUnlockAddressSpace(AddressSpace
);
812 if (Status
== STATUS_SUCCESS
)
816 else if (Status
== STATUS_SUCCESS
+ 1)
819 DPRINT("Waiting for %x\n", Address
);
820 MiWaitForPageEvent(MmGetAddressSpaceOwner(AddressSpace
), Address
);
821 DPRINT("Done waiting for %x\n", Address
);
822 Status
= STATUS_MM_RESTART_OPERATION
;
824 else if (Status
== STATUS_MM_RESTART_OPERATION
)
827 DPRINT("Clear resource\n");
828 RtlZeroMemory(&Resources
, sizeof(Resources
));
830 else if (Status
== STATUS_MORE_PROCESSING_REQUIRED
)
/* Deeply nested fault: move the blocking acquisition to a worker thread
 * to protect the kernel stack. */
832 if (Thread
->ActiveFaultCount
> 2)
834 DPRINTC("Already fault handling ... going to work item (%x)\n", Address
);
835 Context
.AddressSpace
= AddressSpace
;
836 Context
.MemoryArea
= MemoryArea
;
837 Context
.Required
= &Resources
;
838 KeInitializeEvent(&Context
.Wait
, NotificationEvent
, FALSE
);
840 ExInitializeWorkItem(&Context
.WorkItem
,
841 (PWORKER_THREAD_ROUTINE
)MmpFaultWorker
,
844 DPRINT("Queue work item\n");
845 ExQueueWorkItem(&Context
.WorkItem
, DelayedWorkQueue
);
847 KeWaitForSingleObject(&Context
.Wait
, 0, KernelMode
, FALSE
, NULL
);
848 Status
= Context
.Status
;
849 DPRINTC("Status %x\n", Status
);
853 DPRINT("DoAcquisition %x\n", Resources
.DoAcquisition
);
855 Status
= Resources
.DoAcquisition(AddressSpace
,
859 DPRINT("DoAcquisition %x -> %x\n",
860 Resources
.DoAcquisition
,
864 if (NT_SUCCESS(Status
))
866 Status
= STATUS_MM_RESTART_OPERATION
;
869 else if (NT_SUCCESS(Status
))
876 MmLockAddressSpace(AddressSpace
);
879 while (Status
== STATUS_MM_RESTART_OPERATION
);
881 DPRINTC("Completed page fault handling: %x:%x %x\n",
882 MmGetAddressSpaceOwner(AddressSpace
),
888 MmUnlockAddressSpace(AddressSpace
);
891 MiSetPageEvent(MmGetAddressSpaceOwner(AddressSpace
), Address
);
892 DPRINT("Done %x\n", Status
);
899 Call the inner not present fault handler, keeping track of the fault count.
900 In the ultimate form of this code, optionally use a worker thread the handle
901 the fault in order to sidestep stack overflow in the multiple fault case.
907 MmNotPresentFaultCacheSection(KPROCESSOR_MODE Mode
,
912 PMMSUPPORT AddressSpace
;
915 Address
&= ~(PAGE_SIZE
- 1);
916 DPRINT("MmNotPresentFault(Mode %d, Address %x)\n", Mode
, Address
);
918 Thread
= PsGetCurrentThread();
920 if (KeGetCurrentIrql() >= DISPATCH_LEVEL
)
922 DPRINT1("Page fault at high IRQL %d, address %x\n",
927 return STATUS_UNSUCCESSFUL
;
930 /* Find the memory area for the faulting address */
931 if (Address
>= (ULONG_PTR
)MmSystemRangeStart
)
933 /* Check permissions */
934 if (Mode
!= KernelMode
)
936 DPRINTC("Address: %x\n", Address
);
937 return STATUS_ACCESS_VIOLATION
;
939 AddressSpace
= MmGetKernelAddressSpace();
943 AddressSpace
= &PsGetCurrentProcess()->Vm
;
946 Thread
->ActiveFaultCount
++;
947 Status
= MmNotPresentFaultCacheSectionInner(Mode
,
952 Thread
->ActiveFaultCount
--;
954 ASSERT(Status
!= STATUS_UNSUCCESSFUL
);
955 ASSERT(Status
!= STATUS_INVALID_PARAMETER
);
956 DPRINT("MmAccessFault %x:%x -> %x\n",
957 MmGetAddressSpaceOwner(AddressSpace
),