2 * Copyright (C) 1998-2005 ReactOS Team (and the authors from the programmers section)
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 * PROJECT: ReactOS kernel
20 * FILE: ntoskrnl/mm/section/fault.c
21 * PURPOSE: Consolidate fault handlers for sections
37 * Thomas Weidenmueller
38 * Gunnar Andre' Dalsnes
48 I've generally organized fault handling code in newmm as handlers that run
49 under a single lock acquisition, check the state, and either take necessary
50 action atomically, or place a wait entry and return a continuation to the
51 caller. This lends itself to code that has a simple, structured form,
52 doesn't make assumptions about lock taking and breaking, and provides an
53 obvious, graphic separation between code that may block and code that isn't
54 allowed to. This file contains the non-blocking half.
56 In order to request a blocking operation to happen outside locks, place a
57 function pointer in the provided MM_REQUIRED_RESOURCES struct and return
58 STATUS_MORE_PROCESSING_REQUIRED. The function indicated will receive the
59 provided struct and take action outside of any mm related locks and at
60 PASSIVE_LEVEL. The same fault handler will be called again after the
61 blocking operation succeeds. In this way, the fault handler can accumulate
62 state, but will freely work while competing with other threads.
64 Fault handlers in this file should check for an MM_WAIT_ENTRY in a page
65 table they're using and return STATUS_SUCCESS + 1 if it's found. In that
66 case, the caller will wait on the wait entry event until the competing thread
67 is finished, and recall this handler in the current thread.
69 Another thing to note here is that we require mappings to exactly mirror
70 rmaps, so each mapping should be immediately followed by an rmap addition.
74 /* INCLUDES *****************************************************************/
80 #include "../mm/ARM3/miarm.h"
82 #define DPRINTC DPRINT
84 extern KEVENT MmWaitPageEvent
;
85 extern PMMWSL MmWorkingSetList
;
89 Multiple stage handling of a not-present fault in a data section.
91 Required->State is used to accumulate flags that indicate the next action
92 the handler should take.
94 State & 2 is currently used to indicate that the page acquired by a previous
95 callout is a global page to the section and should be placed in the section
98 Note that the primitive tail recursion done here reaches the base case when
105 MmNotPresentFaultCachePage(PMMSUPPORT AddressSpace
,
106 MEMORY_AREA
* MemoryArea
,
109 PMM_REQUIRED_RESOURCES Required
)
114 PMM_SECTION_SEGMENT Segment
;
115 LARGE_INTEGER FileOffset
, TotalOffset
;
118 PEPROCESS Process
= MmGetAddressSpaceOwner(AddressSpace
);
121 DPRINT("Not Present: %p %p (%p-%p)\n",
124 MemoryArea
->StartingAddress
,
125 MemoryArea
->EndingAddress
);
128 * There is a window between taking the page fault and locking the
129 * address space when another thread could load the page so we check
132 if (MmIsPagePresent(Process
, Address
))
135 return STATUS_SUCCESS
;
138 PAddress
= MM_ROUND_DOWN(Address
, PAGE_SIZE
);
139 TotalOffset
.QuadPart
= (ULONG_PTR
)PAddress
-
140 (ULONG_PTR
)MemoryArea
->StartingAddress
;
142 Segment
= MemoryArea
->Data
.SectionData
.Segment
;
144 TotalOffset
.QuadPart
+= MemoryArea
->Data
.SectionData
.ViewOffset
.QuadPart
;
145 FileOffset
= TotalOffset
;
147 //Consumer = (Segment->Flags & MM_DATAFILE_SEGMENT) ? MC_CACHE : MC_USER;
150 if (Segment
->FileObject
)
152 DPRINT("FileName %wZ\n", &Segment
->FileObject
->FileName
);
155 DPRINT("Total Offset %08x%08x\n", TotalOffset
.HighPart
, TotalOffset
.LowPart
);
157 /* Lock the segment */
158 MmLockSectionSegment(Segment
);
160 /* Get the entry corresponding to the offset within the section */
161 Entry
= MmGetPageEntrySectionSegment(Segment
, &TotalOffset
);
163 Attributes
= PAGE_READONLY
;
165 if (Required
->State
&& Required
->Page
[0])
167 DPRINT("Have file and page, set page %x in section @ %x #\n",
169 TotalOffset
.LowPart
);
171 if (Required
->SwapEntry
)
172 MmSetSavedSwapEntryPage(Required
->Page
[0], Required
->SwapEntry
);
174 if (Required
->State
& 2)
176 DPRINT("Set in section @ %x\n", TotalOffset
.LowPart
);
177 Status
= MmSetPageEntrySectionSegment(Segment
,
179 Entry
= MAKE_PFN_SSE(Required
->Page
[0]));
180 if (!NT_SUCCESS(Status
))
182 MmReleasePageMemoryConsumer(MC_CACHE
, Required
->Page
[0]);
184 MmUnlockSectionSegment(Segment
);
185 MiSetPageEvent(Process
, Address
);
186 DPRINT("Status %x\n", Status
);
187 return STATUS_MM_RESTART_OPERATION
;
191 DPRINT("Set %x in address space @ %x\n", Required
->Page
[0], Address
);
192 Status
= MmCreateVirtualMapping(Process
,
197 if (NT_SUCCESS(Status
))
199 MmInsertRmap(Required
->Page
[0], Process
, Address
);
203 /* Drop the reference for our address space ... */
204 MmReleasePageMemoryConsumer(MC_CACHE
, Required
->Page
[0]);
206 MmUnlockSectionSegment(Segment
);
207 DPRINTC("XXX Set Event %x\n", Status
);
208 MiSetPageEvent(Process
, Address
);
209 DPRINT("Status %x\n", Status
);
213 else if (MM_IS_WAIT_PTE(Entry
))
215 // Whenever MM_WAIT_ENTRY is required as a swap entry, we need to
216 // ask the fault handler to wait until we should continue. Rathern
217 // than recopy this boilerplate code everywhere, we just ask them
219 MmUnlockSectionSegment(Segment
);
220 return STATUS_SUCCESS
+ 1;
224 PFN_NUMBER Page
= PFN_FROM_SSE(Entry
);
225 DPRINT("Take reference to page %x #\n", Page
);
227 if (MiGetPfnEntry(Page
) == NULL
)
229 DPRINT1("Found no PFN entry for page 0x%x in page entry 0x%x (segment: 0x%p, offset: %08x%08x)\n",
233 TotalOffset
.HighPart
,
234 TotalOffset
.LowPart
);
235 KeBugCheck(CACHE_MANAGER
);
238 OldIrql
= KeAcquireQueuedSpinLock(LockQueuePfnLock
);
239 MmReferencePage(Page
);
240 KeReleaseQueuedSpinLock(LockQueuePfnLock
, OldIrql
);
242 Status
= MmCreateVirtualMapping(Process
, Address
, Attributes
, &Page
, 1);
243 if (NT_SUCCESS(Status
))
245 MmInsertRmap(Page
, Process
, Address
);
247 DPRINT("XXX Set Event %x\n", Status
);
248 MiSetPageEvent(Process
, Address
);
249 MmUnlockSectionSegment(Segment
);
250 DPRINT("Status %x\n", Status
);
255 DPRINT("Get page into section\n");
257 * If the entry is zero (and it can't change because we have
258 * locked the segment) then we need to load the page.
260 //DPRINT1("Read from file %08x %wZ\n", FileOffset.LowPart, &Section->FileObject->FileName);
262 Required
->Context
= Segment
->FileObject
;
263 Required
->Consumer
= Consumer
;
264 Required
->FileOffset
= FileOffset
;
265 Required
->Amount
= PAGE_SIZE
;
266 Required
->DoAcquisition
= MiReadFilePage
;
268 MmSetPageEntrySectionSegment(Segment
,
270 MAKE_SWAP_SSE(MM_WAIT_ENTRY
));
272 MmUnlockSectionSegment(Segment
);
273 return STATUS_MORE_PROCESSING_REQUIRED
;
276 return STATUS_ACCESS_VIOLATION
;
281 MiCopyPageToPage(PFN_NUMBER DestPage
, PFN_NUMBER SrcPage
)
285 PVOID TempAddress
, TempSource
;
287 Process
= PsGetCurrentProcess();
288 TempAddress
= MiMapPageInHyperSpace(Process
, DestPage
, &Irql
);
289 if (TempAddress
== NULL
)
291 return STATUS_NO_MEMORY
;
293 TempSource
= MiMapPageInHyperSpace(Process
, SrcPage
, &Irql2
);
295 MiUnmapPageInHyperSpace(Process
, TempAddress
, Irql
);
296 return STATUS_NO_MEMORY
;
299 memcpy(TempAddress
, TempSource
, PAGE_SIZE
);
301 MiUnmapPageInHyperSpace(Process
, TempSource
, Irql2
);
302 MiUnmapPageInHyperSpace(Process
, TempAddress
, Irql
);
303 return STATUS_SUCCESS
;
308 This function is deceptively named, in that it does the actual work of handling
309 access faults on data sections. In the case of the code that's present here,
310 we don't allow cow sections, but we do need this to unset the initial
311 PAGE_READONLY condition of pages faulted into the cache so that we can add
312 a dirty bit in the section page table on the first modification.
314 In the ultimate form of this code, CoW is reenabled.
320 MiCowCacheSectionPage(PMMSUPPORT AddressSpace
,
321 PMEMORY_AREA MemoryArea
,
324 PMM_REQUIRED_RESOURCES Required
)
326 PMM_SECTION_SEGMENT Segment
;
327 PFN_NUMBER NewPage
, OldPage
;
330 LARGE_INTEGER Offset
;
331 PEPROCESS Process
= MmGetAddressSpaceOwner(AddressSpace
);
333 DPRINT("MmAccessFaultSectionView(%x, %x, %x, %x)\n",
339 Segment
= MemoryArea
->Data
.SectionData
.Segment
;
341 /* Lock the segment */
342 MmLockSectionSegment(Segment
);
344 /* Find the offset of the page */
345 PAddress
= MM_ROUND_DOWN(Address
, PAGE_SIZE
);
346 Offset
.QuadPart
= (ULONG_PTR
)PAddress
- (ULONG_PTR
)MemoryArea
->StartingAddress
+
347 MemoryArea
->Data
.SectionData
.ViewOffset
.QuadPart
;
349 if (!Segment
->WriteCopy
/*&&
350 !MemoryArea->Data.SectionData.WriteCopyView*/ ||
351 Segment
->Image
.Characteristics
& IMAGE_SCN_MEM_SHARED
)
354 if (Region
->Protect
== PAGE_READWRITE
||
355 Region
->Protect
== PAGE_EXECUTE_READWRITE
)
359 DPRINTC("setting non-cow page %x %x:%x offset %x (%x) to writable\n",
364 MmGetPfnForProcess(Process
, Address
));
365 if (Segment
->FileObject
)
367 DPRINTC("file %wZ\n", &Segment
->FileObject
->FileName
);
369 Entry
= MmGetPageEntrySectionSegment(Segment
, &Offset
);
370 DPRINT("Entry %x\n", Entry
);
372 !IS_SWAP_FROM_SSE(Entry
) &&
373 PFN_FROM_SSE(Entry
) == MmGetPfnForProcess(Process
, Address
)) {
375 MmSetPageEntrySectionSegment(Segment
,
379 MmSetPageProtect(Process
, PAddress
, PAGE_READWRITE
);
380 MmSetDirtyPage(Process
, PAddress
);
381 MmUnlockSectionSegment(Segment
);
383 return STATUS_SUCCESS
;
388 DPRINT("Not supposed to be writable\n");
389 MmUnlockSectionSegment(Segment
);
390 return STATUS_ACCESS_VIOLATION
;
395 if (!Required
->Page
[0])
398 if (MmIsPageSwapEntry(Process
, Address
))
400 MmGetPageFileMapping(Process
, Address
, &SwapEntry
);
401 MmUnlockSectionSegment(Segment
);
402 if (SwapEntry
== MM_WAIT_ENTRY
)
403 return STATUS_SUCCESS
+ 1; // Wait ... somebody else is getting it right now
405 return STATUS_SUCCESS
; // Nonwait swap entry ... handle elsewhere
407 /* Call out to acquire a page to copy to. We'll be re-called when
408 * the page has been allocated. */
409 Required
->Page
[1] = MmGetPfnForProcess(Process
, Address
);
410 Required
->Consumer
= MC_CACHE
;
411 Required
->Amount
= 1;
412 Required
->File
= __FILE__
;
413 Required
->Line
= __LINE__
;
414 Required
->DoAcquisition
= MiGetOnePage
;
415 MmCreatePageFileMapping(Process
, Address
, MM_WAIT_ENTRY
);
416 MmUnlockSectionSegment(Segment
);
417 return STATUS_MORE_PROCESSING_REQUIRED
;
420 NewPage
= Required
->Page
[0];
421 OldPage
= Required
->Page
[1];
423 DPRINT("Allocated page %x\n", NewPage
);
425 /* Unshare the old page */
426 MmDeleteRmap(OldPage
, Process
, PAddress
);
428 /* Copy the old page */
430 MiCopyPageToPage(NewPage
, OldPage
);
432 /* Set the PTE to point to the new page */
433 Status
= MmCreateVirtualMapping(Process
,
439 if (!NT_SUCCESS(Status
))
441 DPRINT1("MmCreateVirtualMapping failed, not out of memory\n");
443 MmUnlockSectionSegment(Segment
);
447 MmInsertRmap(NewPage
, Process
, PAddress
);
448 MmReleasePageMemoryConsumer(MC_CACHE
, OldPage
);
449 MmUnlockSectionSegment(Segment
);
451 DPRINT("Address 0x%.8X\n", Address
);
452 return STATUS_SUCCESS
;
455 KEVENT MmWaitPageEvent
;
457 typedef struct _WORK_QUEUE_WITH_CONTEXT
459 WORK_QUEUE_ITEM WorkItem
;
460 PMMSUPPORT AddressSpace
;
461 PMEMORY_AREA MemoryArea
;
462 PMM_REQUIRED_RESOURCES Required
;
465 AcquireResource DoAcquisition
;
466 } WORK_QUEUE_WITH_CONTEXT
, *PWORK_QUEUE_WITH_CONTEXT
;
470 This is the work item used do blocking resource acquisition when a fault
471 handler returns STATUS_MORE_PROCESSING_REQUIRED. It's used to allow resource
472 acquisition to take place on a different stack, and outside of any locks used
473 by fault handling, making recursive fault handling possible when required.
479 MmpFaultWorker(PWORK_QUEUE_WITH_CONTEXT WorkItem
)
481 DPRINT("Calling work\n");
482 WorkItem
->Status
= WorkItem
->Required
->DoAcquisition(WorkItem
->AddressSpace
,
483 WorkItem
->MemoryArea
,
485 DPRINT("Status %x\n", WorkItem
->Status
);
486 KeSetEvent(&WorkItem
->Wait
, IO_NO_INCREMENT
, FALSE
);
491 This code separates the action of fault handling into an upper and lower
492 handler to allow the inner handler to optionally be called in work item
493 if the stack is getting too deep. My experiments show that the third
494 recursive page fault taken at PASSIVE_LEVEL must be shunted away to a
495 worker thread. In the ultimate form of this code, the primary fault handler
496 makes this decision by using a thread-local counter to detect a too-deep
497 fault stack and call the inner fault handler in a worker thread if required.
499 Note that faults are taken at passive level and have access to ordinary
500 driver entry points such as those that read and write files, and filesystems
501 should use paged structures whenever possible. This makes recursive faults
502 both a perfectly normal occurrence, and a worthwhile case to handle.
504 The code below will repeatedly call MiCowSectionPage as long as it returns
505 either STATUS_SUCCESS + 1 or STATUS_MORE_PROCESSING_REQUIRED. In the more
506 processing required case, we call out to a blocking resource acquisition
507 function and then recall the fault handler with the shared state represented
508 by the MM_REQUIRED_RESOURCES struct.
510 In the other case, we wait on the wait entry event and recall the handler.
511 Each time the wait entry event is signalled, one thread has removed an
512 MM_WAIT_ENTRY from a page table.
514 In the ultimate form of this code, there is a single system wide fault handler
515 for each of access fault and not present and each memory area contains a
516 function pointer that indicates the active fault handler. Since the mm code
517 in reactos is currently fragmented, I didn't bring this change to trunk.
523 MmpSectionAccessFaultInner(KPROCESSOR_MODE Mode
,
524 PMMSUPPORT AddressSpace
,
529 MEMORY_AREA
* MemoryArea
;
531 BOOLEAN Locked
= FromMdl
;
532 MM_REQUIRED_RESOURCES Resources
= { 0 };
533 WORK_QUEUE_WITH_CONTEXT Context
;
535 RtlZeroMemory(&Context
, sizeof(WORK_QUEUE_WITH_CONTEXT
));
537 DPRINT("MmAccessFault(Mode %d, Address %x)\n", Mode
, Address
);
539 if (KeGetCurrentIrql() >= DISPATCH_LEVEL
)
541 DPRINT1("Page fault at high IRQL was %d\n", KeGetCurrentIrql());
542 return STATUS_UNSUCCESSFUL
;
545 /* Find the memory area for the faulting address */
546 if (Address
>= (ULONG_PTR
)MmSystemRangeStart
)
548 /* Check permissions */
549 if (Mode
!= KernelMode
)
551 DPRINT("MmAccessFault(Mode %d, Address %x)\n", Mode
, Address
);
552 return STATUS_ACCESS_VIOLATION
;
554 AddressSpace
= MmGetKernelAddressSpace();
558 AddressSpace
= &PsGetCurrentProcess()->Vm
;
563 MmLockAddressSpace(AddressSpace
);
568 MemoryArea
= MmLocateMemoryAreaByAddress(AddressSpace
, (PVOID
)Address
);
569 if (MemoryArea
== NULL
||
570 MemoryArea
->DeleteInProgress
)
574 MmUnlockAddressSpace(AddressSpace
);
576 DPRINT("Address: %x\n", Address
);
577 return STATUS_ACCESS_VIOLATION
;
580 DPRINT("Type %x (%x -> %x)\n",
582 MemoryArea
->StartingAddress
,
583 MemoryArea
->EndingAddress
);
585 Resources
.DoAcquisition
= NULL
;
587 // Note: fault handlers are called with address space locked
588 // We return STATUS_MORE_PROCESSING_REQUIRED if anything is needed
589 Status
= MiCowCacheSectionPage(AddressSpace
,
597 MmUnlockAddressSpace(AddressSpace
);
600 if (Status
== STATUS_SUCCESS
+ 1)
603 DPRINT("Waiting for %x\n", Address
);
604 MiWaitForPageEvent(MmGetAddressSpaceOwner(AddressSpace
), Address
);
605 DPRINT("Restarting fault %x\n", Address
);
606 Status
= STATUS_MM_RESTART_OPERATION
;
608 else if (Status
== STATUS_MM_RESTART_OPERATION
)
611 RtlZeroMemory(&Resources
, sizeof(Resources
));
613 else if (Status
== STATUS_MORE_PROCESSING_REQUIRED
)
615 if (Thread
->ActiveFaultCount
> 0)
617 DPRINT("Already fault handling ... going to work item (%x)\n",
619 Context
.AddressSpace
= AddressSpace
;
620 Context
.MemoryArea
= MemoryArea
;
621 Context
.Required
= &Resources
;
622 KeInitializeEvent(&Context
.Wait
, NotificationEvent
, FALSE
);
624 ExInitializeWorkItem(&Context
.WorkItem
,
625 (PWORKER_THREAD_ROUTINE
)MmpFaultWorker
,
628 DPRINT("Queue work item\n");
629 ExQueueWorkItem(&Context
.WorkItem
, DelayedWorkQueue
);
631 KeWaitForSingleObject(&Context
.Wait
, 0, KernelMode
, FALSE
, NULL
);
632 Status
= Context
.Status
;
633 DPRINT("Status %x\n", Status
);
637 Status
= Resources
.DoAcquisition(AddressSpace
, MemoryArea
, &Resources
);
640 if (NT_SUCCESS(Status
))
642 Status
= STATUS_MM_RESTART_OPERATION
;
648 MmLockAddressSpace(AddressSpace
);
651 while (Status
== STATUS_MM_RESTART_OPERATION
);
653 if (!NT_SUCCESS(Status
) && MemoryArea
->Type
== 1)
655 DPRINT1("Completed page fault handling %x %x\n", Address
, Status
);
656 DPRINT1("Type %x (%x -> %x)\n",
658 MemoryArea
->StartingAddress
,
659 MemoryArea
->EndingAddress
);
664 MmUnlockAddressSpace(AddressSpace
);
672 This is the outer fault handler mentioned in the description of
673 MmpSectionAccessFaultInner. It increments a fault depth count in the current
676 In the ultimate form of this code, the lower fault handler will optionally
677 use the count to keep the kernel stack from overflowing.
683 MmAccessFaultCacheSection(KPROCESSOR_MODE Mode
,
688 PMMSUPPORT AddressSpace
;
691 DPRINT("MmpAccessFault(Mode %d, Address %x)\n", Mode
, Address
);
693 Thread
= PsGetCurrentThread();
695 if (KeGetCurrentIrql() >= DISPATCH_LEVEL
)
697 DPRINT1("Page fault at high IRQL %d, address %x\n",
700 return STATUS_UNSUCCESSFUL
;
703 /* Find the memory area for the faulting address */
704 if (Address
>= (ULONG_PTR
)MmSystemRangeStart
)
706 /* Check permissions */
707 if (Mode
!= KernelMode
)
709 DPRINT1("Address: %x:%x\n", PsGetCurrentProcess(), Address
);
710 return STATUS_ACCESS_VIOLATION
;
712 AddressSpace
= MmGetKernelAddressSpace();
716 AddressSpace
= &PsGetCurrentProcess()->Vm
;
719 Thread
->ActiveFaultCount
++;
720 Status
= MmpSectionAccessFaultInner(Mode
,
725 Thread
->ActiveFaultCount
--;
732 As above, this code separates the active part of fault handling from a carrier
733 that can use the thread's active fault count to determine whether a work item
734 is required. Also as above, this function repeatedly calls the active not
735 present fault handler until a clear success or failure is received, using a
736 return of STATUS_MORE_PROCESSING_REQUIRED or STATUS_SUCCESS + 1.
742 MmNotPresentFaultCacheSectionInner(KPROCESSOR_MODE Mode
,
743 PMMSUPPORT AddressSpace
,
748 BOOLEAN Locked
= FromMdl
;
749 PMEMORY_AREA MemoryArea
;
750 MM_REQUIRED_RESOURCES Resources
= { 0 };
751 WORK_QUEUE_WITH_CONTEXT Context
;
752 NTSTATUS Status
= STATUS_SUCCESS
;
754 RtlZeroMemory(&Context
, sizeof(WORK_QUEUE_WITH_CONTEXT
));
758 MmLockAddressSpace(AddressSpace
);
761 /* Call the memory area specific fault handler */
764 MemoryArea
= MmLocateMemoryAreaByAddress(AddressSpace
, (PVOID
)Address
);
765 if (MemoryArea
== NULL
|| MemoryArea
->DeleteInProgress
)
767 Status
= STATUS_ACCESS_VIOLATION
;
770 DPRINT1("Type %x DIP %x\n",
772 MemoryArea
->DeleteInProgress
);
776 DPRINT1("No memory area\n");
778 DPRINT1("Process %x, Address %x\n",
779 MmGetAddressSpaceOwner(AddressSpace
),
784 DPRINTC("Type %x (%x -> %x -> %x) in %x\n",
786 MemoryArea
->StartingAddress
,
788 MemoryArea
->EndingAddress
,
789 PsGetCurrentThread());
791 Resources
.DoAcquisition
= NULL
;
793 // Note: fault handlers are called with address space locked
794 // We return STATUS_MORE_PROCESSING_REQUIRED if anything is needed
796 Status
= MmNotPresentFaultCachePage(AddressSpace
,
804 MmUnlockAddressSpace(AddressSpace
);
807 if (Status
== STATUS_SUCCESS
)
811 else if (Status
== STATUS_SUCCESS
+ 1)
814 DPRINT("Waiting for %x\n", Address
);
815 MiWaitForPageEvent(MmGetAddressSpaceOwner(AddressSpace
), Address
);
816 DPRINT("Done waiting for %x\n", Address
);
817 Status
= STATUS_MM_RESTART_OPERATION
;
819 else if (Status
== STATUS_MM_RESTART_OPERATION
)
822 DPRINT("Clear resource\n");
823 RtlZeroMemory(&Resources
, sizeof(Resources
));
825 else if (Status
== STATUS_MORE_PROCESSING_REQUIRED
)
827 if (Thread
->ActiveFaultCount
> 2)
829 DPRINTC("Already fault handling ... going to work item (%x)\n", Address
);
830 Context
.AddressSpace
= AddressSpace
;
831 Context
.MemoryArea
= MemoryArea
;
832 Context
.Required
= &Resources
;
833 KeInitializeEvent(&Context
.Wait
, NotificationEvent
, FALSE
);
835 ExInitializeWorkItem(&Context
.WorkItem
,
836 (PWORKER_THREAD_ROUTINE
)MmpFaultWorker
,
839 DPRINT("Queue work item\n");
840 ExQueueWorkItem(&Context
.WorkItem
, DelayedWorkQueue
);
842 KeWaitForSingleObject(&Context
.Wait
, 0, KernelMode
, FALSE
, NULL
);
843 Status
= Context
.Status
;
844 DPRINTC("Status %x\n", Status
);
848 DPRINT("DoAcquisition %x\n", Resources
.DoAcquisition
);
850 Status
= Resources
.DoAcquisition(AddressSpace
,
854 DPRINT("DoAcquisition %x -> %x\n",
855 Resources
.DoAcquisition
,
859 if (NT_SUCCESS(Status
))
861 Status
= STATUS_MM_RESTART_OPERATION
;
864 else if (NT_SUCCESS(Status
))
871 MmLockAddressSpace(AddressSpace
);
874 while (Status
== STATUS_MM_RESTART_OPERATION
);
876 DPRINTC("Completed page fault handling: %x:%x %x\n",
877 MmGetAddressSpaceOwner(AddressSpace
),
883 MmUnlockAddressSpace(AddressSpace
);
886 MiSetPageEvent(MmGetAddressSpaceOwner(AddressSpace
), Address
);
887 DPRINT("Done %x\n", Status
);
894 Call the inner not present fault handler, keeping track of the fault count.
895 In the ultimate form of this code, optionally use a worker thread to handle
896 the fault in order to sidestep stack overflow in the multiple fault case.
902 MmNotPresentFaultCacheSection(KPROCESSOR_MODE Mode
,
907 PMMSUPPORT AddressSpace
;
910 Address
&= ~(PAGE_SIZE
- 1);
911 DPRINT("MmNotPresentFault(Mode %d, Address %x)\n", Mode
, Address
);
913 Thread
= PsGetCurrentThread();
915 if (KeGetCurrentIrql() >= DISPATCH_LEVEL
)
917 DPRINT1("Page fault at high IRQL %d, address %x\n",
922 return STATUS_UNSUCCESSFUL
;
925 /* Find the memory area for the faulting address */
926 if (Address
>= (ULONG_PTR
)MmSystemRangeStart
)
928 /* Check permissions */
929 if (Mode
!= KernelMode
)
931 DPRINTC("Address: %x\n", Address
);
932 return STATUS_ACCESS_VIOLATION
;
934 AddressSpace
= MmGetKernelAddressSpace();
938 AddressSpace
= &PsGetCurrentProcess()->Vm
;
941 Thread
->ActiveFaultCount
++;
942 Status
= MmNotPresentFaultCacheSectionInner(Mode
,
947 Thread
->ActiveFaultCount
--;
949 ASSERT(Status
!= STATUS_UNSUCCESSFUL
);
950 ASSERT(Status
!= STATUS_INVALID_PARAMETER
);
951 DPRINT("MmAccessFault %x:%x -> %x\n",
952 MmGetAddressSpaceOwner(AddressSpace
),