[NTOSKRNL]
[reactos.git] / reactos / ntoskrnl / cache / section / fault.c
1 /*
2 * Copyright (C) 1998-2005 ReactOS Team (and the authors from the programmers section)
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 *
18 *
19 * PROJECT: ReactOS kernel
20 * FILE: ntoskrnl/cache/section/fault.c
21 * PURPOSE: Consolidate fault handlers for sections
22 *
23 * PROGRAMMERS: Arty
24 * Rex Jolliff
25 * David Welch
26 * Eric Kohl
27 * Emanuele Aliberti
28 * Eugene Ingerman
29 * Casper Hornstrup
30 * KJK::Hyperion
31 * Guido de Jong
32 * Ge van Geldorp
33 * Royce Mitchell III
34 * Filip Navara
35 * Aleksey Bragin
36 * Jason Filby
37 * Thomas Weidenmueller
38 * Gunnar Andre' Dalsnes
39 * Mike Nordell
40 * Alex Ionescu
41 * Gregor Anich
42 * Steven Edwards
43 * Herve Poussineau
44 */
45
46 /*
47
48 I've generally organized fault handling code in newmm as handlers that run
49 under a single lock acquisition, check the state, and either take necessary
50 action atomically, or place a wait entry and return a continuation to the
51 caller. This lends itself to code that has a simple, structured form,
52 doesn't make assumptions about lock taking and breaking, and provides an
53 * obvious, graphic separation between code that may block and code that isn't
54 allowed to. This file contains the non-blocking half.
55
56 In order to request a blocking operation to happen outside locks, place a
57 function pointer in the provided MM_REQUIRED_RESOURCES struct and return
58 STATUS_MORE_PROCESSING_REQUIRED. The function indicated will receive the
59 provided struct and take action outside of any mm related locks and at
60 PASSIVE_LEVEL. The same fault handler will be called again after the
61 blocking operation succeeds. In this way, the fault handler can accumulate
62 state, but will freely work while competing with other threads.
63
64 Fault handlers in this file should check for an MM_WAIT_ENTRY in a page
65 table they're using and return STATUS_SUCCESS + 1 if it's found. In that
66 case, the caller will wait on the wait entry event until the competing thread
67 is finished, and recall this handler in the current thread.
68
69 Another thing to note here is that we require mappings to exactly mirror
70 rmaps, so each mapping should be immediately followed by an rmap addition.
71
72 */
73
74 /* INCLUDES *****************************************************************/
75
76 #include <ntoskrnl.h>
77 #include "newmm.h"
78 #define NDEBUG
79 #include <debug.h>
80 #include "../mm/ARM3/miarm.h"
81
82 #define DPRINTC DPRINT
83
84 extern KEVENT MmWaitPageEvent;
85 extern PMMWSL MmWorkingSetList;
86
87 /*
88
89 Multiple stage handling of a not-present fault in a data section.
90
91 Required->State is used to accumulate flags that indicate the next action
92 the handler should take.
93
94 State & 2 is currently used to indicate that the page acquired by a previous
95 callout is a global page to the section and should be placed in the section
96 page table.
97
98 Note that the primitive tail recursion done here reaches the base case when
99 the page is present.
100
101 */
102
NTSTATUS
NTAPI
MmNotPresentFaultCachePage (
    _In_ PMMSUPPORT AddressSpace,
    _In_ MEMORY_AREA* MemoryArea,
    _In_ PVOID Address,
    _In_ BOOLEAN Locked,
    _Inout_ PMM_REQUIRED_RESOURCES Required)
{
    NTSTATUS Status;
    PVOID PAddress;
    ULONG Consumer;
    PMM_SECTION_SEGMENT Segment;
    LARGE_INTEGER FileOffset, TotalOffset;
    ULONG_PTR Entry;
    ULONG Attributes;
    PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
    KIRQL OldIrql;

    DPRINT("Not Present: %p %p (%p-%p)\n",
           AddressSpace,
           Address,
           MemoryArea->StartingAddress,
           MemoryArea->EndingAddress);

    /*
     * There is a window between taking the page fault and locking the
     * address space when another thread could load the page so we check
     * that.
     */
    if (MmIsPagePresent(Process, Address))
    {
        DPRINT("Done\n");
        return STATUS_SUCCESS;
    }

    /* Compute the page's offset within the segment: distance from the start
       of the view plus the view's offset into the section. */
    PAddress = MM_ROUND_DOWN(Address, PAGE_SIZE);
    TotalOffset.QuadPart = (ULONG_PTR)PAddress -
                           (ULONG_PTR)MemoryArea->StartingAddress;

    Segment = MemoryArea->Data.SectionData.Segment;

    TotalOffset.QuadPart += MemoryArea->Data.SectionData.ViewOffset.QuadPart;
    FileOffset = TotalOffset;

    //Consumer = (Segment->Flags & MM_DATAFILE_SEGMENT) ? MC_CACHE : MC_USER;
    Consumer = MC_CACHE;

    if (Segment->FileObject)
    {
        DPRINT("FileName %wZ\n", &Segment->FileObject->FileName);
    }

    DPRINT("Total Offset %08x%08x\n", TotalOffset.HighPart, TotalOffset.LowPart);

    /* Lock the segment */
    MmLockSectionSegment(Segment);

    /* Get the entry corresponding to the offset within the section */
    Entry = MmGetPageEntrySectionSegment(Segment, &TotalOffset);

    /* Pages are initially mapped read-only; the access fault handler
       (MiCowCacheSectionPage) upgrades them to writable on first write. */
    Attributes = PAGE_READONLY;

    if (Required->State && Required->Page[0])
    {
        /* A previous DoAcquisition callout delivered a page for us. */
        DPRINT("Have file and page, set page %x in section @ %x #\n",
               Required->Page[0],
               TotalOffset.LowPart);

        if (Required->SwapEntry)
            MmSetSavedSwapEntryPage(Required->Page[0], Required->SwapEntry);

        if (Required->State & 2)
        {
            /* State & 2: the page is global to the section, so it replaces
               the MM_WAIT_ENTRY placeholder in the section page table. */
            DPRINT("Set in section @ %x\n", TotalOffset.LowPart);
            Status = MmSetPageEntrySectionSegment(Segment,
                                                  &TotalOffset,
                                                  Entry = MAKE_PFN_SSE(Required->Page[0]));
            if (!NT_SUCCESS(Status))
            {
                MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
            }
            MmUnlockSectionSegment(Segment);
            /* Wake any thread that blocked on the wait entry we placed. */
            MiSetPageEvent(Process, Address);
            DPRINT("Status %x\n", Status);
            /* Restart so the next pass maps the page from the section entry
               (the base case of the tail recursion described above). */
            return STATUS_MM_RESTART_OPERATION;
        }
        else
        {
            DPRINT("Set %x in address space @ %x\n", Required->Page[0], Address);
            Status = MmCreateVirtualMapping(Process,
                                            Address,
                                            Attributes,
                                            Required->Page,
                                            1);
            if (NT_SUCCESS(Status))
            {
                /* Mappings must exactly mirror rmaps (see file header). */
                MmInsertRmap(Required->Page[0], Process, Address);
            }
            else
            {
                /* Drop the reference for our address space ... */
                MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
            }
            MmUnlockSectionSegment(Segment);
            DPRINTC("XXX Set Event %x\n", Status);
            MiSetPageEvent(Process, Address);
            DPRINT("Status %x\n", Status);
            return Status;
        }
    }
    else if (MM_IS_WAIT_PTE(Entry))
    {
        // Whenever MM_WAIT_ENTRY is required as a swap entry, we need to
        // ask the fault handler to wait until we should continue. Rather
        // than recopy this boilerplate code everywhere, we just ask them
        // to wait.
        MmUnlockSectionSegment(Segment);
        return STATUS_SUCCESS + 1;
    }
    else if (Entry)
    {
        /* The section already holds this page: share it with this mapping. */
        PFN_NUMBER Page = PFN_FROM_SSE(Entry);
        DPRINT("Take reference to page %x #\n", Page);

        if (MiGetPfnEntry(Page) == NULL)
        {
            DPRINT1("Found no PFN entry for page 0x%x in page entry 0x%x (segment: 0x%p, offset: %08x%08x)\n",
                    Page,
                    Entry,
                    Segment,
                    TotalOffset.HighPart,
                    TotalOffset.LowPart);
            KeBugCheck(CACHE_MANAGER);
        }

        /* Take an extra reference for this mapping under the PFN lock. */
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        MmReferencePage(Page);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        Status = MmCreateVirtualMapping(Process, Address, Attributes, &Page, 1);
        if (NT_SUCCESS(Status))
        {
            MmInsertRmap(Page, Process, Address);
        }
        DPRINT("XXX Set Event %x\n", Status);
        MiSetPageEvent(Process, Address);
        MmUnlockSectionSegment(Segment);
        DPRINT("Status %x\n", Status);
        return Status;
    }
    else
    {
        DPRINT("Get page into section\n");
        /*
         * If the entry is zero (and it can't change because we have
         * locked the segment) then we need to load the page.
         */
        //DPRINT1("Read from file %08x %wZ\n", FileOffset.LowPart, &Section->FileObject->FileName);
        /* Request a blocking file read via the caller; State = 2 makes the
           re-entry place the page in the section page table (see above). */
        Required->State = 2;
        Required->Context = Segment->FileObject;
        Required->Consumer = Consumer;
        Required->FileOffset = FileOffset;
        Required->Amount = PAGE_SIZE;
        Required->DoAcquisition = MiReadFilePage;

        /* Park a wait entry so competing faulters block until the read
           completes (handled by the MM_IS_WAIT_PTE branch above). */
        MmSetPageEntrySectionSegment(Segment,
                                     &TotalOffset,
                                     MAKE_SWAP_SSE(MM_WAIT_ENTRY));

        MmUnlockSectionSegment(Segment);
        return STATUS_MORE_PROCESSING_REQUIRED;
    }
    /* Every branch above returns; this is unreachable. */
    ASSERT(FALSE);
    return STATUS_ACCESS_VIOLATION;
}
279
280 NTSTATUS
281 NTAPI
282 MiCopyPageToPage(PFN_NUMBER DestPage, PFN_NUMBER SrcPage)
283 {
284 PEPROCESS Process;
285 KIRQL Irql, Irql2;
286 PVOID TempAddress, TempSource;
287
288 Process = PsGetCurrentProcess();
289 TempAddress = MiMapPageInHyperSpace(Process, DestPage, &Irql);
290 if (TempAddress == NULL)
291 {
292 return STATUS_NO_MEMORY;
293 }
294 TempSource = MiMapPageInHyperSpace(Process, SrcPage, &Irql2);
295 if (!TempSource) {
296 MiUnmapPageInHyperSpace(Process, TempAddress, Irql);
297 return STATUS_NO_MEMORY;
298 }
299
300 memcpy(TempAddress, TempSource, PAGE_SIZE);
301
302 MiUnmapPageInHyperSpace(Process, TempSource, Irql2);
303 MiUnmapPageInHyperSpace(Process, TempAddress, Irql);
304 return STATUS_SUCCESS;
305 }
306
307 /*
308
309 This function is deceptively named, in that it does the actual work of handling
310 access faults on data sections. In the case of the code that's present here,
311 we don't allow cow sections, but we do need this to unset the initial
312 PAGE_READONLY condition of pages faulted into the cache so that we can add
313 a dirty bit in the section page table on the first modification.
314
315 In the ultimate form of this code, CoW is reenabled.
316
317 */
318
/*
 * Handle an access (write) fault on a cache-section page.  See the comment
 * above: for non-CoW segments this only upgrades the page to writable and
 * records the dirty state; the CoW path allocates and fills a private copy.
 */
NTSTATUS
NTAPI
MiCowCacheSectionPage (
    _In_ PMMSUPPORT AddressSpace,
    _In_ PMEMORY_AREA MemoryArea,
    _In_ PVOID Address,
    _In_ BOOLEAN Locked,
    _Inout_ PMM_REQUIRED_RESOURCES Required)
{
    PMM_SECTION_SEGMENT Segment;
    PFN_NUMBER NewPage, OldPage;
    NTSTATUS Status;
    PVOID PAddress;
    LARGE_INTEGER Offset;
    PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);

    DPRINT("MmAccessFaultSectionView(%x, %x, %x, %x)\n",
           AddressSpace,
           MemoryArea,
           Address,
           Locked);

    Segment = MemoryArea->Data.SectionData.Segment;

    /* Lock the segment */
    MmLockSectionSegment(Segment);

    /* Find the offset of the page */
    PAddress = MM_ROUND_DOWN(Address, PAGE_SIZE);
    Offset.QuadPart = (ULONG_PTR)PAddress - (ULONG_PTR)MemoryArea->StartingAddress +
                      MemoryArea->Data.SectionData.ViewOffset.QuadPart;

    /* Non-CoW segment (or a shared image section): make the existing page
       writable in place instead of copying it. */
    if (!Segment->WriteCopy /*&&
        !MemoryArea->Data.SectionData.WriteCopyView*/ ||
        Segment->Image.Characteristics & IMAGE_SCN_MEM_SHARED)
    {
#if 0
        if (Region->Protect == PAGE_READWRITE ||
            Region->Protect == PAGE_EXECUTE_READWRITE)
#endif
        {
            ULONG_PTR Entry;
            DPRINTC("setting non-cow page %x %x:%x offset %x (%x) to writable\n",
                    Segment,
                    Process,
                    PAddress,
                    Offset.u.LowPart,
                    MmGetPfnForProcess(Process, Address));
            if (Segment->FileObject)
            {
                DPRINTC("file %wZ\n", &Segment->FileObject->FileName);
            }
            Entry = MmGetPageEntrySectionSegment(Segment, &Offset);
            DPRINT("Entry %x\n", Entry);
            /* Only mark the section entry dirty when it maps the same
               physical page this process has mapped at the address. */
            if (Entry &&
                !IS_SWAP_FROM_SSE(Entry) &&
                PFN_FROM_SSE(Entry) == MmGetPfnForProcess(Process, Address)) {

                MmSetPageEntrySectionSegment(Segment,
                                             &Offset,
                                             DIRTY_SSE(Entry));
            }
            MmSetPageProtect(Process, PAddress, PAGE_READWRITE);
            MmSetDirtyPage(Process, PAddress);
            MmUnlockSectionSegment(Segment);
            DPRINT("Done\n");
            return STATUS_SUCCESS;
        }
#if 0
        else
        {
            DPRINT("Not supposed to be writable\n");
            MmUnlockSectionSegment(Segment);
            return STATUS_ACCESS_VIOLATION;
        }
#endif
    }

    /* CoW path, first pass: we do not yet have a page to copy into. */
    if (!Required->Page[0])
    {
        SWAPENTRY SwapEntry;
        if (MmIsPageSwapEntry(Process, Address))
        {
            MmGetPageFileMapping(Process, Address, &SwapEntry);
            MmUnlockSectionSegment(Segment);
            if (SwapEntry == MM_WAIT_ENTRY)
                return STATUS_SUCCESS + 1; // Wait ... somebody else is getting it right now
            else
                return STATUS_SUCCESS; // Nonwait swap entry ... handle elsewhere
        }
        /* Call out to acquire a page to copy to. We'll be re-called when
         * the page has been allocated. */
        Required->Page[1] = MmGetPfnForProcess(Process, Address);
        Required->Consumer = MC_CACHE;
        Required->Amount = 1;
        Required->File = __FILE__;
        Required->Line = __LINE__;
        Required->DoAcquisition = MiGetOnePage;
        /* Park a wait entry so concurrent faulters block until we finish. */
        MmCreatePageFileMapping(Process, Address, MM_WAIT_ENTRY);
        MmUnlockSectionSegment(Segment);
        return STATUS_MORE_PROCESSING_REQUIRED;
    }

    /* Second pass: Page[0] is the freshly allocated copy target,
       Page[1] is the original shared page captured above. */
    NewPage = Required->Page[0];
    OldPage = Required->Page[1];

    DPRINT("Allocated page %x\n", NewPage);

    /* Unshare the old page */
    MmDeleteRmap(OldPage, Process, PAddress);

    /* Copy the old page */
    DPRINT("Copying\n");
    MiCopyPageToPage(NewPage, OldPage);

    /* Set the PTE to point to the new page */
    Status = MmCreateVirtualMapping(Process,
                                    Address,
                                    PAGE_READWRITE,
                                    &NewPage,
                                    1);

    if (!NT_SUCCESS(Status))
    {
        DPRINT1("MmCreateVirtualMapping failed, not out of memory\n");
        ASSERT(FALSE);
        MmUnlockSectionSegment(Segment);
        return Status;
    }

    /* Mirror the new mapping with an rmap (see file header), then drop our
       reference on the old page now that this process no longer maps it. */
    MmInsertRmap(NewPage, Process, PAddress);
    MmReleasePageMemoryConsumer(MC_CACHE, OldPage);
    MmUnlockSectionSegment(Segment);

    DPRINT("Address 0x%.8X\n", Address);
    return STATUS_SUCCESS;
}
456
/* Event used by the wait-entry protocol; set via MiSetPageEvent whenever a
   thread finishes an operation other faulters may be waiting on. */
KEVENT MmWaitPageEvent;

/* Context passed to MmpFaultWorker when a blocking resource acquisition
   must run in a system worker thread (deep recursive fault stacks). */
typedef struct _WORK_QUEUE_WITH_CONTEXT
{
    WORK_QUEUE_ITEM WorkItem;        /* Executive work item header */
    PMMSUPPORT AddressSpace;         /* Address space of the fault */
    PMEMORY_AREA MemoryArea;         /* Memory area containing the fault */
    PMM_REQUIRED_RESOURCES Required; /* Shared fault-handler state; its
                                        DoAcquisition is what the worker runs */
    NTSTATUS Status;                 /* Result of the acquisition (out) */
    KEVENT Wait;                     /* Signalled when the worker is done */
    AcquireResource DoAcquisition;   /* NOTE(review): not referenced by the
                                        code visible here (the worker uses
                                        Required->DoAcquisition) — confirm
                                        before removing */
} WORK_QUEUE_WITH_CONTEXT, *PWORK_QUEUE_WITH_CONTEXT;
469
470 /*
471
472 This is the work item used do blocking resource acquisition when a fault
473 handler returns STATUS_MORE_PROCESSING_REQUIRED. It's used to allow resource
474 acquisition to take place on a different stack, and outside of any locks used
475 by fault handling, making recursive fault handling possible when required.
476
477 */
478
479 _Function_class_(WORKER_THREAD_ROUTINE)
480 VOID
481 NTAPI
482 MmpFaultWorker(PVOID Parameter)
483 {
484 PWORK_QUEUE_WITH_CONTEXT WorkItem = Parameter;
485
486 DPRINT("Calling work\n");
487 WorkItem->Status = WorkItem->Required->DoAcquisition(WorkItem->AddressSpace,
488 WorkItem->MemoryArea,
489 WorkItem->Required);
490 DPRINT("Status %x\n", WorkItem->Status);
491 KeSetEvent(&WorkItem->Wait, IO_NO_INCREMENT, FALSE);
492 }
493
494 /*
495
496 This code separates the action of fault handling into an upper and lower
497 handler to allow the inner handler to optionally be called in work item
498 if the stack is getting too deep. My experiments show that the third
499 recursive page fault taken at PASSIVE_LEVEL must be shunted away to a
500 worker thread. In the ultimate form of this code, the primary fault handler
501 makes this decision by using a thread-local counter to detect a too-deep
502 fault stack and call the inner fault handler in a worker thread if required.
503
504 Note that faults are taken at passive level and have access to ordinary
505 driver entry points such as those that read and write files, and filesystems
506 should use paged structures whenever possible. This makes recursive faults
507 both a perfectly normal occurrence, and a worthwhile case to handle.
508
509 The code below will repeatedly call MiCowSectionPage as long as it returns
510 either STATUS_SUCCESS + 1 or STATUS_MORE_PROCESSING_REQUIRED. In the more
511 processing required case, we call out to a blocking resource acquisition
512 function and then recall the fault handler with the shared state represented
513 by the MM_REQUIRED_RESOURCES struct.
514
515 In the other case, we wait on the wait entry event and recall the handler.
516 Each time the wait entry event is signalled, one thread has removed an
517 MM_WAIT_ENTRY from a page table.
518
519 In the ultimate form of this code, there is a single system wide fault handler
520 for each of access fault and not present and each memory area contains a
521 function pointer that indicates the active fault handler. Since the mm code
522 in reactos is currently fragmented, I didn't bring this change to trunk.
523
524 */
525
/*
 * Inner access-fault handler: drives MiCowCacheSectionPage to completion,
 * waiting on wait entries, restarting, or running blocking acquisitions
 * (inline or in a worker thread) as the handler requests.
 */
NTSTATUS
NTAPI
MmpSectionAccessFaultInner(KPROCESSOR_MODE Mode,
                           PMMSUPPORT AddressSpace,
                           ULONG_PTR Address,
                           BOOLEAN FromMdl,
                           PETHREAD Thread)
{
    MEMORY_AREA* MemoryArea;
    NTSTATUS Status;
    BOOLEAN Locked = FromMdl;
    MM_REQUIRED_RESOURCES Resources = { 0 };
    WORK_QUEUE_WITH_CONTEXT Context;

    RtlZeroMemory(&Context, sizeof(WORK_QUEUE_WITH_CONTEXT));

    DPRINT("MmAccessFault(Mode %d, Address %x)\n", Mode, Address);

    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        DPRINT1("Page fault at high IRQL was %d\n", KeGetCurrentIrql());
        return STATUS_UNSUCCESSFUL;
    }

    /* Find the memory area for the faulting address */
    /* NOTE(review): the AddressSpace parameter is unconditionally
       overwritten below; the caller's value is never used — confirm
       whether the parameter can be dropped. */
    if (Address >= (ULONG_PTR)MmSystemRangeStart)
    {
        /* Check permissions */
        if (Mode != KernelMode)
        {
            DPRINT("MmAccessFault(Mode %d, Address %x)\n", Mode, Address);
            return STATUS_ACCESS_VIOLATION;
        }
        AddressSpace = MmGetKernelAddressSpace();
    }
    else
    {
        AddressSpace = &PsGetCurrentProcess()->Vm;
    }

    if (!FromMdl)
    {
        MmLockAddressSpace(AddressSpace);
    }

    /* Loop until the handler yields a definitive status. */
    do
    {
        /* Re-locate the area each pass: the address-space lock is dropped
           between iterations, so the area may have been deleted. */
        MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)Address);
        if (MemoryArea == NULL ||
            MemoryArea->DeleteInProgress)
        {
            if (!FromMdl)
            {
                MmUnlockAddressSpace(AddressSpace);
            }
            DPRINT("Address: %x\n", Address);
            return STATUS_ACCESS_VIOLATION;
        }

        DPRINT("Type %x (%x -> %x)\n",
               MemoryArea->Type,
               MemoryArea->StartingAddress,
               MemoryArea->EndingAddress);

        Resources.DoAcquisition = NULL;

        // Note: fault handlers are called with address space locked
        // We return STATUS_MORE_PROCESSING_REQUIRED if anything is needed
        Status = MiCowCacheSectionPage(AddressSpace,
                                       MemoryArea,
                                       (PVOID)Address,
                                       Locked,
                                       &Resources);

        if (!FromMdl)
        {
            MmUnlockAddressSpace(AddressSpace);
        }

        if (Status == STATUS_SUCCESS + 1)
        {
            /* Wait page ... */
            /* Another thread owns the page (wait entry seen): block until
               it signals, then retry the whole fault. */
            DPRINT("Waiting for %x\n", Address);
            MiWaitForPageEvent(MmGetAddressSpaceOwner(AddressSpace), Address);
            DPRINT("Restarting fault %x\n", Address);
            Status = STATUS_MM_RESTART_OPERATION;
        }
        else if (Status == STATUS_MM_RESTART_OPERATION)
        {
            /* Clean slate */
            RtlZeroMemory(&Resources, sizeof(Resources));
        }
        else if (Status == STATUS_MORE_PROCESSING_REQUIRED)
        {
            /* The handler needs a blocking acquisition.  If this thread is
               already inside a fault, run it in a worker thread to keep the
               kernel stack shallow; otherwise run it inline. */
            if (Thread->ActiveFaultCount > 0)
            {
                DPRINT("Already fault handling ... going to work item (%x)\n",
                       Address);
                Context.AddressSpace = AddressSpace;
                Context.MemoryArea = MemoryArea;
                Context.Required = &Resources;
                KeInitializeEvent(&Context.Wait, NotificationEvent, FALSE);

                ExInitializeWorkItem(&Context.WorkItem,
                                     MmpFaultWorker,
                                     &Context);

                DPRINT("Queue work item\n");
                ExQueueWorkItem(&Context.WorkItem, DelayedWorkQueue);
                DPRINT("Wait\n");
                KeWaitForSingleObject(&Context.Wait, 0, KernelMode, FALSE, NULL);
                Status = Context.Status;
                DPRINT("Status %x\n", Status);
            }
            else
            {
                Status = Resources.DoAcquisition(AddressSpace, MemoryArea, &Resources);
            }

            /* Acquisition succeeded: re-run the handler with the state now
               accumulated in Resources. */
            if (NT_SUCCESS(Status))
            {
                Status = STATUS_MM_RESTART_OPERATION;
            }
        }

        if (!FromMdl)
        {
            MmLockAddressSpace(AddressSpace);
        }
    }
    while (Status == STATUS_MM_RESTART_OPERATION);

    /* NOTE(review): 1 appears to be MEMORY_AREA_SECTION_VIEW — confirm
       against the memory-area type constants. */
    if (!NT_SUCCESS(Status) && MemoryArea->Type == 1)
    {
        DPRINT1("Completed page fault handling %x %x\n", Address, Status);
        DPRINT1("Type %x (%x -> %x)\n",
                MemoryArea->Type,
                MemoryArea->StartingAddress,
                MemoryArea->EndingAddress);
    }

    if (!FromMdl)
    {
        MmUnlockAddressSpace(AddressSpace);
    }

    return Status;
}
674
675 /*
676
677 This is the outer fault handler mentioned in the description of
678 MmpSectionAccessFaultInner. It increments a fault depth count in the current
679 thread.
680
681 In the ultimate form of this code, the lower fault handler will optionally
682 use the count to keep the kernel stack from overflowing.
683
684 */
685
686 NTSTATUS
687 NTAPI
688 MmAccessFaultCacheSection(KPROCESSOR_MODE Mode,
689 ULONG_PTR Address,
690 BOOLEAN FromMdl)
691 {
692 PETHREAD Thread;
693 PMMSUPPORT AddressSpace;
694 NTSTATUS Status;
695
696 DPRINT("MmpAccessFault(Mode %d, Address %x)\n", Mode, Address);
697
698 Thread = PsGetCurrentThread();
699
700 if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
701 {
702 DPRINT1("Page fault at high IRQL %d, address %x\n",
703 KeGetCurrentIrql(),
704 Address);
705 return STATUS_UNSUCCESSFUL;
706 }
707
708 /* Find the memory area for the faulting address */
709 if (Address >= (ULONG_PTR)MmSystemRangeStart)
710 {
711 /* Check permissions */
712 if (Mode != KernelMode)
713 {
714 DPRINT1("Address: %x:%x\n", PsGetCurrentProcess(), Address);
715 return STATUS_ACCESS_VIOLATION;
716 }
717 AddressSpace = MmGetKernelAddressSpace();
718 }
719 else
720 {
721 AddressSpace = &PsGetCurrentProcess()->Vm;
722 }
723
724 Thread->ActiveFaultCount++;
725 Status = MmpSectionAccessFaultInner(Mode,
726 AddressSpace,
727 Address,
728 FromMdl,
729 Thread);
730 Thread->ActiveFaultCount--;
731
732 return Status;
733 }
734
735 /*
736
737 As above, this code separates the active part of fault handling from a carrier
738 that can use the thread's active fault count to determine whether a work item
739 is required. Also as above, this function repeatedly calls the active not
740 present fault handler until a clear success or failure is received, using a
741 return of STATUS_MORE_PROCESSING_REQUIRED or STATUS_SUCCESS + 1.
742
743 */
744
/*
 * Inner not-present-fault handler: drives MmNotPresentFaultCachePage to
 * completion, waiting on wait entries, restarting, or running blocking
 * acquisitions (inline or in a worker thread) as the handler requests.
 */
NTSTATUS
NTAPI
MmNotPresentFaultCacheSectionInner(KPROCESSOR_MODE Mode,
                                   PMMSUPPORT AddressSpace,
                                   ULONG_PTR Address,
                                   BOOLEAN FromMdl,
                                   PETHREAD Thread)
{
    BOOLEAN Locked = FromMdl;
    PMEMORY_AREA MemoryArea;
    MM_REQUIRED_RESOURCES Resources = { 0 };
    WORK_QUEUE_WITH_CONTEXT Context;
    NTSTATUS Status = STATUS_SUCCESS;

    RtlZeroMemory(&Context, sizeof(WORK_QUEUE_WITH_CONTEXT));

    if (!FromMdl)
    {
        MmLockAddressSpace(AddressSpace);
    }

    /* Call the memory area specific fault handler */
    do
    {
        /* Re-locate the area each pass: the address-space lock is dropped
           between iterations, so the area may have been deleted. */
        MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)Address);
        if (MemoryArea == NULL || MemoryArea->DeleteInProgress)
        {
            Status = STATUS_ACCESS_VIOLATION;
            if (MemoryArea)
            {
                DPRINT1("Type %x DIP %x\n",
                        MemoryArea->Type,
                        MemoryArea->DeleteInProgress);
            }
            else
            {
                DPRINT1("No memory area\n");
            }
            DPRINT1("Process %x, Address %x\n",
                    MmGetAddressSpaceOwner(AddressSpace),
                    Address);
            break;
        }

        DPRINTC("Type %x (%x -> %x -> %x) in %x\n",
                MemoryArea->Type,
                MemoryArea->StartingAddress,
                Address,
                MemoryArea->EndingAddress,
                PsGetCurrentThread());

        Resources.DoAcquisition = NULL;

        // Note: fault handlers are called with address space locked
        // We return STATUS_MORE_PROCESSING_REQUIRED if anything is needed

        Status = MmNotPresentFaultCachePage(AddressSpace,
                                            MemoryArea,
                                            (PVOID)Address,
                                            Locked,
                                            &Resources);

        if (!FromMdl)
        {
            MmUnlockAddressSpace(AddressSpace);
        }

        if (Status == STATUS_SUCCESS)
        {
            ; // Nothing
        }
        else if (Status == STATUS_SUCCESS + 1)
        {
            /* Wait page ... */
            /* Another thread owns the page (wait entry seen): block until
               it signals, then retry the whole fault. */
            DPRINT("Waiting for %x\n", Address);
            MiWaitForPageEvent(MmGetAddressSpaceOwner(AddressSpace), Address);
            DPRINT("Done waiting for %x\n", Address);
            Status = STATUS_MM_RESTART_OPERATION;
        }
        else if (Status == STATUS_MM_RESTART_OPERATION)
        {
            /* Clean slate */
            DPRINT("Clear resource\n");
            RtlZeroMemory(&Resources, sizeof(Resources));
        }
        else if (Status == STATUS_MORE_PROCESSING_REQUIRED)
        {
            /* Blocking acquisition requested.  Beyond two nested faults the
               work is shunted to a worker thread to keep the kernel stack
               from overflowing (see the comment above this function). */
            if (Thread->ActiveFaultCount > 2)
            {
                DPRINTC("Already fault handling ... going to work item (%x)\n", Address);
                Context.AddressSpace = AddressSpace;
                Context.MemoryArea = MemoryArea;
                Context.Required = &Resources;
                KeInitializeEvent(&Context.Wait, NotificationEvent, FALSE);

                ExInitializeWorkItem(&Context.WorkItem,
                                     (PWORKER_THREAD_ROUTINE)MmpFaultWorker,
                                     &Context);

                DPRINT("Queue work item\n");
                ExQueueWorkItem(&Context.WorkItem, DelayedWorkQueue);
                DPRINT("Wait\n");
                KeWaitForSingleObject(&Context.Wait, 0, KernelMode, FALSE, NULL);
                Status = Context.Status;
                DPRINTC("Status %x\n", Status);
            }
            else
            {
                DPRINT("DoAcquisition %x\n", Resources.DoAcquisition);

                Status = Resources.DoAcquisition(AddressSpace,
                                                 MemoryArea,
                                                 &Resources);

                DPRINT("DoAcquisition %x -> %x\n",
                       Resources.DoAcquisition,
                       Status);
            }

            /* Acquisition succeeded: re-run the handler with the state now
               accumulated in Resources. */
            if (NT_SUCCESS(Status))
            {
                Status = STATUS_MM_RESTART_OPERATION;
            }
        }
        else if (NT_SUCCESS(Status))
        {
            /* Any other success code violates the handler contract. */
            ASSERT(FALSE);
        }

        if (!FromMdl)
        {
            MmLockAddressSpace(AddressSpace);
        }
    }
    while (Status == STATUS_MM_RESTART_OPERATION);

    DPRINTC("Completed page fault handling: %x:%x %x\n",
            MmGetAddressSpaceOwner(AddressSpace),
            Address,
            Status);

    if (!FromMdl)
    {
        MmUnlockAddressSpace(AddressSpace);
    }

    /* Wake any thread waiting on this page, regardless of outcome. */
    MiSetPageEvent(MmGetAddressSpaceOwner(AddressSpace), Address);
    DPRINT("Done %x\n", Status);

    return Status;
}
896
897 /*
898
899 Call the inner not present fault handler, keeping track of the fault count.
900 In the ultimate form of this code, optionally use a worker thread to handle
901 the fault in order to sidestep stack overflow in the multiple fault case.
902
903 */
904
905 NTSTATUS
906 NTAPI
907 MmNotPresentFaultCacheSection(KPROCESSOR_MODE Mode,
908 ULONG_PTR Address,
909 BOOLEAN FromMdl)
910 {
911 PETHREAD Thread;
912 PMMSUPPORT AddressSpace;
913 NTSTATUS Status;
914
915 Address &= ~(PAGE_SIZE - 1);
916 DPRINT("MmNotPresentFault(Mode %d, Address %x)\n", Mode, Address);
917
918 Thread = PsGetCurrentThread();
919
920 if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
921 {
922 DPRINT1("Page fault at high IRQL %d, address %x\n",
923 KeGetCurrentIrql(),
924 Address);
925
926 ASSERT(FALSE);
927 return STATUS_UNSUCCESSFUL;
928 }
929
930 /* Find the memory area for the faulting address */
931 if (Address >= (ULONG_PTR)MmSystemRangeStart)
932 {
933 /* Check permissions */
934 if (Mode != KernelMode)
935 {
936 DPRINTC("Address: %x\n", Address);
937 return STATUS_ACCESS_VIOLATION;
938 }
939 AddressSpace = MmGetKernelAddressSpace();
940 }
941 else
942 {
943 AddressSpace = &PsGetCurrentProcess()->Vm;
944 }
945
946 Thread->ActiveFaultCount++;
947 Status = MmNotPresentFaultCacheSectionInner(Mode,
948 AddressSpace,
949 Address,
950 FromMdl,
951 Thread);
952 Thread->ActiveFaultCount--;
953
954 ASSERT(Status != STATUS_UNSUCCESSFUL);
955 ASSERT(Status != STATUS_INVALID_PARAMETER);
956 DPRINT("MmAccessFault %x:%x -> %x\n",
957 MmGetAddressSpaceOwner(AddressSpace),
958 Address,
959 Status);
960
961 return Status;
962 }