/*
 * Copyright (C) 1998-2005 ReactOS Team (and the authors from the programmers section)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 *
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/cache/section/fault.c
 * PURPOSE:     Consolidate fault handlers for sections
 *
 * PROGRAMMERS: Arty
 *              Rex Jolliff
 *              David Welch
 *              Eric Kohl
 *              Emanuele Aliberti
 *              Eugene Ingerman
 *              Casper Hornstrup
 *              KJK::Hyperion
 *              Guido de Jong
 *              Ge van Geldorp
 *              Royce Mitchell III
 *              Filip Navara
 *              Aleksey Bragin
 *              Jason Filby
 *              Thomas Weidenmueller
 *              Gunnar Andre' Dalsnes
 *              Mike Nordell
 *              Alex Ionescu
 *              Gregor Anich
 *              Steven Edwards
 *              Herve Poussineau
 */

/*

   I've generally organized fault handling code in newmm as handlers that run
   under a single lock acquisition, check the state, and either take the
   necessary action atomically, or place a wait entry and return a
   continuation to the caller. This lends itself to code that has a simple,
   structured form, doesn't make assumptions about lock taking and breaking,
   and provides an obvious, graphic separation between code that may block
   and code that isn't allowed to. This file contains the non-blocking half.

   In order to request a blocking operation to happen outside locks, place a
   function pointer in the provided MM_REQUIRED_RESOURCES struct and return
   STATUS_MORE_PROCESSING_REQUIRED. The function indicated will receive the
   provided struct and take action outside of any mm related locks and at
   PASSIVE_LEVEL. The same fault handler will be called again after the
   blocking operation succeeds. In this way, the fault handler can accumulate
   state, but will freely work while competing with other threads.

   Fault handlers in this file should check for an MM_WAIT_ENTRY in a page
   table they're using and return STATUS_SUCCESS + 1 if it's found. In that
   case, the caller will wait on the wait entry event until the competing
   thread is finished, and recall this handler in the current thread.

   Another thing to note here is that we require mappings to exactly mirror
   rmaps, so each mapping should be immediately followed by an rmap addition.

*/
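
/*

   A condensed sketch of the caller's side of this protocol. Illustrative
   only: HandlerUnderLock stands for any of the non-blocking handlers in this
   file, and the full loops live in MmpSectionAccessFaultInner and
   MmNotPresentFaultCacheSectionInner below.

*/
#if 0
do
{
    MmLockAddressSpace(AddressSpace);
    Resources.DoAcquisition = NULL;
    Status = HandlerUnderLock(AddressSpace, MemoryArea, Address, Locked, &Resources);
    MmUnlockAddressSpace(AddressSpace);

    if (Status == STATUS_SUCCESS + 1)
    {
        /* A competing thread posted MM_WAIT_ENTRY; wait for it, then retry */
        MiWaitForPageEvent(Process, Address);
        Status = STATUS_MM_RESTART_OPERATION;
    }
    else if (Status == STATUS_MORE_PROCESSING_REQUIRED)
    {
        /* Blocking callout at PASSIVE_LEVEL, outside all mm locks */
        Status = Resources.DoAcquisition(AddressSpace, MemoryArea, &Resources);
        if (NT_SUCCESS(Status))
            Status = STATUS_MM_RESTART_OPERATION;
    }
}
while (Status == STATUS_MM_RESTART_OPERATION);
#endif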

/* INCLUDES *****************************************************************/

#include <ntoskrnl.h>
#include "newmm.h"
#define NDEBUG
#include <debug.h>
#include "../mm/ARM3/miarm.h"

#define DPRINTC DPRINT

extern KEVENT MmWaitPageEvent;
extern PMMWSL MmWorkingSetList;

/*

   Multiple stage handling of a not-present fault in a data section.

   Required->State is used to accumulate flags that indicate the next action
   the handler should take.

   State & 2 is currently used to indicate that the page acquired by a
   previous callout is a global page to the section and should be placed in
   the section page table.

   Note that the primitive tail recursion done here reaches the base case
   when the page is present.

*/
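
/*

   For a page that has never been resident, the handler below is typically
   entered three times; a sketch of the expected sequence, derived from the
   code that follows:

   1. Entry == 0: post MM_WAIT_ENTRY in the section page table, set
      Required->State = 2 and Required->DoAcquisition = MiReadFilePage, and
      return STATUS_MORE_PROCESSING_REQUIRED so the caller reads the page in.
   2. Required->State & 2 with Required->Page[0] filled in: publish the page
      in the section page table via MAKE_PFN_SSE and return
      STATUS_MM_RESTART_OPERATION.
   3. Entry now names a PFN: reference it, map it, add the rmap, and return
      STATUS_SUCCESS (the base case).

*/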

NTSTATUS
NTAPI
MmNotPresentFaultCachePage(PMMSUPPORT AddressSpace,
                           MEMORY_AREA* MemoryArea,
                           PVOID Address,
                           BOOLEAN Locked,
                           PMM_REQUIRED_RESOURCES Required)
{
    NTSTATUS Status;
    PVOID PAddress;
    ULONG Consumer;
    PMM_SECTION_SEGMENT Segment;
    LARGE_INTEGER FileOffset, TotalOffset;
    ULONG_PTR Entry;
    ULONG Attributes;
    PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);
    KIRQL OldIrql;

    DPRINT("Not Present: %p %p (%p-%p)\n",
           AddressSpace,
           Address,
           MemoryArea->StartingAddress,
           MemoryArea->EndingAddress);

    /*
     * There is a window between taking the page fault and locking the
     * address space in which another thread could load the page, so we
     * check for that.
     */
    if (MmIsPagePresent(Process, Address))
    {
        DPRINT("Done\n");
        return STATUS_SUCCESS;
    }

    PAddress = MM_ROUND_DOWN(Address, PAGE_SIZE);
    TotalOffset.QuadPart = (ULONG_PTR)PAddress -
                           (ULONG_PTR)MemoryArea->StartingAddress;

    Segment = MemoryArea->Data.SectionData.Segment;

    TotalOffset.QuadPart += MemoryArea->Data.SectionData.ViewOffset.QuadPart;
    FileOffset = TotalOffset;

    //Consumer = (Segment->Flags & MM_DATAFILE_SEGMENT) ? MC_CACHE : MC_USER;
    Consumer = MC_CACHE;

    if (Segment->FileObject)
    {
        DPRINT("FileName %wZ\n", &Segment->FileObject->FileName);
    }

    DPRINT("Total Offset %08x%08x\n", TotalOffset.HighPart, TotalOffset.LowPart);

    /* Lock the segment */
    MmLockSectionSegment(Segment);

    /* Get the entry corresponding to the offset within the section */
    Entry = MmGetPageEntrySectionSegment(Segment, &TotalOffset);

    Attributes = PAGE_READONLY;

    if (Required->State && Required->Page[0])
    {
        DPRINT("Have file and page, set page %x in section @ %x #\n",
               Required->Page[0],
               TotalOffset.LowPart);

        if (Required->SwapEntry)
            MmSetSavedSwapEntryPage(Required->Page[0], Required->SwapEntry);

        if (Required->State & 2)
        {
            DPRINT("Set in section @ %x\n", TotalOffset.LowPart);
            Status = MmSetPageEntrySectionSegment(Segment,
                                                  &TotalOffset,
                                                  Entry = MAKE_PFN_SSE(Required->Page[0]));
            if (!NT_SUCCESS(Status))
            {
                MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
            }
            MmUnlockSectionSegment(Segment);
            MiSetPageEvent(Process, Address);
            DPRINT("Status %x\n", Status);
            return STATUS_MM_RESTART_OPERATION;
        }
        else
        {
            DPRINT("Set %x in address space @ %x\n", Required->Page[0], Address);
            Status = MmCreateVirtualMapping(Process,
                                            Address,
                                            Attributes,
                                            Required->Page,
                                            1);
            if (NT_SUCCESS(Status))
            {
                MmInsertRmap(Required->Page[0], Process, Address);
            }
            else
            {
                /* Drop the reference for our address space ... */
                MmReleasePageMemoryConsumer(MC_CACHE, Required->Page[0]);
            }
            MmUnlockSectionSegment(Segment);
            DPRINTC("XXX Set Event %x\n", Status);
            MiSetPageEvent(Process, Address);
            DPRINT("Status %x\n", Status);
            return Status;
        }
    }
    else if (MM_IS_WAIT_PTE(Entry))
    {
        // When MM_WAIT_ENTRY is found as a swap entry, the competing thread
        // that placed it expects us to wait until it's done. Rather than
        // recopy this boilerplate everywhere, we just ask the caller to wait.
        MmUnlockSectionSegment(Segment);
        return STATUS_SUCCESS + 1;
    }
    else if (Entry)
    {
        PFN_NUMBER Page = PFN_FROM_SSE(Entry);
        DPRINT("Take reference to page %x #\n", Page);

        if (MiGetPfnEntry(Page) == NULL)
        {
            DPRINT1("Found no PFN entry for page 0x%x in page entry 0x%x (segment: 0x%p, offset: %08x%08x)\n",
                    Page,
                    Entry,
                    Segment,
                    TotalOffset.HighPart,
                    TotalOffset.LowPart);
            KeBugCheck(CACHE_MANAGER);
        }

        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        MmReferencePage(Page);
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        Status = MmCreateVirtualMapping(Process, Address, Attributes, &Page, 1);
        if (NT_SUCCESS(Status))
        {
            MmInsertRmap(Page, Process, Address);
        }
        DPRINT("XXX Set Event %x\n", Status);
        MiSetPageEvent(Process, Address);
        MmUnlockSectionSegment(Segment);
        DPRINT("Status %x\n", Status);
        return Status;
    }
    else
    {
        DPRINT("Get page into section\n");
        /*
         * If the entry is zero (and it can't change because we have
         * locked the segment) then we need to load the page.
         */
        //DPRINT1("Read from file %08x %wZ\n", FileOffset.LowPart, &Section->FileObject->FileName);
        Required->State = 2;
        Required->Context = Segment->FileObject;
        Required->Consumer = Consumer;
        Required->FileOffset = FileOffset;
        Required->Amount = PAGE_SIZE;
        Required->DoAcquisition = MiReadFilePage;

        MmSetPageEntrySectionSegment(Segment,
                                     &TotalOffset,
                                     MAKE_SWAP_SSE(MM_WAIT_ENTRY));

        MmUnlockSectionSegment(Segment);
        return STATUS_MORE_PROCESSING_REQUIRED;
    }
    ASSERT(FALSE);
    return STATUS_ACCESS_VIOLATION;
}

/*
 * Copy the contents of one physical page to another by mapping both into
 * hyperspace. Used below to give a faulting process a private copy of a
 * shared page.
 */
NTSTATUS
NTAPI
MiCopyPageToPage(PFN_NUMBER DestPage, PFN_NUMBER SrcPage)
{
    PEPROCESS Process;
    KIRQL Irql, Irql2;
    PVOID TempAddress, TempSource;

    Process = PsGetCurrentProcess();
    TempAddress = MiMapPageInHyperSpace(Process, DestPage, &Irql);
    if (TempAddress == NULL)
    {
        return STATUS_NO_MEMORY;
    }
    TempSource = MiMapPageInHyperSpace(Process, SrcPage, &Irql2);
    if (!TempSource)
    {
        MiUnmapPageInHyperSpace(Process, TempAddress, Irql);
        return STATUS_NO_MEMORY;
    }

    memcpy(TempAddress, TempSource, PAGE_SIZE);

    MiUnmapPageInHyperSpace(Process, TempSource, Irql2);
    MiUnmapPageInHyperSpace(Process, TempAddress, Irql);
    return STATUS_SUCCESS;
}

/*

   This function is deceptively named, in that it does the actual work of
   handling access faults on data sections. In the case of the code that's
   present here, we don't allow CoW sections, but we do need this to unset
   the initial PAGE_READONLY condition of pages faulted into the cache, so
   that we can add a dirty bit in the section page table on the first
   modification.

   In the ultimate form of this code, CoW is re-enabled.

*/
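
/*

   Concretely, for the non-CoW case the first write to a cache page goes
   through the sequence summarized here (see the code below): the page was
   faulted in with a PAGE_READONLY mapping and a clean section page table
   entry; on the write fault we record DIRTY_SSE(Entry) in the section page
   table, widen the process mapping to PAGE_READWRITE, and set the dirty bit
   on the process page.

*/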

NTSTATUS
NTAPI
MiCowCacheSectionPage(PMMSUPPORT AddressSpace,
                      PMEMORY_AREA MemoryArea,
                      PVOID Address,
                      BOOLEAN Locked,
                      PMM_REQUIRED_RESOURCES Required)
{
    PMM_SECTION_SEGMENT Segment;
    PFN_NUMBER NewPage, OldPage;
    NTSTATUS Status;
    PVOID PAddress;
    LARGE_INTEGER Offset;
    PEPROCESS Process = MmGetAddressSpaceOwner(AddressSpace);

    DPRINT("MmAccessFaultSectionView(%x, %x, %x, %x)\n",
           AddressSpace,
           MemoryArea,
           Address,
           Locked);

    Segment = MemoryArea->Data.SectionData.Segment;

    /* Lock the segment */
    MmLockSectionSegment(Segment);

    /* Find the offset of the page */
    PAddress = MM_ROUND_DOWN(Address, PAGE_SIZE);
    Offset.QuadPart = (ULONG_PTR)PAddress - (ULONG_PTR)MemoryArea->StartingAddress +
                      MemoryArea->Data.SectionData.ViewOffset.QuadPart;

    if (!Segment->WriteCopy /*&&
        !MemoryArea->Data.SectionData.WriteCopyView*/ ||
        Segment->Image.Characteristics & IMAGE_SCN_MEM_SHARED)
    {
#if 0
        if (Region->Protect == PAGE_READWRITE ||
            Region->Protect == PAGE_EXECUTE_READWRITE)
#endif
        {
            ULONG_PTR Entry;
            DPRINTC("setting non-cow page %x %x:%x offset %x (%x) to writable\n",
                    Segment,
                    Process,
                    PAddress,
                    Offset.u.LowPart,
                    MmGetPfnForProcess(Process, Address));
            if (Segment->FileObject)
            {
                DPRINTC("file %wZ\n", &Segment->FileObject->FileName);
            }
            Entry = MmGetPageEntrySectionSegment(Segment, &Offset);
            DPRINT("Entry %x\n", Entry);
            if (Entry &&
                !IS_SWAP_FROM_SSE(Entry) &&
                PFN_FROM_SSE(Entry) == MmGetPfnForProcess(Process, Address))
            {
                MmSetPageEntrySectionSegment(Segment,
                                             &Offset,
                                             DIRTY_SSE(Entry));
            }
            MmSetPageProtect(Process, PAddress, PAGE_READWRITE);
            MmSetDirtyPage(Process, PAddress);
            MmUnlockSectionSegment(Segment);
            DPRINT("Done\n");
            return STATUS_SUCCESS;
        }
#if 0
        else
        {
            DPRINT("Not supposed to be writable\n");
            MmUnlockSectionSegment(Segment);
            return STATUS_ACCESS_VIOLATION;
        }
#endif
    }

    if (!Required->Page[0])
    {
        SWAPENTRY SwapEntry;
        if (MmIsPageSwapEntry(Process, Address))
        {
            MmGetPageFileMapping(Process, Address, &SwapEntry);
            MmUnlockSectionSegment(Segment);
            if (SwapEntry == MM_WAIT_ENTRY)
                return STATUS_SUCCESS + 1; // Wait ... somebody else is getting it right now
            else
                return STATUS_SUCCESS; // Nonwait swap entry ... handle elsewhere
        }
        /* Call out to acquire a page to copy to. We'll be re-called when
         * the page has been allocated. */
        Required->Page[1] = MmGetPfnForProcess(Process, Address);
        Required->Consumer = MC_CACHE;
        Required->Amount = 1;
        Required->File = __FILE__;
        Required->Line = __LINE__;
        Required->DoAcquisition = MiGetOnePage;
        MmCreatePageFileMapping(Process, Address, MM_WAIT_ENTRY);
        MmUnlockSectionSegment(Segment);
        return STATUS_MORE_PROCESSING_REQUIRED;
    }

    NewPage = Required->Page[0];
    OldPage = Required->Page[1];

    DPRINT("Allocated page %x\n", NewPage);

    /* Unshare the old page */
    MmDeleteRmap(OldPage, Process, PAddress);

    /* Copy the old page */
    DPRINT("Copying\n");
    MiCopyPageToPage(NewPage, OldPage);

    /* Set the PTE to point to the new page */
    Status = MmCreateVirtualMapping(Process,
                                    Address,
                                    PAGE_READWRITE,
                                    &NewPage,
                                    1);

    if (!NT_SUCCESS(Status))
    {
        DPRINT1("MmCreateVirtualMapping failed, not out of memory\n");
        ASSERT(FALSE);
        MmUnlockSectionSegment(Segment);
        return Status;
    }

    MmInsertRmap(NewPage, Process, PAddress);
    MmReleasePageMemoryConsumer(MC_CACHE, OldPage);
    MmUnlockSectionSegment(Segment);

    DPRINT("Address 0x%.8X\n", Address);
    return STATUS_SUCCESS;
}

KEVENT MmWaitPageEvent;

typedef struct _WORK_QUEUE_WITH_CONTEXT
{
    WORK_QUEUE_ITEM WorkItem;
    PMMSUPPORT AddressSpace;
    PMEMORY_AREA MemoryArea;
    PMM_REQUIRED_RESOURCES Required;
    NTSTATUS Status;
    KEVENT Wait;
    AcquireResource DoAcquisition;
} WORK_QUEUE_WITH_CONTEXT, *PWORK_QUEUE_WITH_CONTEXT;

/*

   This is the work item used to do blocking resource acquisition when a
   fault handler returns STATUS_MORE_PROCESSING_REQUIRED. It allows resource
   acquisition to take place on a different stack, and outside of any locks
   used by fault handling, making recursive fault handling possible when
   required.

*/
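
/*

   For reference, a DoAcquisition callback has the AcquireResource shape used
   by this work item. A minimal sketch, assuming MmRequestPageMemoryConsumer's
   usual ReactOS signature; MiExampleGetOnePage is a hypothetical name, and
   the real implementations (MiGetOnePage, MiReadFilePage) live elsewhere in
   newmm:

*/
#if 0
NTSTATUS
NTAPI
MiExampleGetOnePage(PMMSUPPORT AddressSpace,
                    PMEMORY_AREA MemoryArea,
                    PMM_REQUIRED_RESOURCES Required)
{
    /* Runs at PASSIVE_LEVEL with no mm locks held, so blocking is allowed */
    return MmRequestPageMemoryConsumer(Required->Consumer,
                                       TRUE,
                                       &Required->Page[0]);
}
#endif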

VOID
NTAPI
MmpFaultWorker(PWORK_QUEUE_WITH_CONTEXT WorkItem)
{
    DPRINT("Calling work\n");
    WorkItem->Status = WorkItem->Required->DoAcquisition(WorkItem->AddressSpace,
                                                         WorkItem->MemoryArea,
                                                         WorkItem->Required);
    DPRINT("Status %x\n", WorkItem->Status);
    KeSetEvent(&WorkItem->Wait, IO_NO_INCREMENT, FALSE);
}

/*

   This code separates the action of fault handling into an upper and lower
   handler, to allow the inner handler to optionally be called in a work item
   if the stack is getting too deep. My experiments show that the third
   recursive page fault taken at PASSIVE_LEVEL must be shunted away to a
   worker thread. In the ultimate form of this code, the primary fault
   handler makes this decision by using a thread-local counter to detect a
   too-deep fault stack and calls the inner fault handler in a worker thread
   if required.

   Note that faults are taken at passive level and have access to ordinary
   driver entry points such as those that read and write files, and
   filesystems should use paged structures whenever possible. This makes
   recursive faults both a perfectly normal occurrence and a worthwhile case
   to handle.

   The code below will repeatedly call MiCowCacheSectionPage as long as it
   returns either STATUS_SUCCESS + 1 or STATUS_MORE_PROCESSING_REQUIRED. In
   the more processing required case, we call out to a blocking resource
   acquisition function and then recall the fault handler with the shared
   state represented by the MM_REQUIRED_RESOURCES struct.

   In the other case, we wait on the wait entry event and recall the handler.
   Each time the wait entry event is signalled, one thread has removed an
   MM_WAIT_ENTRY from a page table.

   In the ultimate form of this code, there is a single system wide fault
   handler for each of access fault and not present, and each memory area
   contains a function pointer that indicates the active fault handler. Since
   the mm code in reactos is currently fragmented, I didn't bring this change
   to trunk.

*/
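
/*

   Summary of the retry protocol implemented by the loop below (and again in
   MmNotPresentFaultCacheSectionInner):

   STATUS_SUCCESS + 1              -> wait on the page event, then restart
   STATUS_MM_RESTART_OPERATION     -> zero Resources and retry immediately
   STATUS_MORE_PROCESSING_REQUIRED -> run Resources.DoAcquisition outside the
                                      address space lock (in a work item if
                                      the fault depth is too great), then
                                      restart on success
   anything else                   -> final disposition of the fault

*/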

NTSTATUS
NTAPI
MmpSectionAccessFaultInner(KPROCESSOR_MODE Mode,
                           PMMSUPPORT AddressSpace,
                           ULONG_PTR Address,
                           BOOLEAN FromMdl,
                           PETHREAD Thread)
{
    MEMORY_AREA* MemoryArea;
    NTSTATUS Status;
    BOOLEAN Locked = FromMdl;
    MM_REQUIRED_RESOURCES Resources = { 0 };
    WORK_QUEUE_WITH_CONTEXT Context;

    RtlZeroMemory(&Context, sizeof(WORK_QUEUE_WITH_CONTEXT));

    DPRINT("MmAccessFault(Mode %d, Address %x)\n", Mode, Address);

    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        DPRINT1("Page fault at high IRQL was %d\n", KeGetCurrentIrql());
        return STATUS_UNSUCCESSFUL;
    }

    /* Find the memory area for the faulting address */
    if (Address >= (ULONG_PTR)MmSystemRangeStart)
    {
        /* Check permissions */
        if (Mode != KernelMode)
        {
            DPRINT("MmAccessFault(Mode %d, Address %x)\n", Mode, Address);
            return STATUS_ACCESS_VIOLATION;
        }
        AddressSpace = MmGetKernelAddressSpace();
    }
    else
    {
        AddressSpace = &PsGetCurrentProcess()->Vm;
    }

    if (!FromMdl)
    {
        MmLockAddressSpace(AddressSpace);
    }

    do
    {
        MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)Address);
        if (MemoryArea == NULL ||
            MemoryArea->DeleteInProgress)
        {
            if (!FromMdl)
            {
                MmUnlockAddressSpace(AddressSpace);
            }
            DPRINT("Address: %x\n", Address);
            return STATUS_ACCESS_VIOLATION;
        }

        DPRINT("Type %x (%x -> %x)\n",
               MemoryArea->Type,
               MemoryArea->StartingAddress,
               MemoryArea->EndingAddress);

        Resources.DoAcquisition = NULL;

        // Note: fault handlers are called with address space locked
        // We return STATUS_MORE_PROCESSING_REQUIRED if anything is needed
        Status = MiCowCacheSectionPage(AddressSpace,
                                       MemoryArea,
                                       (PVOID)Address,
                                       Locked,
                                       &Resources);

        if (!FromMdl)
        {
            MmUnlockAddressSpace(AddressSpace);
        }

        if (Status == STATUS_SUCCESS + 1)
        {
            /* Wait page ... */
            DPRINT("Waiting for %x\n", Address);
            MiWaitForPageEvent(MmGetAddressSpaceOwner(AddressSpace), Address);
            DPRINT("Restarting fault %x\n", Address);
            Status = STATUS_MM_RESTART_OPERATION;
        }
        else if (Status == STATUS_MM_RESTART_OPERATION)
        {
            /* Clean slate */
            RtlZeroMemory(&Resources, sizeof(Resources));
        }
        else if (Status == STATUS_MORE_PROCESSING_REQUIRED)
        {
            if (Thread->ActiveFaultCount > 0)
            {
                DPRINT("Already fault handling ... going to work item (%x)\n",
                       Address);
                Context.AddressSpace = AddressSpace;
                Context.MemoryArea = MemoryArea;
                Context.Required = &Resources;
                KeInitializeEvent(&Context.Wait, NotificationEvent, FALSE);

                ExInitializeWorkItem(&Context.WorkItem,
                                     (PWORKER_THREAD_ROUTINE)MmpFaultWorker,
                                     &Context);

                DPRINT("Queue work item\n");
                ExQueueWorkItem(&Context.WorkItem, DelayedWorkQueue);
                DPRINT("Wait\n");
                KeWaitForSingleObject(&Context.Wait, 0, KernelMode, FALSE, NULL);
                Status = Context.Status;
                DPRINT("Status %x\n", Status);
            }
            else
            {
                Status = Resources.DoAcquisition(AddressSpace, MemoryArea, &Resources);
            }

            if (NT_SUCCESS(Status))
            {
                Status = STATUS_MM_RESTART_OPERATION;
            }
        }

        if (!FromMdl)
        {
            MmLockAddressSpace(AddressSpace);
        }
    }
    while (Status == STATUS_MM_RESTART_OPERATION);

    if (!NT_SUCCESS(Status) && MemoryArea->Type == 1)
    {
        DPRINT1("Completed page fault handling %x %x\n", Address, Status);
        DPRINT1("Type %x (%x -> %x)\n",
                MemoryArea->Type,
                MemoryArea->StartingAddress,
                MemoryArea->EndingAddress);
    }

    if (!FromMdl)
    {
        MmUnlockAddressSpace(AddressSpace);
    }

    return Status;
}

/*

   This is the outer fault handler mentioned in the description of
   MmpSectionAccessFaultInner. It increments a fault depth count in the
   current thread.

   In the ultimate form of this code, the lower fault handler will optionally
   use the count to keep the kernel stack from overflowing.

*/

NTSTATUS
NTAPI
MmAccessFaultCacheSection(KPROCESSOR_MODE Mode,
                          ULONG_PTR Address,
                          BOOLEAN FromMdl)
{
    PETHREAD Thread;
    PMMSUPPORT AddressSpace;
    NTSTATUS Status;

    DPRINT("MmpAccessFault(Mode %d, Address %x)\n", Mode, Address);

    Thread = PsGetCurrentThread();

    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        DPRINT1("Page fault at high IRQL %d, address %x\n",
                KeGetCurrentIrql(),
                Address);
        return STATUS_UNSUCCESSFUL;
    }

    /* Find the memory area for the faulting address */
    if (Address >= (ULONG_PTR)MmSystemRangeStart)
    {
        /* Check permissions */
        if (Mode != KernelMode)
        {
            DPRINT1("Address: %x:%x\n", PsGetCurrentProcess(), Address);
            return STATUS_ACCESS_VIOLATION;
        }
        AddressSpace = MmGetKernelAddressSpace();
    }
    else
    {
        AddressSpace = &PsGetCurrentProcess()->Vm;
    }

    Thread->ActiveFaultCount++;
    Status = MmpSectionAccessFaultInner(Mode,
                                        AddressSpace,
                                        Address,
                                        FromMdl,
                                        Thread);
    Thread->ActiveFaultCount--;

    return Status;
}

/*

   As above, this code separates the active part of fault handling from a
   carrier that can use the thread's active fault count to determine whether
   a work item is required. Also as above, this function repeatedly calls the
   active not present fault handler until a clear success or failure is
   received, retrying on STATUS_MORE_PROCESSING_REQUIRED or
   STATUS_SUCCESS + 1.

*/

NTSTATUS
NTAPI
MmNotPresentFaultCacheSectionInner(KPROCESSOR_MODE Mode,
                                   PMMSUPPORT AddressSpace,
                                   ULONG_PTR Address,
                                   BOOLEAN FromMdl,
                                   PETHREAD Thread)
{
    BOOLEAN Locked = FromMdl;
    PMEMORY_AREA MemoryArea;
    MM_REQUIRED_RESOURCES Resources = { 0 };
    WORK_QUEUE_WITH_CONTEXT Context;
    NTSTATUS Status = STATUS_SUCCESS;

    RtlZeroMemory(&Context, sizeof(WORK_QUEUE_WITH_CONTEXT));

    if (!FromMdl)
    {
        MmLockAddressSpace(AddressSpace);
    }

    /* Call the memory area specific fault handler */
    do
    {
        MemoryArea = MmLocateMemoryAreaByAddress(AddressSpace, (PVOID)Address);
        if (MemoryArea == NULL || MemoryArea->DeleteInProgress)
        {
            Status = STATUS_ACCESS_VIOLATION;
            if (MemoryArea)
            {
                DPRINT1("Type %x DIP %x\n",
                        MemoryArea->Type,
                        MemoryArea->DeleteInProgress);
            }
            else
            {
                DPRINT1("No memory area\n");
            }
            DPRINT1("Process %x, Address %x\n",
                    MmGetAddressSpaceOwner(AddressSpace),
                    Address);
            break;
        }

        DPRINTC("Type %x (%x -> %x -> %x) in %x\n",
                MemoryArea->Type,
                MemoryArea->StartingAddress,
                Address,
                MemoryArea->EndingAddress,
                PsGetCurrentThread());

        Resources.DoAcquisition = NULL;

        // Note: fault handlers are called with address space locked
        // We return STATUS_MORE_PROCESSING_REQUIRED if anything is needed

        Status = MmNotPresentFaultCachePage(AddressSpace,
                                            MemoryArea,
                                            (PVOID)Address,
                                            Locked,
                                            &Resources);

        if (!FromMdl)
        {
            MmUnlockAddressSpace(AddressSpace);
        }

        if (Status == STATUS_SUCCESS)
        {
            ; // Nothing
        }
        else if (Status == STATUS_SUCCESS + 1)
        {
            /* Wait page ... */
            DPRINT("Waiting for %x\n", Address);
            MiWaitForPageEvent(MmGetAddressSpaceOwner(AddressSpace), Address);
            DPRINT("Done waiting for %x\n", Address);
            Status = STATUS_MM_RESTART_OPERATION;
        }
        else if (Status == STATUS_MM_RESTART_OPERATION)
        {
            /* Clean slate */
            DPRINT("Clear resource\n");
            RtlZeroMemory(&Resources, sizeof(Resources));
        }
        else if (Status == STATUS_MORE_PROCESSING_REQUIRED)
        {
            if (Thread->ActiveFaultCount > 2)
            {
                DPRINTC("Already fault handling ... going to work item (%x)\n", Address);
                Context.AddressSpace = AddressSpace;
                Context.MemoryArea = MemoryArea;
                Context.Required = &Resources;
                KeInitializeEvent(&Context.Wait, NotificationEvent, FALSE);

                ExInitializeWorkItem(&Context.WorkItem,
                                     (PWORKER_THREAD_ROUTINE)MmpFaultWorker,
                                     &Context);

                DPRINT("Queue work item\n");
                ExQueueWorkItem(&Context.WorkItem, DelayedWorkQueue);
                DPRINT("Wait\n");
                KeWaitForSingleObject(&Context.Wait, 0, KernelMode, FALSE, NULL);
                Status = Context.Status;
                DPRINTC("Status %x\n", Status);
            }
            else
            {
                DPRINT("DoAcquisition %x\n", Resources.DoAcquisition);

                Status = Resources.DoAcquisition(AddressSpace,
                                                 MemoryArea,
                                                 &Resources);

                DPRINT("DoAcquisition %x -> %x\n",
                       Resources.DoAcquisition,
                       Status);
            }

            if (NT_SUCCESS(Status))
            {
                Status = STATUS_MM_RESTART_OPERATION;
            }
        }
        else if (NT_SUCCESS(Status))
        {
            ASSERT(FALSE);
        }

        if (!FromMdl)
        {
            MmLockAddressSpace(AddressSpace);
        }
    }
    while (Status == STATUS_MM_RESTART_OPERATION);

    DPRINTC("Completed page fault handling: %x:%x %x\n",
            MmGetAddressSpaceOwner(AddressSpace),
            Address,
            Status);

    if (!FromMdl)
    {
        MmUnlockAddressSpace(AddressSpace);
    }

    MiSetPageEvent(MmGetAddressSpaceOwner(AddressSpace), Address);
    DPRINT("Done %x\n", Status);

    return Status;
}

/*

   Call the inner not present fault handler, keeping track of the fault
   count. In the ultimate form of this code, optionally use a worker thread
   to handle the fault in order to sidestep stack overflow in the multiple
   fault case.

*/

NTSTATUS
NTAPI
MmNotPresentFaultCacheSection(KPROCESSOR_MODE Mode,
                              ULONG_PTR Address,
                              BOOLEAN FromMdl)
{
    PETHREAD Thread;
    PMMSUPPORT AddressSpace;
    NTSTATUS Status;

    Address &= ~(PAGE_SIZE - 1);
    DPRINT("MmNotPresentFault(Mode %d, Address %x)\n", Mode, Address);

    Thread = PsGetCurrentThread();

    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        DPRINT1("Page fault at high IRQL %d, address %x\n",
                KeGetCurrentIrql(),
                Address);

        ASSERT(FALSE);
        return STATUS_UNSUCCESSFUL;
    }

    /* Find the memory area for the faulting address */
    if (Address >= (ULONG_PTR)MmSystemRangeStart)
    {
        /* Check permissions */
        if (Mode != KernelMode)
        {
            DPRINTC("Address: %x\n", Address);
            return STATUS_ACCESS_VIOLATION;
        }
        AddressSpace = MmGetKernelAddressSpace();
    }
    else
    {
        AddressSpace = &PsGetCurrentProcess()->Vm;
    }

    Thread->ActiveFaultCount++;
    Status = MmNotPresentFaultCacheSectionInner(Mode,
                                                AddressSpace,
                                                Address,
                                                FromMdl,
                                                Thread);
    Thread->ActiveFaultCount--;

    ASSERT(Status != STATUS_UNSUCCESSFUL);
    ASSERT(Status != STATUS_INVALID_PARAMETER);
    DPRINT("MmAccessFault %x:%x -> %x\n",
           MmGetAddressSpaceOwner(AddressSpace),
           Address,
           Status);

    return Status;
}