/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

/* GLOBALS ********************************************************************/

BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;
SIZE_T MmSystemLockPagesCount;

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN SIZE_T Length)
{
    SIZE_T Size;

    //
    // Check if we don't have an MDL built
    //
    if (!Mdl)
    {
        //
        // Calculate the size we'll need and allocate the MDL
        //
        Size = MmSizeOfMdl(Base, Length);
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    //
    // Initialize it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}
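
/*
 * Usage sketch (illustrative only, not part of this module): when the
 * caller passes Mdl = NULL, the returned MDL is a nonpaged pool
 * allocation that the caller must eventually free. "Buffer" and
 * "Length" below are hypothetical:
 *
 *     PMDL Mdl = MmCreateMdl(NULL, Buffer, Length);
 *     if (Mdl)
 *     {
 *         // ... describe/lock/map pages as needed ...
 *         ExFreePoolWithTag(Mdl, TAG_MDL);
 *     }
 */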

/*
 * @implemented
 */
SIZE_T
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN SIZE_T Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}
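
/*
 * Worked example (illustrative only): with 4KB pages, a 5000-byte
 * buffer starting 4000 bytes into a page covers three pages, so
 * MmSizeOfMdl returns sizeof(MDL) + 3 * sizeof(PFN_NUMBER).
 */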

/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated with a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}
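
/*
 * Usage sketch (illustrative only): for a buffer known to reside in
 * nonpaged pool, this routine replaces the probe-and-lock step.
 * "Buffer" and "Length" are hypothetical:
 *
 *     PMDL Mdl = IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL);
 *     if (Mdl)
 *     {
 *         MmBuildMdlForNonPagedPool(Mdl);
 *         // ... use the MDL; no MmUnlockPages call is needed ...
 *         IoFreeMdl(Mdl);
 *     }
 */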

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes)
{
    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 MiNotMapped,
                                 0);
}

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T TotalBytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Check for invalid cache type
    //
    if (CacheType > MmWriteCombined)
    {
        //
        // Normalize to default
        //
        CacheAttribute = MiNotMapped;
    }
    else
    {
        //
        // Convert to the internal caching attribute
        //
        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    }

    //
    // Only these flags are allowed
    //
    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
    {
        //
        // Silently fail
        //
        return NULL;
    }

    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 CacheAttribute,
                                 Flags);
}
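
/*
 * Usage sketch (illustrative only): requesting up to 64KB of physical
 * pages from anywhere in memory. The call may return fewer pages than
 * requested, and the MDL itself must still be freed after the pages:
 *
 *     PHYSICAL_ADDRESS Low, High, Skip;
 *     PMDL Mdl;
 *
 *     Low.QuadPart = 0;
 *     Skip.QuadPart = 0;
 *     High.QuadPart = MAXLONGLONG;
 *     Mdl = MmAllocatePagesForMdl(Low, High, Skip, 64 * 1024);
 *     if (Mdl)
 *     {
 *         // ... MmGetMdlByteCount(Mdl) gives the size actually granted ...
 *         MmFreePagesFromMdl(Mdl);
 *         ExFreePool(Mdl);
 *     }
 */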

/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == LIST_HEAD) break;

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
        }

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u3.e1.PageLocation = StandbyPageList;
        Pfn1->u2.ShareCount = 0;

        //
        // Dereference it
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        //
        // Clear this page and move on
        //
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
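
/*
 * Note (informational): MmAllocatePagesForMdl(Ex) may satisfy only part
 * of a request, in which case the PFN array is terminated early with
 * LIST_HEAD; that is why the loop above stops at the first LIST_HEAD
 * entry instead of always walking NumberOfPages entries.
 */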

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_COUNT PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    UNIMPLEMENTED;
    return NULL;
}
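
/*
 * Usage sketch (illustrative only): mapping a locked MDL into system
 * space with default caching, without bugchecking on failure, and
 * unmapping it afterwards:
 *
 *     PVOID SystemVa = MmMapLockedPagesSpecifyCache(Mdl,
 *                                                   KernelMode,
 *                                                   MmCached,
 *                                                   NULL,
 *                                                   FALSE,
 *                                                   NormalPagePriority);
 *     if (SystemVa)
 *     {
 *         // ... access the buffer through SystemVa ...
 *         MmUnmapLockedPages(SystemVa, Mdl);
 *     }
 */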

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}
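
/*
 * Note (informational): drivers usually reach this routine through the
 * MmGetSystemAddressForMdl(Safe) macros, which reuse an existing
 * mapping when MDL_MAPPED_TO_SYSTEM_VA or MDL_SOURCE_IS_NONPAGED_POOL
 * is already set and only fall back to an actual mapping call otherwise.
 */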

/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_COUNT PageCount, ExtraPageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free the extra pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = MmGetMdlPfnArray(Mdl);

            /* Number of extra pages stored after the PFN array */
            ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);

            //
            // Do the math
            //
            PageCount += ExtraPageCount;
            PointerPte -= ExtraPageCount;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  (ExtraPageCount << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        UNIMPLEMENTED;
    }
}

/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
#if (_MI_PAGING_LEVELS >= 3)
    PMMPDE PointerPpe;
#endif
#if (_MI_PAGING_LEVELS == 4)
    PMMPDE PointerPxe;
#endif
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    //
    // Get the process
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = LIST_HEAD;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    PointerPpe = MiAddressToPpe(StartAddress);
#endif
#if (_MI_PAGING_LEVELS == 4)
    PointerPxe = MiAddressToPxe(StartAddress);
#endif

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        /* Lock the process working set */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = LIST_HEAD;
        while (
#if (_MI_PAGING_LEVELS == 4)
               (PointerPxe->u.Hard.Valid == 0) ||
#endif
#if (_MI_PAGING_LEVELS >= 3)
               (PointerPpe->u.Hard.Valid == 0) ||
#endif
               (PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);

            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
            Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)0xBADBADA3);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Access the page
                        //

                        //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
                        Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)0xBADBADA3);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            MiReferenceProbedPageAndBumpLockCount(Pfn1);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
#if (_MI_PAGING_LEVELS >= 3)
        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
#endif
#if (_MI_PAGING_LEVELS == 4)
        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
#endif

    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
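
/*
 * Usage sketch (illustrative only): the probe raises on failure, so
 * callers must wrap it in SEH and balance a successful lock with
 * MmUnlockPages. "UserBuffer" and "Length" are hypothetical:
 *
 *     NTSTATUS Status = STATUS_SUCCESS;
 *     PMDL Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
 *
 *     if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
 *     _SEH2_TRY
 *     {
 *         MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
 *     }
 *     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
 *     {
 *         Status = _SEH2_GetExceptionCode();
 *     }
 *     _SEH2_END;
 *     if (NT_SUCCESS(Status))
 *     {
 *         // ... read from / write to the locked pages ...
 *         MmUnlockPages(Mdl);
 *     }
 *     IoFreeMdl(Mdl);
 */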

/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the associated process and capture the flags, which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Check if this page is in the PFN database
            //
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -(LONG_PTR)PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -(LONG_PTR)PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == LIST_HEAD)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT(*MdlPages != 0);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        /* Get the current entry and reference count */
        Pfn1 = (PMMPFN)*MdlPages;
        MiDereferencePfnAndDropLockCount(Pfn1);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                    IN ULONG PoolTag,
                                    IN PMDL MemoryDescriptorList,
                                    IN MEMORY_CACHING_TYPE CacheType)
{
    UNIMPLEMENTED;
    return 0;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}

/* EOF */