/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::MDLSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN ULONG Length)
{
    ULONG Size;

    //
    // Check if we don't have an MDL built
    //
    if (!Mdl)
    {
        //
        // Calculate the size we'll need and allocate the MDL
        //
        Size = MmSizeOfMdl(Base, Length);
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    //
    // Initialize it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}

/*
 * @implemented
 */
ULONG
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN ULONG Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}

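//
// Illustrative sketch (compiled out, not part of the original file): a caller
// can size the MDL storage itself with MmSizeOfMdl and hand the buffer to
// MmCreateMdl, which then only initializes it instead of allocating. The
// function name and pool tag below are hypothetical.
//
#if 0
static PMDL
MiExampleCreateMdlInCallerStorage(IN PVOID Buffer,
                                  IN ULONG Length)
{
    ULONG Size;
    PMDL Mdl;

    //
    // sizeof(MDL) plus one PFN_NUMBER per page spanned by the buffer
    //
    Size = MmSizeOfMdl(Buffer, Length);

    //
    // Provide the storage ourselves; MmCreateMdl will skip its allocation
    //
    Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, 'xEiM');
    if (!Mdl) return NULL;

    return MmCreateMdl(Mdl, Buffer, Length);
}
#endif
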
/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated to a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (Pfn > MmHighestPhysicalPage) Mdl->MdlFlags |= MDL_IO_SPACE;
}

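//
// Illustrative sketch (compiled out, not part of the original file):
// describing a driver-owned nonpaged buffer with an MDL. Because the buffer
// is nonpaged, no probe-and-lock is needed; MmBuildMdlForNonPagedPool fills
// the PFN array directly from the PTEs. The function name is hypothetical.
//
#if 0
static PMDL
MiExampleDescribeNonPagedBuffer(IN PVOID NonPagedBuffer,
                                IN ULONG Length)
{
    PMDL Mdl;

    //
    // Allocate and initialize the MDL for the buffer
    //
    Mdl = MmCreateMdl(NULL, NonPagedBuffer, Length);
    if (!Mdl) return NULL;

    //
    // Fill in the PFNs; the MDL is now usable without MmProbeAndLockPages
    //
    MmBuildMdlForNonPagedPool(Mdl);
    return Mdl;
}
#endif
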
/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes)
{
    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 MiNotMapped,
                                 0);
}

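//
// Illustrative sketch (compiled out, not part of the original file):
// requesting pages below 4 GB, a common constraint for 32-bit DMA devices.
// The returned MDL may describe fewer bytes than requested; callers release
// the pages with MmFreePagesFromMdl and then free the MDL itself with
// ExFreePool (see further below). The function name is hypothetical.
//
#if 0
static PMDL
MiExampleAllocateLowPages(IN SIZE_T TotalBytes)
{
    PHYSICAL_ADDRESS Low, High, Skip;

    Low.QuadPart = 0;
    High.QuadPart = 0xFFFFFFFF;    // Stay under 4 GB
    Skip.QuadPart = 0;             // No holes between candidate ranges

    return MmAllocatePagesForMdl(Low, High, Skip, TotalBytes);
}
#endif
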
/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T TotalBytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Check for invalid cache type
    //
    if (CacheType > MmWriteCombined)
    {
        //
        // Normalize to default
        //
        CacheAttribute = MiNotMapped;
    }
    else
    {
        //
        // Convert to internal caching attribute
        //
        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    }

    //
    // Only these flags are allowed
    //
    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
    {
        //
        // Silently fail
        //
        return NULL;
    }

    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 CacheAttribute,
                                 Flags);
}

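//
// Illustrative sketch (compiled out, not part of the original file): the Ex
// variant adds a caching type and flags. MM_DONT_ZERO_ALLOCATION skips
// zeroing for callers that will overwrite the pages anyway; as implemented
// above, any flag outside the two permitted ones makes the call return NULL.
// The function name is hypothetical.
//
#if 0
static VOID
MiExampleAllocateAndFree(VOID)
{
    PHYSICAL_ADDRESS Low, High, Skip;
    PMDL Mdl;

    Low.QuadPart = 0;
    High.QuadPart = MAXLONGLONG;   // Any physical address
    Skip.QuadPart = 0;

    Mdl = MmAllocatePagesForMdlEx(Low,
                                  High,
                                  Skip,
                                  PAGE_SIZE * 4,
                                  MmCached,
                                  MM_DONT_ZERO_ALLOCATION);
    if (!Mdl) return;

    //
    // ... use the pages, e.g. map them with MmMapLockedPagesSpecifyCache ...
    //

    //
    // Return the pages, then release the MDL structure itself
    //
    MmFreePagesFromMdl(Mdl);
    ExFreePool(Mdl);
}
#endif
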
/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == -1) break;

        //
        // Sanity check
        //
        ASSERT(*Pages <= MmHighestPhysicalPage);

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1->u3.ReferenceCount == 1);

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;

        //
        // Dereference it
        //
        MmDereferencePage(*Pages);

        //
        // Clear this page and move on
        //
        *Pages++ = -1;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_NUMBER PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == -1) break;

            //
            // Write the PTE
            //
            ASSERT(PointerPte->u.Hard.Valid == 0);
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            *PointerPte++ = TempPte;
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    //
    // In user-mode, let ReactOS do it
    //
    return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
}

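//
// Illustrative sketch (compiled out, not part of the original file): mapping
// the pages of a locked MDL into system space with a specific caching type.
// Setting MDL_MAPPING_CAN_FAIL beforehand makes the routine above return
// NULL on PTE exhaustion instead of taking the BugCheckOnFailure path; the
// flag manipulation and the function name are illustrative assumptions.
//
#if 0
static PVOID
MiExampleMapLockedMdlNonCached(IN PMDL Mdl)
{
    PVOID SystemVa;

    //
    // Ask for a non-cached mapping and allow graceful failure
    //
    Mdl->MdlFlags |= MDL_MAPPING_CAN_FAIL;
    SystemVa = MmMapLockedPagesSpecifyCache(Mdl,
                                            KernelMode,
                                            MmNonCached,
                                            NULL,      // No preferred address
                                            FALSE,     // Don't bugcheck
                                            NormalPagePriority);

    //
    // Callers later undo this with MmUnmapLockedPages(SystemVa, Mdl)
    //
    return SystemVa;
}
#endif
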
/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}

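//
// Illustrative sketch (compiled out, not part of the original file): in
// practice drivers rarely call these mapping routines directly; the WDK
// macro MmGetSystemAddressForMdlSafe reuses an existing mapping when one
// exists and otherwise falls through to MmMapLockedPagesSpecifyCache with
// BugCheckOnFailure disabled. The function name is hypothetical.
//
#if 0
static NTSTATUS
MiExampleTouchMdlBuffer(IN PMDL Mdl)
{
    PVOID Buffer;

    //
    // NULL here means no system PTEs were available
    //
    Buffer = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
    if (!Buffer) return STATUS_INSUFFICIENT_RESOURCES;

    RtlZeroMemory(Buffer, MmGetMdlByteCount(Mdl));
    return STATUS_SUCCESS;
}
#endif
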
/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_NUMBER PageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free the extra PTEs
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = (PPFN_NUMBER)(Mdl + 1);
            MdlPages += PageCount;

            //
            // Do the math
            //
            PageCount += *MdlPages;
            PointerPte -= *MdlPages;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  ((*MdlPages) << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        //
        // Let ReactOS handle it
        //
        MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
    }
}

/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    PETHREAD Thread;
    PMMSUPPORT AddressSpace;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    //
    // Get the thread and process
    //
    Thread = PsGetCurrentThread();
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = -1;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
            Address = PAGE_ALIGN(Address);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base >= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        AddressSpace = NULL; // Keep compiler happy
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        //
        // Use the process lock
        //
        UsePfnLock = FALSE;
        AddressSpace = &CurrentProcess->Vm;
        MmLockAddressSpace(AddressSpace);
        OldIrql = DISPATCH_LEVEL; // Keep compiler happy
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = -1;
        while ((PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                //
                // Release process address space lock
                //
                MmUnlockAddressSpace(AddressSpace);
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);
            Status = MmAccessFault(FALSE, Address, KernelMode, NULL);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                //
                // Use the address space lock
                //
                MmLockAddressSpace(AddressSpace);
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            //
                            // Release process address space lock
                            //
                            MmUnlockAddressSpace(AddressSpace);
                        }

                        //
                        // Access the page
                        //
                        Status = MmAccessFault(TRUE, Address, KernelMode, NULL);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            //
                            // Use the address space lock
                            //
                            MmLockAddressSpace(AddressSpace);
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        if (PageFrameIndex <= MmHighestPhysicalPage)
        {
            //
            // Get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PageFrameIndex);
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            //
            // Now lock the page
            //
            MmReferencePage(PageFrameIndex);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        if (!((ULONG_PTR)(++PointerPte) & (PAGE_SIZE - 1))) PointerPde++;
    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}

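//
// Illustrative sketch (compiled out, not part of the original file): the
// classic direct-I/O pattern around MmProbeAndLockPages. The routine raises
// on failure, so callers wrap it in SEH; once it succeeds, the pages stay
// resident until MmUnlockPages. The function name is hypothetical.
//
#if 0
static NTSTATUS
MiExampleLockUserBuffer(IN PVOID UserBuffer,
                        IN ULONG Length,
                        OUT PMDL *LockedMdl)
{
    PMDL Mdl;

    Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
    if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;

    _SEH2_TRY
    {
        //
        // Probe for write access and pin the pages
        //
        MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // The probe raised; clean up and propagate the status
        //
        IoFreeMdl(Mdl);
        _SEH2_YIELD(return _SEH2_GetExceptionCode());
    }
    _SEH2_END;

    //
    // Pages are locked; release later with MmUnlockPages, then IoFreeMdl
    //
    *LockedMdl = Mdl;
    return STATUS_SUCCESS;
}
#endif
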
/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the associated process and capture the flags, which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any MmGetSystemAddressForMdl calls for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == -1) break;

            //
            // Check if this page is in the PFN database
            //
            if (*MdlPages <= MmHighestPhysicalPage)
            {
                //
                // Unlock and dereference
                //
                MmDereferencePage(*MdlPages);
            }
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == -1)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        //
        // Sanity check
        //
        ASSERT(*MdlPages <= MmHighestPhysicalPage);
    } while (++MdlPages < LastPage);

    //
    // Reset the pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        //
        // Unlock and dereference
        //
        MmDereferencePage(*MdlPages);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                    IN ULONG PoolTag,
                                    IN PMDL MemoryDescriptorList,
                                    IN MEMORY_CACHING_TYPE CacheType)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}

/* EOF */