/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::MDLSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN SIZE_T Length)
{
    SIZE_T Size;

    //
    // Check if we don't have an MDL built
    //
    if (!Mdl)
    {
        //
        // Calculate the size we'll need and allocate the MDL
        //
        Size = MmSizeOfMdl(Base, Length);
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    //
    // Initialize it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}

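//
// Usage sketch (illustrative only, not part of this translation unit): a
// kernel-mode caller might describe a buffer with an on-the-fly MDL as
// below. The buffer, size, and pool tag are hypothetical; since the MDL
// above is allocated from nonpaged pool with TAG_MDL, it is freed the
// same way here.
//
//     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 0x2000, 'xMDL');
//     PMDL Mdl = Buffer ? MmCreateMdl(NULL, Buffer, 0x2000) : NULL;
//     if (Mdl)
//     {
//         /* ... describe the buffer to other kernel components ... */
//         ExFreePoolWithTag(Mdl, TAG_MDL);
//     }
//     if (Buffer) ExFreePoolWithTag(Buffer, 'xMDL');
//
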
/*
 * @implemented
 */
SIZE_T
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN SIZE_T Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}

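//
// A quick worked example of the formula above, assuming 4KB pages and a
// 32-bit build (sizeof(MDL) == 28, sizeof(PFN_NUMBER) == 4): a buffer at
// 0x1FF0 of length 0x20 straddles a page boundary, so
// ADDRESS_AND_SIZE_TO_SPAN_PAGES yields 2 pages and the MDL needs
// 28 + 2 * 4 = 36 bytes.
//
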
/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated with a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}

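//
// Usage sketch (illustrative only): because the flag set above marks the
// MDL as describing nonpaged pool, helpers such as the documented
// MmGetSystemAddressForMdlSafe macro simply return Mdl->MappedSystemVa
// rather than building a new system mapping, so no unmap call is needed:
//
//     MmBuildMdlForNonPagedPool(Mdl);
//     Va = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
//     ASSERT(Va == Mdl->MappedSystemVa);
//
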
/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes)
{
    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 MiNotMapped,
                                 0);
}

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T TotalBytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Check for invalid cache type
    //
    if (CacheType > MmWriteCombined)
    {
        //
        // Normalize to default
        //
        CacheAttribute = MiNotMapped;
    }
    else
    {
        //
        // Convert to the internal caching attribute
        //
        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    }

    //
    // Only these flags are allowed
    //
    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
    {
        //
        // Silently fail
        //
        return NULL;
    }

    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 CacheAttribute,
                                 Flags);
}

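//
// Usage sketch (illustrative only): a caller could grab up to 64KB of
// physical pages anywhere below 4GB and release them again. The values
// are hypothetical; note that the MDL itself must still be freed by the
// caller after MmFreePagesFromMdl.
//
//     PHYSICAL_ADDRESS Low = {{0}}, High, Skip = {{0}};
//     PMDL Mdl;
//     High.QuadPart = 0xFFFFFFFF;
//     Mdl = MmAllocatePagesForMdlEx(Low, High, Skip, 0x10000, MmCached, 0);
//     if (Mdl)
//     {
//         /* ... map with MmMapLockedPagesSpecifyCache, use, unmap ... */
//         MmFreePagesFromMdl(Mdl);
//         ExFreePool(Mdl);
//     }
//
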
/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == -1) break;

        //
        // Sanity check
        //
        ASSERT(*Pages <= MmHighestPhysicalPage);

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1->u3.ReferenceCount == 1);

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;

        //
        // Dereference it
        //
        MmDereferencePage(*Pages);

        //
        // Clear this page and move on
        //
        *Pages++ = -1;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_NUMBER PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == -1) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    UNIMPLEMENTED;
    return NULL;
}

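//
// Usage sketch (illustrative only): mapping a locked-down MDL into system
// space without risking the NO_MORE_SYSTEM_PTES bugcheck above. Setting
// MDL_MAPPING_CAN_FAIL (which is what the documented
// MmGetSystemAddressForMdlSafe macro does) makes the routine return NULL
// when no system PTEs are available:
//
//     Mdl->MdlFlags |= MDL_MAPPING_CAN_FAIL;
//     Va = MmMapLockedPagesSpecifyCache(Mdl, KernelMode, MmCached,
//                                       NULL, FALSE, NormalPagePriority);
//     Mdl->MdlFlags &= ~MDL_MAPPING_CAN_FAIL;
//     if (Va) { /* ... use ... */ MmUnmapLockedPages(Va, Mdl); }
//
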
/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}

/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_NUMBER PageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free the extra PTEs
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = (PPFN_NUMBER)(Mdl + 1);
            MdlPages += PageCount;

            //
            // Do the math
            //
            PageCount += *MdlPages;
            PointerPte -= *MdlPages;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  ((*MdlPages) << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        UNIMPLEMENTED;
    }
}

/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    PMMSUPPORT AddressSpace;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    //
    // Get the process, if any
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = -1;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
            Address = PAGE_ALIGN(Address);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base >= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        AddressSpace = NULL; // Keep compiler happy
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        //
        // Use the process lock
        //
        UsePfnLock = FALSE;
        AddressSpace = &CurrentProcess->Vm;
        MmLockAddressSpace(AddressSpace);
        OldIrql = DISPATCH_LEVEL; // Keep compiler happy
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = -1;
#if (_MI_PAGING_LEVELS >= 3)
        /* Should be checking the PPE and PXE */
        ASSERT(FALSE);
#endif
        while ((PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                //
                // Release process address space lock
                //
                MmUnlockAddressSpace(AddressSpace);
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);
            Status = MmAccessFault(FALSE, Address, KernelMode, NULL);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                //
                // Use the address space lock
                //
                MmLockAddressSpace(AddressSpace);
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            //
                            // Release process address space lock
                            //
                            MmUnlockAddressSpace(AddressSpace);
                        }

                        //
                        // Access the page
                        //
                        Status = MmAccessFault(TRUE, Address, KernelMode, NULL);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            //
                            // Use the address space lock
                            //
                            MmLockAddressSpace(AddressSpace);
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        if (PageFrameIndex <= MmHighestPhysicalPage)
        {
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            //
            // Now lock the page
            //
            MmReferencePage(PageFrameIndex);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        if (!((ULONG_PTR)(++PointerPte) & (PAGE_SIZE - 1))) PointerPde++;
    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}

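//
// Usage sketch (illustrative only): the canonical probe-and-lock pattern
// for a user-mode buffer. MmProbeAndLockPages raises an exception on
// failure, so callers wrap it in SEH; UserBuffer and Length here are
// hypothetical inputs.
//
//     Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
//     if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
//     _SEH2_TRY
//     {
//         MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
//     }
//     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
//     {
//         IoFreeMdl(Mdl);
//         _SEH2_YIELD(return _SEH2_GetExceptionCode());
//     }
//     _SEH2_END;
//     /* ... use the locked buffer ... */
//     MmUnlockPages(Mdl);
//     IoFreeMdl(Mdl);
//
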
/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the associated process and capture the flags, which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == -1) break;

            //
            // Check if this page is in the PFN database
            //
            if (*MdlPages <= MmHighestPhysicalPage)
            {
                //
                // Unlock and dereference
                //
                MmDereferencePage(*MdlPages);
            }
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == -1)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        //
        // Sanity check
        //
        ASSERT(*MdlPages <= MmHighestPhysicalPage);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        //
        // Unlock and dereference
        //
        MmDereferencePage(*MdlPages);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                    IN ULONG PoolTag,
                                    IN PMDL MemoryDescriptorList,
                                    IN MEMORY_CACHING_TYPE CacheType)
{
    UNIMPLEMENTED;
    return 0;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}

/* EOF */