/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::MDLSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN SIZE_T Length)
{
    SIZE_T Size;

    //
    // Check if we don't have an MDL built
    //
    if (!Mdl)
    {
        //
        // Calculate the size we'll need and allocate the MDL
        //
        Size = MmSizeOfMdl(Base, Length);
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    //
    // Initialize it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}
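
//
// Usage sketch (illustrative only, not part of the original source): a caller
// can let MmCreateMdl allocate the MDL by passing NULL, and must then free it
// once it is no longer needed. "Buffer" and "Length" are hypothetical caller
// variables; new code should generally prefer IoAllocateMdl.
//
//     PMDL Mdl = MmCreateMdl(NULL, Buffer, Length);
//     if (Mdl)
//     {
//         /* ... describe/lock/map the buffer ... */
//         ExFreePool(Mdl);
//     }
//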

/*
 * @implemented
 */
SIZE_T
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN SIZE_T Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}
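
//
// Worked example (illustrative): with 4KB pages, a 10000-byte buffer that
// starts 16 bytes into a page spans 3 pages (16 + 10000 = 10016 bytes fits in
// three 4096-byte pages), so the returned size is
// sizeof(MDL) + 3 * sizeof(PFN_NUMBER) -- one PFN slot per spanned page.
//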

/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated to a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}
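
//
// Usage sketch (illustrative only): for a buffer that is already resident,
// such as one from ExAllocatePoolWithTag(NonPagedPool, ...), the MDL can be
// completed without probing or locking. "Buffer" and "Length" are
// hypothetical caller variables.
//
//     PMDL Mdl = IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL);
//     if (Mdl)
//     {
//         MmBuildMdlForNonPagedPool(Mdl);
//         /* ... the MDL now describes Buffer's physical pages ... */
//         IoFreeMdl(Mdl);
//     }
//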

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes)
{
    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 MiNotMapped,
                                 0);
}

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T TotalBytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Check for invalid cache type
    //
    if (CacheType > MmWriteCombined)
    {
        //
        // Normalize to default
        //
        CacheAttribute = MiNotMapped;
    }
    else
    {
        //
        // Convert to the internal caching attribute
        //
        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    }

    //
    // Only these flags are allowed
    //
    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
    {
        //
        // Silently fail
        //
        return NULL;
    }

    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 CacheAttribute,
                                 Flags);
}
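
//
// Usage sketch (illustrative only): request up to 16 pages anywhere in the
// low 4GB, with no skip stride and default flags. The returned MDL may
// describe fewer bytes than requested, so callers should check
// Mdl->ByteCount before use.
//
//     PHYSICAL_ADDRESS Low, High, Skip;
//     PMDL Mdl;
//
//     Low.QuadPart = 0;
//     High.QuadPart = 0xFFFFFFFF;
//     Skip.QuadPart = 0;
//     Mdl = MmAllocatePagesForMdlEx(Low, High, Skip,
//                                   16 * PAGE_SIZE,
//                                   MmCached,
//                                   0);
//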

/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == -1) break;

        //
        // Sanity check
        //
        ASSERT(*Pages <= MmHighestPhysicalPage);

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1->u3.ReferenceCount == 1);

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;

        //
        // Dereference it
        //
        MmDereferencePage(*Pages);

        //
        // Clear this page and move on
        //
        *Pages++ = -1;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
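
//
// Pairing note (illustrative): MmFreePagesFromMdl releases only the physical
// pages. The MDL structure itself was allocated by MmAllocatePagesForMdl(Ex)
// and must still be freed by the caller:
//
//     MmFreePagesFromMdl(Mdl);
//     ExFreePool(Mdl);
//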

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_NUMBER PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == -1) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    //
    // In user-mode, let ReactOS do it
    //
    return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
}
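
//
// Usage sketch (illustrative only): map a locked MDL into system space
// without bugchecking on failure, then unmap it using the address that was
// returned. "Mdl" is a hypothetical MDL whose pages are already locked.
//
//     PVOID SystemVa = MmMapLockedPagesSpecifyCache(Mdl,
//                                                   KernelMode,
//                                                   MmCached,
//                                                   NULL,
//                                                   FALSE,
//                                                   NormalPagePriority);
//     if (SystemVa)
//     {
//         /* ... use the mapping ... */
//         MmUnmapLockedPages(SystemVa, Mdl);
//     }
//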

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}

/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_NUMBER PageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free advanced pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = (PPFN_NUMBER)(Mdl + 1);
            MdlPages += PageCount;

            //
            // Do the math
            //
            PageCount += *MdlPages;
            PointerPte -= *MdlPages;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  ((*MdlPages) << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        //
        // Let ReactOS handle it
        //
        MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
    }
}

/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    PETHREAD Thread;
    PMMSUPPORT AddressSpace;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    //
    // Get the thread and process
    //
    Thread = PsGetCurrentThread();
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = -1;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
            Address = PAGE_ALIGN(Address);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base >= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        AddressSpace = NULL; // Keep compiler happy
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        //
        // Use the process lock
        //
        UsePfnLock = FALSE;
        AddressSpace = &CurrentProcess->Vm;
        MmLockAddressSpace(AddressSpace);
        OldIrql = DISPATCH_LEVEL; // Keep compiler happy
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = -1;
#if (_MI_PAGING_LEVELS >= 3)
        /* Should be checking the PPE and PXE */
        ASSERT(FALSE);
#endif
        while ((PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                //
                // Release process address space lock
                //
                MmUnlockAddressSpace(AddressSpace);
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);
            Status = MmAccessFault(FALSE, Address, KernelMode, NULL);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                //
                // Use the address space lock
                //
                MmLockAddressSpace(AddressSpace);
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            //
                            // Release process address space lock
                            //
                            MmUnlockAddressSpace(AddressSpace);
                        }

                        //
                        // Access the page
                        //
                        Status = MmAccessFault(TRUE, Address, KernelMode, NULL);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            //
                            // Use the address space lock
                            //
                            MmLockAddressSpace(AddressSpace);
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        if (PageFrameIndex <= MmHighestPhysicalPage)
        {
            //
            // Get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PageFrameIndex);
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            //
            // Now lock the page
            //
            MmReferencePage(PageFrameIndex);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        if (!((ULONG_PTR)(++PointerPte) & (PAGE_SIZE - 1))) PointerPde++;
    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
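
//
// Usage sketch (illustrative only): the canonical probe-and-lock pattern for
// a user-mode buffer. MmProbeAndLockPages raises an exception on failure, so
// it must be wrapped in SEH; "UserBuffer" and "Length" are hypothetical
// caller variables.
//
//     PMDL Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
//     if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
//     _SEH2_TRY
//     {
//         MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
//     }
//     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
//     {
//         IoFreeMdl(Mdl);
//         _SEH2_YIELD(return _SEH2_GetExceptionCode());
//     }
//     _SEH2_END;
//     /* ... use the locked pages, then ... */
//     MmUnlockPages(Mdl);
//     IoFreeMdl(Mdl);
//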

/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the associated process and capture the flags, which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == -1) break;

            //
            // Check if this page is in the PFN database
            //
            if (*MdlPages <= MmHighestPhysicalPage)
            {
                //
                // Unlock and dereference
                //
                MmDereferencePage(*MdlPages);
            }
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == -1)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        //
        // Sanity check
        //
        ASSERT(*MdlPages <= MmHighestPhysicalPage);
    } while (++MdlPages < LastPage);

    //
    // Reset the pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        //
        // Unlock and dereference
        //
        MmDereferencePage(*MdlPages);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                    IN ULONG PoolTag,
                                    IN PMDL MemoryDescriptorList,
                                    IN MEMORY_CACHING_TYPE CacheType)
{
    UNIMPLEMENTED;
    return 0;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}

/* EOF */