/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;
SIZE_T MmSystemLockPagesCount;

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN SIZE_T Length)
{
    SIZE_T Size;

    //
    // Check if we don't have an MDL built
    //
    if (!Mdl)
    {
        //
        // Calculate the size we'll need and allocate the MDL
        //
        Size = MmSizeOfMdl(Base, Length);
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    //
    // Initialize it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}

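/*
 * Usage sketch (illustrative only, not part of the build): passing a NULL
 * Mdl makes the routine allocate one from nonpaged pool, sized via
 * MmSizeOfMdl(). 'Buffer' and 'Length' are hypothetical caller variables,
 * and since the MDL came from ExAllocatePoolWithTag here, ExFreePool is
 * assumed as the matching release.
 *
 *     PMDL Mdl = MmCreateMdl(NULL, Buffer, Length);
 *     if (Mdl)
 *     {
 *         ... describe/lock/map the buffer ...
 *         ExFreePool(Mdl);
 *     }
 */
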
/*
 * @implemented
 */
SIZE_T
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN SIZE_T Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}

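/*
 * Worked example (illustrative): a 0x300-byte buffer starting at offset
 * 0xF00 within a page crosses one page boundary, so it spans two pages
 * and MmSizeOfMdl returns sizeof(MDL) + 2 * sizeof(PFN_NUMBER).
 */
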
/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated with a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}

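/*
 * Usage sketch (illustrative only): nonpaged pool is always resident, so
 * the MDL can be filled in directly instead of probing and locking.
 * 'Size' and the pool tag are hypothetical.
 *
 *     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, Size, 'lpmX');
 *     PMDL Mdl = Buffer ? IoAllocateMdl(Buffer, Size, FALSE, FALSE, NULL)
 *                       : NULL;
 *     if (Mdl) MmBuildMdlForNonPagedPool(Mdl);
 */
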
/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes)
{
    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 MiNotMapped,
                                 0);
}

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T TotalBytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Check for invalid cache type
    //
    if (CacheType > MmWriteCombined)
    {
        //
        // Normalize to default
        //
        CacheAttribute = MiNotMapped;
    }
    else
    {
        //
        // Convert to the internal caching attribute
        //
        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    }

    //
    // Only these flags are allowed
    //
    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
    {
        //
        // Silently fail
        //
        return NULL;
    }

    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 CacheAttribute,
                                 Flags);
}

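/*
 * Call sketch (illustrative only): request four pages from anywhere in
 * physical memory, to be mapped non-cached later, skipping the zeroing
 * pass. The local variables are hypothetical.
 *
 *     PHYSICAL_ADDRESS Low, High, Skip;
 *     Low.QuadPart = 0;
 *     High.QuadPart = ~0ULL;
 *     Skip.QuadPart = 0;
 *     Mdl = MmAllocatePagesForMdlEx(Low, High, Skip, 4 * PAGE_SIZE,
 *                                   MmNonCached, MM_DONT_ZERO_ALLOCATION);
 *
 * The routine may describe fewer bytes than requested, so callers are
 * expected to check MmGetMdlByteCount(Mdl).
 */
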
/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == LIST_HEAD) break;

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
        }

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u2.ShareCount = 0;

        //
        // Dereference it
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        //
        // Clear this page and move on
        //
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

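/*
 * Lifetime note (illustrative): pages handed out by
 * MmAllocatePagesForMdl(Ex) above are returned here, but the MDL
 * structure itself still has to be released by the caller afterwards.
 *
 *     MmFreePagesFromMdl(Mdl);
 *     ExFreePool(Mdl);
 */
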
/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_COUNT PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    UNIMPLEMENTED;
    return NULL;
}

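/*
 * Kernel-mode mapping sketch (illustrative only): setting
 * MDL_MAPPING_CAN_FAIL turns system-PTE exhaustion into a NULL return
 * instead of a bugcheck; the mapping is undone with MmUnmapLockedPages.
 *
 *     Mdl->MdlFlags |= MDL_MAPPING_CAN_FAIL;
 *     Va = MmMapLockedPagesSpecifyCache(Mdl, KernelMode, MmCached, NULL,
 *                                       FALSE, NormalPagePriority);
 *     if (Va)
 *     {
 *         ... use the system-space mapping ...
 *         MmUnmapLockedPages(Va, Mdl);
 *     }
 */
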
/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}

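/*
 * In practice most drivers reach this path through the
 * MmGetSystemAddressForMdlSafe macro, which wraps
 * MmMapLockedPagesSpecifyCache and reuses an existing system mapping
 * when the MDL already has one (sketch):
 *
 *     Va = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
 */
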
/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_COUNT PageCount, ExtraPageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free the extra PTEs as well
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = MmGetMdlPfnArray(Mdl);

            /* Number of extra pages stored after the PFN array */
            ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);

            //
            // Do the math
            //
            PageCount += ExtraPageCount;
            PointerPte -= ExtraPageCount;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  (ExtraPageCount << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        UNIMPLEMENTED;
    }
}

/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
#if (_MI_PAGING_LEVELS >= 3)
    PMMPDE PointerPpe;
#endif
#if (_MI_PAGING_LEVELS == 4)
    PMMPDE PointerPxe;
#endif
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    //
    // Get the process
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = LIST_HEAD;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    PointerPpe = MiAddressToPpe(StartAddress);
#endif
#if (_MI_PAGING_LEVELS == 4)
    PointerPxe = MiAddressToPxe(StartAddress);
#endif

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        /* Lock the process working set */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = LIST_HEAD;
        while (
#if (_MI_PAGING_LEVELS == 4)
               (PointerPxe->u.Hard.Valid == 0) ||
#endif
#if (_MI_PAGING_LEVELS >= 3)
               (PointerPpe->u.Hard.Valid == 0) ||
#endif
               (PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);

            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
            Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)0xBADBADA3);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Access the page
                        //

                        //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
                        Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)0xBADBADA3);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            MiReferenceProbedPageAndBumpLockCount(Pfn1);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
#if (_MI_PAGING_LEVELS >= 3)
        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
#endif
#if (_MI_PAGING_LEVELS == 4)
        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
#endif

    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}

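/*
 * Caller pattern sketch (illustrative only): because this routine raises
 * on failure (see the ExRaiseStatus calls above), callers are expected to
 * wrap it in SEH. 'UserBuffer' and 'Length' are hypothetical.
 *
 *     Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
 *     if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
 *     _SEH2_TRY
 *     {
 *         MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
 *     }
 *     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
 *     {
 *         IoFreeMdl(Mdl);
 *         _SEH2_YIELD(return _SEH2_GetExceptionCode());
 *     }
 *     _SEH2_END;
 *     ... use the locked pages, then MmUnlockPages(Mdl) and IoFreeMdl(Mdl) ...
 */
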
/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the associated process and capture the flags, which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automatically undo any MmGetSystemAddressForMdl* mappings for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Check if this page is in the PFN database
            //
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -(LONG_PTR)PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -(LONG_PTR)PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == LIST_HEAD)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT(*MdlPages != 0);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        /* Get the current entry and reference count */
        Pfn1 = (PMMPFN)*MdlPages;
        MiDereferencePfnAndDropLockCount(Pfn1);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                    IN ULONG PoolTag,
                                    IN PMDL MemoryDescriptorList,
                                    IN MEMORY_CACHING_TYPE CacheType)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}


/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}

/* EOF */