/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::MDLSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;
SIZE_T MmSystemLockPagesCount;

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN SIZE_T Length)
{
    SIZE_T Size;

    //
    // Check if we don't have an MDL built
    //
    if (!Mdl)
    {
        //
        // Calculate the size we'll need and allocate the MDL
        //
        Size = MmSizeOfMdl(Base, Length);
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    //
    // Initialize it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}
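
/*
 * Usage sketch (illustrative only; "Buffer" and "Length" are hypothetical):
 * a caller frees an MDL allocated here with ExFreePool once done with it.
 * New code should prefer IoAllocateMdl, as MmCreateMdl is documented as
 * obsolete.
 *
 *     PMDL Mdl = MmCreateMdl(NULL, Buffer, Length);
 *     if (Mdl)
 *     {
 *         // ... build or probe/lock the MDL, use it ...
 *         ExFreePool(Mdl);
 *     }
 */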

/*
 * @implemented
 */
SIZE_T
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN SIZE_T Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}
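
/*
 * Worked example (assuming 4KB pages): for Base = 0x10FFC and Length = 8,
 * the buffer straddles a page boundary, so ADDRESS_AND_SIZE_TO_SPAN_PAGES
 * returns 2 and MmSizeOfMdl returns sizeof(MDL) + 2 * sizeof(PFN_NUMBER).
 */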

/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated with a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}
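
/*
 * Usage sketch (illustrative only; "Buffer", "Size" and the pool tag are
 * hypothetical): because nonpaged pool is always resident, no probe or
 * lock step is needed before the MDL can be used.
 *
 *     Buffer = ExAllocatePoolWithTag(NonPagedPool, Size, 'MdlX');
 *     Mdl = IoAllocateMdl(Buffer, Size, FALSE, FALSE, NULL);
 *     if (Mdl) MmBuildMdlForNonPagedPool(Mdl);
 */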

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes)
{
    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 MiNotMapped,
                                 0);
}

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T TotalBytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Check for invalid cache type
    //
    if (CacheType > MmWriteCombined)
    {
        //
        // Normalize to default
        //
        CacheAttribute = MiNotMapped;
    }
    else
    {
        //
        // Convert to internal caching attribute
        //
        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    }

    //
    // Only these flags are allowed
    //
    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
    {
        //
        // Silently fail
        //
        return NULL;
    }

    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 CacheAttribute,
                                 Flags);
}
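
/*
 * Usage sketch (illustrative only; the 64KB request size is hypothetical):
 * request write-combined pages from anywhere in physical memory, skipping
 * the zeroing pass. Callers must handle both a NULL return and an MDL that
 * describes fewer bytes than requested.
 *
 *     PHYSICAL_ADDRESS LowAddress, HighAddress, SkipBytes;
 *     LowAddress.QuadPart = 0;
 *     HighAddress.QuadPart = -1;
 *     SkipBytes.QuadPart = 0;
 *     Mdl = MmAllocatePagesForMdlEx(LowAddress,
 *                                   HighAddress,
 *                                   SkipBytes,
 *                                   64 * 1024,
 *                                   MmWriteCombined,
 *                                   MM_DONT_ZERO_ALLOCATION);
 *     if (Mdl && (MmGetMdlByteCount(Mdl) < 64 * 1024)) { ... partial ... }
 */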

/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == LIST_HEAD) break;

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
        }

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u2.ShareCount = 0;

        //
        // Dereference it
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        //
        // Clear this page and move on
        //
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
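
/*
 * Pairing note (illustrative only): pages obtained from
 * MmAllocatePagesForMdl(Ex) must be returned with MmFreePagesFromMdl, and
 * the MDL structure itself must then be freed separately:
 *
 *     MmFreePagesFromMdl(Mdl);
 *     ExFreePool(Mdl);
 */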

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_NUMBER PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    UNIMPLEMENTED;
    return NULL;
}
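
/*
 * Usage sketch (illustrative only): mapping a locked-down MDL into system
 * space without bugchecking on failure. With BugCheckOnFailure set to
 * FALSE, a kernel-mode caller gets NULL back when system PTEs run out.
 *
 *     Va = MmMapLockedPagesSpecifyCache(Mdl,
 *                                       KernelMode,
 *                                       MmCached,
 *                                       NULL,
 *                                       FALSE,
 *                                       NormalPagePriority);
 *     if (!Va) return STATUS_INSUFFICIENT_RESOURCES;
 */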

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}

/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_NUMBER PageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free advanced pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = (PPFN_NUMBER)(Mdl + 1);
            MdlPages += PageCount;

            //
            // Do the math
            //
            PageCount += *MdlPages;
            PointerPte -= *MdlPages;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  ((*MdlPages) << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        UNIMPLEMENTED;
    }
}
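
/*
 * Pairing note (illustrative only): a kernel mapping created with
 * MmMapLockedPages(SpecifyCache) is undone by passing back the address the
 * map call returned, which is also kept in Mdl->MappedSystemVa:
 *
 *     MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
 */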

/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    USHORT OldRefCount, RefCount;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    //
    // Get the process
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = LIST_HEAD;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    DPRINT1("PAE/x64 Not Implemented\n");
    ASSERT(FALSE);
#endif

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base >= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        /* Lock the process working set */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = LIST_HEAD;
#if (_MI_PAGING_LEVELS >= 3)
        /* Should be checking the PPE and PXE */
        ASSERT(FALSE);
#endif
        while ((PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);
            Status = MmAccessFault(FALSE, Address, KernelMode, NULL);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Access the page
                        //
                        Status = MmAccessFault(TRUE, Address, KernelMode, NULL);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
            if (MI_IS_ROS_PFN(Pfn1))
            {
                /* ReactOS Mm doesn't track share count */
                ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
            }
            else
            {
                /* On ARM3 pages, we should see a valid share count */
                ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));

                /* We don't support mapping a prototype page yet */
                ASSERT((Pfn1->u3.e1.PrototypePte == 0) && (Pfn1->OriginalPte.u.Soft.Prototype == 0));
            }

            /* More locked pages! */
            InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, 1);

            /* Loop trying to update the reference count */
            do
            {
                /* Get the current reference count, make sure it's valid */
                OldRefCount = Pfn1->u3.e2.ReferenceCount;
                ASSERT(OldRefCount != 0);
                ASSERT(OldRefCount < 2500);

                /* Bump it up by one */
                RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
                                                        OldRefCount + 1,
                                                        OldRefCount);
                ASSERT(RefCount != 0);
            } while (OldRefCount != RefCount);

            /* Was this the first lock attempt? */
            if (OldRefCount != 1)
            {
                /* Someone else came through */
                InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
            }
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (!((ULONG_PTR)PointerPte & (PD_SIZE - 1))) PointerPde++;
    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
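
/*
 * Usage sketch (illustrative only; "UserBuffer" and "Length" are
 * hypothetical): MmProbeAndLockPages raises an exception on failure, so it
 * must always be wrapped in SEH; a successful lock is undone with
 * MmUnlockPages.
 *
 *     Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
 *     if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
 *     Status = STATUS_SUCCESS;
 *     _SEH2_TRY
 *     {
 *         MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
 *     }
 *     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
 *     {
 *         Status = _SEH2_GetExceptionCode();
 *     }
 *     _SEH2_END;
 *     if (!NT_SUCCESS(Status))
 *     {
 *         IoFreeMdl(Mdl);
 *         return Status;
 *     }
 *     // ... use the locked pages ...
 *     MmUnlockPages(Mdl);
 *     IoFreeMdl(Mdl);
 */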

/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    USHORT RefCount, OldRefCount;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the process associated and capture the flags which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Check if this page is in the PFN database
            //
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1)
            {
                /* Get the current entry and reference count */
                OldRefCount = Pfn1->u3.e2.ReferenceCount;
                ASSERT(OldRefCount != 0);

                /* Is this already the last dereference */
                if (OldRefCount == 1)
                {
                    /* It should be on a free list waiting for us */
                    ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
                    ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
                    ASSERT(Pfn1->u2.ShareCount == 0);

                    /* Not supported yet */
                    ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
                            (Pfn1->OriginalPte.u.Soft.Prototype == 0)));

                    /* One less page */
                    InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);

                    /* Do the last dereference, we're done here */
                    MiDecrementReferenceCount(Pfn1, *MdlPages);
                }
                else
                {
                    /* Loop decrementing one reference */
                    do
                    {
                        /* Make sure it's still valid */
                        OldRefCount = Pfn1->u3.e2.ReferenceCount;
                        ASSERT(OldRefCount != 0);

                        /* Take off one reference */
                        RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
                                                                OldRefCount - 1,
                                                                OldRefCount);
                        ASSERT(RefCount != 0);
                    } while (OldRefCount != RefCount);
                    ASSERT(RefCount > 1);

                    /* Are there only lock references left? */
                    if (RefCount == 2)
                    {
                        /* And does the page still have users? */
                        if (Pfn1->u2.ShareCount >= 1)
                        {
                            /* Then it should still be valid */
                            ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);

                            /* Not supported yet */
                            ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
                                    (Pfn1->OriginalPte.u.Soft.Prototype == 0)));

                            /* But there is one less "locked" page though */
                            InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
                        }
                    }
                }
            }
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == LIST_HEAD)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT((*MdlPages) != 0);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        /* Get the current entry and reference count */
        Pfn1 = (PMMPFN)(*MdlPages);
        OldRefCount = Pfn1->u3.e2.ReferenceCount;
        ASSERT(OldRefCount != 0);

        /* Is this already the last dereference */
        if (OldRefCount == 1)
        {
            /* It should be on a free list waiting for us */
            ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
            ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
            ASSERT(Pfn1->u2.ShareCount == 0);

            /* Not supported yet */
            ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
                    (Pfn1->OriginalPte.u.Soft.Prototype == 0)));

            /* One less page */
            InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);

            /* Do the last dereference, we're done here */
            MiDecrementReferenceCount(Pfn1, *MdlPages);
        }
        else
        {
            /* Loop decrementing one reference */
            do
            {
                /* Make sure it's still valid */
                OldRefCount = Pfn1->u3.e2.ReferenceCount;
                ASSERT(OldRefCount != 0);

                /* Take off one reference */
                RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
                                                        OldRefCount - 1,
                                                        OldRefCount);
                ASSERT(RefCount != 0);
            } while (OldRefCount != RefCount);
            ASSERT(RefCount > 1);

            /* Are there only lock references left? */
            if (RefCount == 2)
            {
                /* And does the page still have users? */
                if (Pfn1->u2.ShareCount >= 1)
                {
                    /* Then it should still be valid */
                    ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);

                    /* Not supported yet */
                    ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
                            (Pfn1->OriginalPte.u.Soft.Prototype == 0)));

                    /* But there is one less "locked" page though */
                    InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
                }
            }
        }
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                    IN ULONG PoolTag,
                                    IN PMDL MemoryDescriptorList,
                                    IN MEMORY_CACHING_TYPE CacheType)
{
    UNIMPLEMENTED;
    return 0;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}

/* EOF */