/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;
SIZE_T MmSystemLockPagesCount;

/* PUBLIC FUNCTIONS ***********************************************************/
/*
 * @implemented
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN SIZE_T Length)
{
    SIZE_T Size;

    //
    // Check if we don't have an MDL built
    //
    if (!Mdl)
    {
        //
        // Calculate the size we'll need and allocate the MDL
        //
        Size = MmSizeOfMdl(Base, Length);
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    //
    // Initialize it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}

/*
 * @implemented
 */
SIZE_T
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN SIZE_T Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}

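/*
 * Illustrative sketch (not compiled here): an MDL is a variable-size
 * structure, a fixed header immediately followed by one PFN_NUMBER per page
 * spanned, which is exactly what MmSizeOfMdl computes above. A hypothetical
 * caller describing a buffer might do:
 *
 *     PVOID Buffer = ...;                  // assumed nonpaged buffer
 *     SIZE_T Length = 0x3000;              // three pages
 *     PMDL Mdl = MmCreateMdl(NULL, Buffer, Length);
 *     if (Mdl)
 *     {
 *         // ... describe/lock/map the pages as appropriate ...
 *         ExFreePoolWithTag(Mdl, TAG_MDL); // matches the allocation above
 *     }
 *
 * New code would normally prefer IoAllocateMdl/IoFreeMdl over MmCreateMdl,
 * which is considered obsolete.
 */
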
/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated with a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}

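/*
 * Illustrative sketch (buffer/length names are assumptions): for a buffer
 * known to come from nonpaged pool, no probe/lock is needed; the MDL can be
 * filled directly from the already-resident PTEs, as done above:
 *
 *     PMDL Mdl = IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL);
 *     if (Mdl)
 *     {
 *         MmBuildMdlForNonPagedPool(Mdl);
 *         // MappedSystemVa is now valid; no MmProbeAndLockPages and
 *         // no MmUnlockPages are involved for this MDL
 *         IoFreeMdl(Mdl);
 *     }
 */
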
/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes)
{
    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 MiNotMapped,
                                 0);
}

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T TotalBytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Check for invalid cache type
    //
    if (CacheType > MmWriteCombined)
    {
        //
        // Normalize to default
        //
        CacheAttribute = MiNotMapped;
    }
    else
    {
        //
        // Convert to internal caching attribute
        //
        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    }

    //
    // Only these flags are allowed
    //
    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
    {
        //
        // Silently fail
        //
        return NULL;
    }

    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 CacheAttribute,
                                 Flags);
}

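/*
 * Illustrative sketch (values are assumptions): allocating physical pages
 * below 4GB for a device, then mapping them for CPU access:
 *
 *     PHYSICAL_ADDRESS Low, High, Skip;
 *     Low.QuadPart = 0;
 *     High.QuadPart = 0xFFFFFFFF;
 *     Skip.QuadPart = 0;
 *     PMDL Mdl = MmAllocatePagesForMdlEx(Low, High, Skip, PAGE_SIZE * 4,
 *                                        MmCached, 0);
 *     if (Mdl)
 *     {
 *         PVOID Va = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
 *         // ... use Va if non-NULL; the MDL may describe fewer bytes than
 *         //     requested, so check MmGetMdlByteCount(Mdl) ...
 *     }
 */
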
/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == LIST_HEAD) break;

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
        }

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u2.ShareCount = 0;

        //
        // Dereference it
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        //
        // Clear this page and move on
        //
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

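/*
 * Illustrative sketch: pages obtained from MmAllocatePagesForMdl(Ex) are
 * returned with MmFreePagesFromMdl, and the MDL structure itself must then
 * be freed separately, since MmFreePagesFromMdl releases only the pages:
 *
 *     if (Va) MmUnmapLockedPages(Va, Mdl);   // only if it was mapped
 *     MmFreePagesFromMdl(Mdl);
 *     ExFreePool(Mdl);
 */
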
/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_NUMBER PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    UNIMPLEMENTED;
    return NULL;
}

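/*
 * Illustrative sketch: MmGetSystemAddressForMdlSafe is the usual caller of
 * this routine; it reuses an existing system mapping when one exists and
 * otherwise asks for a non-bugchecking map, roughly equivalent to this
 * hypothetical helper:
 *
 *     PVOID
 *     MapMdlSafe(PMDL Mdl, MM_PAGE_PRIORITY Priority)
 *     {
 *         if (Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
 *                              MDL_SOURCE_IS_NONPAGED_POOL))
 *         {
 *             return Mdl->MappedSystemVa;
 *         }
 *         return MmMapLockedPagesSpecifyCache(Mdl, KernelMode, MmCached,
 *                                             NULL, FALSE, Priority);
 *     }
 */
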
/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}

/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_NUMBER PageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free the extra PTEs
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = (PPFN_NUMBER)(Mdl + 1);
            MdlPages += PageCount;

            //
            // Do the math
            //
            PageCount += *MdlPages;
            PointerPte -= *MdlPages;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  ((*MdlPages) << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        UNIMPLEMENTED;
    }
}

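/*
 * Illustrative sketch: MmUnmapLockedPages pairs with an explicit kernel-mode
 * mapping. A mapping obtained via MmGetSystemAddressForMdlSafe does not need
 * an explicit unmap, because MmUnlockPages (below) undoes any
 * MDL_MAPPED_TO_SYSTEM_VA mapping automatically:
 *
 *     PVOID Va = MmMapLockedPagesSpecifyCache(Mdl, KernelMode, MmCached,
 *                                             NULL, FALSE, NormalPagePriority);
 *     if (Va)
 *     {
 *         // ... use Va ...
 *         MmUnmapLockedPages(Va, Mdl);
 *     }
 */
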
/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    USHORT OldRefCount, RefCount;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    //
    // Get the process
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = LIST_HEAD;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    DPRINT1("PAE/x64 Not Implemented\n");
    ASSERT(FALSE);
#endif

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base >= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        /* Lock the process working set */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = LIST_HEAD;
#if (_MI_PAGING_LEVELS >= 3)
        /* Should be checking the PPE and PXE */
        ASSERT(FALSE);
#endif
        while ((PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // Which kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);
            Status = MmAccessFault(FALSE, Address, KernelMode, NULL);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // Which lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // Which kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Access the page
                        //
                        Status = MmAccessFault(TRUE, Address, KernelMode, NULL);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
            if (MI_IS_ROS_PFN(Pfn1))
            {
                /* ReactOS Mm doesn't track share count */
                ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
            }
            else
            {
                /* On ARM3 pages, we should see a valid share count */
                ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));

                /* We don't support mapping a prototype page yet */
                ASSERT((Pfn1->u3.e1.PrototypePte == 0) && (Pfn1->OriginalPte.u.Soft.Prototype == 0));
            }

            /* More locked pages! */
            InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, 1);

            /* Loop trying to update the reference count */
            do
            {
                /* Get the current reference count, make sure it's valid */
                OldRefCount = Pfn1->u3.e2.ReferenceCount;
                ASSERT(OldRefCount != 0);
                ASSERT(OldRefCount < 2500);

                /* Bump it up by one */
                RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
                                                        OldRefCount + 1,
                                                        OldRefCount);
                ASSERT(RefCount != 0);
            } while (OldRefCount != RefCount);

            /* Was this the first lock attempt? */
            if (OldRefCount != 1)
            {
                /* Someone else came through */
                InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
            }
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (!((ULONG_PTR)PointerPte & (PD_SIZE - 1))) PointerPde++;
    } while (PointerPte <= LastPte);

    //
    // Which kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // Which kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
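
/*
 * Illustrative sketch: because MmProbeAndLockPages raises on failure,
 * callers wrap it in SEH. The canonical driver pattern (names hypothetical),
 * using the same PSEH2 macros as this file:
 *
 *     Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
 *     if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
 *     _SEH2_TRY
 *     {
 *         MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
 *     }
 *     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
 *     {
 *         IoFreeMdl(Mdl);
 *         _SEH2_YIELD(return _SEH2_GetExceptionCode());
 *     }
 *     _SEH2_END;
 *
 *     // ... access the pages, e.g. via MmGetSystemAddressForMdlSafe ...
 *
 *     MmUnlockPages(Mdl);
 *     IoFreeMdl(Mdl);
 */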

/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    USHORT RefCount, OldRefCount;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the associated process and capture the flags, which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Check if this page is in the PFN database
            //
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1)
            {
                /* Get the current entry and reference count */
                OldRefCount = Pfn1->u3.e2.ReferenceCount;
                ASSERT(OldRefCount != 0);

                /* Is this already the last dereference? */
                if (OldRefCount == 1)
                {
                    /* It should be on a free list waiting for us */
                    ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
                    ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
                    ASSERT(Pfn1->u2.ShareCount == 0);

                    /* Not supported yet */
                    ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
                            (Pfn1->OriginalPte.u.Soft.Prototype == 0)));

                    /* One less page */
                    InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);

                    /* Do the last dereference, we're done here */
                    MiDecrementReferenceCount(Pfn1, *MdlPages);
                }
                else
                {
                    /* Loop decrementing one reference */
                    do
                    {
                        /* Make sure it's still valid */
                        OldRefCount = Pfn1->u3.e2.ReferenceCount;
                        ASSERT(OldRefCount != 0);

                        /* Take off one reference */
                        RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
                                                                OldRefCount - 1,
                                                                OldRefCount);
                        ASSERT(RefCount != 0);
                    } while (OldRefCount != RefCount);
                    ASSERT(RefCount > 1);

                    /* Are there only lock references left? */
                    if (RefCount == 2)
                    {
                        /* And does the page still have users? */
                        if (Pfn1->u2.ShareCount >= 1)
                        {
                            /* Then it should still be valid */
                            ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);

                            /* Not supported yet */
                            ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
                                    (Pfn1->OriginalPte.u.Soft.Prototype == 0)));

                            /* But there is one less "locked" page */
                            InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
                        }
                    }
                }
            }
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == LIST_HEAD)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT((*MdlPages) != 0);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        /* Get the current entry and reference count */
        Pfn1 = (PMMPFN)(*MdlPages);
        OldRefCount = Pfn1->u3.e2.ReferenceCount;
        ASSERT(OldRefCount != 0);

        /* Is this already the last dereference? */
        if (OldRefCount == 1)
        {
            /* It should be on a free list waiting for us */
            ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
            ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
            ASSERT(Pfn1->u2.ShareCount == 0);

            /* Not supported yet */
            ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
                    (Pfn1->OriginalPte.u.Soft.Prototype == 0)));

            /* One less page */
            InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);

            /* Do the last dereference, we're done here */
            MiDecrementReferenceCount(Pfn1, MiGetPfnEntryIndex(Pfn1));
        }
        else
        {
            /* Loop decrementing one reference */
            do
            {
                /* Make sure it's still valid */
                OldRefCount = Pfn1->u3.e2.ReferenceCount;
                ASSERT(OldRefCount != 0);

                /* Take off one reference */
                RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
                                                        OldRefCount - 1,
                                                        OldRefCount);
                ASSERT(RefCount != 0);
            } while (OldRefCount != RefCount);
            ASSERT(RefCount > 1);

            /* Are there only lock references left? */
            if (RefCount == 2)
            {
                /* And does the page still have users? */
                if (Pfn1->u2.ShareCount >= 1)
                {
                    /* Then it should still be valid */
                    ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);

                    /* Not supported yet */
                    ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
                            (Pfn1->OriginalPte.u.Soft.Prototype == 0)));

                    /* But there is one less "locked" page */
                    InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
                }
            }
        }
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                    IN ULONG PoolTag,
                                    IN PMDL MemoryDescriptorList,
                                    IN MEMORY_CACHING_TYPE CacheType)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}

/* EOF */