Merge r55012 adding Wine3D control panel as per Amine's request.
[reactos.git] / ntoskrnl / mm / ARM3 / mdlsup.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mdlsup.c
5 * PURPOSE: ARM Memory Manager Memory Descriptor List (MDL) Management
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include "../ARM3/miarm.h"
17
/* GLOBALS ********************************************************************/

BOOLEAN MmTrackPtes;            // debug switch for system-PTE tracking (not read in this file)
BOOLEAN MmTrackLockedPages;     // debug switch for locked-page tracking (not read in this file)
SIZE_T MmSystemLockPagesCount;  // system-wide count of pages pinned via MmProbeAndLockPages
23
24 /* PUBLIC FUNCTIONS ***********************************************************/
25
26 /*
27 * @implemented
28 */
29 PMDL
30 NTAPI
31 MmCreateMdl(IN PMDL Mdl,
32 IN PVOID Base,
33 IN SIZE_T Length)
34 {
35 SIZE_T Size;
36
37 //
38 // Check if we don't have an MDL built
39 //
40 if (!Mdl)
41 {
42 //
43 // Calculate the size we'll need and allocate the MDL
44 //
45 Size = MmSizeOfMdl(Base, Length);
46 Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
47 if (!Mdl) return NULL;
48 }
49
50 //
51 // Initialize it
52 //
53 MmInitializeMdl(Mdl, Base, Length);
54 return Mdl;
55 }
56
57 /*
58 * @implemented
59 */
60 SIZE_T
61 NTAPI
62 MmSizeOfMdl(IN PVOID Base,
63 IN SIZE_T Length)
64 {
65 //
66 // Return the MDL size
67 //
68 return sizeof(MDL) +
69 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
70 }
71
/*
 * @implemented
 *
 * Fills in an MDL that describes a buffer in nonpaged pool (or other
 * permanently resident system memory). Because such memory is already
 * mapped and can never be paged out, no probing or locking is needed:
 * we just read the PFNs straight out of the (always valid) PTEs.
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks: non-empty buffer, and the MDL must not already be
    // locked, mapped, partial, or marked as nonpaged pool
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // Nonpaged pool is system memory, so the MDL has no owning process
    //
    Mdl->Process = NULL;

    //
    // The PFN array lives immediately after the MDL header; the buffer's
    // page-aligned base was stored by MmInitializeMdl
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Nonpaged pool is already mapped in system space, so the system VA is
    // just the original address; then compute how many pages we span
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Walk the PTEs backing the buffer and record each PFN in the array
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // If the last PFN has no PFN database entry, this is device (I/O)
    // memory rather than RAM -- remember that in the flags
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}
136
137 /*
138 * @implemented
139 */
140 PMDL
141 NTAPI
142 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
143 IN PHYSICAL_ADDRESS HighAddress,
144 IN PHYSICAL_ADDRESS SkipBytes,
145 IN SIZE_T TotalBytes)
146 {
147 //
148 // Call the internal routine
149 //
150 return MiAllocatePagesForMdl(LowAddress,
151 HighAddress,
152 SkipBytes,
153 TotalBytes,
154 MiNotMapped,
155 0);
156 }
157
158 /*
159 * @implemented
160 */
161 PMDL
162 NTAPI
163 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
164 IN PHYSICAL_ADDRESS HighAddress,
165 IN PHYSICAL_ADDRESS SkipBytes,
166 IN SIZE_T TotalBytes,
167 IN MEMORY_CACHING_TYPE CacheType,
168 IN ULONG Flags)
169 {
170 MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
171
172 //
173 // Check for invalid cache type
174 //
175 if (CacheType > MmWriteCombined)
176 {
177 //
178 // Normalize to default
179 //
180 CacheAttribute = MiNotMapped;
181 }
182 else
183 {
184 //
185 // Conver to internal caching attribute
186 //
187 CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
188 }
189
190 //
191 // Only these flags are allowed
192 //
193 if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
194 {
195 //
196 // Silently fail
197 //
198 return NULL;
199 }
200
201 //
202 // Call the internal routine
203 //
204 return MiAllocatePagesForMdl(LowAddress,
205 HighAddress,
206 SkipBytes,
207 TotalBytes,
208 CacheAttribute,
209 Flags);
210 }
211
/*
 * @implemented
 *
 * Frees the physical pages described by an MDL that was obtained from
 * MmAllocatePagesForMdl(Ex). Walks the PFN array under the PFN lock,
 * clears each page's allocation markers and drops its reference(s).
 * The MDL itself is NOT freed here -- the caller still owns it.
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks: must be at <= APC_LEVEL (we take a queued spinlock),
    // must not describe I/O space, and the start VA must be page aligned
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // LIST_HEAD terminates a short (partially filled) PFN array
        //
        if (*Pages == LIST_HEAD) break;

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        // NOTE(review): 0x1FFEDCB appears to be the sentinel PteFrame that
        // MDL-allocated pages are tagged with -- confirm against
        // MiAllocatePagesForMdl before relying on it
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
        }

        //
        // Clear the allocation markers and share count
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u2.ShareCount = 0;

        //
        // Dereference it
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        //
        // Mark this slot consumed and move on
        //
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
306
/*
 * @implemented
 *
 * Maps the (already locked) pages described by the MDL into virtual
 * address space. Only the KernelMode path is implemented: the pages are
 * mapped through reserved system PTEs with the requested cache attribute.
 * The BaseAddress and Priority hints are currently ignored, and user-mode
 * mappings are unimplemented.
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_COUNT PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks: not already mapped, and actually locked or partial
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type (I/O space uses its own attribute row)
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve one system PTE per page
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Compute the mapped address (keep the original byte offset)
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Build the PTE template from the requested cache attribute
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Default (cached) -- nothing to do
                //
                break;
        }

        //
        // Write one valid PTE per PFN; LIST_HEAD terminates a short array
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    // User-mode mappings are not implemented yet
    UNIMPLEMENTED;
    return NULL;
}
465
466 /*
467 * @implemented
468 */
469 PVOID
470 NTAPI
471 MmMapLockedPages(IN PMDL Mdl,
472 IN KPROCESSOR_MODE AccessMode)
473 {
474 //
475 // Call the extended version
476 //
477 return MmMapLockedPagesSpecifyCache(Mdl,
478 AccessMode,
479 MmCached,
480 NULL,
481 TRUE,
482 HighPagePriority);
483 }
484
/*
 * @implemented
 *
 * Tears down a mapping created by MmMapLockedPages(SpecifyCache). Only the
 * kernel (system-space) path is implemented: it validates that the mapping
 * came from the system PTE pool, optionally accounts for extra PTEs stored
 * past the PFN array (MDL_FREE_EXTRA_PTES), clears the mapping flags and
 * releases the system PTEs. User-space unmapping is unimplemented.
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_COUNT PageCount, ExtraPageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE backing the mapped address
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free advanced pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = MmGetMdlPfnArray(Mdl);

            /* Number of extra pages stored after the PFN array */
            ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);

            //
            // Widen the release window to cover the extra PTEs, which
            // precede the mapping
            //
            PageCount += ExtraPageCount;
            PointerPte -= ExtraPageCount;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  (ExtraPageCount << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        // User-space unmapping is not implemented yet
        UNIMPLEMENTED;
    }
}
578
/*
 * @implemented
 *
 * Probes the buffer described by the MDL for the requested access, faults
 * in any non-resident pages, and pins each backing page by taking an extra
 * PFN reference. On success the MDL's PFN array is filled in and
 * MDL_PAGES_LOCKED is set. On failure, any pages locked so far are
 * unlocked again and the probe/fault status is raised -- callers must wrap
 * this in SEH. User buffers additionally get copy-on-write faults resolved
 * when write access is requested.
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    USHORT OldRefCount, RefCount;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks: non-empty, properly aligned, and not already
    // locked/mapped/partial/IO
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access: user-mode buffers must lie below the probe
       address and must not wrap around */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    //
    // User addresses belong to the current process; kernel addresses
    // belong to no process
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    //
    // Probe each page: touch it for read, and ProbeForWrite if the caller
    // wants write access to a user buffer. Faults land in the SEH handler.
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Pre-terminate the slot so a partial probe leaves a valid array
            //
            *MdlPages = LIST_HEAD;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Advance to the start of the next page
            //
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // The probe faulted -- capture the status for re-raising below
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE for the start of the buffer
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    DPRINT1("PAE/x64 Not Implemented\n");
    ASSERT(FALSE);
#endif

    //
    // Sanity check: the probe loop reset the array cursor
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Record the operation direction in the MDL flags
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now* so the cleanup path can call
    // MmUnlockPages even if we fail partway through
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Choose the synchronization for the PFN walk: the PFN lock for
    // kernel buffers, the process working-set lock for user buffers
    //
    if (Base >= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        /* Lock the process working set */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Main loop: for each PTE, fault the page in if needed, resolve
    // copy-on-write for writes, then reference and record the PFN
    //
    do
    {
        //
        // Pre-terminate the slot, then fault in any non-resident page
        //
        *MdlPages = LIST_HEAD;
#if (_MI_PAGING_LEVELS >= 3)
        /* Should be checking the PPE and PXE */
        ASSERT(FALSE);
#endif
        while ((PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // Drop our lock before faulting -- the fault handler may block
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);

            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
            Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)0xBADBADA3);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // Re-acquire whichever lock we were holding
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // Drop the lock before forcing the COW fault
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Fault the page in for write (breaks the COW)
                        //

                        //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
                        Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)0xBADBADA3);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Start over: re-check this PTE from scratch
                        //
                        continue;
                    }
                }

                //
                // Write requested to a non-writable, non-COW page: reject
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
            if (MI_IS_ROS_PFN(Pfn1))
            {
                /* ReactOS Mm doesn't track share count */
                ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);
            }
            else
            {
                /* On ARM3 pages, we should see a valid share count */
                ASSERT((Pfn1->u2.ShareCount != 0) && (Pfn1->u3.e1.PageLocation == ActiveAndValid));

                /* We don't support mapping a prototype page yet */
                ASSERT((Pfn1->u3.e1.PrototypePte == 0) && (Pfn1->OriginalPte.u.Soft.Prototype == 0));
            }

            /* More locked pages! */
            InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, 1);

            /* Loop trying to update the reference count (lock-free CAS) */
            do
            {
                /* Get the current reference count, make sure it's valid */
                OldRefCount = Pfn1->u3.e2.ReferenceCount;
                ASSERT(OldRefCount != 0);
                ASSERT(OldRefCount < 2500);

                /* Bump it up by one */
                RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
                                                        OldRefCount + 1,
                                                        OldRefCount);
                ASSERT(RefCount != 0);
            } while (OldRefCount != RefCount);

            /* Was this the first lock attempt? */
            if (OldRefCount != 1)
            {
                /* Someone else came through */
                InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
            }
        }
        else
        {
            //
            // For I/O addresses (no PFN entry), just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (!((ULONG_PTR)PointerPte & (PD_SIZE - 1))) PointerPde++;
    } while (PointerPte <= LastPte);

    //
    // Done: drop whichever lock we were holding
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
1103
/*
 * @implemented
 *
 * Undoes MmProbeAndLockPages: drops the extra PFN reference on every page
 * recorded in the MDL, tears down any system-space mapping, and updates
 * the owning process's locked-page accounting. I/O-space MDLs are handled
 * in a single pass under the PFN lock; regular MDLs are handled in two
 * passes (resolve PFN entries first, then dereference under the lock).
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    USHORT RefCount, OldRefCount;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks: must be locked, and not nonpaged pool or partial
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the process associated and capture the flags which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // I/O-space MDLs may mix device pages (no PFN entry) with RAM pages,
    // so they are handled inline under the PFN lock
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // LIST_HEAD terminates a short array, break out
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Only RAM pages (present in the PFN database) get dereferenced
            //
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1)
            {
                /* Get the current entry and reference count */
                OldRefCount = Pfn1->u3.e2.ReferenceCount;
                ASSERT(OldRefCount != 0);

                /* Is this already the last dereference */
                if (OldRefCount == 1)
                {
                    /* It should be on a free list waiting for us */
                    ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
                    ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
                    ASSERT(Pfn1->u2.ShareCount == 0);

                    /* Not supported yet */
                    ASSERT((Pfn1->u3.e1.PrototypePte == 0) &&
                           (Pfn1->OriginalPte.u.Soft.Prototype == 0));

                    /* One less page */
                    InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);

                    /* Do the last dereference, we're done here */
                    MiDecrementReferenceCount(Pfn1, *MdlPages);
                }
                else
                {
                    /* Loop decrementing one reference (lock-free CAS) */
                    do
                    {
                        /* Make sure it's still valid */
                        OldRefCount = Pfn1->u3.e2.ReferenceCount;
                        ASSERT(OldRefCount != 0);

                        /* Take off one reference */
                        RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
                                                                OldRefCount - 1,
                                                                OldRefCount);
                        ASSERT(RefCount != 0);
                    } while (OldRefCount != RefCount);
                    ASSERT(RefCount > 1);

                    /* Are there only lock references left? */
                    if (RefCount == 2)
                    {
                        /* And does the page still have users? */
                        if (Pfn1->u2.ShareCount >= 1)
                        {
                            /* Then it should still be valid */
                            ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);

                            /* Not supported yet */
                            ASSERT((Pfn1->u3.e1.PrototypePte == 0) &&
                                   (Pfn1->OriginalPte.u.Soft.Prototype == 0));

                            /* But there is one less "locked" page though */
                            InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
                        }
                    }
                }
            }
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -(LONG_PTR)PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -(LONG_PTR)PageCount);
    }

    //
    // First pass: convert each PFN in the array into its PFN database
    // entry pointer, so the second (locked) pass does no lookups
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == LIST_HEAD)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT(*MdlPages != 0);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Second pass: grab the PFN lock and dereference every page
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        /* Get the current entry and reference count */
        Pfn1 = (PMMPFN)*MdlPages;
        OldRefCount = Pfn1->u3.e2.ReferenceCount;
        ASSERT(OldRefCount != 0);

        /* Is this already the last dereference */
        if (OldRefCount == 1)
        {
            /* It should be on a free list waiting for us */
            ASSERT(Pfn1->u3.e2.ReferenceCount == 1);
            ASSERT(Pfn1->u3.e1.PageLocation != ActiveAndValid);
            ASSERT(Pfn1->u2.ShareCount == 0);

            /* Not supported yet */
            ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
                    (Pfn1->OriginalPte.u.Soft.Prototype == 0)));

            /* One less page */
            InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);

            /* Do the last dereference, we're done here */
            MiDecrementReferenceCount(Pfn1, MiGetPfnEntryIndex(Pfn1));
        }
        else
        {
            /* Loop decrementing one reference (lock-free CAS) */
            do
            {
                /* Make sure it's still valid */
                OldRefCount = Pfn1->u3.e2.ReferenceCount;
                ASSERT(OldRefCount != 0);

                /* Take off one reference */
                RefCount = InterlockedCompareExchange16((PSHORT)&Pfn1->u3.e2.ReferenceCount,
                                                        OldRefCount - 1,
                                                        OldRefCount);
                ASSERT(RefCount != 0);
            } while (OldRefCount != RefCount);
            ASSERT(RefCount > 1);

            /* Are there only lock references left? */
            if (RefCount == 2)
            {
                /* And does the page still have users? */
                if (Pfn1->u2.ShareCount >= 1)
                {
                    /* Then it should still be valid */
                    ASSERT(Pfn1->u3.e1.PageLocation == ActiveAndValid);

                    /* Not supported yet */
                    ASSERT(((Pfn1->u3.e1.PrototypePte == 0) &&
                            (Pfn1->OriginalPte.u.Soft.Prototype == 0)));

                    /* But there is one less "locked" page though */
                    InterlockedExchangeAddSizeT(&MmSystemLockPagesCount, -1);
                }
            }
        }
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
1400
/*
 * @unimplemented
 *
 * Should advance the MDL's described range by NumberOfBytes. Stub: always
 * returns STATUS_NOT_IMPLEMENTED.
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
1412
1413 /*
1414 * @unimplemented
1415 */
1416 PVOID
1417 NTAPI
1418 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
1419 IN ULONG PoolTag,
1420 IN PMDL MemoryDescriptorList,
1421 IN MEMORY_CACHING_TYPE CacheType)
1422 {
1423 UNIMPLEMENTED;
1424 return 0;
1425 }
1426
/*
 * @unimplemented
 *
 * Should unmap pages mapped via MmMapLockedPagesWithReservedMapping.
 * Stub: does nothing.
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}
1438
/*
 * @unimplemented
 *
 * Should prefetch the pages described by the read lists. Stub: always
 * returns STATUS_NOT_IMPLEMENTED.
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
1450
/*
 * @unimplemented
 *
 * Should change the protection on the MDL's system mapping. Stub: always
 * returns STATUS_NOT_IMPLEMENTED.
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
1462
/*
 * @unimplemented
 *
 * Should probe and lock pages in the context of the given process (cf.
 * MmProbeAndLockPages, which uses the current process). Stub: does nothing.
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}
1475
1476
/*
 * @unimplemented
 *
 * Should probe and lock only the pages listed in PageList. Stub: does
 * nothing.
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}
1489
/*
 * @unimplemented
 *
 * Should map an MDL for the crash-dump path. Stub: does nothing.
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}
1499
1500 /* EOF */