/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:         ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::MDLSUP"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN SIZE_T Length)
{
    SIZE_T Size;

    //
    // Check if we don't have an MDL built
    //
    if (!Mdl)
    {
        //
        // Calculate the size we'll need and allocate the MDL
        //
        Size = MmSizeOfMdl(Base, Length);
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    //
    // Initialize it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}
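
//
// Usage sketch (illustrative only, not part of this file): a caller can let
// MmCreateMdl allocate the MDL by passing NULL, then free it with ExFreePool
// when done. "Buffer" and "Length" are hypothetical caller-supplied values.
//
// PMDL Mdl = MmCreateMdl(NULL, Buffer, Length);
// if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
// /* ... build or lock the pages, perform the transfer ... */
// ExFreePool(Mdl);
//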

/*
 * @implemented
 */
SIZE_T
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN SIZE_T Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}
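
//
// Worked example (assuming 4KB pages): a 0x300-byte buffer that starts at
// offset 0xF00 into a page spans two pages, so MmSizeOfMdl returns
// sizeof(MDL) + 2 * sizeof(PFN_NUMBER), even though the length itself is
// less than one page.
//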

/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated with a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (Pfn > MmHighestPhysicalPage) Mdl->MdlFlags |= MDL_IO_SPACE;
}
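
//
// Usage sketch (illustrative only): for a buffer known to live in nonpaged
// pool, the MDL can be completed without probing or locking. "Length" and
// the pool tag are hypothetical caller values.
//
// PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, Length, 'xMdL');
// PMDL Mdl = IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL);
// if (Mdl) MmBuildMdlForNonPagedPool(Mdl);
//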

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes)
{
    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 MiNotMapped,
                                 0);
}

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T TotalBytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Check for invalid cache type
    //
    if (CacheType > MmWriteCombined)
    {
        //
        // Normalize to default
        //
        CacheAttribute = MiNotMapped;
    }
    else
    {
        //
        // Convert to the internal caching attribute
        //
        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    }

    //
    // Only these flags are allowed
    //
    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
    {
        //
        // Silently fail
        //
        return NULL;
    }

    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 CacheAttribute,
                                 Flags);
}
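
//
// Usage sketch (illustrative only): allocating up to 64KB of physical pages
// below 4GB. The returned MDL's byte count must be checked, since the
// allocator may return fewer pages than requested; the pages are released
// with MmFreePagesFromMdl and the MDL itself with ExFreePool.
//
// PHYSICAL_ADDRESS Low, High, Skip;
// Low.QuadPart = 0;
// High.QuadPart = 0xFFFFFFFF;
// Skip.QuadPart = 0;
// PMDL Mdl = MmAllocatePagesForMdl(Low, High, Skip, 64 * 1024);
// if (Mdl)
// {
//     /* ... use MmGetMdlByteCount(Mdl) bytes worth of pages ... */
//     MmFreePagesFromMdl(Mdl);
//     ExFreePool(Mdl);
// }
//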

/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == -1) break;

        //
        // Sanity check
        //
        ASSERT(*Pages <= MmHighestPhysicalPage);

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1->u3.ReferenceCount == 1);

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;

        //
        // Dereference it
        //
        MmDereferencePage(*Pages);

        //
        // Clear this page and move on
        //
        *Pages++ = -1;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
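
//
// Note (not from this file): per the documented contract for this API,
// MmFreePagesFromMdl releases only the physical pages described by the MDL;
// the caller still owns the MDL structure itself and frees it separately
// with ExFreePool, as in the sketch above.
//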

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_NUMBER PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = HyperTemplatePte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == -1) break;

            //
            // Write the PTE
            //
            ASSERT(PointerPte->u.Hard.Valid == 0);
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            *PointerPte++ = TempPte;
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    //
    // In user-mode, let ReactOS do it
    //
    return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
}
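
//
// Usage sketch (illustrative only): mapping a locked MDL into system space
// without caching, e.g. for a buffer shared with a device, and unmapping it
// once the transfer is finished. "Mdl" is a hypothetical already-locked MDL.
//
// PVOID SystemVa = MmMapLockedPagesSpecifyCache(Mdl,
//                                               KernelMode,
//                                               MmNonCached,
//                                               NULL,
//                                               FALSE,
//                                               NormalPagePriority);
// if (SystemVa)
// {
//     /* ... access the buffer through SystemVa ... */
//     MmUnmapLockedPages(SystemVa, Mdl);
// }
//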

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}
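
//
// Note (not from this file): because this wrapper passes TRUE for
// BugCheckOnFailure, a failed kernel-mode mapping bugchecks instead of
// returning NULL. Drivers normally prefer the MmGetSystemAddressForMdlSafe
// macro, which reaches MmMapLockedPagesSpecifyCache with BugCheckOnFailure
// set to FALSE so the mapping attempt can fail gracefully.
//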

/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_NUMBER PageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free the extra PTEs as well
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = (PPFN_NUMBER)(Mdl + 1);
            MdlPages += PageCount;

            //
            // Do the math
            //
            PageCount += *MdlPages;
            PointerPte -= *MdlPages;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  ((*MdlPages) << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        //
        // Let ReactOS handle it
        //
        MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
    }
}

/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    PETHREAD Thread;
    PMMSUPPORT AddressSpace;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, PointerPde, LastPte;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    //
    // Get the thread and process
    //
    Thread = PsGetCurrentThread();
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = -1;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
            Address = PAGE_ALIGN(Address);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base >= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        AddressSpace = NULL; // Keep compiler happy
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        //
        // Use the process lock
        //
        UsePfnLock = FALSE;
        AddressSpace = &CurrentProcess->Vm;
        MmLockAddressSpace(AddressSpace);
        OldIrql = DISPATCH_LEVEL; // Keep compiler happy
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = -1;
        while ((PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                //
                // Release process address space lock
                //
                MmUnlockAddressSpace(AddressSpace);
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);
            Status = MmAccessFault(FALSE, Address, KernelMode, NULL);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                //
                // Use the address space lock
                //
                MmLockAddressSpace(AddressSpace);
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            //
                            // Release process address space lock
                            //
                            MmUnlockAddressSpace(AddressSpace);
                        }

                        //
                        // Access the page
                        //
                        Status = MmAccessFault(TRUE, Address, KernelMode, NULL);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            //
                            // Use the address space lock
                            //
                            MmLockAddressSpace(AddressSpace);
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        if (PageFrameIndex <= MmHighestPhysicalPage)
        {
            //
            // Get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PageFrameIndex);
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            //
            // Now lock the page
            //
            MmReferencePage(PageFrameIndex);
            MmLockPage(PageFrameIndex);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        if (!((ULONG_PTR)(++PointerPte) & (PAGE_SIZE - 1))) PointerPde++;
    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
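
//
// Usage sketch (illustrative only): the canonical probe-and-lock pattern.
// MmProbeAndLockPages raises an exception on failure, so callers wrap it in
// SEH; "UserBuffer" and "Length" are hypothetical values from a user request.
//
// PMDL Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
// if (!Mdl) return STATUS_INSUFFICIENT_RESOURCES;
// _SEH2_TRY
// {
//     MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
// }
// _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
// {
//     IoFreeMdl(Mdl);
//     _SEH2_YIELD(return _SEH2_GetExceptionCode());
// }
// _SEH2_END;
// /* ... perform the transfer ... */
// MmUnlockPages(Mdl);
// IoFreeMdl(Mdl);
//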

/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the associated process and capture the flags, which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any MmGetSystemAddressForMdl calls for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == -1) break;

            //
            // Check if this page is in the PFN database
            //
            if (*MdlPages <= MmHighestPhysicalPage)
            {
                //
                // Unlock and dereference
                //
                MmUnlockPage(*MdlPages);
                MmDereferencePage(*MdlPages);
            }
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == -1)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        //
        // Sanity check
        //
        ASSERT(*MdlPages <= MmHighestPhysicalPage);
    } while (++MdlPages < LastPage);

    //
    // Reset the pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        //
        // Unlock and dereference
        //
        MmUnlockPage(*MdlPages);
        MmDereferencePage(*MdlPages);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                    IN ULONG PoolTag,
                                    IN PMDL MemoryDescriptorList,
                                    IN MEMORY_CACHING_TYPE CacheType)
{
    UNIMPLEMENTED;
    return 0;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}

/* EOF */