Sync with trunk head (part 1 of 2)
[reactos.git] / ntoskrnl / mm / ARM3 / mdlsup.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mdlsup.c
5 * PURPOSE: ARM Memory Manager Memory Descriptor List (MDL) Management
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
#line 15 "ARM³::MDLSUP"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 /* PUBLIC FUNCTIONS ***********************************************************/
20
21 /*
22 * @implemented
23 */
24 PMDL
25 NTAPI
26 MmCreateMdl(IN PMDL Mdl,
27 IN PVOID Base,
28 IN ULONG Length)
29 {
30 ULONG Size;
31
32 //
33 // Check if we don't have an MDL built
34 //
35 if (!Mdl)
36 {
37 //
38 // Calculate the size we'll need and allocate the MDL
39 //
40 Size = MmSizeOfMdl(Base, Length);
41 Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
42 if (!Mdl) return NULL;
43 }
44
45 //
46 // Initialize it
47 //
48 MmInitializeMdl(Mdl, Base, Length);
49 return Mdl;
50 }
51
52 /*
53 * @implemented
54 */
55 ULONG
56 NTAPI
57 MmSizeOfMdl(IN PVOID Base,
58 IN ULONG Length)
59 {
60 //
61 // Return the MDL size
62 //
63 return sizeof(MDL) +
64 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
65 }
66
67 /*
68 * @implemented
69 */
/*
 * Fills in the PFN array of an MDL whose buffer lives in nonpaged pool
 * (or other nonpaged system space), without locking pages. Sets
 * MappedSystemVa to the buffer's existing system address and marks the
 * MDL with MDL_SOURCE_IS_NONPAGED_POOL.
 *
 * Mdl - MDL with StartVa/ByteOffset/ByteCount already initialized
 *       (e.g. by MmInitializeMdl); must not already be locked, mapped,
 *       partial, or sourced from nonpaged pool.
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated to a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information (PFN array starts right after the header)
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Walk the already-valid PTEs of the buffer and record each PFN
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    // NOTE(review): only the PFN of the *last* page is tested here --
    // assumes the whole buffer is either entirely RAM or entirely I/O
    // space; confirm against callers.
    //
    if (Pfn > MmHighestPhysicalPage) Mdl->MdlFlags |= MDL_IO_SPACE;
}
131
132 /*
133 * @implemented
134 */
135 PMDL
136 NTAPI
137 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
138 IN PHYSICAL_ADDRESS HighAddress,
139 IN PHYSICAL_ADDRESS SkipBytes,
140 IN SIZE_T TotalBytes)
141 {
142 //
143 // Call the internal routine
144 //
145 return MiAllocatePagesForMdl(LowAddress,
146 HighAddress,
147 SkipBytes,
148 TotalBytes,
149 MiNotMapped,
150 0);
151 }
152
153 /*
154 * @implemented
155 */
156 PMDL
157 NTAPI
158 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
159 IN PHYSICAL_ADDRESS HighAddress,
160 IN PHYSICAL_ADDRESS SkipBytes,
161 IN SIZE_T TotalBytes,
162 IN MEMORY_CACHING_TYPE CacheType,
163 IN ULONG Flags)
164 {
165 MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
166
167 //
168 // Check for invalid cache type
169 //
170 if (CacheType > MmWriteCombined)
171 {
172 //
173 // Normalize to default
174 //
175 CacheAttribute = MiNotMapped;
176 }
177 else
178 {
179 //
180 // Conver to internal caching attribute
181 //
182 CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
183 }
184
185 //
186 // Only these flags are allowed
187 //
188 if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
189 {
190 //
191 // Silently fail
192 //
193 return NULL;
194 }
195
196 //
197 // Call the internal routine
198 //
199 return MiAllocatePagesForMdl(LowAddress,
200 HighAddress,
201 SkipBytes,
202 TotalBytes,
203 CacheAttribute,
204 Flags);
205 }
206
207 /*
208 * @implemented
209 */
/*
 * Returns to the system the physical pages described by an MDL that was
 * built by MmAllocatePagesForMdl(Ex). Only the pages are released; the
 * MDL structure itself is not freed here (no pool free is issued), so
 * the caller remains responsible for it.
 *
 * Mdl - MDL whose pages should be freed. Must not describe I/O space
 *       and must have a page-aligned StartVa. Callable at <= APC_LEVEL.
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock -- required for reference count manipulation
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages (PFN array begins right after the header)
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page: -1 is the end-of-array sentinel
        //
        if (*Pages == -1) break;

        //
        // Sanity check
        //
        ASSERT(*Pages <= MmHighestPhysicalPage);

        //
        // Get the page entry; these pages must be singly referenced
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1->u3.ReferenceCount == 1);

        //
        // Clear the allocation-boundary markers
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;

        //
        // Dereference it (drops the last reference, freeing the page)
        //
        MmDereferencePage(*Pages);

        //
        // Overwrite the slot with the sentinel and move on
        //
        *Pages++ = -1;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
288
289 /*
290 * @implemented
291 */
/*
 * Maps the pages described by a locked (or partial) MDL into virtual
 * address space with the requested caching behavior.
 *
 * Mdl               - MDL whose pages to map; must be locked or partial.
 * AccessMode        - KernelMode maps into system PTE space; anything
 *                     else is delegated to the user-space path.
 * CacheType         - Requested MEMORY_CACHING_TYPE for the mapping.
 * BaseAddress       - Requested base (only forwarded to the user-space
 *                     mapper; ignored for kernel mappings here).
 * BugCheckOnFailure - If set and PTE reservation fails (and the MDL does
 *                     not allow failure), bugcheck instead of returning.
 * Priority          - Page priority hint; not referenced by this
 *                     implementation.
 *
 * Returns the mapped virtual address (StartVa-relative ByteOffset
 * applied), or NULL when mapping can fail and did.
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_NUMBER PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks: must not already be mapped, must be locked/partial
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type (I/O mappings use a separate table row)
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address, keeping the byte offset within the page
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template PTE and adjust its caching bits
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Fully cached: nothing to do
                //
                break;
        }

        //
        // Loop all PTEs, writing one per MDL page
        //
        do
        {
            //
            // -1 sentinel marks the end of the recorded pages
            //
            if (*MdlPages == -1) break;

            //
            // Write the PTE (the reserved PTE must not already be valid)
            //
            ASSERT(PointerPte->u.Hard.Valid == 0);
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            *PointerPte++ = TempPte;
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    //
    // In user-mode, let ReactOS do it
    //
    return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
}
450
451 /*
452 * @implemented
453 */
454 PVOID
455 NTAPI
456 MmMapLockedPages(IN PMDL Mdl,
457 IN KPROCESSOR_MODE AccessMode)
458 {
459 //
460 // Call the extended version
461 //
462 return MmMapLockedPagesSpecifyCache(Mdl,
463 AccessMode,
464 MmCached,
465 NULL,
466 TRUE,
467 HighPagePriority);
468 }
469
470 /*
471 * @implemented
472 */
/*
 * Tears down a mapping previously created by MmMapLockedPages(SpecifyCache).
 * Kernel mappings have their system PTEs released (including any extra
 * PTEs recorded when MDL_FREE_EXTRA_PTES is set); user mappings are
 * delegated to the user-space unmapper.
 *
 * BaseAddress - Address returned by the mapping call.
 * Mdl         - The MDL that was mapped.
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_NUMBER PageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request (address above the user range)
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free advanced pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // The extra-PTE count is stored in the slot just past the
            // regular page array
            //
            MdlPages = (PPFN_NUMBER)(Mdl + 1);
            MdlPages += PageCount;

            //
            // Widen the release window backwards by that many PTEs
            //
            PageCount += *MdlPages;
            PointerPte -= *MdlPages;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  ((*MdlPages) << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        //
        // Let ReactOS handle it
        //
        MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
    }
}
564
565 /*
566 * @implemented
567 */
/*
 * Probes the buffer described by the MDL for accessibility, faults its
 * pages resident, references (locks) each physical page, and fills the
 * MDL's PFN array. On success sets MDL_PAGES_LOCKED (and the write flag
 * for non-read operations). On failure, undoes any partial locking via
 * MmUnlockPages and raises the error status (does not return it).
 *
 * Mdl        - MDL with StartVa/ByteOffset/ByteCount initialized; must
 *              not already be locked, mapped, partial, or nonpaged-pool.
 * AccessMode - Caller's mode; write probing is only done for user
 *              addresses.
 * Operation  - IoReadAccess, IoWriteAccess, or IoModifyAccess.
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    // NOTE(review): declared PPFN_TYPE but consistently cast/used as
    // PPFN_NUMBER below -- presumably the two typedefs are identical in
    // this tree; confirm and unify.
    PPFN_TYPE MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    PETHREAD Thread;
    PMMSUPPORT AddressSpace;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
    PFN_NUMBER PageFrameIndex;
    PMMPFN Pfn1;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    //
    // Get the thread and, for user buffers, the owning process
    //
    Thread = PsGetCurrentThread();
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    //
    // Now probe them -- touch each page inside SEH so an inaccessible
    // buffer becomes a raised status rather than a crash
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure: pre-seed the slot with the -1 sentinel
            //
            *MdlPages = -1;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = (PVOID)((ULONG_PTR)Address + PAGE_SIZE);
            Address = PAGE_ALIGN(Address);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Capture the faulting status for re-raise below
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail: clear ownership and raise to the caller
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*, so the cleanup path's MmUnlockPages
    // call is valid even on partial failure
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base >= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        AddressSpace = NULL; // Keep compiler happy
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        //
        // Use the process lock
        //
        UsePfnLock = FALSE;
        AddressSpace = &CurrentProcess->Vm;
        MmLockAddressSpace(AddressSpace);
        OldIrql = DISPATCH_LEVEL; // Keep compiler happy
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages; fault them in
        // until both the PDE and PTE are valid
        //
        *MdlPages = -1;
        while ((PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                //
                // Release process address space lock
                //
                MmUnlockAddressSpace(AddressSpace);
            }

            //
            // Fault the page in (locks must be dropped for this)
            //
            Address = MiPteToAddress(PointerPte);
            Status = MmAccessFault(FALSE, Address, KernelMode, NULL);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // Re-acquire whichever lock we were using
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                //
                // Use the address space lock
                //
                MmLockAddressSpace(AddressSpace);
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            //
                            // Release process address space lock
                            //
                            MmUnlockAddressSpace(AddressSpace);
                        }

                        //
                        // Fault as a write to force the copy-on-write break
                        //
                        Status = MmAccessFault(TRUE, Address, KernelMode, NULL);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            //
                            // Use the address space lock
                            //
                            MmLockAddressSpace(AddressSpace);
                        }

                        //
                        // Start over: re-validate this same PTE
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow a write to a non-writable,
                // non-COW page
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        if (PageFrameIndex <= MmHighestPhysicalPage)
        {
            //
            // Get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PageFrameIndex);
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            //
            // Now lock the page by taking a reference on it
            //
            MmReferencePage(PageFrameIndex);
        }
        else
        {
            //
            // PFN beyond physical memory: this is I/O space, just flag it
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on; bump the PDE pointer whenever the
        // PTE pointer crosses into the next page-table page
        //
        *MdlPages++ = PageFrameIndex;
        if (!((ULONG_PTR)(++PointerPte) & (PAGE_SIZE - 1))) PointerPde++;
    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        //
        // Release process address space lock
        //
        MmUnlockAddressSpace(AddressSpace);
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
1044
1045 /*
1046 * @implemented
1047 */
/*
 * Unlocks the physical pages described by an MDL previously locked with
 * MmProbeAndLockPages: unmaps any system-VA mapping, dereferences each
 * recorded page, adjusts the owning process's locked-page accounting,
 * and clears MDL_PAGES_LOCKED.
 *
 * Mdl - Locked, non-partial, non-nonpaged-pool MDL with a nonzero
 *       ByteCount.
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the process associated and capture the flags which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space: I/O MDLs may mix real RAM
    // pages with device PFNs, so each entry is range-checked before
    // dereferencing
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // -1 sentinel: last page, break out
            //
            if (*MdlPages == -1) break;

            //
            // Check if this page is in the PFN database
            //
            if (*MdlPages <= MmHighestPhysicalPage)
            {
                //
                // Unlock and dereference
                //
                MmDereferencePage(*MdlPages);
            }
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -PageCount);
    }

    //
    // First pass (no lock): validate entries and find where the array
    // actually ends (sentinel may appear before PageCount entries)
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == -1)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        //
        // Sanity check
        //
        ASSERT(*MdlPages <= MmHighestPhysicalPage);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        //
        // Unlock and dereference
        //
        MmDereferencePage(*MdlPages);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
1232
1233 /*
1234 * @unimplemented
1235 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    // Stub: advancing an MDL's described range is not implemented yet;
    // callers always receive STATUS_NOT_IMPLEMENTED
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
1244
1245 /*
1246 * @unimplemented
1247 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
                                    IN ULONG PoolTag,
                                    IN PMDL MemoryDescriptorList,
                                    IN MEMORY_CACHING_TYPE CacheType)
{
    // Stub: mapping into a pre-reserved region is not implemented yet;
    // always returns NULL (no mapping)
    UNIMPLEMENTED;
    return 0;
}
1258
1259 /*
1260 * @unimplemented
1261 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    // Stub: counterpart of MmMapLockedPagesWithReservedMapping; no-op
    UNIMPLEMENTED;
}
1270
1271 /*
1272 * @unimplemented
1273 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    // Stub: page prefetching is not implemented; always fails with
    // STATUS_NOT_IMPLEMENTED
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
1282
1283 /*
1284 * @unimplemented
1285 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    // Stub: changing the protection of an MDL's system mapping is not
    // implemented; always fails with STATUS_NOT_IMPLEMENTED
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
1294
1295 /*
1296 * @unimplemented
1297 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    // Stub: cross-process probe-and-lock is not implemented; no-op
    UNIMPLEMENTED;
}
1307
1308
1309 /*
1310 * @unimplemented
1311 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    // Stub: locking an explicit page list is not implemented; no-op
    UNIMPLEMENTED;
}
1321
1322 /*
1323 * @unimplemented
1324 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    // Stub: crash-dump MDL mapping is not implemented; no-op
    UNIMPLEMENTED;
}
1331
1332 /* EOF */