/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/mdl.c
 * PURPOSE:         Manipulates MDLs
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */
11 /* INCLUDES ****************************************************************/
15 #include <internal/debug.h>
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializeMdlImplementation)
21 /* GLOBALS *******************************************************************/
23 #define TAG_MDL TAG('M', 'D', 'L', ' ')
25 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
/* Base of the kernel-VA region reserved at init for MDL mappings. */
static PVOID MiMdlMappingRegionBase
= NULL
;
/* One bit per page of the mapping region: set = page slot in use. */
static RTL_BITMAP MiMdlMappingRegionAllocMap
;
/* Lowest bitmap index worth scanning from when allocating mapping slots. */
static ULONG MiMdlMappingRegionHint
;
/* Protects MiMdlMappingRegionAllocMap and MiMdlMappingRegionHint. */
static KSPIN_LOCK MiMdlMappingRegionLock
;
/* Number of entries in the PFN database; PFNs >= this are treated as I/O space. */
extern ULONG MmPageArraySize
;
/*
MDL Flags desc.

MDL_PAGES_LOCKED              MmProbeAndLockPages has been called for this mdl
MDL_SOURCE_IS_NONPAGED_POOL   mdl has been built by MmBuildMdlForNonPagedPool
MDL_PARTIAL                   mdl has been built by IoBuildPartialMdl
MDL_MAPPING_CAN_FAIL          in case of an error, MmMapLockedPages will return NULL instead of bugchecking
MDL_MAPPED_TO_SYSTEM_VA       mdl has been mapped into kernel space using MmMapLockedPages
MDL_PARTIAL_HAS_BEEN_MAPPED   mdl flagged MDL_PARTIAL has been mapped into kernel space using MmMapLockedPages
*/
44 /* FUNCTIONS *****************************************************************/
54 IN ULONG NumberOfBytes
58 return STATUS_NOT_IMPLEMENTED
;
65 MmInitializeMdlImplementation(VOID
)
70 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
72 BoundaryAddressMultiple
.QuadPart
= 0;
73 MiMdlMappingRegionHint
= 0;
74 MiMdlMappingRegionBase
= NULL
;
76 MmLockAddressSpace(MmGetKernelAddressSpace());
77 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
78 MEMORY_AREA_MDL_MAPPING
,
79 &MiMdlMappingRegionBase
,
80 MI_MDL_MAPPING_REGION_SIZE
,
85 BoundaryAddressMultiple
);
86 if (!NT_SUCCESS(Status
))
88 MmUnlockAddressSpace(MmGetKernelAddressSpace());
91 MmUnlockAddressSpace(MmGetKernelAddressSpace());
93 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
94 MI_MDL_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8),
97 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap
, Buffer
, MI_MDL_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
98 RtlClearAllBits(&MiMdlMappingRegionAllocMap
);
100 KeInitializeSpinLock(&MiMdlMappingRegionLock
);
106 MmGetMdlPageAddress(PMDL Mdl
, PVOID Offset
)
108 PPFN_NUMBER MdlPages
;
110 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
112 return((PVOID
)MdlPages
[((ULONG_PTR
)Offset
) / PAGE_SIZE
]);
120 MmUnlockPages(PMDL Mdl
)
122 * FUNCTION: Unlocks the physical pages described by a given MDL
124 * MemoryDescriptorList = MDL describing the buffer to be unlocked
125 * NOTES: The memory described by the specified MDL must have been locked
126 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
129 * May be called in any process context.
133 PPFN_NUMBER MdlPages
;
137 * MmProbeAndLockPages MUST have been called to lock this mdl!
139 * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
140 * locked with MmLockAndProbePages, but (for now) we'll be more forgiving...
142 if (!(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
))
144 DPRINT1("MmUnlockPages called for non-locked mdl!\n");
148 /* If mdl buffer is mapped io space -> do nothing */
149 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
151 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
155 /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
156 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
158 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
162 * FIXME: I don't know whether this right, but it looks sensible
164 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) ||
165 (Mdl
->MdlFlags
& MDL_IO_PAGE_READ
))
171 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
172 for (i
=0; i
<(PAGE_ROUND_UP(Mdl
->ByteCount
+Mdl
->ByteOffset
)/PAGE_SIZE
); i
++)
176 MmDereferencePage(Page
);
179 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
188 MmMapLockedPagesWithReservedMapping (
189 IN PVOID MappingAddress
,
191 IN PMDL MemoryDescriptorList
,
192 IN MEMORY_CACHING_TYPE CacheType
204 MmUnmapLockedPages(PVOID BaseAddress
, PMDL Mdl
)
206 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
208 * BaseAddress = Base virtual address to which the pages were mapped
209 * MemoryDescriptorList = MDL describing the mapped pages
211 * User space unmappings _must_ be done from the original process context!
219 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress
, Mdl
);
222 * In this case, the MDL has the same system address as the base address
223 * so there is no need to free it
225 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) &&
226 (BaseAddress
>= MmSystemRangeStart
))
232 /* Calculate the number of pages we mapped. */
233 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
236 * Docs says that BaseAddress should be a _base_ address, but every example
237 * I've seen pass the actual address. -Gunnar
239 BaseAddress
= PAGE_ALIGN(BaseAddress
);
241 /* Unmap all the pages. */
242 for (i
= 0; i
< PageCount
; i
++)
244 MmDeleteVirtualMapping(Mdl
->Process
,
245 (char*)BaseAddress
+ (i
* PAGE_SIZE
),
251 if (BaseAddress
>= MmSystemRangeStart
)
253 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
255 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
256 /* Deallocate all the pages used. */
257 Base
= (ULONG
)((char*)BaseAddress
- (char*)MiMdlMappingRegionBase
) / PAGE_SIZE
;
259 RtlClearBits(&MiMdlMappingRegionAllocMap
, Base
, PageCount
);
261 MiMdlMappingRegionHint
= min (MiMdlMappingRegionHint
, Base
);
263 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
265 /* Reset the MDL state. */
266 Mdl
->MdlFlags
&= ~MDL_MAPPED_TO_SYSTEM_VA
;
267 Mdl
->MappedSystemVa
= NULL
;
274 ASSERT(Mdl
->Process
== PsGetCurrentProcess());
276 Marea
= MmLocateMemoryAreaByAddress( (PMADDRESS_SPACE
)&(Mdl
->Process
)->VadRoot
, BaseAddress
);
279 DPRINT1( "Couldn't open memory area when unmapping user-space pages!\n" );
283 MmFreeMemoryArea( (PMADDRESS_SPACE
)&(Mdl
->Process
)->VadRoot
, Marea
, NULL
, NULL
);
296 MmUnmapReservedMapping (
297 IN PVOID BaseAddress
,
299 IN PMDL MemoryDescriptorList
308 MmBuildMdlFromPages(PMDL Mdl
, PPFN_TYPE Pages
)
310 memcpy(Mdl
+ 1, Pages
, sizeof(PFN_TYPE
) * (PAGE_ROUND_UP(Mdl
->ByteOffset
+Mdl
->ByteCount
)/PAGE_SIZE
));
312 /* FIXME: this flag should be set by the caller perhaps? */
313 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
323 IN ULONG NumberOfLists
,
324 IN PREAD_LIST
*ReadLists
328 return STATUS_NOT_IMPLEMENTED
;
337 MmProtectMdlSystemAddress (
338 IN PMDL MemoryDescriptorList
,
343 return STATUS_NOT_IMPLEMENTED
;
350 VOID STDCALL
MmProbeAndLockPages (PMDL Mdl
,
351 KPROCESSOR_MODE AccessMode
,
352 LOCK_OPERATION Operation
)
354 * FUNCTION: Probes the specified pages, makes them resident and locks them
357 * AccessMode = Access at which to probe the buffer
358 * Operation = Operation to probe for
360 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool
361 * used in cases where you know that the mdl address is paged memory or
362 * you don't know where the mdl address comes from. MmProbeAndLockPages will
363 * work no matter what kind of mdl address you have.
370 KPROCESSOR_MODE Mode
;
372 PEPROCESS CurrentProcess
= PsGetCurrentProcess();
373 PMADDRESS_SPACE AddressSpace
;
375 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl
);
377 ASSERT(!(Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|MDL_MAPPED_TO_SYSTEM_VA
|MDL_PARTIAL
|
378 MDL_IO_SPACE
|MDL_SOURCE_IS_NONPAGED_POOL
)));
380 MdlPages
= (PPFN_TYPE
)(Mdl
+ 1);
381 NrPages
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
383 /* mdl must have enough page entries */
384 ASSERT(NrPages
<= (Mdl
->Size
- sizeof(MDL
))/sizeof(PFN_TYPE
));
387 if (Mdl
->StartVa
>= MmSystemRangeStart
&&
388 MmGetPfnForProcess(NULL
, Mdl
->StartVa
) >= MmPageArraySize
)
390 /* phys addr is not phys memory so this must be io memory */
392 for (i
= 0; i
< NrPages
; i
++)
394 MdlPages
[i
] = MmGetPfnForProcess(NULL
, (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
));
397 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
|MDL_IO_SPACE
;
402 if (Mdl
->StartVa
>= MmSystemRangeStart
)
404 /* FIXME: why isn't AccessMode used? */
407 AddressSpace
= MmGetKernelAddressSpace();
411 /* FIXME: why isn't AccessMode used? */
413 Mdl
->Process
= CurrentProcess
;
414 AddressSpace
= (PMADDRESS_SPACE
)&(CurrentProcess
)->VadRoot
;
421 MmLockAddressSpace(AddressSpace
);
423 for (i
= 0; i
< NrPages
; i
++)
427 Address
= (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
);
430 * FIXME: skip the probing/access stuff if buffer is nonpaged kernel space?
434 if (!MmIsPagePresent(NULL
, Address
))
436 Status
= MmAccessFault(FALSE
, Address
, Mode
, NULL
);
437 if (!NT_SUCCESS(Status
))
439 for (j
= 0; j
< i
; j
++)
442 if (Page
< MmPageArraySize
)
445 MmDereferencePage(Page
);
448 MmUnlockAddressSpace(AddressSpace
);
449 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
454 MmLockPage(MmGetPfnForProcess(NULL
, Address
));
457 if ((Operation
== IoWriteAccess
|| Operation
== IoModifyAccess
) &&
458 (!(MmGetPageProtect(NULL
, (PVOID
)Address
) & PAGE_READWRITE
)))
460 Status
= MmAccessFault(TRUE
, Address
, Mode
, NULL
);
461 if (!NT_SUCCESS(Status
))
463 for (j
= 0; j
< i
; j
++)
466 if (Page
< MmPageArraySize
)
469 MmDereferencePage(Page
);
472 MmUnlockAddressSpace(AddressSpace
);
473 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
476 Page
= MmGetPfnForProcess(NULL
, Address
);
478 if (Page
>= MmPageArraySize
)
479 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
481 MmReferencePage(Page
);
484 MmUnlockAddressSpace(AddressSpace
);
485 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
494 MmProbeAndLockProcessPages (
495 IN OUT PMDL MemoryDescriptorList
,
496 IN PEPROCESS Process
,
497 IN KPROCESSOR_MODE AccessMode
,
498 IN LOCK_OPERATION Operation
510 MmProbeAndLockSelectedPages(
511 IN OUT PMDL MemoryDescriptorList
,
512 IN LARGE_INTEGER PageList
[],
513 IN KPROCESSOR_MODE AccessMode
,
514 IN LOCK_OPERATION Operation
524 ULONG STDCALL
MmSizeOfMdl (PVOID Base
,
527 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
528 * the given address range
530 * Base = base virtual address
531 * Length = number of bytes to map
536 len
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
,Length
);
538 return(sizeof(MDL
)+(len
*sizeof(PFN_TYPE
)));
546 MmBuildMdlForNonPagedPool (PMDL Mdl
)
548 * FUNCTION: Fills in the corresponding physical page array of a given
549 * MDL for a buffer in nonpaged system space
551 * Mdl = Points to an MDL that supplies a virtual address,
552 * byte offset and length
554 * This function can be seen as a fast version of MmProbeAndLockPages in case
555 * you _know_ that the mdl address is within nonpaged kernel space.
563 * mdl buffer must (at least) be in kernel space, thou this doesn't
564 * necesarely mean that the buffer in within _nonpaged_ kernel space...
566 ASSERT(Mdl
->StartVa
>= MmSystemRangeStart
);
568 PageCount
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
569 MdlPages
= (PPFN_TYPE
)(Mdl
+ 1);
571 /* mdl must have enough page entries */
572 ASSERT(PageCount
<= (Mdl
->Size
- sizeof(MDL
))/sizeof(PFN_TYPE
));
574 for (i
=0; i
< PageCount
; i
++)
576 *MdlPages
++ = MmGetPfnForProcess(NULL
, (char*)Mdl
->StartVa
+ (i
* PAGE_SIZE
));
579 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
581 Mdl
->MappedSystemVa
= (char*)Mdl
->StartVa
+ Mdl
->ByteOffset
;
589 MmCreateMdl (PMDL Mdl
,
593 * FUNCTION: Allocates and initalizes an MDL
595 * MemoryDescriptorList = Points to MDL to initalize. If this is
596 * NULL then one is allocated
597 * Base = Base virtual address of the buffer
598 * Length = Length in bytes of the buffer
599 * RETURNS: A pointer to initalized MDL
606 Size
= MmSizeOfMdl(Base
,Length
);
608 (PMDL
)ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
615 MmInitializeMdl(Mdl
, (char*)Base
, Length
);
625 MmMapMemoryDumpMdl (PVOID Unknown0
)
627 * FIXME: Has something to do with crash dumps. Do we want to implement
639 MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress
,
640 IN PHYSICAL_ADDRESS HighAddress
,
641 IN PHYSICAL_ADDRESS SkipBytes
,
642 IN SIZE_T Totalbytes
)
645 MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory pages to an MDL
647 MmAllocatePagesForMdlSearch the PFN database for free, zeroed or standby
648 pagesAllocates pages and puts in MDLDoes not map pages (caller responsibility)
649 Designed to be used by an AGP driver
651 LowAddress is the lowest acceptable physical address it wants to allocate
652 and HighAddress is the highest. SkipBytes are the number of bytes that the
653 kernel should keep free above LowAddress and below the address at which it
654 starts to allocate physical memory. TotalBytes are the number of bytes that
655 the driver wants to allocate. The return value of the function is a MDL
656 that if non-zero describes the physical memory the kernel has given the
657 driver. To access portions of the memory the driver must create sub-MDLs
658 from the returned MDL that describe appropriate portions of the physical
659 memory. When a driver wants to access physical memory described by a
660 sub-MDL it must map the sub-MDL using MmGetSystemAddressForMdlSafe.
667 ULONG NumberOfPagesWanted
, NumberOfPagesAllocated
;
670 DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
671 "SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
672 LowAddress
.QuadPart
, HighAddress
.QuadPart
,
673 SkipBytes
.QuadPart
, Totalbytes
);
675 /* SkipBytes must be a multiple of the page size */
676 if ((SkipBytes
.QuadPart
% PAGE_SIZE
) != 0)
678 DPRINT1("Warning: SkipBytes is not a multiple of PAGE_SIZE\n");
682 /* Allocate memory for the MDL */
683 Mdl
= MmCreateMdl(NULL
, 0, Totalbytes
);
689 /* Allocate pages into the MDL */
690 NumberOfPagesAllocated
= 0;
691 NumberOfPagesWanted
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
692 Pages
= (PPFN_TYPE
)(Mdl
+ 1);
693 while (NumberOfPagesWanted
> 0)
695 Ret
= MmAllocPagesSpecifyRange(
700 Pages
+ NumberOfPagesAllocated
);
701 if (Ret
== (ULONG
)-1)
704 NumberOfPagesAllocated
+= Ret
;
705 NumberOfPagesWanted
-= Ret
;
707 if (SkipBytes
.QuadPart
== 0)
709 LowAddress
.QuadPart
+= SkipBytes
.QuadPart
;
710 HighAddress
.QuadPart
+= SkipBytes
.QuadPart
;
713 if (NumberOfPagesAllocated
== 0)
718 else if (NumberOfPagesWanted
> 0)
720 Mdl
->ByteCount
= (ULONG
)(NumberOfPagesAllocated
* PAGE_SIZE
);
721 /* FIXME: I don't know if Mdl->Size should also be changed -- blight */
731 MmFreePagesFromMdl ( IN PMDL Mdl
)
734 Drivers use the MmFreePagesFromMdl, the kernel-mode equivalent of
735 FreeUserPhysicalPages, to free the physical memory it has allocated with
736 MmAllocatePagesForMdl. This function is also prototyped in ntddk.h:
738 Note that a driver is responsible for deallocating the MDL returned by
739 MmAllocatePagesForMdl with a call to ExFreePool, since MmFreePagesFromMdl
740 does not free the MDL.
748 NumberOfPages
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
749 Pages
= (PPFN_TYPE
)(Mdl
+ 1);
751 while (--NumberOfPages
>= 0)
753 MmDereferencePage(Pages
[NumberOfPages
]);
762 MmMapLockedPagesSpecifyCache ( IN PMDL Mdl
,
763 IN KPROCESSOR_MODE AccessMode
,
764 IN MEMORY_CACHING_TYPE CacheType
,
765 IN PVOID BaseAddress
,
766 IN ULONG BugCheckOnFailure
,
767 IN MM_PAGE_PRIORITY Priority
)
773 ULONG StartingOffset
;
774 PEPROCESS CurrentProcess
;
778 DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
779 "BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
780 Mdl
, AccessMode
, CacheType
, BaseAddress
, BugCheckOnFailure
, Priority
);
782 /* FIXME: Implement Priority */
785 Protect
= PAGE_READWRITE
;
786 if (CacheType
== MmNonCached
)
787 Protect
|= PAGE_NOCACHE
;
788 else if (CacheType
== MmWriteCombined
)
789 DPRINT("CacheType MmWriteCombined not supported!\n");
791 /* Calculate the number of pages required. */
792 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
794 if (AccessMode
!= KernelMode
)
797 LARGE_INTEGER BoundaryAddressMultiple
;
800 /* pretty sure you can't map partial mdl's to user space */
801 ASSERT(!(Mdl
->MdlFlags
& MDL_PARTIAL
));
803 BoundaryAddressMultiple
.QuadPart
= 0;
806 CurrentProcess
= PsGetCurrentProcess();
808 MmLockAddressSpace((PMADDRESS_SPACE
)&CurrentProcess
->VadRoot
);
809 Status
= MmCreateMemoryArea((PMADDRESS_SPACE
)&CurrentProcess
->VadRoot
,
810 MEMORY_AREA_MDL_MAPPING
,
812 PageCount
* PAGE_SIZE
,
817 BoundaryAddressMultiple
);
818 MmUnlockAddressSpace((PMADDRESS_SPACE
)&CurrentProcess
->VadRoot
);
819 if (!NT_SUCCESS(Status
))
821 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
826 /* Throw exception */
827 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
831 Mdl
->Process
= (PEPROCESS
)CurrentProcess
;
833 else /* if (AccessMode == KernelMode) */
835 /* can't map mdl twice */
836 ASSERT(!(Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|MDL_PARTIAL_HAS_BEEN_MAPPED
)));
837 /* can't map mdl buildt from non paged pool into kernel space */
838 ASSERT(!(Mdl
->MdlFlags
& (MDL_SOURCE_IS_NONPAGED_POOL
)));
840 CurrentProcess
= NULL
;
842 /* Allocate that number of pages from the mdl mapping region. */
843 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
845 StartingOffset
= RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap
, PageCount
, MiMdlMappingRegionHint
);
847 if (StartingOffset
== 0xffffffff)
849 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
851 DPRINT1("Out of MDL mapping space\n");
853 if ((Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) || !BugCheckOnFailure
)
861 Base
= (PVOID
)((ULONG_PTR
)MiMdlMappingRegionBase
+ StartingOffset
* PAGE_SIZE
);
863 if (MiMdlMappingRegionHint
== StartingOffset
)
865 MiMdlMappingRegionHint
+= PageCount
;
868 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
873 /* Set the virtual mappings for the MDL pages. */
874 MdlPages
= (PULONG
)(Mdl
+ 1);
876 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
877 Status
= MmCreateVirtualMappingUnsafe(CurrentProcess
,
883 Status
= MmCreateVirtualMapping(CurrentProcess
,
888 if (!NT_SUCCESS(Status
))
890 DbgPrint("Unable to create virtual mapping\n");
891 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
895 if (AccessMode
!= KernelMode
)
897 /* Throw exception */
898 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
901 else /* AccessMode == KernelMode */
903 if (!BugCheckOnFailure
)
906 /* FIXME: Use some bugcheck code instead of 0 */
911 /* Mark the MDL has having being mapped. */
912 if (AccessMode
== KernelMode
)
914 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
916 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
920 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
922 Mdl
->MappedSystemVa
= (char*)Base
+ Mdl
->ByteOffset
;
925 DPRINT1("UserMode mapping - returning 0x%x\n", (ULONG
)Base
+ Mdl
->ByteOffset
);
927 return((char*)Base
+ Mdl
->ByteOffset
);
935 MmMapLockedPages(PMDL Mdl
, KPROCESSOR_MODE AccessMode
)
937 * FUNCTION: Maps the physical pages described by a given MDL
939 * Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
940 * MmAllocatePagesForMdl or IoBuildPartialMdl.
941 * AccessMode = Specifies the portion of the address space to map the
943 * RETURNS: The base virtual address that maps the locked pages for the
944 * range described by the MDL
946 * If mapping into user space, pages are mapped into current address space.
949 return MmMapLockedPagesSpecifyCache(Mdl
,