3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
11 /* INCLUDES ****************************************************************/
15 #include <internal/debug.h>
17 /* GLOBALS *******************************************************************/
19 #define TAG_MDL TAG('M', 'D', 'L', ' ')
21 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
/* Base VA of the dedicated kernel region that MDLs are mapped into
   (MI_MDL_MAPPING_REGION_SIZE bytes, reserved in MmInitializeMdlImplementation). */
23 static PVOID MiMdlMappingRegionBase
= NULL
;
/* One bit per page of the mapping region: set = slot in use. */
24 static RTL_BITMAP MiMdlMappingRegionAllocMap
;
/* Lowest bitmap index worth scanning from when allocating slots;
   lowered again on free in MmUnmapLockedPages. */
25 static ULONG MiMdlMappingRegionHint
;
/* Protects the bitmap and the hint above. */
26 static KSPIN_LOCK MiMdlMappingRegionLock
;
/* Number of physical page frames; PFNs >= this are treated as I/O space. */
27 extern ULONG MmPageArraySize
;
32 MDL_PAGES_LOCKED MmProbeAndLockPages has been called for this mdl
33 MDL_SOURCE_IS_NONPAGED_POOL mdl has been build by MmBuildMdlForNonPagedPool
34 MDL_PARTIAL mdl has been built by IoBuildPartialMdl
35 MDL_MAPPING_CAN_FAIL in case of an error, MmMapLockedPages will return NULL instead of bugchecking
36 MDL_MAPPED_TO_SYSTEM_VA mdl has been mapped into kernel space using MmMapLockedPages
37 MDL_PARTIAL_HAS_BEEN_MAPPED mdl flagged MDL_PARTIAL has been mapped into kernel space using MmMapLockedPages
40 /* FUNCTIONS *****************************************************************/
/* NOTE(review): function name and leading parameters are not visible in
   this view; the body simply reports the routine as unimplemented. */
50 IN ULONG NumberOfBytes
54 return STATUS_NOT_IMPLEMENTED
;
/*
 * One-time init: reserve the kernel VA region used for MDL mappings and
 * set up the allocation bitmap and spinlock that manage its page slots.
 * NOTE(review): return type, braces, Status/Buffer declarations, some
 * MmCreateMemoryArea arguments and the pool tag are elided in this view.
 */
59 MmInitializeMdlImplementation(VOID
)
64 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
66 BoundaryAddressMultiple
.QuadPart
= 0;
67 MiMdlMappingRegionHint
= 0;
68 MiMdlMappingRegionBase
= NULL
;
/* Reserve MI_MDL_MAPPING_REGION_SIZE bytes of kernel VA for MDL mappings. */
70 MmLockAddressSpace(MmGetKernelAddressSpace());
71 Status
= MmCreateMemoryArea(NULL
,
72 MmGetKernelAddressSpace(),
73 MEMORY_AREA_MDL_MAPPING
,
74 &MiMdlMappingRegionBase
,
75 MI_MDL_MAPPING_REGION_SIZE
,
80 BoundaryAddressMultiple
);
81 if (!NT_SUCCESS(Status
))
/* Failure path: release the lock (rest of the error handling elided). */
83 MmUnlockAddressSpace(MmGetKernelAddressSpace());
86 MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* Bitmap buffer: one bit per region page => region / (PAGE_SIZE * 8) bytes. */
88 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
89 MI_MDL_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8),
/* All bits clear = every page slot free. */
92 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap
, Buffer
, MI_MDL_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
93 RtlClearAllBits(&MiMdlMappingRegionAllocMap
);
95 KeInitializeSpinLock(&MiMdlMappingRegionLock
);
100 MmGetMdlPageAddress(PMDL Mdl
, PVOID Offset
)
102 PPFN_NUMBER MdlPages
;
104 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
106 return((PVOID
)MdlPages
[((ULONG_PTR
)Offset
) / PAGE_SIZE
]);
/*
 * Unlock the physical pages described by Mdl (inverse of
 * MmProbeAndLockPages). Drops any kernel mapping first, then releases
 * the per-page references unless the pages were never referenced
 * (nonpaged-pool or page-read MDLs). NOTE(review): early returns,
 * braces and the per-iteration "Page = MdlPages[i]" load are elided
 * in this view.
 */
114 MmUnlockPages(PMDL Mdl
)
116 * FUNCTION: Unlocks the physical pages described by a given MDL
118 * MemoryDescriptorList = MDL describing the buffer to be unlocked
119 * NOTES: The memory described by the specified MDL must have been locked
120 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
123 * May be called in any process context.
127 PPFN_NUMBER MdlPages
;
131 * MmProbeAndLockPages MUST have been called to lock this mdl!
133 * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
134 * locked with MmLockAndProbePages, but (for now) we'll be more forgiving...
136 if (!(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
))
138 DPRINT1("MmUnlockPages called for non-locked mdl!\n");
142 /* If mdl buffer is mapped io space -> do nothing */
143 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
145 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
149 /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
150 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
152 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
156 * FIXME: I don't know whether this right, but it looks sensible
158 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) ||
159 (Mdl
->MdlFlags
& MDL_IO_PAGE_READ
))
/* Drop the reference taken on each page when the MDL was locked. */
165 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
166 for (i
=0; i
<(PAGE_ROUND_UP(Mdl
->ByteCount
+Mdl
->ByteOffset
)/PAGE_SIZE
); i
++)
/* NOTE(review): Page is presumably loaded from MdlPages[i] on an
   elided line — confirm against the full source. */
170 MmDereferencePage(Page
);
173 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
/* NOTE(review): only the parameter list is visible here; the body and
   return type are elided. Likely an unimplemented stub at this stage. */
182 MmMapLockedPagesWithReservedMapping (
183 IN PVOID MappingAddress
,
185 IN PMDL MemoryDescriptorList
,
186 IN MEMORY_CACHING_TYPE CacheType
/*
 * Tear down a mapping created by MmMapLockedPages. Kernel-space
 * mappings release their slot in the MDL mapping region bitmap;
 * user-space mappings free the process memory area (and must run in
 * the original process context). NOTE(review): local declarations,
 * braces, returns and the last MmDeleteVirtualMapping arguments are
 * elided in this view.
 */
198 MmUnmapLockedPages(PVOID BaseAddress
, PMDL Mdl
)
200 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
202 * BaseAddress = Base virtual address to which the pages were mapped
203 * MemoryDescriptorList = MDL describing the mapped pages
205 * User space unmappings _must_ be done from the original process context!
213 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress
, Mdl
);
/* Nonpaged-pool MDLs were never really mapped: the "mapping" is the
   buffer's own system address, so there is nothing to free. */
216 * In this case, the MDL has the same system address as the base address
217 * so there is no need to free it
219 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) &&
220 ((ULONG_PTR
)BaseAddress
>= KERNEL_BASE
))
226 /* Calculate the number of pages we mapped. */
227 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
230 * Docs says that BaseAddress should be a _base_ address, but every example
231 * I've seen pass the actual address. -Gunnar
233 BaseAddress
= PAGE_ALIGN(BaseAddress
);
235 /* Unmap all the pages. */
236 for (i
= 0; i
< PageCount
; i
++)
238 MmDeleteVirtualMapping(Mdl
->Process
,
239 (char*)BaseAddress
+ (i
* PAGE_SIZE
),
/* Kernel-space mapping: give the slot back to the region bitmap. */
245 if ((ULONG_PTR
)BaseAddress
>= KERNEL_BASE
)
247 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
249 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
250 /* Deallocate all the pages used. */
251 Base
= (ULONG
)((char*)BaseAddress
- (char*)MiMdlMappingRegionBase
) / PAGE_SIZE
;
253 RtlClearBits(&MiMdlMappingRegionAllocMap
, Base
, PageCount
);
/* Lower the hint so the next allocation rescans from the freed slot. */
255 MiMdlMappingRegionHint
= min (MiMdlMappingRegionHint
, Base
);
257 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
259 /* Reset the MDL state. */
260 Mdl
->MdlFlags
&= ~MDL_MAPPED_TO_SYSTEM_VA
;
261 Mdl
->MappedSystemVa
= NULL
;
/* User-space mapping: must be unmapped from the owning process. */
268 ASSERT(Mdl
->Process
== PsGetCurrentProcess());
270 Marea
= MmLocateMemoryAreaByAddress( &Mdl
->Process
->AddressSpace
, BaseAddress
);
273 DPRINT1( "Couldn't open memory area when unmapping user-space pages!\n" );
277 MmFreeMemoryArea( &Mdl
->Process
->AddressSpace
, Marea
, NULL
, NULL
);
/* NOTE(review): only the parameter list is visible; body and return type
   are elided. Likely an unimplemented stub at this stage. */
290 MmUnmapReservedMapping (
291 IN PVOID BaseAddress
,
293 IN PMDL MemoryDescriptorList
301 MmBuildMdlFromPages(PMDL Mdl
, PPFN_TYPE Pages
)
303 memcpy(Mdl
+ 1, Pages
, sizeof(PFN_TYPE
) * (PAGE_ROUND_UP(Mdl
->ByteOffset
+Mdl
->ByteCount
)/PAGE_SIZE
));
305 /* FIXME: this flag should be set by the caller perhaps? */
306 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
/* NOTE(review): function name and return type are elided; from the
   PREAD_LIST parameters this is presumably MmPrefetchPages — confirm.
   The body reports the routine as unimplemented. */
316 IN ULONG NumberOfLists
,
317 IN PREAD_LIST
*ReadLists
321 return STATUS_NOT_IMPLEMENTED
;
/* Unimplemented stub. NOTE(review): return type and trailing parameters
   are elided in this view. */
330 MmProtectMdlSystemAddress (
331 IN PMDL MemoryDescriptorList
,
336 return STATUS_NOT_IMPLEMENTED
;
/*
 * Probe each page of the MDL's buffer, fault it in as needed, take a
 * reference on it, and record its PFN in the MDL's page array. Raises
 * STATUS_ACCESS_VIOLATION (after rolling back references already taken)
 * if a page cannot be made present or writable. NOTE(review): braces,
 * several local declarations and the rollback "Page = MdlPages[j]" /
 * "MdlPages[i] = Page" loads/stores are elided in this view.
 */
343 VOID STDCALL
MmProbeAndLockPages (PMDL Mdl
,
344 KPROCESSOR_MODE AccessMode
,
345 LOCK_OPERATION Operation
)
347 * FUNCTION: Probes the specified pages, makes them resident and locks them
350 * AccessMode = Access at which to probe the buffer
351 * Operation = Operation to probe for
353 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool
354 * used in cases where you know that the mdl address is paged memory or
355 * you don't know where the mdl address comes from. MmProbeAndLockPages will
356 * work no matter what kind of mdl address you have.
363 KPROCESSOR_MODE Mode
;
365 PEPROCESS CurrentProcess
= PsGetCurrentProcess();
366 PMADDRESS_SPACE AddressSpace
;
368 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl
);
/* Must not already be locked, mapped, partial, io-space or nonpaged-pool. */
370 ASSERT(!(Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|MDL_MAPPED_TO_SYSTEM_VA
|MDL_PARTIAL
|
371 MDL_IO_SPACE
|MDL_SOURCE_IS_NONPAGED_POOL
)));
/* PFN array lives directly behind the MDL header. */
373 MdlPages
= (PPFN_TYPE
)(Mdl
+ 1);
374 NrPages
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
376 /* mdl must have enough page entries */
377 ASSERT(NrPages
<= (Mdl
->Size
- sizeof(MDL
))/sizeof(PFN_TYPE
));
/* Kernel address whose PFN is beyond physical memory => I/O memory:
   record the PFNs directly, no referencing needed. */
380 if (Mdl
->StartVa
>= (PVOID
)KERNEL_BASE
&&
381 MmGetPfnForProcess(NULL
, Mdl
->StartVa
) >= MmPageArraySize
)
383 /* phys addr is not phys memory so this must be io memory */
385 for (i
= 0; i
< NrPages
; i
++)
387 MdlPages
[i
] = MmGetPfnForProcess(NULL
, (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
));
390 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
|MDL_IO_SPACE
;
/* Choose the address space: kernel buffers use the kernel address
   space, user buffers the current process (also recorded in the MDL). */
395 if (Mdl
->StartVa
>= (PVOID
)KERNEL_BASE
)
397 /* FIXME: why isn't AccessMode used? */
400 AddressSpace
= MmGetKernelAddressSpace();
404 /* FIXME: why isn't AccessMode used? */
406 Mdl
->Process
= CurrentProcess
;
407 AddressSpace
= &CurrentProcess
->AddressSpace
;
414 MmLockAddressSpace(AddressSpace
);
416 for (i
= 0; i
< NrPages
; i
++)
420 Address
= (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
);
423 * FIXME: skip the probing/access stuff if buffer is nonpaged kernel space?
/* Fault the page in if it is not present. */
427 if (!MmIsPagePresent(NULL
, Address
))
429 Status
= MmNotPresentFault(Mode
, (ULONG_PTR
)Address
, TRUE
);
430 if (!NT_SUCCESS(Status
))
/* Roll back: drop references already taken on earlier pages. */
432 for (j
= 0; j
< i
; j
++)
435 if (Page
< MmPageArraySize
)
438 MmDereferencePage(Page
);
441 MmUnlockAddressSpace(AddressSpace
);
442 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
447 MmLockPage(MmGetPfnForProcess(NULL
, Address
));
/* For write/modify operations the page must be writable; fault it
   writable or roll back and raise. */
450 if ((Operation
== IoWriteAccess
|| Operation
== IoModifyAccess
) &&
451 (!(MmGetPageProtect(NULL
, (PVOID
)Address
) & PAGE_READWRITE
)))
453 Status
= MmAccessFault(Mode
, (ULONG_PTR
)Address
, TRUE
);
454 if (!NT_SUCCESS(Status
))
456 for (j
= 0; j
< i
; j
++)
459 if (Page
< MmPageArraySize
)
462 MmDereferencePage(Page
);
465 MmUnlockAddressSpace(AddressSpace
);
466 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
/* Record the PFN; reference real memory pages, flag I/O-space ones. */
469 Page
= MmGetPfnForProcess(NULL
, Address
);
471 if (Page
>= MmPageArraySize
)
472 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
474 MmReferencePage(Page
);
477 MmUnlockAddressSpace(AddressSpace
);
478 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
/* NOTE(review): only the parameter list is visible; body and return type
   are elided. Likely an unimplemented stub at this stage. */
487 MmProbeAndLockProcessPages (
488 IN OUT PMDL MemoryDescriptorList
,
489 IN PEPROCESS Process
,
490 IN KPROCESSOR_MODE AccessMode
,
491 IN LOCK_OPERATION Operation
/* NOTE(review): only the parameter list is visible; body and return type
   are elided. Likely an unimplemented stub at this stage. */
503 MmProbeAndLockSelectedPages(
504 IN OUT PMDL MemoryDescriptorList
,
505 IN LARGE_INTEGER PageList
[],
506 IN KPROCESSOR_MODE AccessMode
,
507 IN LOCK_OPERATION Operation
517 ULONG STDCALL
MmSizeOfMdl (PVOID Base
,
520 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
521 * the given address range
523 * Base = base virtual address
524 * Length = number of bytes to map
529 len
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
,Length
);
531 return(sizeof(MDL
)+(len
*sizeof(PFN_TYPE
)));
539 MmBuildMdlForNonPagedPool (PMDL Mdl
)
541 * FUNCTION: Fills in the corresponding physical page array of a given
542 * MDL for a buffer in nonpaged system space
544 * Mdl = Points to an MDL that supplies a virtual address,
545 * byte offset and length
547 * This function can be seen as a fast version of MmProbeAndLockPages in case
548 * you _know_ that the mdl address is within nonpaged kernel space.
556 * mdl buffer must (at least) be in kernel space, thou this doesn't
557 * necesarely mean that the buffer in within _nonpaged_ kernel space...
559 ASSERT((ULONG_PTR
)Mdl
->StartVa
>= KERNEL_BASE
);
561 PageCount
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
562 MdlPages
= (PPFN_TYPE
)(Mdl
+ 1);
564 /* mdl must have enough page entries */
565 ASSERT(PageCount
<= (Mdl
->Size
- sizeof(MDL
))/sizeof(PFN_TYPE
));
567 for (i
=0; i
< PageCount
; i
++)
569 *MdlPages
++ = MmGetPfnForProcess(NULL
, (char*)Mdl
->StartVa
+ (i
* PAGE_SIZE
));
572 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
574 Mdl
->MappedSystemVa
= (char*)Mdl
->StartVa
+ Mdl
->ByteOffset
;
/*
 * Allocate (if Mdl is NULL) and initialize an MDL for [Base, Base+Length).
 * NOTE(review): the return type, remaining parameters, the NULL-check /
 * allocation branch structure and the return statement are elided in
 * this view.
 */
582 MmCreateMdl (PMDL Mdl
,
586 * FUNCTION: Allocates and initializes an MDL
588 * MemoryDescriptorList = Points to MDL to initialize. If this is
589 * NULL then one is allocated
590 * Base = Base virtual address of the buffer
591 * Length = Length in bytes of the buffer
592 * RETURNS: A pointer to initialized MDL
/* Size = header + one PFN slot per spanned page (see MmSizeOfMdl). */
599 Size
= MmSizeOfMdl(Base
,Length
);
601 (PMDL
)ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
608 MmInitializeMdl(Mdl
, (char*)Base
, Length
);
/* Unimplemented; related to crash-dump support. NOTE(review): return
   type and body are elided in this view. */
618 MmMapMemoryDumpMdl (PVOID Unknown0
)
620 * FIXME: Has something to do with crash dumps. Do we want to implement
/*
 * Allocate nonpaged physical pages within [LowAddress, HighAddress] and
 * record them in a freshly created MDL. May return an MDL describing
 * fewer bytes than requested (ByteCount is trimmed). The caller owns the
 * MDL and must free it with ExFreePool after MmFreePagesFromMdl.
 * NOTE(review): braces, several locals, most MmAllocPagesSpecifyRange
 * arguments and the NULL/return paths are elided in this view.
 */
632 MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress
,
633 IN PHYSICAL_ADDRESS HighAddress
,
634 IN PHYSICAL_ADDRESS SkipBytes
,
635 IN SIZE_T Totalbytes
)
638 MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory pages to an MDL
640 MmAllocatePagesForMdlSearch the PFN database for free, zeroed or standby
641 pagesAllocates pages and puts in MDLDoes not map pages (caller responsibility)
642 Designed to be used by an AGP driver
644 LowAddress is the lowest acceptable physical address it wants to allocate
645 and HighAddress is the highest. SkipBytes are the number of bytes that the
646 kernel should keep free above LowAddress and below the address at which it
647 starts to allocate physical memory. TotalBytes are the number of bytes that
648 the driver wants to allocate. The return value of the function is a MDL
649 that if non-zero describes the physical memory the kernel has given the
650 driver. To access portions of the memory the driver must create sub-MDLs
651 from the returned MDL that describe appropriate portions of the physical
652 memory. When a driver wants to access physical memory described by a
653 sub-MDL it must map the sub-MDL using MmGetSystemAddressForMdlSafe.
660 ULONG NumberOfPagesWanted
, NumberOfPagesAllocated
;
663 DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
664 "SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
665 LowAddress
.QuadPart
, HighAddress
.QuadPart
,
666 SkipBytes
.QuadPart
, Totalbytes
);
668 /* SkipBytes must be a multiple of the page size */
669 if ((SkipBytes
.QuadPart
% PAGE_SIZE
) != 0)
671 DPRINT1("Warning: SkipBytes is not a multiple of PAGE_SIZE\n");
675 /* Allocate memory for the MDL */
676 Mdl
= MmCreateMdl(NULL
, 0, Totalbytes
);
682 /* Allocate pages into the MDL */
683 NumberOfPagesAllocated
= 0;
684 NumberOfPagesWanted
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
685 Pages
= (PPFN_TYPE
)(Mdl
+ 1);
/* Keep requesting ranges until satisfied or the allocator gives up. */
686 while (NumberOfPagesWanted
> 0)
688 Ret
= MmAllocPagesSpecifyRange(
693 Pages
+ NumberOfPagesAllocated
);
/* (ULONG)-1 means no pages could be allocated in the current range. */
694 if (Ret
== (ULONG
)-1)
697 NumberOfPagesAllocated
+= Ret
;
698 NumberOfPagesWanted
-= Ret
;
/* With no skip step the range cannot advance: stop after one pass. */
700 if (SkipBytes
.QuadPart
== 0)
702 LowAddress
.QuadPart
+= SkipBytes
.QuadPart
;
703 HighAddress
.QuadPart
+= SkipBytes
.QuadPart
;
/* Nothing allocated => fail; partial allocation => shrink ByteCount. */
706 if (NumberOfPagesAllocated
== 0)
711 else if (NumberOfPagesWanted
> 0)
713 Mdl
->ByteCount
= (ULONG
)(NumberOfPagesAllocated
* PAGE_SIZE
);
714 /* FIXME: I don't know if Mdl->Size should also be changed -- blight */
/*
 * Release the reference on every physical page recorded in an MDL that
 * was produced by MmAllocatePagesForMdl. The MDL itself is NOT freed;
 * the caller must still ExFreePool it.
 */
724 MmFreePagesFromMdl ( IN PMDL Mdl
)
727 Drivers use the MmFreePagesFromMdl, the kernel-mode equivalent of
728 FreeUserPhysicalPages, to free the physical memory it has allocated with
729 MmAllocatePagesForMdl. This function is also prototyped in ntddk.h:
731 Note that a driver is responsible for deallocating the MDL returned by
732 MmAllocatePagesForMdl with a call to ExFreePool, since MmFreePagesFromMdl
733 does not free the MDL.
/* Pages spanned by the described buffer; PFN array follows the header. */
741 NumberOfPages
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
742 Pages
= (PPFN_TYPE
)(Mdl
+ 1);
/* NOTE(review): NumberOfPages' declaration is not visible here; it must
   be a signed type or "--NumberOfPages >= 0" never terminates — confirm. */
744 while (--NumberOfPages
>= 0)
746 MmDereferencePage(Pages
[NumberOfPages
]);
/*
 * Map the physical pages of an already-locked MDL into virtual memory.
 * UserMode: creates a memory area in the current process. KernelMode:
 * grabs a slot of PageCount pages from the global MDL mapping region.
 * Returns the mapped address plus the MDL's ByteOffset. Failure either
 * returns NULL (MDL_MAPPING_CAN_FAIL / !BugCheckOnFailure), raises
 * STATUS_ACCESS_VIOLATION, or bugchecks. NOTE(review): braces, several
 * locals, returns, KeBugCheck calls and most arguments of the
 * MmCreateVirtualMapping(Unsafe) calls are elided in this view.
 */
755 MmMapLockedPagesSpecifyCache ( IN PMDL Mdl
,
756 IN KPROCESSOR_MODE AccessMode
,
757 IN MEMORY_CACHING_TYPE CacheType
,
758 IN PVOID BaseAddress
,
759 IN ULONG BugCheckOnFailure
,
760 IN MM_PAGE_PRIORITY Priority
)
766 ULONG StartingOffset
;
767 PEPROCESS CurrentProcess
;
771 DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
772 "BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
773 Mdl
, AccessMode
, CacheType
, BaseAddress
, BugCheckOnFailure
, Priority
);
775 /* FIXME: Implement Priority */
778 /* Calculate the number of pages required. */
779 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
781 if (AccessMode
== UserMode
)
784 LARGE_INTEGER BoundaryAddressMultiple
;
787 /* pretty sure you can't map partial mdl's to user space */
788 ASSERT(!(Mdl
->MdlFlags
& MDL_PARTIAL
));
790 BoundaryAddressMultiple
.QuadPart
= 0;
793 CurrentProcess
= PsGetCurrentProcess();
/* Reserve PageCount pages of user VA in the current process. */
795 MmLockAddressSpace(&CurrentProcess
->AddressSpace
);
796 Status
= MmCreateMemoryArea(CurrentProcess
,
797 &CurrentProcess
->AddressSpace
,
798 MEMORY_AREA_MDL_MAPPING
,
800 PageCount
* PAGE_SIZE
,
801 0, /* PAGE_READWRITE? */
805 BoundaryAddressMultiple
);
806 MmUnlockAddressSpace(&CurrentProcess
->AddressSpace
);
807 if (!NT_SUCCESS(Status
))
809 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
814 /* Throw exception */
815 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
/* Remember the owning process for the later unmap. */
819 Mdl
->Process
= CurrentProcess
;
821 else /* if (AccessMode == KernelMode) */
823 /* can't map mdl twice */
824 ASSERT(!(Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|MDL_PARTIAL_HAS_BEEN_MAPPED
)));
825 /* can't map mdl built from non paged pool into kernel space */
826 ASSERT(!(Mdl
->MdlFlags
& (MDL_SOURCE_IS_NONPAGED_POOL
)));
828 CurrentProcess
= NULL
;
830 /* Allocate that number of pages from the mdl mapping region. */
831 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
833 StartingOffset
= RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap
, PageCount
, MiMdlMappingRegionHint
);
/* 0xffffffff = no run of PageCount free slots found. */
835 if (StartingOffset
== 0xffffffff)
837 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
839 DPRINT1("Out of MDL mapping space\n");
841 if ((Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) || !BugCheckOnFailure
)
849 Base
= (PVOID
)((ULONG_PTR
)MiMdlMappingRegionBase
+ StartingOffset
* PAGE_SIZE
);
/* Advance the hint past the slots we just took. */
851 if (MiMdlMappingRegionHint
== StartingOffset
)
853 MiMdlMappingRegionHint
+= PageCount
;
856 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
861 /* Set the virtual mappings for the MDL pages. */
862 MdlPages
= (PULONG
)(Mdl
+ 1);
864 Protect
= PAGE_READWRITE
;
865 if (CacheType
== MmNonCached
)
866 Protect
|= PAGE_NOCACHE
;
867 else if (CacheType
== MmWriteCombined
)
868 DPRINT("CacheType MmWriteCombined not supported!\n");
/* I/O-space PFNs bypass the PFN-database checks ("Unsafe" variant). */
869 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
870 Status
= MmCreateVirtualMappingUnsafe(CurrentProcess
,
876 Status
= MmCreateVirtualMapping(CurrentProcess
,
881 if (!NT_SUCCESS(Status
))
883 DbgPrint("Unable to create virtual mapping\n");
884 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
888 if (AccessMode
== UserMode
)
890 /* Throw exception */
891 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
894 else /* AccessMode == KernelMode */
896 if (!BugCheckOnFailure
)
899 /* FIXME: Use some bugcheck code instead of 0 */
904 /* Mark the MDL as having been mapped. */
905 if (AccessMode
== KernelMode
)
907 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
909 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
913 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
915 Mdl
->MappedSystemVa
= (char*)Base
+ Mdl
->ByteOffset
;
918 DPRINT1("UserMode mapping - returning 0x%x\n", (ULONG
)Base
+ Mdl
->ByteOffset
);
920 return((char*)Base
+ Mdl
->ByteOffset
);
928 MmMapLockedPages(PMDL Mdl
, KPROCESSOR_MODE AccessMode
)
930 * FUNCTION: Maps the physical pages described by a given MDL
932 * Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
933 * MmAllocatePagesForMdl or IoBuildPartialMdl.
934 * AccessMode = Specifies the portion of the address space to map the
936 * RETURNS: The base virtual address that maps the locked pages for the
937 * range described by the MDL
939 * If mapping into user space, pages are mapped into current address space.
942 return MmMapLockedPagesSpecifyCache(Mdl
,