/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/mdl.c
 * PURPOSE:         Manipulates MDLs
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */
11 /* INCLUDES ****************************************************************/
15 #include <internal/debug.h>
17 /* GLOBALS *******************************************************************/
19 #define TAG_MDL TAG('M', 'D', 'L', ' ')
21 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
23 static PVOID MiMdlMappingRegionBase
= NULL
;
24 static RTL_BITMAP MiMdlMappingRegionAllocMap
;
25 static ULONG MiMdlMappingRegionHint
;
26 static KSPIN_LOCK MiMdlMappingRegionLock
;
27 extern ULONG MmPageArraySize
;
/*
 * MDL flag meanings:
 * MDL_PAGES_LOCKED              MmProbeAndLockPages has been called for this mdl
 * MDL_SOURCE_IS_NONPAGED_POOL   mdl has been built by MmBuildMdlForNonPagedPool
 * MDL_PARTIAL                   mdl has been built by IoBuildPartialMdl
 * MDL_MAPPING_CAN_FAIL          in case of an error, MmMapLockedPages will return
 *                               NULL instead of bugchecking
 * MDL_MAPPED_TO_SYSTEM_VA       mdl has been mapped into kernel space using
 *                               MmMapLockedPages
 * MDL_PARTIAL_HAS_BEEN_MAPPED   mdl flagged MDL_PARTIAL has been mapped into
 *                               kernel space using MmMapLockedPages
 */
40 /* FUNCTIONS *****************************************************************/
/*
 * NOTE(review): extraction-garbled fragment. The return type, function name
 * and leading parameters (original lines ~41-53) are missing from this chunk;
 * only the final parameter and an unimplemented-stub return survive. The
 * numeric prefixes on lines below are residue of the original file's line
 * numbering, fused into the text by the extraction.
 */
50 IN ULONG NumberOfBytes
/* Stub: not implemented. */
54 return STATUS_NOT_IMPLEMENTED
;
/*
 * MmInitializeMdlImplementation
 * Reserves a MEMORY_AREA_MDL_MAPPING region of MI_MDL_MAPPING_REGION_SIZE
 * bytes in the kernel address space and initializes the allocation bitmap,
 * search hint and spinlock that the MDL mapping routines use.
 * NOTE(review): garbled chunk — the return type, braces, part of the
 * MmCreateMemoryArea argument list (original lines 76-79) and the body of
 * the failure branch (original lines 82-85) are missing here. Leading
 * numbers are residue of the original file's line numbering.
 */
59 MmInitializeMdlImplementation(VOID
)
64 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
66 BoundaryAddressMultiple
.QuadPart
= 0;
67 MiMdlMappingRegionHint
= 0;
68 MiMdlMappingRegionBase
= NULL
;
/* Reserve the MDL mapping region while holding the kernel address space lock. */
70 MmLockAddressSpace(MmGetKernelAddressSpace());
71 Status
= MmCreateMemoryArea(NULL
,
72 MmGetKernelAddressSpace(),
73 MEMORY_AREA_MDL_MAPPING
,
74 &MiMdlMappingRegionBase
,
75 MI_MDL_MAPPING_REGION_SIZE
,
80 BoundaryAddressMultiple
);
81 if (!NT_SUCCESS(Status
))
/* NOTE(review): the failure-path body is missing from this chunk. */
83 MmUnlockAddressSpace(MmGetKernelAddressSpace());
86 MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* One bit per page: region/PAGE_SIZE bits == region/(PAGE_SIZE*8) bytes.
   NOTE(review): the ExAllocatePool result is not NULL-checked before use. */
88 Buffer
= ExAllocatePool(NonPagedPool
, MI_MDL_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8));
90 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap
, Buffer
, MI_MDL_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
/* All pages of the mapping region start out free. */
91 RtlClearAllBits(&MiMdlMappingRegionAllocMap
);
93 KeInitializeSpinLock(&MiMdlMappingRegionLock
);
/*
 * MmGetMdlPageAddress
 * Returns the PFN-array entry (cast to PVOID) for the page containing byte
 * Offset within the MDL's buffer. The PFN array is stored immediately after
 * the MDL header, i.e. at (Mdl + 1).
 * NOTE(review): the return type line and the function's braces are missing
 * from this garbled chunk; code below is kept byte-identical.
 */
98 MmGetMdlPageAddress(PMDL Mdl
, PVOID Offset
)
100 PPFN_NUMBER MdlPages
;
102 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
/* Index the per-page PFN array by the page number of Offset. */
104 return((PVOID
)MdlPages
[((ULONG_PTR
)Offset
) / PAGE_SIZE
]);
/*
 * MmUnlockPages
 * Unlocks the physical pages described by an MDL previously locked with
 * MmProbeAndLockPages: undoes any system-space mapping, dereferences each
 * page (unless the MDL came from nonpaged pool / an IO page read), and
 * clears MDL_PAGES_LOCKED.
 * NOTE(review): garbled chunk — braces, the declarations of i/Page, and the
 * loop body lines (original 165-167) that computed Page before the
 * MmDereferencePage call are missing here.
 */
112 MmUnlockPages(PMDL Mdl
)
114 * FUNCTION: Unlocks the physical pages described by a given MDL
116 * MemoryDescriptorList = MDL describing the buffer to be unlocked
117 * NOTES: The memory described by the specified MDL must have been locked
118 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
121 * May be called in any process context.
125 PPFN_NUMBER MdlPages
;
129 * MmProbeAndLockPages MUST have been called to lock this mdl!
131 * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
132 * locked with MmLockAndProbePages, but (for now) we'll be more forgiving...
134 if (!(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
))
136 DPRINT1("MmUnlockPages called for non-locked mdl!\n");
140 /* If mdl buffer is mapped io space -> do nothing */
141 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
143 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
147 /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
148 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
150 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
154 * FIXME: I don't know whether this right, but it looks sensible
156 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) ||
157 (Mdl
->MdlFlags
& MDL_IO_PAGE_READ
))
/* Walk the PFN array stored right after the MDL header. */
163 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
164 for (i
=0; i
<(PAGE_ROUND_UP(Mdl
->ByteCount
+Mdl
->ByteOffset
)/PAGE_SIZE
); i
++)
/* NOTE(review): the statements assigning Page from MdlPages[i]
   (original lines 165-167) are missing from this chunk. */
168 MmDereferencePage(Page
);
171 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
/*
 * MmMapLockedPagesWithReservedMapping — signature fragment only.
 * NOTE(review): the return type, one parameter (original line 182) and the
 * entire body are missing from this garbled chunk.
 */
180 MmMapLockedPagesWithReservedMapping (
181 IN PVOID MappingAddress
,
183 IN PMDL MemoryDescriptorList
,
184 IN MEMORY_CACHING_TYPE CacheType
/*
 * MmUnmapLockedPages
 * Releases a mapping created by MmMapLockedPages(SpecifyCache): deletes the
 * per-page virtual mappings, then — for kernel-space mappings — returns the
 * pages to the MDL mapping region bitmap and clears MDL_MAPPED_TO_SYSTEM_VA;
 * for user-space mappings it frees the MEMORY_AREA created at map time.
 * NOTE(review): garbled chunk — braces, the early-return body for the
 * nonpaged-pool case (original lines 219-222), trailing arguments of
 * MmDeleteVirtualMapping (original lines 238-240) and the Marea NULL-check
 * branch shape are missing here.
 */
196 MmUnmapLockedPages(PVOID BaseAddress
, PMDL Mdl
)
198 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
200 * BaseAddress = Base virtual address to which the pages were mapped
201 * MemoryDescriptorList = MDL describing the mapped pages
203 * User space unmappings _must_ be done from the original process context!
211 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress
, Mdl
);
214 * In this case, the MDL has the same system address as the base address
215 * so there is no need to free it
217 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) &&
218 ((ULONG_PTR
)BaseAddress
>= KERNEL_BASE
))
224 /* Calculate the number of pages we mapped. */
225 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
228 * Docs says that BaseAddress should be a _base_ address, but every example
229 * I've seen pass the actual address. -Gunnar
231 BaseAddress
= PAGE_ALIGN(BaseAddress
);
233 /* Unmap all the pages. */
234 for (i
= 0; i
< PageCount
; i
++)
236 MmDeleteVirtualMapping(Mdl
->Process
,
237 (char*)BaseAddress
+ (i
* PAGE_SIZE
),
/* Kernel-space mapping: give the page slots back to the region bitmap. */
243 if ((ULONG_PTR
)BaseAddress
>= KERNEL_BASE
)
245 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
247 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
248 /* Deallocate all the pages used. */
249 Base
= (ULONG
)((char*)BaseAddress
- (char*)MiMdlMappingRegionBase
) / PAGE_SIZE
;
251 RtlClearBits(&MiMdlMappingRegionAllocMap
, Base
, PageCount
);
/* Lower the hint so the freed run is found by the next search. */
253 MiMdlMappingRegionHint
= min (MiMdlMappingRegionHint
, Base
);
255 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
257 /* Reset the MDL state. */
258 Mdl
->MdlFlags
&= ~MDL_MAPPED_TO_SYSTEM_VA
;
259 Mdl
->MappedSystemVa
= NULL
;
/* User-space mapping: must run in the process that created the mapping. */
266 ASSERT(Mdl
->Process
== PsGetCurrentProcess());
268 Marea
= MmLocateMemoryAreaByAddress( &Mdl
->Process
->AddressSpace
, BaseAddress
);
271 DPRINT1( "Couldn't open memory area when unmapping user-space pages!\n" );
275 MmFreeMemoryArea( &Mdl
->Process
->AddressSpace
, Marea
, NULL
, NULL
);
/*
 * MmUnmapReservedMapping — signature fragment only.
 * NOTE(review): the return type, one parameter (original line 290) and the
 * entire body are missing from this garbled chunk.
 */
288 MmUnmapReservedMapping (
289 IN PVOID BaseAddress
,
291 IN PMDL MemoryDescriptorList
/*
 * MmBuildMdlFromPages
 * Copies the caller-supplied PFN array into the MDL's page array (stored at
 * Mdl + 1), sized to cover ByteOffset+ByteCount rounded up to whole pages,
 * then sets MDL_IO_PAGE_READ.
 * NOTE(review): the return type line and braces are missing from this
 * garbled chunk.
 */
299 MmBuildMdlFromPages(PMDL Mdl
, PPFN_TYPE Pages
)
301 memcpy(Mdl
+ 1, Pages
, sizeof(PFN_TYPE
) * (PAGE_ROUND_UP(Mdl
->ByteOffset
+Mdl
->ByteCount
)/PAGE_SIZE
));
303 /* FIXME: this flag should be set by the caller perhaps? */
304 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
/*
 * NOTE(review): extraction-garbled fragment of an unimplemented stub taking
 * an array of READ_LIST pointers; the return type, function name and leading
 * lines are missing from this chunk.
 */
314 IN ULONG NumberOfLists
,
315 IN PREAD_LIST
*ReadLists
/* Stub: not implemented. */
319 return STATUS_NOT_IMPLEMENTED
;
/*
 * MmProtectMdlSystemAddress — unimplemented stub.
 * NOTE(review): the return type, second parameter (original lines 330-333)
 * and braces are missing from this garbled chunk.
 */
328 MmProtectMdlSystemAddress (
329 IN PMDL MemoryDescriptorList
,
334 return STATUS_NOT_IMPLEMENTED
;
/*
 * MmProbeAndLockPages
 * Probes the MDL's buffer, faults pages in as needed, reference-locks each
 * page and fills the MDL's PFN array. Addresses whose PFN is outside the PFN
 * database (>= MmPageArraySize) are treated as I/O space and flagged
 * MDL_IO_SPACE. Raises STATUS_ACCESS_VIOLATION (after rolling back already
 * referenced pages) if a probe fails.
 * NOTE(review): garbled chunk — braces, the assignments to Mode (original
 * lines ~396/403), the early return after the io-space branch, and the
 * statements recomputing Page inside the two rollback loops (original lines
 * ~431-432 and ~455-456) are missing here; declarations of i, j, Page,
 * Address, Status, MdlPages and NrPages are also not visible.
 */
341 VOID STDCALL
MmProbeAndLockPages (PMDL Mdl
,
342 KPROCESSOR_MODE AccessMode
,
343 LOCK_OPERATION Operation
)
345 * FUNCTION: Probes the specified pages, makes them resident and locks them
348 * AccessMode = Access at which to probe the buffer
349 * Operation = Operation to probe for
351 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool
352 * used in cases where you know that the mdl address is paged memory or
353 * you don't know where the mdl address comes from. MmProbeAndLockPages will
354 * work no matter what kind of mdl address you have.
361 KPROCESSOR_MODE Mode
;
363 PEPROCESS CurrentProcess
= PsGetCurrentProcess();
364 PMADDRESS_SPACE AddressSpace
;
366 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl
);
/* The MDL must not already be locked, mapped, partial or nonpaged-pool built. */
368 ASSERT(!(Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|MDL_MAPPED_TO_SYSTEM_VA
|MDL_PARTIAL
|
369 MDL_IO_SPACE
|MDL_SOURCE_IS_NONPAGED_POOL
)));
371 MdlPages
= (PPFN_TYPE
)(Mdl
+ 1);
372 NrPages
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
374 /* mdl must have enough page entries */
375 ASSERT(NrPages
<= (Mdl
->Size
- sizeof(MDL
))/sizeof(PFN_TYPE
));
/* Kernel address whose PFN is outside the PFN database: treat as I/O space. */
378 if (Mdl
->StartVa
>= (PVOID
)KERNEL_BASE
&&
379 MmGetPfnForProcess(NULL
, Mdl
->StartVa
) >= MmPageArraySize
)
381 /* phys addr is not phys memory so this must be io memory */
383 for (i
= 0; i
< NrPages
; i
++)
385 MdlPages
[i
] = MmGetPfnForProcess(NULL
, (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
))
388 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
|MDL_IO_SPACE
;
/* Pick the address space to probe against (kernel vs current process). */
393 if (Mdl
->StartVa
>= (PVOID
)KERNEL_BASE
)
395 /* FIXME: why isn't AccessMode used? */
398 AddressSpace
= MmGetKernelAddressSpace();
402 /* FIXME: why isn't AccessMode used? */
404 Mdl
->Process
= CurrentProcess
;
405 AddressSpace
= &CurrentProcess
->AddressSpace
;
412 MmLockAddressSpace(AddressSpace
);
414 for (i
= 0; i
< NrPages
; i
++)
418 Address
= (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
);
421 * FIXME: skip the probing/access stuff if buffer is nonpaged kernel space?
/* Fault the page in if it is not present; roll back and raise on failure. */
425 if (!MmIsPagePresent(NULL
, Address
))
427 Status
= MmNotPresentFault(Mode
, (ULONG_PTR
)Address
, TRUE
);
428 if (!NT_SUCCESS(Status
))
430 for (j
= 0; j
< i
; j
++)
/* NOTE(review): the statements recomputing Page for entry j
   (original lines ~431-432) are missing from this chunk. */
433 if (Page
< MmPageArraySize
)
436 MmDereferencePage(Page
);
439 MmUnlockAddressSpace(AddressSpace
);
440 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
445 MmLockPage(MmGetPfnForProcess(NULL
, Address
));
/* For write/modify operations the page must be writable; fault otherwise. */
448 if ((Operation
== IoWriteAccess
|| Operation
== IoModifyAccess
) &&
449 (!(MmGetPageProtect(NULL
, (PVOID
)Address
) & PAGE_READWRITE
)))
451 Status
= MmAccessFault(Mode
, (ULONG_PTR
)Address
, TRUE
);
452 if (!NT_SUCCESS(Status
))
454 for (j
= 0; j
< i
; j
++)
457 if (Page
< MmPageArraySize
)
460 MmDereferencePage(Page
);
463 MmUnlockAddressSpace(AddressSpace
);
464 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
467 Page
= MmGetPfnForProcess(NULL
, Address
);
469 if (Page
>= MmPageArraySize
)
470 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
472 MmReferencePage(Page
);
475 MmUnlockAddressSpace(AddressSpace
);
476 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
/*
 * MmProbeAndLockProcessPages — signature fragment only.
 * NOTE(review): the return type and the entire body are missing from this
 * garbled chunk.
 */
485 MmProbeAndLockProcessPages (
486 IN OUT PMDL MemoryDescriptorList
,
487 IN PEPROCESS Process
,
488 IN KPROCESSOR_MODE AccessMode
,
489 IN LOCK_OPERATION Operation
/*
 * MmProbeAndLockSelectedPages — signature fragment only.
 * NOTE(review): the return type and the entire body are missing from this
 * garbled chunk.
 */
501 MmProbeAndLockSelectedPages(
502 IN OUT PMDL MemoryDescriptorList
,
503 IN LARGE_INTEGER PageList
[],
504 IN KPROCESSOR_MODE AccessMode
,
505 IN LOCK_OPERATION Operation
/*
 * MmSizeOfMdl
 * Returns the allocation size in bytes for an MDL describing the range
 * [Base, Base+Length): the MDL header plus one PFN_TYPE entry per spanned
 * page.
 * NOTE(review): the second parameter line (original line 516), braces and
 * the declaration of len are missing from this garbled chunk.
 */
515 ULONG STDCALL
MmSizeOfMdl (PVOID Base
,
518 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
519 * the given address range
521 * Base = base virtual address
522 * Length = number of bytes to map
527 len
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
,Length
);
529 return(sizeof(MDL
)+(len
*sizeof(PFN_TYPE
)));
/*
 * MmBuildMdlForNonPagedPool
 * Fills in the MDL's PFN array for a buffer already resident in nonpaged
 * kernel space (no probing, faulting or referencing — contrast with
 * MmProbeAndLockPages), sets MDL_SOURCE_IS_NONPAGED_POOL, and records the
 * buffer's existing kernel address as MappedSystemVa.
 * NOTE(review): garbled chunk — the return type, braces, and declarations of
 * i, PageCount and MdlPages are missing here.
 */
537 MmBuildMdlForNonPagedPool (PMDL Mdl
)
539 * FUNCTION: Fills in the corresponding physical page array of a given
540 * MDL for a buffer in nonpaged system space
542 * Mdl = Points to an MDL that supplies a virtual address,
543 * byte offset and length
545 * This function can be seen as a fast version of MmProbeAndLockPages in case
546 * you _know_ that the mdl address is within nonpaged kernel space.
554 * mdl buffer must (at least) be in kernel space, thou this doesn't
555 * necesarely mean that the buffer in within _nonpaged_ kernel space...
557 ASSERT((ULONG_PTR
)Mdl
->StartVa
>= KERNEL_BASE
);
559 PageCount
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
560 MdlPages
= (PPFN_TYPE
)(Mdl
+ 1);
562 /* mdl must have enough page entries */
563 ASSERT(PageCount
<= (Mdl
->Size
- sizeof(MDL
))/sizeof(PFN_TYPE
));
/* Copy the PFN of each page of the buffer into the MDL's page array. */
565 for (i
=0; i
< PageCount
; i
++)
567 *MdlPages
++ = MmGetPfnForProcess(NULL
, (char*)Mdl
->StartVa
+ (i
* PAGE_SIZE
));
570 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
/* The buffer is already in kernel space; its own address is the mapping. */
572 Mdl
->MappedSystemVa
= (char*)Mdl
->StartVa
+ Mdl
->ByteOffset
;
/*
 * MmCreateMdl
 * Allocates (when Mdl is NULL) and initializes an MDL for the buffer
 * [Base, Base+Length). Allocation size comes from MmSizeOfMdl; pool tag is
 * TAG_MDL.
 * NOTE(review): garbled chunk — the return type, the Base/Length parameter
 * lines, braces, the assignment target and NULL-check around the
 * ExAllocatePoolWithTag call (original lines ~598-605), and the final
 * return are missing here.
 */
580 MmCreateMdl (PMDL Mdl
,
584 * FUNCTION: Allocates and initalizes an MDL
586 * MemoryDescriptorList = Points to MDL to initalize. If this is
587 * NULL then one is allocated
588 * Base = Base virtual address of the buffer
589 * Length = Length in bytes of the buffer
590 * RETURNS: A pointer to initalized MDL
597 Size
= MmSizeOfMdl(Base
,Length
);
/* NOTE(review): the lvalue receiving this allocation is missing from this chunk. */
599 (PMDL
)ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
606 MmInitializeMdl(Mdl
, (char*)Base
, Length
);
/*
 * MmMapMemoryDumpMdl — fragment only.
 * NOTE(review): the return type and body are missing from this garbled chunk.
 */
616 MmMapMemoryDumpMdl (PVOID Unknown0
)
618 * FIXME: Has something to do with crash dumps. Do we want to implement
/*
 * MmAllocatePagesForMdl
 * Allocates nonpaged physical pages in [LowAddress, HighAddress] into a
 * freshly created MDL, advancing the range by SkipBytes between allocation
 * attempts. On partial success the MDL's ByteCount is trimmed to what was
 * actually allocated. Pages are NOT mapped (caller's responsibility).
 * NOTE(review): garbled chunk — the return type, braces, declarations of
 * Mdl/Pages/Ret, the NULL-check after MmCreateMdl (original lines ~675-679),
 * most MmAllocPagesSpecifyRange arguments (original lines ~687-690), the
 * loop-exit on Ret, and the failure/return paths (original lines ~705-716)
 * are missing here.
 */
630 MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress
,
631 IN PHYSICAL_ADDRESS HighAddress
,
632 IN PHYSICAL_ADDRESS SkipBytes
,
633 IN SIZE_T Totalbytes
)
636 MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory pages to an MDL
638 MmAllocatePagesForMdlSearch the PFN database for free, zeroed or standby
639 pagesAllocates pages and puts in MDLDoes not map pages (caller responsibility)
640 Designed to be used by an AGP driver
642 LowAddress is the lowest acceptable physical address it wants to allocate
643 and HighAddress is the highest. SkipBytes are the number of bytes that the
644 kernel should keep free above LowAddress and below the address at which it
645 starts to allocate physical memory. TotalBytes are the number of bytes that
646 the driver wants to allocate. The return value of the function is a MDL
647 that if non-zero describes the physical memory the kernel has given the
648 driver. To access portions of the memory the driver must create sub-MDLs
649 from the returned MDL that describe appropriate portions of the physical
650 memory. When a driver wants to access physical memory described by a
651 sub-MDL it must map the sub-MDL using MmGetSystemAddressForMdlSafe.
658 ULONG NumberOfPagesWanted
, NumberOfPagesAllocated
;
661 DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
662 "SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
663 LowAddress
.QuadPart
, HighAddress
.QuadPart
,
664 SkipBytes
.QuadPart
, Totalbytes
);
666 /* SkipBytes must be a multiple of the page size */
667 if ((SkipBytes
.QuadPart
% PAGE_SIZE
) != 0)
669 DPRINT1("Warning: SkipBytes is not a multiple of PAGE_SIZE\n");
673 /* Allocate memory for the MDL */
674 Mdl
= MmCreateMdl(NULL
, 0, Totalbytes
);
680 /* Allocate pages into the MDL */
681 NumberOfPagesAllocated
= 0;
682 NumberOfPagesWanted
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
683 Pages
= (PPFN_TYPE
)(Mdl
+ 1);
/* Keep allocating runs until the request is satisfied or the range fails. */
684 while (NumberOfPagesWanted
> 0)
686 Ret
= MmAllocPagesSpecifyRange(
691 Pages
+ NumberOfPagesAllocated
);
695 NumberOfPagesAllocated
+= Ret
;
696 NumberOfPagesWanted
-= Ret
;
/* With SkipBytes == 0 there is nothing to advance; otherwise move the window. */
698 if (SkipBytes
.QuadPart
== 0)
700 LowAddress
.QuadPart
+= SkipBytes
.QuadPart
;
701 HighAddress
.QuadPart
+= SkipBytes
.QuadPart
;
704 if (NumberOfPagesAllocated
== 0)
/* Partial success: shrink the MDL's byte count to what was allocated. */
709 else if (NumberOfPagesWanted
> 0)
711 Mdl
->ByteCount
= (ULONG
)(NumberOfPagesAllocated
* PAGE_SIZE
);
712 /* FIXME: I don't know if Mdl->Size should also be changed -- blight */
/*
 * MmFreePagesFromMdl
 * Dereferences (frees) every physical page recorded in an MDL that was
 * populated by MmAllocatePagesForMdl. Does NOT free the MDL itself — the
 * caller must still ExFreePool it.
 * NOTE(review): garbled chunk — the return type, braces and the declarations
 * of NumberOfPages/Pages are missing here. If NumberOfPages were declared
 * unsigned, the `--NumberOfPages >= 0` condition below would never be false
 * (it must be a signed type) — confirm against the missing declaration.
 */
722 MmFreePagesFromMdl ( IN PMDL Mdl
)
725 Drivers use the MmFreePagesFromMdl, the kernel-mode equivalent of
726 FreeUserPhysicalPages, to free the physical memory it has allocated with
727 MmAllocatePagesForMdl. This function is also prototyped in ntddk.h:
729 Note that a driver is responsible for deallocating the MDL returned by
730 MmAllocatePagesForMdl with a call to ExFreePool, since MmFreePagesFromMdl
731 does not free the MDL.
739 NumberOfPages
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
740 Pages
= (PPFN_TYPE
)(Mdl
+ 1);
/* Release pages in reverse order of the MDL's PFN array. */
742 while (--NumberOfPages
>= 0)
744 MmDereferencePage(Pages
[NumberOfPages
]);
/*
 * MmMapLockedPagesSpecifyCache
 * Maps the physical pages described by a locked MDL into virtual address
 * space. For UserMode a MEMORY_AREA is created in the current process; for
 * KernelMode a run of pages is taken from the dedicated MDL mapping region
 * (bitmap-allocated under MiMdlMappingRegionLock). Each page is then given a
 * virtual mapping with PAGE_READWRITE (plus PAGE_NOCACHE for MmNonCached;
 * MmWriteCombined is not supported). Returns the mapped address plus
 * ByteOffset. Failure either returns NULL (MDL_MAPPING_CAN_FAIL /
 * !BugCheckOnFailure), raises STATUS_ACCESS_VIOLATION, or bugchecks.
 * NOTE(review): garbled chunk — the return type, braces, declarations of
 * Base/Status/Protect/MdlPages/PageCount/oldIrql/i, the user-path base
 * address argument to MmCreateMemoryArea (original lines ~797-802), the
 * failure-branch returns (original ~808-816), the per-page mapping loop
 * head and trailing MmCreateVirtualMapping(Unsafe) arguments (original
 * ~869-878), and the KeBugCheck call (original ~898-900) are missing here.
 */
753 MmMapLockedPagesSpecifyCache ( IN PMDL Mdl
,
754 IN KPROCESSOR_MODE AccessMode
,
755 IN MEMORY_CACHING_TYPE CacheType
,
756 IN PVOID BaseAddress
,
757 IN ULONG BugCheckOnFailure
,
758 IN MM_PAGE_PRIORITY Priority
)
764 ULONG StartingOffset
;
765 PEPROCESS CurrentProcess
;
769 DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
770 "BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
771 Mdl
, AccessMode
, CacheType
, BaseAddress
, BugCheckOnFailure
, Priority
);
773 /* FIXME: Implement Priority */
776 /* Calculate the number of pages required. */
777 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
/* User-mode mapping: carve a MEMORY_AREA out of the current process. */
779 if (AccessMode
== UserMode
)
782 LARGE_INTEGER BoundaryAddressMultiple
;
785 /* pretty sure you can't map partial mdl's to user space */
786 ASSERT(!(Mdl
->MdlFlags
& MDL_PARTIAL
));
788 BoundaryAddressMultiple
.QuadPart
= 0;
791 CurrentProcess
= PsGetCurrentProcess();
793 MmLockAddressSpace(&CurrentProcess
->AddressSpace
);
794 Status
= MmCreateMemoryArea(CurrentProcess
,
795 &CurrentProcess
->AddressSpace
,
796 MEMORY_AREA_MDL_MAPPING
,
798 PageCount
* PAGE_SIZE
,
799 0, /* PAGE_READWRITE? */
803 BoundaryAddressMultiple
);
804 MmUnlockAddressSpace(&CurrentProcess
->AddressSpace
);
805 if (!NT_SUCCESS(Status
))
807 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
812 /* Throw exception */
813 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
/* Remember the owning process so MmUnmapLockedPages can assert on it. */
817 Mdl
->Process
= CurrentProcess
;
819 else /* if (AccessMode == KernelMode) */
821 /* can't map mdl twice */
822 ASSERT(!(Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|MDL_PARTIAL_HAS_BEEN_MAPPED
)));
823 /* can't map mdl buildt from non paged pool into kernel space */
824 ASSERT(!(Mdl
->MdlFlags
& (MDL_SOURCE_IS_NONPAGED_POOL
)));
826 CurrentProcess
= NULL
;
828 /* Allocate that number of pages from the mdl mapping region. */
829 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
831 StartingOffset
= RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap
, PageCount
, MiMdlMappingRegionHint
);
833 if (StartingOffset
== 0xffffffff)
835 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
837 DPRINT1("Out of MDL mapping space\n");
839 if ((Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) || !BugCheckOnFailure
)
847 Base
= (PVOID
)((ULONG_PTR
)MiMdlMappingRegionBase
+ StartingOffset
* PAGE_SIZE
);
/* Advance the hint past the run we just took, if we started at the hint. */
849 if (MiMdlMappingRegionHint
== StartingOffset
)
851 MiMdlMappingRegionHint
+= PageCount
;
854 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
859 /* Set the virtual mappings for the MDL pages. */
860 MdlPages
= (PULONG
)(Mdl
+ 1);
862 Protect
= PAGE_READWRITE
;
863 if (CacheType
== MmNonCached
)
864 Protect
|= PAGE_NOCACHE
;
865 else if (CacheType
== MmWriteCombined
)
866 DPRINT("CacheType MmWriteCombined not supported!\n");
/* I/O-space PFNs have no PFN-database entry, so use the Unsafe variant. */
867 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
868 Status
= MmCreateVirtualMappingUnsafe(CurrentProcess
,
874 Status
= MmCreateVirtualMapping(CurrentProcess
,
879 if (!NT_SUCCESS(Status
))
881 DbgPrint("Unable to create virtual mapping\n");
882 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
886 if (AccessMode
== UserMode
)
888 /* Throw exception */
889 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
892 else /* AccessMode == KernelMode */
894 if (!BugCheckOnFailure
)
897 /* FIXME: Use some bugcheck code instead of 0 */
902 /* Mark the MDL has having being mapped. */
903 if (AccessMode
== KernelMode
)
905 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
907 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
911 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
913 Mdl
->MappedSystemVa
= (char*)Base
+ Mdl
->ByteOffset
;
916 DPRINT1("UserMode mapping - returning 0x%x\n", (ULONG
)Base
+ Mdl
->ByteOffset
);
/* Return the address of the buffer's first byte within the mapping. */
918 return((char*)Base
+ Mdl
->ByteOffset
);
926 MmMapLockedPages(PMDL Mdl
, KPROCESSOR_MODE AccessMode
)
928 * FUNCTION: Maps the physical pages described by a given MDL
930 * Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
931 * MmAllocatePagesForMdl or IoBuildPartialMdl.
932 * AccessMode = Specifies the portion of the address space to map the
934 * RETURNS: The base virtual address that maps the locked pages for the
935 * range described by the MDL
937 * If mapping into user space, pages are mapped into current address space.
940 return MmMapLockedPagesSpecifyCache(Mdl
,