3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
11 /* INCLUDES ****************************************************************/
15 #include <internal/debug.h>
17 /* GLOBALS *******************************************************************/
/* Pool tag used when allocating MDLs from nonpaged pool ('MDL '). */
19 #define TAG_MDL TAG('M', 'D', 'L', ' ')
/* Size of the kernel VA region reserved for mapping locked MDL pages (256 MB). */
21 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
/* Base of the MDL mapping region; set up in MmInitializeMdlImplementation. */
23 static PVOID MiMdlMappingRegionBase
= NULL
;
/* One bit per page slot in the mapping region; a set bit means "in use". */
24 static RTL_BITMAP MiMdlMappingRegionAllocMap
;
/* Search hint: lowest bitmap index that may still be free. */
25 static ULONG MiMdlMappingRegionHint
;
/* Protects the allocation bitmap and the hint above. */
26 static KSPIN_LOCK MiMdlMappingRegionLock
;
/* Number of physical page frames; PFNs >= this value are treated as I/O space
 * elsewhere in this file (see MmProbeAndLockPages). */
27 extern ULONG MmPageArraySize
;
32 MDL_PAGES_LOCKED MmProbeAndLockPages has been called for this mdl
33 MDL_SOURCE_IS_NONPAGED_POOL mdl has been build by MmBuildMdlForNonPagedPool
34 MDL_PARTIAL mdl has been built by IoBuildPartialMdl
35 MDL_MAPPING_CAN_FAIL in case of an error, MmMapLockedPages will return NULL instead of bugchecking
36 MDL_MAPPED_TO_SYSTEM_VA mdl has been mapped into kernel space using MmMapLockedPages
37 MDL_PARTIAL_HAS_BEEN_MAPPED mdl flagged MDL_PARTIAL has been mapped into kernel space using MmMapLockedPages
40 /* FUNCTIONS *****************************************************************/
/* NOTE(review): fragment — the declaration that begins this stub is missing
 * from this view.  What remains is an unimplemented routine taking (at least)
 * a byte count and returning STATUS_NOT_IMPLEMENTED. */
50 IN ULONG NumberOfBytes
54 return STATUS_NOT_IMPLEMENTED
;
/*
 * Initializes MDL support: reserves a 256 MB kernel memory area for mapping
 * locked MDL pages, and sets up the allocation bitmap, search hint and
 * spinlock that manage page slots inside that region.
 * NOTE(review): several original lines are missing from this view (the
 * remaining MmCreateMemoryArea arguments, the failure path after the
 * NT_SUCCESS check, and the pool-tag argument of ExAllocatePoolWithTag).
 */
61 MmInitializeMdlImplementation(VOID
)
66 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
/* No physical boundary restriction for the mapping region. */
68 BoundaryAddressMultiple
.QuadPart
= 0;
69 MiMdlMappingRegionHint
= 0;
70 MiMdlMappingRegionBase
= NULL
;
/* Reserve the MDL mapping region in the kernel address space. */
72 MmLockAddressSpace(MmGetKernelAddressSpace());
73 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
74 MEMORY_AREA_MDL_MAPPING
,
75 &MiMdlMappingRegionBase
,
76 MI_MDL_MAPPING_REGION_SIZE
,
81 BoundaryAddressMultiple
);
82 if (!NT_SUCCESS(Status
))
84 MmUnlockAddressSpace(MmGetKernelAddressSpace());
87 MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* Bitmap backing store: one bit per page -> region_size / PAGE_SIZE bits,
 * i.e. region_size / (PAGE_SIZE * 8) bytes. */
89 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
90 MI_MDL_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8),
93 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap
, Buffer
, MI_MDL_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
/* All page slots start out free. */
94 RtlClearAllBits(&MiMdlMappingRegionAllocMap
);
96 KeInitializeSpinLock(&MiMdlMappingRegionLock
);
102 MmGetMdlPageAddress(PMDL Mdl
, PVOID Offset
)
104 PPFN_NUMBER MdlPages
;
106 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
108 return((PVOID
)MdlPages
[((ULONG_PTR
)Offset
) / PAGE_SIZE
]);
/*
 * Unlocks the physical pages described by a given MDL: undoes any kernel
 * mapping made via MmGetSystemAddressForMdl, dereferences each page unless
 * the MDL describes I/O space or nonpaged pool, and clears MDL_PAGES_LOCKED.
 * NOTE(review): several original lines are missing from this view (the loop
 * index declaration, the per-iteration lookup that assigns Page, braces and
 * returns); comments below describe only what is visible.
 */
116 MmUnlockPages(PMDL Mdl
)
118 * FUNCTION: Unlocks the physical pages described by a given MDL
120 * MemoryDescriptorList = MDL describing the buffer to be unlocked
121 * NOTES: The memory described by the specified MDL must have been locked
122 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
125 * May be called in any process context.
129 PPFN_NUMBER MdlPages
;
133 * MmProbeAndLockPages MUST have been called to lock this mdl!
135 * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
136 * locked with MmLockAndProbePages, but (for now) we'll be more forgiving...
138 if (!(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
))
140 DPRINT1("MmUnlockPages called for non-locked mdl!\n");
144 /* If mdl buffer is mapped io space -> do nothing */
145 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
/* I/O-space pages have no PFN database entries to dereference. */
147 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
151 /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
152 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
154 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
158 * FIXME: I don't know whether this right, but it looks sensible
/* Nonpaged-pool and page-read MDLs skip the per-page dereference below. */
160 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) ||
161 (Mdl
->MdlFlags
& MDL_IO_PAGE_READ
))
/* The PFN array lives immediately after the MDL header. */
167 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
/* Drop one reference per page spanned by ByteOffset..ByteOffset+ByteCount. */
168 for (i
=0; i
<(PAGE_ROUND_UP(Mdl
->ByteCount
+Mdl
->ByteOffset
)/PAGE_SIZE
); i
++)
172 MmDereferencePage(Page
);
175 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
/* NOTE(review): signature fragment only — the function body (original lines
 * after 188) is missing from this view; presumably an unimplemented stub.
 * TODO confirm against the full file. */
184 MmMapLockedPagesWithReservedMapping (
185 IN PVOID MappingAddress
,
187 IN PMDL MemoryDescriptorList
,
188 IN MEMORY_CACHING_TYPE CacheType
/*
 * Releases a mapping created by MmMapLockedPages: deletes the per-page
 * virtual mappings, then either returns the pages to the kernel MDL mapping
 * region (system addresses) or frees the user-space memory area.
 * NOTE(review): missing from this view: local declarations (i, PageCount,
 * Base, oldIrql, Marea), the trailing MmDeleteVirtualMapping arguments,
 * braces and early returns.
 */
200 MmUnmapLockedPages(PVOID BaseAddress
, PMDL Mdl
)
202 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
204 * BaseAddress = Base virtual address to which the pages were mapped
205 * MemoryDescriptorList = MDL describing the mapped pages
207 * User space unmappings _must_ be done from the original process context!
215 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress
, Mdl
);
/* Nonpaged-pool MDLs mapped at system addresses were never given a separate
 * mapping, so there is nothing to free for them. */
218 * In this case, the MDL has the same system address as the base address
219 * so there is no need to free it
221 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) &&
222 (BaseAddress
>= MmSystemRangeStart
))
228 /* Calculate the number of pages we mapped. */
229 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
232 * Docs says that BaseAddress should be a _base_ address, but every example
233 * I've seen pass the actual address. -Gunnar
235 BaseAddress
= PAGE_ALIGN(BaseAddress
);
237 /* Unmap all the pages. */
238 for (i
= 0; i
< PageCount
; i
++)
240 MmDeleteVirtualMapping(Mdl
->Process
,
241 (char*)BaseAddress
+ (i
* PAGE_SIZE
),
/* System-address case: give the page slots back to the MDL mapping region. */
247 if (BaseAddress
>= MmSystemRangeStart
)
249 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
251 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
252 /* Deallocate all the pages used. */
253 Base
= (ULONG
)((char*)BaseAddress
- (char*)MiMdlMappingRegionBase
) / PAGE_SIZE
;
255 RtlClearBits(&MiMdlMappingRegionAllocMap
, Base
, PageCount
);
/* Keep the hint pointing at the lowest possibly-free slot. */
257 MiMdlMappingRegionHint
= min (MiMdlMappingRegionHint
, Base
);
259 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
261 /* Reset the MDL state. */
262 Mdl
->MdlFlags
&= ~MDL_MAPPED_TO_SYSTEM_VA
;
263 Mdl
->MappedSystemVa
= NULL
;
/* User-address case: must run in the original process context. */
270 ASSERT(Mdl
->Process
== PsGetCurrentProcess());
272 Marea
= MmLocateMemoryAreaByAddress( &Mdl
->Process
->AddressSpace
, BaseAddress
);
275 DPRINT1( "Couldn't open memory area when unmapping user-space pages!\n" );
279 MmFreeMemoryArea( &Mdl
->Process
->AddressSpace
, Marea
, NULL
, NULL
);
/* NOTE(review): signature fragment only — the function body is missing from
 * this view; presumably an unimplemented stub.  TODO confirm. */
292 MmUnmapReservedMapping (
293 IN PVOID BaseAddress
,
295 IN PMDL MemoryDescriptorList
304 MmBuildMdlFromPages(PMDL Mdl
, PPFN_TYPE Pages
)
306 memcpy(Mdl
+ 1, Pages
, sizeof(PFN_TYPE
) * (PAGE_ROUND_UP(Mdl
->ByteOffset
+Mdl
->ByteCount
)/PAGE_SIZE
));
308 /* FIXME: this flag should be set by the caller perhaps? */
309 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
/* NOTE(review): fragment — the declaration that begins this stub is missing
 * from this view.  Takes a count and an array of PREAD_LIST and returns
 * STATUS_NOT_IMPLEMENTED (unimplemented prefetch-style routine). */
319 IN ULONG NumberOfLists
,
320 IN PREAD_LIST
*ReadLists
324 return STATUS_NOT_IMPLEMENTED
;
/* Unimplemented stub: always reports STATUS_NOT_IMPLEMENTED.
 * NOTE(review): the remaining parameter lines and braces are missing from
 * this view. */
333 MmProtectMdlSystemAddress (
334 IN PMDL MemoryDescriptorList
,
339 return STATUS_NOT_IMPLEMENTED
;
/*
 * Probes the buffer described by the MDL, makes its pages resident, locks
 * and references them, and fills in the MDL's PFN array.  Raises
 * STATUS_ACCESS_VIOLATION (after rolling back references taken so far) when
 * a page cannot be faulted in or made writable for a write/modify operation.
 * NOTE(review): many original lines are missing from this view (local
 * declarations for MdlPages/NrPages/i/j/Page/Address/Status, the Page lookup
 * inside the two rollback loops, the Mode assignment, braces, returns);
 * comments below describe only what is visible.
 */
346 VOID STDCALL
MmProbeAndLockPages (PMDL Mdl
,
347 KPROCESSOR_MODE AccessMode
,
348 LOCK_OPERATION Operation
)
350 * FUNCTION: Probes the specified pages, makes them resident and locks them
353 * AccessMode = Access at which to probe the buffer
354 * Operation = Operation to probe for
356 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool
357 * used in cases where you know that the mdl address is paged memory or
358 * you don't know where the mdl address comes from. MmProbeAndLockPages will
359 * work no matter what kind of mdl address you have.
366 KPROCESSOR_MODE Mode
;
368 PEPROCESS CurrentProcess
= PsGetCurrentProcess();
369 PMADDRESS_SPACE AddressSpace
;
371 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl
);
/* The MDL must not already be locked, mapped, partial or nonpaged-pool. */
373 ASSERT(!(Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|MDL_MAPPED_TO_SYSTEM_VA
|MDL_PARTIAL
|
374 MDL_IO_SPACE
|MDL_SOURCE_IS_NONPAGED_POOL
)));
/* PFN array follows the MDL header; NrPages = pages spanned by the buffer. */
376 MdlPages
= (PPFN_TYPE
)(Mdl
+ 1);
377 NrPages
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
379 /* mdl must have enough page entries */
380 ASSERT(NrPages
<= (Mdl
->Size
- sizeof(MDL
))/sizeof(PFN_TYPE
));
/* Kernel address whose PFN is beyond physical memory => I/O space: record
 * the PFNs directly, no referencing possible. */
383 if (Mdl
->StartVa
>= MmSystemRangeStart
&&
384 MmGetPfnForProcess(NULL
, Mdl
->StartVa
) >= MmPageArraySize
)
386 /* phys addr is not phys memory so this must be io memory */
388 for (i
= 0; i
< NrPages
; i
++)
390 MdlPages
[i
] = MmGetPfnForProcess(NULL
, (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
));
393 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
|MDL_IO_SPACE
;
/* Choose the address space to probe in: kernel vs. current process. */
398 if (Mdl
->StartVa
>= MmSystemRangeStart
)
400 /* FIXME: why isn't AccessMode used? */
403 AddressSpace
= MmGetKernelAddressSpace();
407 /* FIXME: why isn't AccessMode used? */
409 Mdl
->Process
= CurrentProcess
;
410 AddressSpace
= &CurrentProcess
->AddressSpace
;
417 MmLockAddressSpace(AddressSpace
);
419 for (i
= 0; i
< NrPages
; i
++)
423 Address
= (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
);
426 * FIXME: skip the probing/access stuff if buffer is nonpaged kernel space?
/* Fault the page in if it is not present; on failure roll back all
 * references taken so far and raise an access violation. */
430 if (!MmIsPagePresent(NULL
, Address
))
432 Status
= MmNotPresentFault(Mode
, (ULONG_PTR
)Address
, TRUE
);
433 if (!NT_SUCCESS(Status
))
435 for (j
= 0; j
< i
; j
++)
438 if (Page
< MmPageArraySize
)
441 MmDereferencePage(Page
);
444 MmUnlockAddressSpace(AddressSpace
);
445 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
450 MmLockPage(MmGetPfnForProcess(NULL
, Address
));
/* For write/modify operations, force the page writable (copy-on-write
 * style fault); same rollback-and-raise path on failure. */
453 if ((Operation
== IoWriteAccess
|| Operation
== IoModifyAccess
) &&
454 (!(MmGetPageProtect(NULL
, (PVOID
)Address
) & PAGE_READWRITE
)))
456 Status
= MmAccessFault(Mode
, (ULONG_PTR
)Address
, TRUE
);
457 if (!NT_SUCCESS(Status
))
459 for (j
= 0; j
< i
; j
++)
462 if (Page
< MmPageArraySize
)
465 MmDereferencePage(Page
);
468 MmUnlockAddressSpace(AddressSpace
);
469 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
472 Page
= MmGetPfnForProcess(NULL
, Address
);
/* PFNs beyond physical memory mark the MDL as I/O space; real pages get
 * an extra reference to pin them. */
474 if (Page
>= MmPageArraySize
)
475 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
477 MmReferencePage(Page
);
480 MmUnlockAddressSpace(AddressSpace
);
481 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
/* NOTE(review): signature fragment only — the function body is missing from
 * this view; presumably an unimplemented stub.  TODO confirm. */
490 MmProbeAndLockProcessPages (
491 IN OUT PMDL MemoryDescriptorList
,
492 IN PEPROCESS Process
,
493 IN KPROCESSOR_MODE AccessMode
,
494 IN LOCK_OPERATION Operation
/* NOTE(review): signature fragment only — the function body is missing from
 * this view; presumably an unimplemented stub.  TODO confirm. */
506 MmProbeAndLockSelectedPages(
507 IN OUT PMDL MemoryDescriptorList
,
508 IN LARGE_INTEGER PageList
[],
509 IN KPROCESSOR_MODE AccessMode
,
510 IN LOCK_OPERATION Operation
520 ULONG STDCALL
MmSizeOfMdl (PVOID Base
,
523 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
524 * the given address range
526 * Base = base virtual address
527 * Length = number of bytes to map
532 len
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
,Length
);
534 return(sizeof(MDL
)+(len
*sizeof(PFN_TYPE
)));
542 MmBuildMdlForNonPagedPool (PMDL Mdl
)
544 * FUNCTION: Fills in the corresponding physical page array of a given
545 * MDL for a buffer in nonpaged system space
547 * Mdl = Points to an MDL that supplies a virtual address,
548 * byte offset and length
550 * This function can be seen as a fast version of MmProbeAndLockPages in case
551 * you _know_ that the mdl address is within nonpaged kernel space.
559 * mdl buffer must (at least) be in kernel space, thou this doesn't
560 * necesarely mean that the buffer in within _nonpaged_ kernel space...
562 ASSERT(Mdl
->StartVa
>= MmSystemRangeStart
);
564 PageCount
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
565 MdlPages
= (PPFN_TYPE
)(Mdl
+ 1);
567 /* mdl must have enough page entries */
568 ASSERT(PageCount
<= (Mdl
->Size
- sizeof(MDL
))/sizeof(PFN_TYPE
));
570 for (i
=0; i
< PageCount
; i
++)
572 *MdlPages
++ = MmGetPfnForProcess(NULL
, (char*)Mdl
->StartVa
+ (i
* PAGE_SIZE
));
575 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
577 Mdl
->MappedSystemVa
= (char*)Mdl
->StartVa
+ Mdl
->ByteOffset
;
585 MmCreateMdl (PMDL Mdl
,
589 * FUNCTION: Allocates and initalizes an MDL
591 * MemoryDescriptorList = Points to MDL to initalize. If this is
592 * NULL then one is allocated
593 * Base = Base virtual address of the buffer
594 * Length = Length in bytes of the buffer
595 * RETURNS: A pointer to initalized MDL
602 Size
= MmSizeOfMdl(Base
,Length
);
604 (PMDL
)ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
611 MmInitializeMdl(Mdl
, (char*)Base
, Length
);
/* NOTE(review): fragment — return type and body are missing from this view.
 * Crash-dump related routine, unimplemented. */
621 MmMapMemoryDumpMdl (PVOID Unknown0
)
623 * FIXME: Has something to do with crash dumps. Do we want to implement
/*
 * Allocates nonpaged physical pages into a freshly created MDL, searching
 * successive physical ranges [LowAddress, HighAddress] advanced by SkipBytes
 * until Totalbytes worth of pages are found or no more can be allocated.
 * The MDL's ByteCount is trimmed to what was actually allocated.  The pages
 * are NOT mapped; the caller frees them with MmFreePagesFromMdl and the MDL
 * itself with ExFreePool.
 * NOTE(review): missing from this view: local declarations (Mdl, Pages,
 * Ret), the remaining MmAllocPagesSpecifyRange arguments, the NULL/early
 * returns and the final return of the MDL; comments describe only what is
 * visible.
 */
635 MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress
,
636 IN PHYSICAL_ADDRESS HighAddress
,
637 IN PHYSICAL_ADDRESS SkipBytes
,
638 IN SIZE_T Totalbytes
)
641 MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory pages to an MDL
643 MmAllocatePagesForMdlSearch the PFN database for free, zeroed or standby
644 pagesAllocates pages and puts in MDLDoes not map pages (caller responsibility)
645 Designed to be used by an AGP driver
647 LowAddress is the lowest acceptable physical address it wants to allocate
648 and HighAddress is the highest. SkipBytes are the number of bytes that the
649 kernel should keep free above LowAddress and below the address at which it
650 starts to allocate physical memory. TotalBytes are the number of bytes that
651 the driver wants to allocate. The return value of the function is a MDL
652 that if non-zero describes the physical memory the kernel has given the
653 driver. To access portions of the memory the driver must create sub-MDLs
654 from the returned MDL that describe appropriate portions of the physical
655 memory. When a driver wants to access physical memory described by a
656 sub-MDL it must map the sub-MDL using MmGetSystemAddressForMdlSafe.
663 ULONG NumberOfPagesWanted
, NumberOfPagesAllocated
;
666 DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
667 "SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
668 LowAddress
.QuadPart
, HighAddress
.QuadPart
,
669 SkipBytes
.QuadPart
, Totalbytes
);
671 /* SkipBytes must be a multiple of the page size */
672 if ((SkipBytes
.QuadPart
% PAGE_SIZE
) != 0)
674 DPRINT1("Warning: SkipBytes is not a multiple of PAGE_SIZE\n");
678 /* Allocate memory for the MDL */
/* A zero base: the MDL describes pages by PFN only, not a virtual range. */
679 Mdl
= MmCreateMdl(NULL
, 0, Totalbytes
);
685 /* Allocate pages into the MDL */
686 NumberOfPagesAllocated
= 0;
687 NumberOfPagesWanted
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
688 Pages
= (PPFN_TYPE
)(Mdl
+ 1);
/* Keep allocating from successive physical windows until satisfied. */
689 while (NumberOfPagesWanted
> 0)
691 Ret
= MmAllocPagesSpecifyRange(
696 Pages
+ NumberOfPagesAllocated
);
/* -1 signals that no page could be allocated in the current range. */
697 if (Ret
== (ULONG
)-1)
700 NumberOfPagesAllocated
+= Ret
;
701 NumberOfPagesWanted
-= Ret
;
/* With no skip distance there is only one window to try. */
703 if (SkipBytes
.QuadPart
== 0)
705 LowAddress
.QuadPart
+= SkipBytes
.QuadPart
;
706 HighAddress
.QuadPart
+= SkipBytes
.QuadPart
;
709 if (NumberOfPagesAllocated
== 0)
/* Partial success: shrink ByteCount to the pages actually obtained. */
714 else if (NumberOfPagesWanted
> 0)
716 Mdl
->ByteCount
= (ULONG
)(NumberOfPagesAllocated
* PAGE_SIZE
);
717 /* FIXME: I don't know if Mdl->Size should also be changed -- blight */
727 MmFreePagesFromMdl ( IN PMDL Mdl
)
730 Drivers use the MmFreePagesFromMdl, the kernel-mode equivalent of
731 FreeUserPhysicalPages, to free the physical memory it has allocated with
732 MmAllocatePagesForMdl. This function is also prototyped in ntddk.h:
734 Note that a driver is responsible for deallocating the MDL returned by
735 MmAllocatePagesForMdl with a call to ExFreePool, since MmFreePagesFromMdl
736 does not free the MDL.
744 NumberOfPages
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
745 Pages
= (PPFN_TYPE
)(Mdl
+ 1);
747 while (--NumberOfPages
>= 0)
749 MmDereferencePage(Pages
[NumberOfPages
]);
/*
 * Maps the locked pages of an MDL into virtual address space and returns
 * the mapped address (base + ByteOffset).  User-mode requests get a new
 * memory area in the current process; kernel-mode requests get page slots
 * from the global MDL mapping region (bitmap-managed, spinlock-protected).
 * NOTE(review): many original lines are missing from this view (local
 * declarations for Base/PageCount/MdlPages/Protect/Status/oldIrql, the
 * remaining MmCreateMemoryArea and MmCreateVirtualMapping(Unsafe) arguments,
 * the KEBUGCHECK calls and several returns); comments describe only what is
 * visible.
 */
758 MmMapLockedPagesSpecifyCache ( IN PMDL Mdl
,
759 IN KPROCESSOR_MODE AccessMode
,
760 IN MEMORY_CACHING_TYPE CacheType
,
761 IN PVOID BaseAddress
,
762 IN ULONG BugCheckOnFailure
,
763 IN MM_PAGE_PRIORITY Priority
)
769 ULONG StartingOffset
;
770 PEPROCESS CurrentProcess
;
774 DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
775 "BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
776 Mdl
, AccessMode
, CacheType
, BaseAddress
, BugCheckOnFailure
, Priority
);
778 /* FIXME: Implement Priority */
/* Translate the caching type into page protection bits. */
781 Protect
= PAGE_READWRITE
;
782 if (CacheType
== MmNonCached
)
783 Protect
|= PAGE_NOCACHE
;
784 else if (CacheType
== MmWriteCombined
)
785 DPRINT("CacheType MmWriteCombined not supported!\n");
787 /* Calculate the number of pages required. */
788 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
/* User-mode mapping: create a memory area in the current process. */
790 if (AccessMode
!= KernelMode
)
793 LARGE_INTEGER BoundaryAddressMultiple
;
796 /* pretty sure you can't map partial mdl's to user space */
797 ASSERT(!(Mdl
->MdlFlags
& MDL_PARTIAL
));
799 BoundaryAddressMultiple
.QuadPart
= 0;
802 CurrentProcess
= PsGetCurrentProcess();
804 MmLockAddressSpace(&CurrentProcess
->AddressSpace
);
805 Status
= MmCreateMemoryArea(&CurrentProcess
->AddressSpace
,
806 MEMORY_AREA_MDL_MAPPING
,
808 PageCount
* PAGE_SIZE
,
813 BoundaryAddressMultiple
);
814 MmUnlockAddressSpace(&CurrentProcess
->AddressSpace
);
/* On failure: return NULL if MDL_MAPPING_CAN_FAIL, otherwise raise. */
815 if (!NT_SUCCESS(Status
))
817 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
822 /* Throw exception */
823 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
/* Remember which process owns the user-space mapping. */
827 Mdl
->Process
= CurrentProcess
;
829 else /* if (AccessMode == KernelMode) */
831 /* can't map mdl twice */
832 ASSERT(!(Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|MDL_PARTIAL_HAS_BEEN_MAPPED
)));
833 /* can't map mdl buildt from non paged pool into kernel space */
834 ASSERT(!(Mdl
->MdlFlags
& (MDL_SOURCE_IS_NONPAGED_POOL
)));
836 CurrentProcess
= NULL
;
838 /* Allocate that number of pages from the mdl mapping region. */
839 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
841 StartingOffset
= RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap
, PageCount
, MiMdlMappingRegionHint
);
/* 0xffffffff: no run of PageCount free slots was found. */
843 if (StartingOffset
== 0xffffffff)
845 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
847 DPRINT1("Out of MDL mapping space\n");
849 if ((Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) || !BugCheckOnFailure
)
857 Base
= (PVOID
)((ULONG_PTR
)MiMdlMappingRegionBase
+ StartingOffset
* PAGE_SIZE
);
/* Advance the hint past the slots we just took. */
859 if (MiMdlMappingRegionHint
== StartingOffset
)
861 MiMdlMappingRegionHint
+= PageCount
;
864 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
869 /* Set the virtual mappings for the MDL pages. */
870 MdlPages
= (PULONG
)(Mdl
+ 1);
/* I/O-space PFNs need the unsafe variant (no PFN database checks). */
872 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
873 Status
= MmCreateVirtualMappingUnsafe(CurrentProcess
,
879 Status
= MmCreateVirtualMapping(CurrentProcess
,
884 if (!NT_SUCCESS(Status
))
886 DbgPrint("Unable to create virtual mapping\n");
887 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
891 if (AccessMode
!= KernelMode
)
893 /* Throw exception */
894 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
897 else /* AccessMode == KernelMode */
899 if (!BugCheckOnFailure
)
902 /* FIXME: Use some bugcheck code instead of 0 */
907 /* Mark the MDL has having being mapped. */
908 if (AccessMode
== KernelMode
)
910 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
912 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
916 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
918 Mdl
->MappedSystemVa
= (char*)Base
+ Mdl
->ByteOffset
;
921 DPRINT1("UserMode mapping - returning 0x%x\n", (ULONG
)Base
+ Mdl
->ByteOffset
);
/* Return the mapped address of the buffer, not of the page base. */
923 return((char*)Base
+ Mdl
->ByteOffset
);
931 MmMapLockedPages(PMDL Mdl
, KPROCESSOR_MODE AccessMode
)
933 * FUNCTION: Maps the physical pages described by a given MDL
935 * Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
936 * MmAllocatePagesForMdl or IoBuildPartialMdl.
937 * AccessMode = Specifies the portion of the address space to map the
939 * RETURNS: The base virtual address that maps the locked pages for the
940 * range described by the MDL
942 * If mapping into user space, pages are mapped into current address space.
945 return MmMapLockedPagesSpecifyCache(Mdl
,