2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/mdl.c
5 * PURPOSE: Manipulates MDLs
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
10 /* INCLUDES ****************************************************************/
/* NOTE(review): this listing is a partial extraction — the leading integers on
   each line are the original file's line numbers, and gaps in them mean lines
   (includes, braces, statements) are missing from this view. */
16 #if defined (ALLOC_PRAGMA)
17 #pragma alloc_text(INIT, MmInitializeMdlImplementation)
20 /* GLOBALS *******************************************************************/
/* Pool tag used for MDL allocations ('MDL '). */
22 #define TAG_MDL TAG('M', 'D', 'L', ' ')
/* Size of the kernel virtual region reserved for mapping MDLs: 256 MB. */
23 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
/* Base VA of the MDL mapping region, set by MmInitializeMdlImplementation. */
25 PVOID MiMdlMappingRegionBase
= NULL
;
/* Bitmap tracking which pages of the mapping region are in use. */
26 RTL_BITMAP MiMdlMappingRegionAllocMap
;
/* Lowest bit index that may be free — search hint for the bitmap. */
27 ULONG MiMdlMappingRegionHint
;
/* Spinlock guarding the bitmap and the hint above. */
28 KSPIN_LOCK MiMdlMappingRegionLock
;
/* Number of entries in the PFN database, defined elsewhere; PFNs at or above
   this value are treated as I/O space in this file. */
29 extern ULONG MmPageArraySize
;
31 /* PRIVATE FUNCTIONS **********************************************************/
/* Initializes the MDL mapping machinery: carves a MI_MDL_MAPPING_REGION_SIZE
   memory area out of the kernel address space, allocates a nonpaged buffer for
   the allocation bitmap (one bit per page), clears it, and initializes the
   region spinlock.
   NOTE(review): several original lines are absent from this extraction (the
   embedded numbering jumps, e.g. 51->56, 57->59) — declarations of Status and
   Buffer, braces, and some MmCreateMemoryArea arguments are not visible. */
36 MmInitializeMdlImplementation(VOID
)
41 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
/* No boundary restriction for the region placement. */
43 BoundaryAddressMultiple
.QuadPart
= 0;
44 MiMdlMappingRegionHint
= 0;
45 MiMdlMappingRegionBase
= NULL
;
/* Reserve the MDL mapping region in the kernel address space. */
47 MmLockAddressSpace(MmGetKernelAddressSpace());
48 Status
= MmCreateMemoryArea(MmGetKernelAddressSpace(),
49 MEMORY_AREA_MDL_MAPPING
,
50 &MiMdlMappingRegionBase
,
51 MI_MDL_MAPPING_REGION_SIZE
,
56 BoundaryAddressMultiple
);
/* NOTE(review): the failure branch body (original line 58) is missing here —
   presumably a bugcheck or panic before the unlock. */
57 if (!NT_SUCCESS(Status
))
59 MmUnlockAddressSpace(MmGetKernelAddressSpace());
62 MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* One bit per page: region-size / (PAGE_SIZE * 8) bytes for the bitmap. */
64 Buffer
= ExAllocatePoolWithTag(NonPagedPool
,
65 MI_MDL_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8),
68 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap
, Buffer
, MI_MDL_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
/* All pages start out free. */
69 RtlClearAllBits(&MiMdlMappingRegionAllocMap
);
71 KeInitializeSpinLock(&MiMdlMappingRegionLock
);
74 /* PUBLIC FUNCTIONS ***********************************************************/
/* Allocates (if the caller passed no MDL) and initializes an MDL describing
   [Base, Base+Length). Returns NULL on allocation failure.
   NOTE(review): the "if (!Mdl)" guard around the allocation (original lines
   89-90) and the trailing return (after line 100) are missing from this view. */
82 MmCreateMdl(IN PMDL Mdl
,
88 /* Check if we don't have an MDL built */
91 /* Calculate the size we'll need and allocate the MDL */
92 Size
= MmSizeOfMdl(Base
, Length
);
93 Mdl
= ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
94 if (!Mdl
) return NULL
;
/* Fill in the MDL header for this buffer. */
98 MmInitializeMdl(Mdl
, Base
, Length
);
99 DPRINT("Creating MDL: %p\n", Mdl
);
100 DPRINT("Base: %p. Length: %lx\n", Base
, Length
);
/* Returns the number of bytes needed for an MDL describing [Base, Base+Length):
   the fixed MDL header plus one PFN_NUMBER per spanned page. */
109 MmSizeOfMdl(IN PVOID Base
,
112 /* Return the MDL size */
113 return sizeof(MDL
) + (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Length
) * sizeof(PFN_NUMBER
));
/* Fills in the page array of an MDL that describes nonpaged pool: the buffer
   is already resident, so each PFN is simply looked up and recorded, the
   system VA is set, and MDL_SOURCE_IS_NONPAGED_POOL is flagged.
   NOTE(review): extraction gaps — the Base assignment (presumably
   Base = Mdl->StartVa around original line 141) and loop braces are missing. */
121 MmBuildMdlForNonPagedPool(IN PMDL Mdl
)
125 PPFN_NUMBER MdlPages
;
127 DPRINT("Building MDL: %p\n", Mdl
);
/* Sanity: a non-empty MDL that has not already been locked/mapped. */
130 ASSERT(Mdl
->ByteCount
!= 0);
131 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
132 MDL_MAPPED_TO_SYSTEM_VA
|
133 MDL_SOURCE_IS_NONPAGED_POOL
|
136 /* We know the MDL isn't associated to a process now */
139 /* Get page and VA information */
/* Page array lives immediately after the MDL header. */
140 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
143 /* Set the system address and now get the page count */
144 Mdl
->MappedSystemVa
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
145 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl
->MappedSystemVa
, Mdl
->ByteCount
);
146 ASSERT(PageCount
!= 0);
148 /* Go through each page */
149 for (i
= 0; i
< PageCount
; i
++)
/* Nonpaged pool is always resident: just record each page's PFN. */
152 *MdlPages
++ = MmGetPfnForProcess(NULL
,
153 (PVOID
)((ULONG_PTR
)Base
+ (i
* PAGE_SIZE
)));
156 /* Set the final flag */
157 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
/* Releases the physical pages described by an MDL (the counterpart of
   MmAllocatePagesForMdl): dereferences every page in the MDL's page array and
   clears MDL_PAGES_LOCKED. Must run at <= APC_LEVEL and not on I/O-space MDLs. */
165 MmFreePagesFromMdl(IN PMDL Mdl
)
170 DPRINT("Freeing MDL: %p\n", Mdl
);
173 ASSERT(KeGetCurrentIrql() <= APC_LEVEL
);
174 ASSERT((Mdl
->MdlFlags
& MDL_IO_SPACE
) == 0);
/* StartVa must be page-aligned. */
175 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
177 /* Get address and page information */
178 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
179 NumberOfPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
181 /* Loop all the MDL pages */
182 Pages
= (PPFN_NUMBER
)(Mdl
+ 1);
/* NOTE(review): "--NumberOfPages >= 0" only terminates if NumberOfPages is a
   signed type — its declaration is not visible in this extraction; verify it
   is not ULONG, else this loop underflows and never exits. */
183 while (--NumberOfPages
>= 0)
185 /* Dereference each one of them */
186 MmDereferencePage(Pages
[NumberOfPages
]);
189 /* Remove the pages locked flag */
190 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
/* Legacy wrapper: maps a locked MDL by delegating to
   MmMapLockedPagesSpecifyCache with default arguments (the remaining
   arguments, original lines 203+, are outside this extraction). */
198 MmMapLockedPages(IN PMDL Mdl
,
199 IN KPROCESSOR_MODE AccessMode
)
201 /* Call the extended version */
202 return MmMapLockedPagesSpecifyCache(Mdl
,
/* Undoes MmProbeAndLockPages: unmaps any system-VA mapping, adjusts the owning
   process's locked-page accounting, dereferences every resident page, and
   clears MDL_PAGES_LOCKED (and MDL_IO_SPACE where set).
   NOTE(review): many original lines are absent here (branch bodies, the
   negative counts passed to InterlockedExchangeAddSizeT, the per-page
   MmUnlockPage call) — the comments below describe only what is visible. */
215 MmUnlockPages(IN PMDL Mdl
)
218 PPFN_NUMBER MdlPages
;
222 ULONG Flags
, PageCount
;
223 DPRINT("Unlocking MDL: %p\n", Mdl
);
/* Sanity: pages must be locked, and this must be a full, non-pool MDL. */
226 ASSERT((Mdl
->MdlFlags
& MDL_PAGES_LOCKED
) != 0);
227 ASSERT((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) == 0);
228 ASSERT((Mdl
->MdlFlags
& MDL_PARTIAL
) == 0);
229 ASSERT(Mdl
->ByteCount
!= 0);
231 /* Get the process associated and capture the flags which are volatile */
232 Process
= Mdl
->Process
;
233 Flags
= Mdl
->MdlFlags
;
235 /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
236 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
238 /* Unmap the pages from system space */
239 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
242 /* Get the page count */
243 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
244 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
245 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
246 ASSERT(PageCount
!= 0);
248 /* We don't support AWE */
249 if (Flags
& MDL_DESCRIBES_AWE
) ASSERT(FALSE
);
251 /* Check if the buffer is mapped I/O space */
252 if (Flags
& MDL_IO_SPACE
)
254 /* Check if this was a write */
255 if (Flags
& MDL_WRITE_OPERATION
)
257 /* Windows keeps track of the modified bit */
260 /* Check if we have a process */
263 /* Handle the accounting of locked pages */
264 /* ASSERT(Process->NumberOfLockedPages >= 0); */ // always true
/* I/O-space path: only the accounting needs updating; the pages themselves
   are not in the PFN database so nothing is dereferenced. */
265 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
/* Strip the I/O-space and locked flags and return (return not visible). */
270 Mdl
->MdlFlags
&= ~MDL_IO_SPACE
;
271 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
275 /* Check if we have a process */
278 /* Handle the accounting of locked pages */
279 /* ASSERT(Process->NumberOfLockedPages >= 0); */ // always true
280 InterlockedExchangeAddSizeT(&Process
->NumberOfLockedPages
,
/* Normal path: dereference every page that was referenced at probe time. */
285 for (i
= 0; i
< PageCount
; i
++)
287 /* Get the page entry */
289 /* Unlock and dereference it */
292 MmDereferencePage(Page
);
296 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
/* Removes a mapping previously created by MmMapLockedPages*: for kernel
   (system-space) mappings it deletes the per-page virtual mappings and frees
   the MDL mapping-region bitmap bits; for user mappings it frees the memory
   area in the owning process's VAD tree.
   NOTE(review): extraction gaps — the Mdl parameter line, several branch
   bodies, and the extra arguments to MmDeleteVirtualMapping /
   MmLocateMemoryAreaByAddress / MmFreeMemoryArea are not visible. */
304 MmUnmapLockedPages(IN PVOID BaseAddress
,
310 MEMORY_AREA
*MemoryArea
;
311 DPRINT("Unmapping MDL: %p\n", Mdl
);
312 DPRINT("Base: %p\n", BaseAddress
);
315 ASSERT(Mdl
->ByteCount
!= 0);
317 /* Check if this is a kernel request */
318 if (BaseAddress
> MM_HIGHEST_USER_ADDRESS
)
320 /* Get base and count information */
321 Base
= (ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
;
322 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
/* Sanity: must actually be a system-VA mapping of a non-child MDL. */
325 ASSERT((Mdl
->MdlFlags
& MDL_PARENT_MAPPED_SYSTEM_VA
) == 0);
326 ASSERT(PageCount
!= 0);
327 ASSERT(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
329 /* ReactOS does not support this flag */
330 if (Mdl
->MdlFlags
& MDL_FREE_EXTRA_PTES
) ASSERT(FALSE
);
/* Clear the mapping-related flags up front. */
333 Mdl
->MdlFlags
&= ~(MDL_MAPPED_TO_SYSTEM_VA
|
334 MDL_PARTIAL_HAS_BEEN_MAPPED
|
335 MDL_FREE_EXTRA_PTES
);
337 /* If we came from non-paged pool, on ReactOS, we can leave */
338 if (Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) return;
/* Tear down each page's virtual mapping in the MDL region. */
341 BaseAddress
= PAGE_ALIGN(BaseAddress
);
342 for (i
= 0; i
< PageCount
; i
++)
345 MmDeleteVirtualMapping(NULL
,
346 (PVOID
)((ULONG_PTR
)BaseAddress
+ (i
* PAGE_SIZE
)),
352 /* Lock the mapping region */
353 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
355 /* Deallocate all the pages used. */
/* Convert the VA back to a bit index into the region bitmap. */
356 Base
= ((ULONG_PTR
)BaseAddress
- (ULONG_PTR
)MiMdlMappingRegionBase
) / PAGE_SIZE
;
357 RtlClearBits(&MiMdlMappingRegionAllocMap
, Base
, PageCount
);
/* Keep the search hint at the lowest possibly-free bit. */
358 MiMdlMappingRegionHint
= min(MiMdlMappingRegionHint
, Base
);
360 /* Release the lock */
361 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
/* User-mode path: only the owning process may unmap its own mapping. */
366 ASSERT(Mdl
->Process
== PsGetCurrentProcess());
368 /* Find the memory area */
369 MemoryArea
= MmLocateMemoryAreaByAddress(&Mdl
->Process
->VadRoot
,
374 MmFreeMemoryArea(&Mdl
->Process
->VadRoot
,
/* Probes the MDL's buffer for accessibility under AccessMode/Operation, faults
   pages in as needed, references and locks each page, fills in the MDL's PFN
   array, and sets MDL_PAGES_LOCKED. Raises STATUS_ACCESS_VIOLATION on probe
   failure. I/O-space buffers (PFN >= MmPageArraySize) take a short path that
   just records PFNs and sets MDL_IO_SPACE.
   NOTE(review): this extraction drops many lines — declarations of MdlPages/
   Base/Address/NrPages/i/j/Page/Thread, braces, else keywords, the per-page
   PFN stores in the main loop, and failure-cleanup bodies. Comments below are
   limited to what the visible lines establish. */
386 MmProbeAndLockPages(IN PMDL Mdl
,
387 IN KPROCESSOR_MODE AccessMode
,
388 IN LOCK_OPERATION Operation
)
394 NTSTATUS Status
= STATUS_SUCCESS
;
396 PEPROCESS CurrentProcess
;
398 PMM_AVL_TABLE AddressSpace
;
/* NOTE(review): OldIrql is first used to decide whether the address space may
   be locked (< DISPATCH_LEVEL below), then overwritten as the out-parameter of
   MmAcquirePageListLock — intentional double duty, but worth confirming. */
399 KIRQL OldIrql
= KeGetCurrentIrql();
400 DPRINT("Probing MDL: %p\n", Mdl
);
/* Sanity: non-empty, page-aligned, not yet locked/mapped MDL. */
403 ASSERT(Mdl
->ByteCount
!= 0);
404 ASSERT(((ULONG
)Mdl
->ByteOffset
& ~(PAGE_SIZE
- 1)) == 0);
405 ASSERT(((ULONG_PTR
)Mdl
->StartVa
& (PAGE_SIZE
- 1)) == 0);
406 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|
407 MDL_MAPPED_TO_SYSTEM_VA
|
408 MDL_SOURCE_IS_NONPAGED_POOL
|
410 MDL_IO_SPACE
)) == 0);
412 /* Get page and base information */
413 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
414 Base
= (PVOID
)Mdl
->StartVa
;
415 Address
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
416 NrPages
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address
, Mdl
->ByteCount
);
417 ASSERT(NrPages
!= 0);
419 /* Check if this is an MDL in I/O Space */
/* PFNs beyond the PFN database are device memory, not RAM. */
420 if (Mdl
->StartVa
>= MmSystemRangeStart
&&
421 MmGetPfnForProcess(NULL
, Mdl
->StartVa
) >= MmPageArraySize
)
423 /* Just loop each page */
424 for (i
= 0; i
< NrPages
; i
++)
427 MdlPages
[i
] = MmGetPfnForProcess(NULL
,
428 (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ (i
* PAGE_SIZE
)));
431 /* Set the flags and exit */
432 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
|MDL_IO_SPACE
;
436 /* Get the thread and process */
437 Thread
= PsGetCurrentThread();
438 if (Address
<= MM_HIGHEST_USER_ADDRESS
)
440 /* Get the process */
441 CurrentProcess
= PsGetCurrentProcess();
/* Kernel-mode buffer: no owning process (else-branch, brace lines absent). */
446 CurrentProcess
= NULL
;
449 /* Check what kind of operation this is */
450 if (Operation
!= IoReadAccess
)
452 /* Set the write flag */
453 Mdl
->MdlFlags
|= MDL_WRITE_OPERATION
;
457 /* Remove the write flag */
458 Mdl
->MdlFlags
&= ~(MDL_WRITE_OPERATION
);
461 /* Check if this came from kernel mode */
462 if (Base
>= MM_HIGHEST_USER_ADDRESS
)
464 /* We should not have a process */
465 ASSERT(CurrentProcess
== NULL
);
467 AddressSpace
= MmGetKernelAddressSpace();
472 ASSERT(NrPages
!= 0);
473 ASSERT(CurrentProcess
== PsGetCurrentProcess());
475 /* Track locked pages */
476 InterlockedExchangeAddSizeT(&CurrentProcess
->NumberOfLockedPages
,
479 /* Save the process */
480 Mdl
->Process
= CurrentProcess
;
482 /* Use the process lock */
483 AddressSpace
= &CurrentProcess
->VadRoot
;
/* Address-space lock cannot be taken at or above DISPATCH_LEVEL. */
490 if (OldIrql
< DISPATCH_LEVEL
)
491 MmLockAddressSpace(AddressSpace
);
493 MmAcquirePageListLock(&OldIrql
);
/* Main probe loop: fault in, lock, permission-check, and reference each page. */
495 for (i
= 0; i
< NrPages
; i
++)
499 Address
= (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
);
501 if (!MmIsPagePresent(NULL
, Address
))
503 /* Fault the page in */
504 Status
= MmAccessFault(FALSE
, Address
, AccessMode
, NULL
);
/* Failure body (original lines 506-510) not visible in this extraction. */
505 if (!NT_SUCCESS(Status
))
512 MmLockPage(MmGetPfnForProcess(NULL
, Address
));
/* Write/modify operations need PAGE_READWRITE; fault for write access if
   the current protection lacks it (e.g. copy-on-write). */
515 if ((Operation
== IoWriteAccess
|| Operation
== IoModifyAccess
) &&
516 (!(MmGetPageProtect(NULL
, (PVOID
)Address
) & PAGE_READWRITE
)))
518 Status
= MmAccessFault(TRUE
, Address
, AccessMode
, NULL
);
519 if (!NT_SUCCESS(Status
))
/* Roll back: dereference the pages already referenced (RAM pages only). */
521 for (j
= 0; j
< i
; j
++)
524 if (Page
< MmPageArraySize
)
527 MmDereferencePage(Page
);
533 Page
= MmGetPfnForProcess(NULL
, Address
);
535 if (Page
>= MmPageArraySize
)
537 Mdl
->MdlFlags
|= MDL_IO_SPACE
;
541 MmReferencePage(Page
);
546 if (OldIrql
< DISPATCH_LEVEL
)
547 MmUnlockAddressSpace(AddressSpace
);
549 MmReleasePageListLock(OldIrql
);
/* Any probe failure is surfaced as an access violation, per the API contract. */
551 if (!NT_SUCCESS(Status
))
552 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
553 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
/* Allocates physical pages in [LowAddress, HighAddress] and returns an MDL
   describing them. May return fewer pages than requested (ByteCount is trimmed
   to what was actually allocated). SkipBytes advances the search window
   between allocation attempts and must be page-aligned.
   NOTE(review): extraction gaps — declarations of Mdl/Pages/Ret/Size, loop and
   branch braces, the middle arguments of MmAllocPagesSpecifyRange, and the
   failure-branch body are not visible here. */
562 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress
,
563 IN PHYSICAL_ADDRESS HighAddress
,
564 IN PHYSICAL_ADDRESS SkipBytes
,
565 IN SIZE_T Totalbytes
)
569 ULONG NumberOfPagesWanted
, NumberOfPagesAllocated
;
571 DPRINT("Allocating pages: %p\n", LowAddress
.LowPart
);
573 /* SkipBytes must be a multiple of the page size */
574 if (BYTE_OFFSET(SkipBytes
.LowPart
)) return NULL
;
576 /* Create the actual MDL */
577 Mdl
= MmCreateMdl(NULL
, NULL
, Totalbytes
);
578 if (!Mdl
) return NULL
;
580 /* Allocate pages into the MDL */
581 NumberOfPagesAllocated
= 0;
582 NumberOfPagesWanted
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
/* PFN array follows the MDL header. */
583 Pages
= (PPFN_TYPE
)(Mdl
+ 1);
584 while (NumberOfPagesWanted
> 0)
586 Ret
= MmAllocPagesSpecifyRange(MC_NPPOOL
,
590 Pages
+ NumberOfPagesAllocated
);
/* (ULONG)-1 signals the range is exhausted. */
591 if (Ret
== (ULONG
)-1) break;
593 NumberOfPagesAllocated
+= Ret
;
594 NumberOfPagesWanted
-= Ret
;
/* With no skip stride there is nowhere else to look — stop after one pass. */
596 if (SkipBytes
.QuadPart
== 0) break;
597 LowAddress
.QuadPart
+= SkipBytes
.QuadPart
;
598 HighAddress
.QuadPart
+= SkipBytes
.QuadPart
;
601 /* If nothing was allocated, fail */
/* NOTE(review): the visible condition tests NumberOfPagesAllocated being
   non-zero, the opposite sense of the comment above — the branch body
   (original lines 603-608, presumably freeing the MDL and returning NULL) is
   missing from this extraction, so the real shape cannot be confirmed. */
602 if (NumberOfPagesAllocated
)
609 /* Zero out the MDL pages */
610 //RtlZeroMemory(LowAddress.LowPart, NumberOfPagesAllocated * PAGE_SIZE);
/* Mark the pages locked and shrink ByteCount to what we actually got. */
613 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
614 Mdl
->ByteCount
= (ULONG
)(NumberOfPagesAllocated
* PAGE_SIZE
);
/* Extended variant of MmAllocatePagesForMdl taking a caching type (and, per
   the embedded numbering, at least one more parameter on the missing original
   line 628). NOTE(review): the entire body (original lines 628-638) is absent
   from this extraction — nothing about its behavior can be stated here. */
623 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress
,
624 IN PHYSICAL_ADDRESS HighAddress
,
625 IN PHYSICAL_ADDRESS SkipBytes
,
626 IN SIZE_T Totalbytes
,
627 IN MEMORY_CACHING_TYPE CacheType
,
/* Maps the pages of a locked MDL into virtual address space and returns the
   mapped base (offset by ByteOffset). KernelMode requests carve pages out of
   the global MDL mapping region via the bitmap; UserMode requests create a
   memory area in the current process's VAD tree. On failure returns NULL when
   MDL_MAPPING_CAN_FAIL (or !BugCheckOnFailure for kernel), otherwise
   bugchecks (kernel) or raises STATUS_ACCESS_VIOLATION (user).
   NOTE(review): extraction gaps — declarations of Base/Protect/MdlPages/
   PageCount/oldIrql/i, braces, else keywords, the per-page mapping loops, the
   PageCount argument to RtlFindClearBitsAndSet, and the trailing returns are
   not visible. */
639 MmMapLockedPagesSpecifyCache(IN PMDL Mdl
,
640 IN KPROCESSOR_MODE AccessMode
,
641 IN MEMORY_CACHING_TYPE CacheType
,
642 IN PVOID BaseAddress
,
643 IN ULONG BugCheckOnFailure
,
644 IN MM_PAGE_PRIORITY Priority
)
650 ULONG StartingOffset
;
651 PEPROCESS CurrentProcess
;
655 LARGE_INTEGER BoundaryAddressMultiple
;
656 DPRINT("Mapping MDL: %p\n", Mdl
);
657 DPRINT("Base: %p\n", BaseAddress
);
660 ASSERT(Mdl
->ByteCount
!= 0);
663 Base
= (PVOID
)((ULONG_PTR
)Mdl
->StartVa
+ Mdl
->ByteOffset
);
665 /* Set default page protection */
666 Protect
= PAGE_READWRITE
;
667 if (CacheType
== MmNonCached
) Protect
|= PAGE_NOCACHE
;
669 /* Handle kernel case first */
670 if (AccessMode
== KernelMode
)
672 /* Get the list of pages and count */
673 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
674 PageCount
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
, Mdl
->ByteCount
);
/* Sanity: locked or partial, and not already mapped anywhere. */
677 ASSERT((Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|
678 MDL_SOURCE_IS_NONPAGED_POOL
|
679 MDL_PARTIAL_HAS_BEEN_MAPPED
)) == 0);
680 ASSERT((Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
| MDL_PARTIAL
)) != 0);
682 /* Allocate that number of pages from the mdl mapping region. */
683 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
684 StartingOffset
= RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap
,
686 MiMdlMappingRegionHint
);
/* 0xffffffff is RtlFindClearBitsAndSet's "no run found" sentinel. */
687 if (StartingOffset
== 0xffffffff)
689 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
690 DPRINT("Out of MDL mapping space\n");
/* Soft failure returns NULL; otherwise bugcheck (bodies on missing lines). */
691 if ((Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) || !BugCheckOnFailure
)
/* Convert the bitmap offset back to a VA inside the mapping region. */
697 Base
= (PVOID
)((ULONG_PTR
)MiMdlMappingRegionBase
+ StartingOffset
* PAGE_SIZE
);
/* Advance the hint past the run we just took, if it pointed at it. */
698 if (MiMdlMappingRegionHint
== StartingOffset
) MiMdlMappingRegionHint
+= PageCount
;
699 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
701 /* Set the virtual mappings for the MDL pages. */
/* I/O-space PFNs bypass the PFN-database checks (Unsafe variant). */
702 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
705 Status
= MmCreateVirtualMappingUnsafe(NULL
,
714 Status
= MmCreateVirtualMapping(NULL
,
721 /* Check if the mapping succeeded */
722 if (!NT_SUCCESS(Status
))
724 /* If it can fail, return NULL */
725 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
727 /* Should we bugcheck? */
728 if (!BugCheckOnFailure
) return NULL
;
730 /* Yes, crash the system */
731 KeBugCheckEx(NO_MORE_SYSTEM_PTES
, 0, PageCount
, 0, 0);
734 /* Mark it as mapped */
735 ASSERT((Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
) == 0);
736 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
738 /* Check if it was partial */
739 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
741 /* Write the appropriate flag here too */
742 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
745 /* Save the mapped address */
746 Base
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
747 Mdl
->MappedSystemVa
= Base
;
/* --- UserMode path: map into the current process's address space. --- */
752 /* Calculate the number of pages required. */
753 MdlPages
= (PPFN_NUMBER
)(Mdl
+ 1);
754 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
756 BoundaryAddressMultiple
.QuadPart
= 0;
759 CurrentProcess
= PsGetCurrentProcess();
/* Reserve a memory area for the mapping in the process's VAD tree. */
761 MmLockAddressSpace(&CurrentProcess
->VadRoot
);
762 Status
= MmCreateMemoryArea(&CurrentProcess
->VadRoot
,
763 MEMORY_AREA_MDL_MAPPING
,
765 PageCount
* PAGE_SIZE
,
770 BoundaryAddressMultiple
);
771 MmUnlockAddressSpace(&CurrentProcess
->VadRoot
);
772 if (!NT_SUCCESS(Status
))
/* Return NULL when allowed (body on missing lines), else raise. */
774 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
779 /* Throw exception */
780 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
784 /* Set the virtual mappings for the MDL pages. */
785 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
788 Status
= MmCreateVirtualMappingUnsafe(CurrentProcess
,
797 Status
= MmCreateVirtualMapping(CurrentProcess
,
804 /* Check if the mapping succeeded */
805 if (!NT_SUCCESS(Status
))
807 /* If it can fail, return NULL */
808 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
) return NULL
;
810 /* Throw exception */
811 ExRaiseStatus(STATUS_ACCESS_VIOLATION
);
814 /* Return the base */
815 Base
= (PVOID
)((ULONG_PTR
)Base
+ Mdl
->ByteOffset
);
/* --- Unimplemented API stubs ---------------------------------------------
   The routines below are placeholders: where a body is visible it only
   returns STATUS_NOT_IMPLEMENTED. Return-type lines, braces, and (for some)
   whole bodies fall on lines missing from this extraction. */

/* Stub: trimming an MDL's described range is not implemented. */
824 MmAdvanceMdl(IN PMDL Mdl
,
825 IN ULONG NumberOfBytes
)
828 return STATUS_NOT_IMPLEMENTED
;
/* Stub: mapping into a pre-reserved system VA range (body not visible). */
836 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress
,
838 IN PMDL MemoryDescriptorList
,
839 IN MEMORY_CACHING_TYPE CacheType
)
/* Stub: counterpart unmap for reserved mappings (body not visible). */
850 MmUnmapReservedMapping(IN PVOID BaseAddress
,
852 IN PMDL MemoryDescriptorList
)
/* Stub: read-list prefetching is not implemented. */
862 MmPrefetchPages(IN ULONG NumberOfLists
,
863 IN PREAD_LIST
*ReadLists
)
866 return STATUS_NOT_IMPLEMENTED
;
/* Stub: changing protection on an MDL's system mapping is not implemented. */
874 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList
,
878 return STATUS_NOT_IMPLEMENTED
;
/* Stub: cross-process probe-and-lock (body not visible). */
886 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList
,
887 IN PEPROCESS Process
,
888 IN KPROCESSOR_MODE AccessMode
,
889 IN LOCK_OPERATION Operation
)
/* Stub: probe-and-lock of an explicit page list (body not visible). */
900 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList
,
901 IN LARGE_INTEGER PageList
[],
902 IN KPROCESSOR_MODE AccessMode
,
903 IN LOCK_OPERATION Operation
)
913 MmMapMemoryDumpMdl(IN PMDL Mdl
)