1 /* $Id: mdl.c,v 1.68 2004/08/15 16:39:07 chorns Exp $
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 * PROGRAMMER: David Welch (welch@cwcom.net)
12 /* INCLUDES ****************************************************************/
16 #include <internal/debug.h>
18 /* GLOBALS *******************************************************************/
/* Pool tag used when allocating MDLs ('MMDL'); see MmCreateMdl. */
20 #define TAG_MDL TAG('M', 'M', 'D', 'L')
/* Size of the kernel VA region reserved for mapping locked MDL pages (256 MB). */
22 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
/* Base of the MDL mapping region; set by MmInitializeMdlImplementation. */
24 static PVOID MiMdlMappingRegionBase
= NULL
;
/* One bit per page of the mapping region (set = page slot in use);
 * initialized over MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE bits. */
25 static RTL_BITMAP MiMdlMappingRegionAllocMap
;
/* Page index at which the search for free mapping slots starts
 * (passed to RtlFindClearBitsAndSet in MmMapLockedPages). */
26 static ULONG MiMdlMappingRegionHint
;
/* Protects the allocation bitmap and the hint above. */
27 static KSPIN_LOCK MiMdlMappingRegionLock
;
/* Defined elsewhere; compared against PFNs in MmProbeAndLockPages —
 * presumably the number of physical pages in the system (TODO confirm). */
28 extern ULONG MmPageArraySize
;
33 MDL_PAGES_LOCKED MmProbeAndLockPages has been called for this mdl
34 MDL_SOURCE_IS_NONPAGED_POOL mdl has been built by MmBuildMdlForNonPagedPool
35 MDL_PARTIAL mdl has been built by IoBuildPartialMdl
36 MDL_MAPPING_CAN_FAIL in case of an error, MmMapLockedPages will return NULL instead of bugchecking
37 MDL_MAPPED_TO_SYSTEM_VA mdl has been mapped into kernel space using MmMapLockedPages
38 MDL_PARTIAL_HAS_BEEN_MAPPED mdl flagged MDL_PARTIAL has been mapped into kernel space using MmMapLockedPages
41 /* FUNCTIONS *****************************************************************/
/* NOTE(review): fragment of an unimplemented NTSTATUS stub — the function
 * name and the rest of its signature are missing from this excerpt
 * (presumably MmAdvanceMdl; confirm against the full source). */
50 IN ULONG NumberOfBytes
54 return STATUS_NOT_IMPLEMENTED
;
/* Initializes the MDL mapping machinery: reserves the MDL mapping region
 * in the kernel address space, allocates and clears the page-allocation
 * bitmap, and initializes its spinlock.
 * NOTE(review): this excerpt is incomplete — the return type, braces,
 * some MmCreateMemoryArea arguments and the error-path body are missing. */
58 MmInitializeMdlImplementation(VOID
)
63 PHYSICAL_ADDRESS BoundaryAddressMultiple
;
65 BoundaryAddressMultiple
.QuadPart
= 0;
66 MiMdlMappingRegionHint
= 0;
67 MiMdlMappingRegionBase
= NULL
;
/* Reserve the MI_MDL_MAPPING_REGION_SIZE region under the kernel
 * address-space lock; MiMdlMappingRegionBase receives the base address. */
69 MmLockAddressSpace(MmGetKernelAddressSpace());
70 Status
= MmCreateMemoryArea(NULL
,
71 MmGetKernelAddressSpace(),
72 MEMORY_AREA_MDL_MAPPING
,
73 &MiMdlMappingRegionBase
,
74 MI_MDL_MAPPING_REGION_SIZE
,
79 BoundaryAddressMultiple
);
80 if (!NT_SUCCESS(Status
))
82 MmUnlockAddressSpace(MmGetKernelAddressSpace());
85 MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* Bitmap backing store: one bit per page, i.e. SIZE/(PAGE_SIZE*8) bytes
 * for SIZE/PAGE_SIZE bits.
 * NOTE(review): the ExAllocatePool result is not checked before use. */
87 Buffer
= ExAllocatePool(NonPagedPool
, MI_MDL_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8));
89 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap
, Buffer
, MI_MDL_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
/* All slots start free. */
90 RtlClearAllBits(&MiMdlMappingRegionAllocMap
);
92 KeInitializeSpinLock(&MiMdlMappingRegionLock
);
/* Returns the entry of the MDL's page array corresponding to the byte at
 * Offset into the described buffer. Offset is a byte offset despite its
 * PVOID type. NOTE(review): return type and braces are missing from this
 * excerpt. */
96 MmGetMdlPageAddress(PMDL Mdl
, PVOID Offset
)
/* The page array immediately follows the MDL header in memory. */
100 MdlPages
= (PULONG
)(Mdl
+ 1);
102 return((PVOID
)MdlPages
[((ULONG
)Offset
) / PAGE_SIZE
]);
/* Unlocks (and dereferences) the physical pages previously locked by
 * MmProbeAndLockPages. NOTE(review): several interior lines (braces,
 * early returns, the Page lookup inside the loop) are missing from this
 * excerpt. */
110 MmUnlockPages(PMDL Mdl
)
112 * FUNCTION: Unlocks the physical pages described by a given MDL
114 * MemoryDescriptorList = MDL describing the buffer to be unlocked
115 * NOTES: The memory described by the specified MDL must have been locked
116 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
119 * May be called in any process context.
127 * MmProbeAndLockPages MUST have been called to lock this mdl!
129 * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
130 * locked with MmProbeAndLockPages, but (for now) we'll be more forgiving...
/* Unlike Windows, tolerate a non-locked MDL: just log and (presumably)
 * bail out — the return statement is missing from this excerpt. */
132 if (!(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
))
134 DPRINT1("MmUnlockPages called for non-locked mdl!\n");
138 /* If mdl buffer is mapped io space -> do nothing */
139 if (Mdl
->MdlFlags
& MDL_IO_SPACE
)
/* I/O-space pages have no PFN refcounts to drop; just clear the flag. */
141 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
145 /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
146 if (Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
)
148 MmUnmapLockedPages(Mdl
->MappedSystemVa
, Mdl
);
152 * FIXME: I don't know whether this is right, but it looks sensible
154 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) ||
155 (Mdl
->MdlFlags
& MDL_IO_PAGE_READ
))
/* Drop the reference taken on each page when the MDL was locked.
 * NOTE(review): the statement fetching Page inside the loop is missing
 * from this excerpt. */
161 MdlPages
= (PULONG
)(Mdl
+ 1);
162 for (i
=0; i
<(PAGE_ROUND_UP(Mdl
->ByteCount
+Mdl
->ByteOffset
)/PAGE_SIZE
); i
++)
166 MmDereferencePage(Page
);
169 Mdl
->MdlFlags
&= ~MDL_PAGES_LOCKED
;
/* Maps the pages of a locked MDL into either the current process (UserMode)
 * or the shared kernel MDL mapping region (KernelMode) and returns the
 * virtual address of the buffer (base + ByteOffset).
 * NOTE(review): this excerpt is incomplete — the return type, braces,
 * several declarations (PageCount, MdlPages, Base, oldIrql, Status), the
 * remaining MmCreateMemoryArea/MmCreateVirtualMapping arguments and the
 * failure-path bodies are missing. */
178 MmMapLockedPages(PMDL Mdl
, KPROCESSOR_MODE AccessMode
)
180 * FUNCTION: Maps the physical pages described by a given MDL
182 * Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool
183 * or IoBuildPartialMdl.
184 * AccessMode = Specifies the portion of the address space to map the
186 * RETURNS: The base virtual address that maps the locked pages for the
187 * range described by the MDL
189 * If mapping into user space, pages are mapped into current address space.
196 ULONG StartingOffset
;
197 PEPROCESS CurrentProcess
;
200 DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl
, AccessMode
);
202 /* Calculate the number of pages required. */
203 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
205 if (AccessMode
== UserMode
)
208 LARGE_INTEGER BoundaryAddressMultiple
;
211 /* pretty sure you can't map partial mdl's to user space */
212 assert(!(Mdl
->MdlFlags
& MDL_PARTIAL
));
214 BoundaryAddressMultiple
.QuadPart
= 0;
217 CurrentProcess
= PsGetCurrentProcess();
/* Reserve a PageCount-page memory area in the caller's address space. */
219 MmLockAddressSpace(&CurrentProcess
->AddressSpace
);
220 Status
= MmCreateMemoryArea(CurrentProcess
,
221 &CurrentProcess
->AddressSpace
,
222 MEMORY_AREA_MDL_MAPPING
,
224 PageCount
* PAGE_SIZE
,
225 0, /* PAGE_READWRITE? */
229 BoundaryAddressMultiple
);
230 MmUnlockAddressSpace(&CurrentProcess
->AddressSpace
);
231 if (!NT_SUCCESS(Status
))
/* On failure: return NULL if the caller allows it, otherwise the
 * (missing) bugcheck path runs. */
233 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
239 /* FIXME: handle this? */
/* Remember the owning process so MmUnmapLockedPages can verify context. */
242 Mdl
->Process
= CurrentProcess
;
244 else /* if (AccessMode == KernelMode) */
246 /* can't map mdl twice */
247 assert(!(Mdl
->MdlFlags
& (MDL_MAPPED_TO_SYSTEM_VA
|MDL_PARTIAL_HAS_BEEN_MAPPED
)));
248 /* can't map mdl built from non paged pool into kernel space */
249 assert(!(Mdl
->MdlFlags
& (MDL_SOURCE_IS_NONPAGED_POOL
)));
251 CurrentProcess
= NULL
;
253 /* Allocate that number of pages from the mdl mapping region. */
254 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
/* Find PageCount consecutive free page slots, starting at the hint. */
256 StartingOffset
= RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap
, PageCount
, MiMdlMappingRegionHint
);
258 if (StartingOffset
== 0xffffffff)
260 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
262 DPRINT1("Out of MDL mapping space\n");
264 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
272 Base
= (char*)MiMdlMappingRegionBase
+ StartingOffset
* PAGE_SIZE
;
/* Advance the hint past the slots we just took, if we started there. */
274 if (MiMdlMappingRegionHint
== StartingOffset
)
276 MiMdlMappingRegionHint
+= PageCount
;
279 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
284 /* Set the virtual mappings for the MDL pages. */
285 MdlPages
= (PULONG
)(Mdl
+ 1);
287 Status
= MmCreateVirtualMapping(CurrentProcess
,
292 if (!NT_SUCCESS(Status
))
294 DbgPrint("Unable to create virtual mapping\n");
295 if (Mdl
->MdlFlags
& MDL_MAPPING_CAN_FAIL
)
302 /* Mark the MDL as having been mapped. */
303 if (AccessMode
== KernelMode
)
305 if (Mdl
->MdlFlags
& MDL_PARTIAL
)
307 Mdl
->MdlFlags
|= MDL_PARTIAL_HAS_BEEN_MAPPED
;
311 Mdl
->MdlFlags
|= MDL_MAPPED_TO_SYSTEM_VA
;
313 Mdl
->MappedSystemVa
= (char*)Base
+ Mdl
->ByteOffset
;
316 return((char*)Base
+ Mdl
->ByteOffset
);
/* NOTE(review): signature fragment only — return type, remaining
 * parameters and the body of MmMapLockedPagesWithReservedMapping are
 * missing from this excerpt. */
325 MmMapLockedPagesWithReservedMapping (
326 IN PVOID MappingAddress
,
328 IN PMDL MemoryDescriptorList
,
329 IN MEMORY_CACHING_TYPE CacheType
/* Undoes a MmMapLockedPages mapping: deletes the per-page virtual
 * mappings, then releases either the kernel mapping-region slots or the
 * user-space memory area.
 * NOTE(review): this excerpt is incomplete — the return type, braces,
 * declarations (PageCount, i, Base, oldIrql, Marea), the remaining
 * MmDeleteVirtualMapping arguments and several early returns are missing. */
340 MmUnmapLockedPages(PVOID BaseAddress
, PMDL Mdl
)
342 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
344 * BaseAddress = Base virtual address to which the pages were mapped
345 * MemoryDescriptorList = MDL describing the mapped pages
347 * User space unmappings _must_ be done from the original process context!
355 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress
, Mdl
);
358 * In this case, the MDL has the same system address as the base address
359 * so there is no need to free it
361 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) &&
362 ((ULONG_PTR
)BaseAddress
>= KERNEL_BASE
))
368 /* Calculate the number of pages we mapped. */
369 PageCount
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
372 * Docs says that BaseAddress should be a _base_ address, but every example
373 * I've seen pass the actual address. -Gunnar
375 BaseAddress
= PAGE_ALIGN(BaseAddress
);
377 /* Unmap all the pages. */
378 for (i
= 0; i
< PageCount
; i
++)
380 MmDeleteVirtualMapping(NULL
,
381 (char*)BaseAddress
+ (i
* PAGE_SIZE
),
/* NOTE(review): this kernel-range test casts to DWORD while the test at
 * the top of the function uses ULONG_PTR — inconsistent, and wrong on any
 * platform where pointers are wider than 32 bits; should be ULONG_PTR. */
387 if ((DWORD
)BaseAddress
>= KERNEL_BASE
)
389 assert(Mdl
->MdlFlags
& MDL_MAPPED_TO_SYSTEM_VA
);
391 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
392 /* Deallocate all the pages used. */
393 Base
= (ULONG
)((char*)BaseAddress
- (char*)MiMdlMappingRegionBase
) / PAGE_SIZE
;
395 RtlClearBits(&MiMdlMappingRegionAllocMap
, Base
, PageCount
);
/* Let the next search start at the lowest freed slot. */
397 MiMdlMappingRegionHint
= min (MiMdlMappingRegionHint
, Base
);
399 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
401 /* Reset the MDL state. */
402 Mdl
->MdlFlags
&= ~MDL_MAPPED_TO_SYSTEM_VA
;
403 Mdl
->MappedSystemVa
= NULL
;
/* User-space path: must run in the process that created the mapping. */
410 assert(Mdl
->Process
== PsGetCurrentProcess());
412 Marea
= MmOpenMemoryAreaByAddress( &Mdl
->Process
->AddressSpace
, BaseAddress
);
415 DPRINT1( "Couldn't open memory area when unmapping user-space pages!\n" );
419 MmFreeMemoryArea( &Mdl
->Process
->AddressSpace
, Marea
->BaseAddress
, 0, NULL
, NULL
);
/* NOTE(review): signature fragment only — return type, remaining
 * parameters and the body of MmUnmapReservedMapping are missing from
 * this excerpt. */
431 MmUnmapReservedMapping (
432 IN PVOID BaseAddress
,
434 IN PMDL MemoryDescriptorList
/* Copies a caller-supplied PFN array into the MDL's page array (the area
 * directly after the MDL header) and marks the MDL as an I/O page read.
 * NOTE(review): return type and braces are missing from this excerpt. */
442 MmBuildMdlFromPages(PMDL Mdl
, PPFN_TYPE Pages
)
/* Copy one PFN per page spanned by ByteOffset+ByteCount. */
444 memcpy(Mdl
+ 1, Pages
, sizeof(PFN_TYPE
) * (PAGE_ROUND_UP(Mdl
->ByteOffset
+Mdl
->ByteCount
)/PAGE_SIZE
));
446 //FIXME: this flag should be set by the caller perhaps?
447 Mdl
->MdlFlags
|= MDL_IO_PAGE_READ
;
/* NOTE(review): fragment of an unimplemented NTSTATUS stub taking a
 * READ_LIST array — the function name and the rest of its signature are
 * missing from this excerpt (presumably MmPrefetchPages; confirm against
 * the full source). */
456 IN ULONG NumberOfLists
,
457 IN PREAD_LIST
*ReadLists
461 return STATUS_NOT_IMPLEMENTED
;
/* Unimplemented stub: always reports STATUS_NOT_IMPLEMENTED.
 * NOTE(review): return type, remaining parameters and braces are missing
 * from this excerpt. */
469 MmProtectMdlSystemAddress (
470 IN PMDL MemoryDescriptorList
,
475 return STATUS_NOT_IMPLEMENTED
;
/* Probes the MDL's buffer, faults its pages in if needed, references and
 * locks them, and fills the MDL's page array. Raises the failure status
 * via ExRaiseStatus after rolling back already-referenced pages.
 * NOTE(review): this excerpt is incomplete — braces, several declarations
 * (MdlPages, NrPages, i, j, Address, Page, Status), the statements that
 * set Mode and fill MdlPages in the main loop, and the rollback bodies
 * are missing. */
482 VOID STDCALL
MmProbeAndLockPages (PMDL Mdl
,
483 KPROCESSOR_MODE AccessMode
,
484 LOCK_OPERATION Operation
)
486 * FUNCTION: Probes the specified pages, makes them resident and locks them
489 * AccessMode = Access at which to probe the buffer
490 * Operation = Operation to probe for
492 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool
493 * used in cases where you know that the mdl address is paged memory or
494 * you don't know where the mdl address comes from. MmProbeAndLockPages will
495 * work no matter what kind of mdl address you have.
502 KPROCESSOR_MODE Mode
;
504 PEPROCESS CurrentProcess
= PsGetCurrentProcess();
506 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl
);
/* The MDL must be fresh: not already locked, mapped, partial, I/O-space
 * or built from nonpaged pool. */
508 assert(!(Mdl
->MdlFlags
& (MDL_PAGES_LOCKED
|MDL_MAPPED_TO_SYSTEM_VA
|MDL_PARTIAL
|
509 MDL_IO_SPACE
|MDL_SOURCE_IS_NONPAGED_POOL
)));
511 MdlPages
= (PPFN_TYPE
)(Mdl
+ 1);
512 NrPages
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
514 /* mdl must have enough page entries */
515 assert(NrPages
<= (Mdl
->Size
- sizeof(MDL
))/sizeof(PFN_TYPE
));
/* Kernel-space buffer whose PFN lies beyond physical memory: treat the
 * whole range as I/O space — record the PFNs, set the flags and (per the
 * missing lines, presumably) return early. */
518 if (Mdl
->StartVa
>= (PVOID
)KERNEL_BASE
&&
519 MmGetPfnForProcess(NULL
, Mdl
->StartVa
) > MmPageArraySize
)
521 /* phys addr is not phys memory so this must be io memory */
523 for (i
= 0; i
< NrPages
; i
++)
525 MdlPages
[i
] = MmGetPfnForProcess(NULL
, (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
));
528 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
|MDL_IO_SPACE
;
/* Choose fault mode by buffer location (kernel vs user); the statements
 * assigning Mode are missing from this excerpt. */
533 if (Mdl
->StartVa
>= (PVOID
)KERNEL_BASE
)
535 //FIXME: why isn't AccessMode used?
541 //FIXME: why isn't AccessMode used?
543 Mdl
->Process
= CurrentProcess
;
550 MmLockAddressSpace(&CurrentProcess
->AddressSpace
);
552 for (i
= 0; i
< NrPages
; i
++)
556 Address
= (char*)Mdl
->StartVa
+ (i
*PAGE_SIZE
);
559 * FIXME: skip the probing/access stuff if buffer is nonpaged kernel space?
/* Fault the page in if it is not resident. */
563 if (!MmIsPagePresent(NULL
, Address
))
565 Status
= MmNotPresentFault(Mode
, (ULONG
)Address
, TRUE
);
566 if (!NT_SUCCESS(Status
))
/* Roll back: dereference the pages already referenced, then raise. */
568 for (j
= 0; j
< i
; j
++)
572 MmDereferencePage(Page
);
574 ExRaiseStatus(Status
);
579 MmLockPage(MmGetPfnForProcess(NULL
, Address
));
/* For write/modify operations the page must be writable; force a write
 * access fault if it is not. */
582 if ((Operation
== IoWriteAccess
|| Operation
== IoModifyAccess
) &&
583 (!(MmGetPageProtect(NULL
, (PVOID
)Address
) & PAGE_READWRITE
)))
585 Status
= MmAccessFault(Mode
, (ULONG
)Address
, TRUE
);
586 if (!NT_SUCCESS(Status
))
588 for (j
= 0; j
< i
; j
++)
592 MmDereferencePage(Page
);
594 ExRaiseStatus(Status
);
/* Take a reference on the now-resident page; the store into MdlPages[i]
 * is missing from this excerpt. */
597 Page
= MmGetPfnForProcess(NULL
, Address
);
599 MmReferencePage(Page
);
602 MmUnlockAddressSpace(&CurrentProcess
->AddressSpace
);
603 Mdl
->MdlFlags
|= MDL_PAGES_LOCKED
;
/* NOTE(review): signature fragment only — return type and body of
 * MmProbeAndLockProcessPages are missing from this excerpt. */
611 MmProbeAndLockProcessPages (
612 IN OUT PMDL MemoryDescriptorList
,
613 IN PEPROCESS Process
,
614 IN KPROCESSOR_MODE AccessMode
,
615 IN LOCK_OPERATION Operation
/* NOTE(review): signature fragment only — return type and body of
 * MmProbeAndLockSelectedPages are missing from this excerpt. */
626 MmProbeAndLockSelectedPages(
627 IN OUT PMDL MemoryDescriptorList
,
628 IN LARGE_INTEGER PageList
[],
629 IN KPROCESSOR_MODE AccessMode
,
630 IN LOCK_OPERATION Operation
/* Computes the allocation size for an MDL covering [Base, Base+Length):
 * header plus one PFN_TYPE per spanned page.
 * NOTE(review): the second parameter line (presumably ULONG Length),
 * braces and the declaration of len are missing from this excerpt. */
639 ULONG STDCALL
MmSizeOfMdl (PVOID Base
,
642 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
643 * the given address range
645 * Base = base virtual address
646 * Length = number of bytes to map
/* ADDRESS_AND_SIZE_TO_SPAN_PAGES accounts for the page offset of Base. */
651 len
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
,Length
);
653 return(sizeof(MDL
)+(len
*sizeof(PFN_TYPE
)));
/* Fills an MDL's page array for a buffer already resident in nonpaged
 * kernel space: records the PFN of each page, flags the MDL and records
 * the (identical) system mapping address.
 * NOTE(review): return type, braces and the declarations of i, PageCount
 * and MdlPages are missing from this excerpt. */
661 MmBuildMdlForNonPagedPool (PMDL Mdl
)
663 * FUNCTION: Fills in the corresponding physical page array of a given
664 * MDL for a buffer in nonpaged system space
666 * Mdl = Points to an MDL that supplies a virtual address,
667 * byte offset and length
669 * This function can be seen as a fast version of MmProbeAndLockPages in case
670 * you _know_ that the mdl address is within nonpaged kernel space.
678 * mdl buffer must (at least) be in kernel space, though this doesn't
679 * necessarily mean that the buffer is within _nonpaged_ kernel space...
681 assert((ULONG
)Mdl
->StartVa
>= KERNEL_BASE
);
683 PageCount
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
684 MdlPages
= (PPFN_TYPE
)(Mdl
+ 1);
686 /* mdl must have enough page entries */
687 assert(PageCount
<= (Mdl
->Size
- sizeof(MDL
))/sizeof(PFN_TYPE
));
/* Record the PFN backing each page of the buffer. */
689 for (i
=0; i
< PageCount
; i
++)
691 *MdlPages
++ = MmGetPfnForProcess(NULL
, (char*)Mdl
->StartVa
+ (i
* PAGE_SIZE
));
694 Mdl
->MdlFlags
|= MDL_SOURCE_IS_NONPAGED_POOL
;
/* Nonpaged pool is already kernel-mapped: the system VA is the buffer
 * itself, so MmMapLockedPages need not create a new mapping. */
696 Mdl
->MappedSystemVa
= (char*)Mdl
->StartVa
+ Mdl
->ByteOffset
;
/* Allocates (if Mdl is NULL) and initializes an MDL for [Base, Base+Length).
 * NOTE(review): return type, remaining parameters, braces, the NULL check
 * around the allocation and the return statement are missing from this
 * excerpt. */
704 MmCreateMdl (PMDL Mdl
,
708 * FUNCTION: Allocates and initializes an MDL
710 * MemoryDescriptorList = Points to MDL to initialize. If this is
711 * NULL then one is allocated
712 * Base = Base virtual address of the buffer
713 * Length = Length in bytes of the buffer
714 * RETURNS: A pointer to initialized MDL
/* Size of header plus page array, from MmSizeOfMdl. */
721 Size
= MmSizeOfMdl(Base
,Length
);
/* Allocation path, tagged TAG_MDL ('MMDL'); the assignment target and
 * NULL check are missing from this excerpt. */
723 (PMDL
)ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
730 MmInitializeMdl(Mdl
, (char*)Base
, Length
);
/* NOTE(review): fragment only — return type and body of MmMapMemoryDumpMdl
 * are missing from this excerpt. */
739 MmMapMemoryDumpMdl (PVOID Unknown0
)
741 * FIXME: Has something to do with crash dumps. Do we want to implement
/* NOTE(review): fragment only — the return type, braces and the actual
 * page-allocation body of MmAllocatePagesForMdl are missing from this
 * excerpt; only the parameter list, documentation and the SkipBytes
 * sanity check remain. */
752 MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress
,
753 IN PHYSICAL_ADDRESS HighAddress
,
754 IN PHYSICAL_ADDRESS SkipBytes
,
755 IN SIZE_T Totalbytes
)
758 MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory pages to an MDL
760 MmAllocatePagesForMdl searches the PFN database for free, zeroed or standby
761 pages. It allocates pages and puts them in the MDL. It does not map the pages (caller responsibility).
762 Designed to be used by an AGP driver
764 LowAddress is the lowest acceptable physical address it wants to allocate
765 and HighAddress is the highest. SkipBytes are the number of bytes that the
766 kernel should keep free above LowAddress and below the address at which it
767 starts to allocate physical memory. TotalBytes are the number of bytes that
768 the driver wants to allocate. The return value of the function is a MDL
769 that if non-zero describes the physical memory the kernel has given the
770 driver. To access portions of the memory the driver must create sub-MDLs
771 from the returned MDL that describe appropriate portions of the physical
772 memory. When a driver wants to access physical memory described by a
773 sub-MDL it must map the sub-MDL using MmGetSystemAddressForMdlSafe.
779 /* SkipBytes must be a multiple of the page size */
780 assert((SkipBytes
.QuadPart
% PAGE_SIZE
) == 0);
/* NOTE(review): fragment only — the return type and body of
 * MmFreePagesFromMdl are missing from this excerpt; only the signature
 * line and documentation remain. */
790 MmFreePagesFromMdl ( IN PMDL Mdl
)
793 Drivers use the MmFreePagesFromMdl, the kernel-mode equivalent of
794 FreeUserPhysicalPages, to free the physical memory it has allocated with
795 MmAllocatePagesForMdl. This function is also prototyped in ntddk.h:
797 Note that a driver is responsible for deallocating the MDL returned by
798 MmAllocatePagesForMdl with a call to ExFreePool, since MmFreePagesFromMdl
799 does not free the MDL.
/* Thin wrapper: delegates to MmMapLockedPages(Mdl, AccessMode), ignoring
 * CacheType, BaseAddress and BugCheckOnFailure entirely.
 * NOTE(review): the return type and the final parameter (presumably
 * Priority) are missing from this excerpt. */
812 MmMapLockedPagesSpecifyCache ( IN PMDL Mdl
,
813 IN KPROCESSOR_MODE AccessMode
,
814 IN MEMORY_CACHING_TYPE CacheType
,
815 IN PVOID BaseAddress
,
816 IN ULONG BugCheckOnFailure
,
820 return MmMapLockedPages (Mdl
, AccessMode
);