1 /* $Id: mdl.c,v 1.54 2003/10/12 17:05:48 hbirr Exp $
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 * PROGRAMMER: David Welch (welch@cwcom.net)
12 /* INCLUDES ****************************************************************/
14 #include <ddk/ntddk.h>
15 #include <internal/mm.h>
16 #include <internal/ps.h>
17 #include <internal/pool.h>
18 #include <ntos/minmax.h>
21 #include <internal/debug.h>
23 /* GLOBALS *******************************************************************/
/* Pool tag used when allocating MDLs from nonpaged pool ('MMDL'). */
25 #define TAG_MDL TAG('M', 'M', 'D', 'L')
/* Size of the kernel virtual region reserved for mapping MDL pages (256 MB). */
27 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
/* Base virtual address of the MDL mapping region; set by
   MmInitializeMdlImplementation via MmCreateMemoryArea. */
29 static PVOID MiMdlMappingRegionBase
= NULL
;
/* Bitmap with one bit per page of the mapping region; a set bit marks an
   allocated page. Guarded by MiMdlMappingRegionLock. */
30 static RTL_BITMAP MiMdlMappingRegionAllocMap
;
/* Page index at which the next bitmap search starts (allocation hint).
   Guarded by MiMdlMappingRegionLock. */
31 static ULONG MiMdlMappingRegionHint
;
/* Spinlock protecting the allocation bitmap and the hint above. */
32 static KSPIN_LOCK MiMdlMappingRegionLock
;
34 /* FUNCTIONS *****************************************************************/
/*
 * FUNCTION: One-time initialization of the MDL mapping machinery: reserves
 * the kernel virtual region used by MmMapLockedPages and builds the
 * allocation bitmap that tracks its pages.
 * NOTE(review): this extraction is incomplete — declarations of Status and
 * Buffer, the braces, and the trailing arguments of MmCreateMemoryArea are
 * missing from the visible text; confirm against the original file.
 */
37 MmInitializeMdlImplementation(VOID
)
/* Start searching the (empty) region from the beginning. */
43 MiMdlMappingRegionHint
= 0;
44 MiMdlMappingRegionBase
= NULL
;
/* Reserve MI_MDL_MAPPING_REGION_SIZE bytes of kernel address space for MDL
   mappings; MmCreateMemoryArea returns the chosen base in
   MiMdlMappingRegionBase. */
46 MmLockAddressSpace(MmGetKernelAddressSpace());
47 Status
= MmCreateMemoryArea(NULL
,
48 MmGetKernelAddressSpace(),
49 MEMORY_AREA_MDL_MAPPING
,
50 &MiMdlMappingRegionBase
,
51 MI_MDL_MAPPING_REGION_SIZE
,
56 if (!NT_SUCCESS(Status
))
58 MmUnlockAddressSpace(MmGetKernelAddressSpace());
61 MmUnlockAddressSpace(MmGetKernelAddressSpace());
/* Allocate backing storage for the bitmap: one bit per page of the region
   (region size / page size bits => / (PAGE_SIZE * 8) bytes). */
63 Buffer
= ExAllocatePool(NonPagedPool
, MI_MDL_MAPPING_REGION_SIZE
/ (PAGE_SIZE
* 8));
/* NOTE(review): the ExAllocatePool result does not appear to be checked in
   the visible text — TODO confirm against the original. */
65 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap
, Buffer
, MI_MDL_MAPPING_REGION_SIZE
/ PAGE_SIZE
);
/* All pages start out free. */
66 RtlClearAllBits(&MiMdlMappingRegionAllocMap
);
68 KeInitializeSpinLock(&MiMdlMappingRegionLock
);
/*
 * FUNCTION: Returns the entry from the MDL's page array that corresponds to
 * the given byte offset into the described buffer.
 * ARGUMENTS:
 *   Mdl    = MDL whose page array (stored immediately after the MDL header,
 *            i.e. at Mdl + 1) is consulted
 *   Offset = byte offset into the buffer, used only as Offset / PAGE_SIZE
 * NOTE: the intra-page part of Offset is discarded — only the page-granular
 * array entry is returned, cast to PVOID.
 */
72 MmGetMdlPageAddress(PMDL Mdl
, PVOID Offset
)
76 MdlPages
= (PULONG
)(Mdl
+ 1);
78 return((PVOID
)MdlPages
[((ULONG
)Offset
) / PAGE_SIZE
]);
85 MmUnlockPages(PMDL Mdl
)
87 * FUNCTION: Unlocks the physical pages described by a given MDL
89 * MemoryDescriptorList = MDL describing the buffer to be unlocked
90 * NOTES: The memory described by the specified MDL must have been locked
91 * previously by a call to MmProbeAndLockPages. As the pages are unlocked, the
/* Pages that came from nonpaged pool, or that were referenced for a page-in
   read, were never locked by MmProbeAndLockPages, so there is nothing to
   undo for them. */
99 * FIXME: I don't know whether this is right, but it looks sensible
101 if ((Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
) ||
102 (Mdl
->MdlFlags
& MDL_IO_PAGE_READ
))
/* If the MDL was never locked there is nothing to unlock either. */
108 * FIXME: Seems sensible
110 if (!(Mdl
->MdlFlags
& MDL_PAGES_LOCKED
))
/* The page array lives immediately after the MDL header. */
115 MdlPages
= (PULONG
)(Mdl
+ 1);
/* Unlock and drop the reference taken on every page spanned by the buffer
   (ByteOffset + ByteCount rounded up to whole pages). */
116 for (i
=0; i
<(PAGE_ROUND_UP(Mdl
->ByteCount
+Mdl
->ByteOffset
)/PAGE_SIZE
); i
++)
118 MmUnlockPage((LARGE_INTEGER
)(LONGLONG
)MdlPages
[i
]);
119 MmDereferencePage((LARGE_INTEGER
)(LONGLONG
)MdlPages
[i
]);
/* Clear the locked flag now that every page has been released. */
121 Mdl
->MdlFlags
= Mdl
->MdlFlags
& (~MDL_PAGES_LOCKED
);
128 MmMapLockedPages(PMDL Mdl
, KPROCESSOR_MODE AccessMode
)
130 * FUNCTION: Maps the physical pages described by a given MDL
132 * Mdl = Points to an MDL updated by MmProbeAndLockPages
133 * AccessMode = Specifies the portion of the address space to map the
135 * RETURNS: The base virtual address that maps the locked pages for the
136 * range described by the MDL
/* NOTE(review): this extraction is incomplete — declarations of oldIrql,
   Base, MdlPages, i, RegionSize and Status, the braces, and the trailing
   arguments of MmCreateVirtualMapping are missing from the visible text. */
144 ULONG StartingOffset
;
146 DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl
, AccessMode
);
/* Nonpaged-pool buffers are already permanently mapped in system space. */
148 if (Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
)
150 return(Mdl
->MappedSystemVa
);
/* Only kernel-mode mappings are implemented. */
153 if (AccessMode
== UserMode
)
155 DPRINT1("MDL mapping to user-mode not yet handled.\n");
159 /* Calculate the number of pages required. */
160 RegionSize
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
162 /* Allocate that number of pages from the mdl mapping region. */
163 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
/* Find RegionSize consecutive free pages in the bitmap, starting the
   search at the hint, and mark them allocated atomically. */
165 StartingOffset
= RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap
, RegionSize
, MiMdlMappingRegionHint
);
/* RtlFindClearBitsAndSet returns 0xffffffff when no run was found. */
167 if (StartingOffset
== 0xffffffff)
169 DPRINT1("Out of MDL mapping space\n");
173 Base
= MiMdlMappingRegionBase
+ StartingOffset
* PAGE_SIZE
;
/* If we allocated exactly at the hint, advance it past our run. */
175 if (MiMdlMappingRegionHint
== StartingOffset
)
177 MiMdlMappingRegionHint
+=RegionSize
;
180 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
182 /* Set the virtual mappings for the MDL pages. */
183 MdlPages
= (PULONG
)(Mdl
+ 1);
184 for (i
= 0; i
< RegionSize
; i
++)
187 Status
= MmCreateVirtualMapping(NULL
,
188 (PVOID
)((ULONG
)Base
+(i
*PAGE_SIZE
)),
190 (LARGE_INTEGER
)(LONGLONG
)MdlPages
[i
],
192 if (!NT_SUCCESS(Status
))
194 DbgPrint("Unable to create virtual mapping\n");
199 /* Mark the MDL as having been mapped. */
200 Mdl
->MdlFlags
= Mdl
->MdlFlags
| MDL_MAPPED_TO_SYSTEM_VA
;
/* The caller's buffer starts ByteOffset bytes into the first page. */
201 Mdl
->MappedSystemVa
= Base
+ Mdl
->ByteOffset
;
202 return(Base
+ Mdl
->ByteOffset
);
209 MmUnmapLockedPages(PVOID BaseAddress
, PMDL Mdl
)
211 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
213 * BaseAddress = Base virtual address to which the pages were mapped
214 * MemoryDescriptorList = MDL describing the mapped pages
/* NOTE(review): this extraction is incomplete — local declarations, braces
   and the trailing arguments of MmDeleteVirtualMapping are missing from
   the visible text. */
222 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress
, Mdl
);
/* Nonpaged-pool buffers were never mapped through the MDL mapping region. */
225 * In this case, the MDL has the same system address as the base address
226 * so there is no need to free it
228 if (Mdl
->MdlFlags
& MDL_SOURCE_IS_NONPAGED_POOL
)
233 /* Calculate the number of pages we mapped. */
234 RegionSize
= PAGE_ROUND_UP(Mdl
->ByteCount
+ Mdl
->ByteOffset
) / PAGE_SIZE
;
/* MmMapLockedPages returned Base + ByteOffset; step back to the
   page-aligned base of the mapping. */
235 BaseAddress
-= Mdl
->ByteOffset
;
237 /* Unmap all the pages. */
238 for (i
= 0; i
< RegionSize
; i
++)
240 MmDeleteVirtualMapping(NULL
,
241 BaseAddress
+ (i
* PAGE_SIZE
),
247 KeAcquireSpinLock(&MiMdlMappingRegionLock
, &oldIrql
);
248 /* Deallocate all the pages used. */
249 Base
= (ULONG
)(BaseAddress
- MiMdlMappingRegionBase
) / PAGE_SIZE
;
251 RtlClearBits(&MiMdlMappingRegionAllocMap
, Base
, RegionSize
);
/* Pull the search hint back so freed pages are found again quickly. */
253 MiMdlMappingRegionHint
= min (MiMdlMappingRegionHint
, Base
);
255 KeReleaseSpinLock(&MiMdlMappingRegionLock
, oldIrql
);
257 /* Reset the MDL state. */
258 Mdl
->MdlFlags
= Mdl
->MdlFlags
& ~MDL_MAPPED_TO_SYSTEM_VA
;
259 Mdl
->MappedSystemVa
= NULL
;
/*
 * FUNCTION: Fills an MDL's page array from a caller-supplied array of page
 * values and marks the MDL as locked for a page-in read.
 * ARGUMENTS:
 *   Mdl   = MDL to fill; its ByteOffset/ByteCount determine how many array
 *           entries are copied
 *   Pages = source array of page values (must span at least
 *           PAGE_ROUND_UP(ByteOffset + ByteCount) / PAGE_SIZE entries)
 */
264 MmBuildMdlFromPages(PMDL Mdl
, PULONG Pages
)
/* MDL_IO_PAGE_READ tells MmUnlockPages not to unlock these pages. */
269 Mdl
->MdlFlags
= Mdl
->MdlFlags
|
270 (MDL_PAGES_LOCKED
| MDL_IO_PAGE_READ
);
/* The page array lives immediately after the MDL header. */
272 MdlPages
= (PULONG
)(Mdl
+ 1);
/* Copy one entry per page spanned by the buffer. */
274 for (i
=0;i
<(PAGE_ROUND_UP(Mdl
->ByteOffset
+Mdl
->ByteCount
)/PAGE_SIZE
);i
++)
276 MdlPages
[i
] = Pages
[i
];
283 VOID STDCALL
MmProbeAndLockPages (PMDL Mdl
,
284 KPROCESSOR_MODE AccessMode
,
285 LOCK_OPERATION Operation
)
287 * FUNCTION: Probes the specified pages, makes them resident and locks them
290 * AccessMode = Access at which to probe the buffer
291 * Operation = Operation to probe for
/* NOTE(review): this extraction is heavily truncated — many lines
   (declarations of i, j, NrPages, Address, Status, MdlPages; braces;
   the code that assigns Mode; the detach path) are missing from the
   visible text; confirm against the original file. */
298 KPROCESSOR_MODE Mode
;
299 PEPROCESS CurrentProcess
= NULL
;
301 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl
);
/* Already-locked MDLs are not locked twice. */
304 * FIXME: Check behaviour against NT
306 if (Mdl
->MdlFlags
& MDL_PAGES_LOCKED
)
/* Kernel-space buffers need no user-mode probing. */
313 if (Mdl
->StartVa
>= (PVOID
)KERNEL_BASE
)
/* If the MDL belongs to another process, attach to that process so its
   address space is the current one while we probe. */
320 CurrentProcess
= PsGetCurrentProcess();
321 if (Mdl
->Process
!= CurrentProcess
)
323 KeAttachProcess(Mdl
->Process
);
331 MmLockAddressSpace(&Mdl
->Process
->AddressSpace
);
/* The page array lives immediately after the MDL header. */
332 MdlPages
= (ULONG
*)(Mdl
+ 1);
333 NrPages
= PAGE_ROUND_UP(Mdl
->ByteOffset
+ Mdl
->ByteCount
) / PAGE_SIZE
;
/* Probe, fault in, lock and reference each page of the buffer in turn. */
334 for (i
= 0; i
< NrPages
; i
++)
338 Address
= Mdl
->StartVa
+ (i
*PAGE_SIZE
);
/* Fault the page in if it is not resident. */
340 if (!MmIsPagePresent(NULL
, Address
))
342 Status
= MmNotPresentFault(Mode
, (ULONG
)Address
, TRUE
);
343 if (!NT_SUCCESS(Status
))
/* On failure, roll back the lock + reference taken on every page
   processed so far, then raise. */
345 for (j
= 0; j
< i
; j
++)
347 MmUnlockPage((LARGE_INTEGER
)(LONGLONG
)MdlPages
[j
]);
348 MmDereferencePage((LARGE_INTEGER
)(LONGLONG
)MdlPages
[j
]);
350 ExRaiseStatus(Status
);
355 MmLockPage(MmGetPhysicalAddressForProcess(NULL
, Address
));
/* For write/modify operations, force a copy-on-write style access fault
   if the page is not currently writable. */
357 if ((Operation
== IoWriteAccess
|| Operation
== IoModifyAccess
) &&
358 (!(MmGetPageProtect(NULL
, (PVOID
)Address
) & PAGE_READWRITE
)))
360 Status
= MmAccessFault(Mode
, (ULONG
)Address
, TRUE
);
361 if (!NT_SUCCESS(Status
))
/* Roll back as above before raising. */
363 for (j
= 0; j
< i
; j
++)
365 MmUnlockPage((LARGE_INTEGER
)(LONGLONG
)MdlPages
[j
]);
/* NOTE(review): the name of the call taking the argument below was lost in
   extraction — presumably MmDereferencePage, matching the rollback at
   original line 348; confirm against the original file. */
367 (LARGE_INTEGER
)(LONGLONG
)MdlPages
[j
]);
369 ExRaiseStatus(Status
);
/* Record the page's physical address and take a reference on it. */
372 MdlPages
[i
] = MmGetPhysicalAddressForProcess(NULL
, Address
).u
.LowPart
;
373 MmReferencePage((LARGE_INTEGER
)(LONGLONG
)MdlPages
[i
]);
375 MmUnlockAddressSpace(&Mdl
->Process
->AddressSpace
);
/* Detach again if we attached to a foreign process above. */
376 if (Mode
== UserMode
&& Mdl
->Process
!= CurrentProcess
)
380 Mdl
->MdlFlags
= Mdl
->MdlFlags
| MDL_PAGES_LOCKED
;
/* NOTE(review): the second parameter line (presumably ULONG Length) and the
   declaration of len are missing from this extraction; confirm against the
   original file. */
387 ULONG STDCALL
MmSizeOfMdl (PVOID Base
,
390 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
391 * the given address range
393 * Base = base virtual address
394 * Length = number of bytes to map
/* Pages spanned by [Base, Base+Length): accounts for the sub-page offset
   of Base as well as the rounded-up tail. */
399 len
= ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base
,Length
);
/* Header plus one ULONG page-array entry per spanned page. */
401 return(sizeof(MDL
)+(len
*sizeof(ULONG
)));
409 MmBuildMdlForNonPagedPool (PMDL Mdl
)
411 * FUNCTION: Fills in the corresponding physical page array of a given
412 * MDL for a buffer in nonpaged system space
414 * Mdl = Points to an MDL that supplies a virtual address,
415 * byte offset and length
/* Nonpaged pool is always resident, so the pages count as locked and
   MmUnlockPages/MmMapLockedPages can short-circuit on this flag. */
419 Mdl
->MdlFlags
= Mdl
->MdlFlags
|
420 (MDL_SOURCE_IS_NONPAGED_POOL
| MDL_PAGES_LOCKED
);
/* Fill every entry the MDL has room for (derived from Mdl->Size) with the
   physical address of the corresponding page of StartVa. */
421 for (va
=0; va
< ((Mdl
->Size
- sizeof(MDL
)) / sizeof(ULONG
)); va
++)
423 ((PULONG
)(Mdl
+ 1))[va
] =
424 (MmGetPhysicalAddress(Mdl
->StartVa
+ (va
* PAGE_SIZE
))).u
.LowPart
;
/* Nonpaged pool is already mapped in system space, so the system VA is
   just the original buffer address. */
426 Mdl
->MappedSystemVa
= Mdl
->StartVa
+ Mdl
->ByteOffset
;
/* NOTE(review): the Base and Length parameter lines, the Size declaration,
   and the failure-path return are missing from this extraction; confirm
   against the original file. */
434 MmCreateMdl (PMDL MemoryDescriptorList
,
438 * FUNCTION: Allocates and initializes an MDL
440 * MemoryDescriptorList = Points to MDL to initialize. If this is
441 * NULL then one is allocated
442 * Base = Base virtual address of the buffer
443 * Length = Length in bytes of the buffer
444 * RETURNS: A pointer to initialized MDL
/* Caller passed no storage: allocate an MDL large enough for the range. */
447 if (MemoryDescriptorList
== NULL
)
451 Size
= MmSizeOfMdl(Base
,Length
);
452 MemoryDescriptorList
=
453 (PMDL
)ExAllocatePoolWithTag(NonPagedPool
, Size
, TAG_MDL
);
454 if (MemoryDescriptorList
== NULL
)
/* Fill in the header fields (Size, StartVa, ByteOffset, ByteCount). */
460 MmInitializeMdl(MemoryDescriptorList
,Base
,Length
);
462 return(MemoryDescriptorList
);
469 MmMapMemoryDumpMdl (PVOID Unknown0
)
471 * FIXME: Has something to do with crash dumps. Do we want to implement