e615f23354326d9d0c5521527470b54fa04ef1fd
[reactos.git] / reactos / ntoskrnl / mm / mdl.c
1 /* $Id: mdl.c,v 1.54 2003/10/12 17:05:48 hbirr Exp $
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 * PROGRAMMER: David Welch (welch@cwcom.net)
8 * UPDATE HISTORY:
9 * 27/05/98: Created
10 */
11
12 /* INCLUDES ****************************************************************/
13
14 #include <ddk/ntddk.h>
15 #include <internal/mm.h>
16 #include <internal/ps.h>
17 #include <internal/pool.h>
18 #include <ntos/minmax.h>
19
20 #define NDEBUG
21 #include <internal/debug.h>
22
23 /* GLOBALS *******************************************************************/
24
/* Pool tag used for MDL allocations ('MMDL'). */
#define TAG_MDL TAG('M', 'M', 'D', 'L')

/* Size in bytes of the kernel VA region reserved for mapping MDL pages. */
#define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)

static PVOID MiMdlMappingRegionBase = NULL;   /* base VA of the MDL mapping region */
static RTL_BITMAP MiMdlMappingRegionAllocMap; /* one bit per page in the region */
static ULONG MiMdlMappingRegionHint;          /* hint index where a free run may start */
static KSPIN_LOCK MiMdlMappingRegionLock;     /* protects the bitmap and the hint */
33
34 /* FUNCTIONS *****************************************************************/
35
36 VOID INIT_FUNCTION
37 MmInitializeMdlImplementation(VOID)
38 {
39 MEMORY_AREA* Result;
40 NTSTATUS Status;
41 PVOID Buffer;
42
43 MiMdlMappingRegionHint = 0;
44 MiMdlMappingRegionBase = NULL;
45
46 MmLockAddressSpace(MmGetKernelAddressSpace());
47 Status = MmCreateMemoryArea(NULL,
48 MmGetKernelAddressSpace(),
49 MEMORY_AREA_MDL_MAPPING,
50 &MiMdlMappingRegionBase,
51 MI_MDL_MAPPING_REGION_SIZE,
52 0,
53 &Result,
54 FALSE,
55 FALSE);
56 if (!NT_SUCCESS(Status))
57 {
58 MmUnlockAddressSpace(MmGetKernelAddressSpace());
59 KEBUGCHECK(0);
60 }
61 MmUnlockAddressSpace(MmGetKernelAddressSpace());
62
63 Buffer = ExAllocatePool(NonPagedPool, MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
64
65 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
66 RtlClearAllBits(&MiMdlMappingRegionAllocMap);
67
68 KeInitializeSpinLock(&MiMdlMappingRegionLock);
69 }
70
71 PVOID
72 MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
73 {
74 PULONG MdlPages;
75
76 MdlPages = (PULONG)(Mdl + 1);
77
78 return((PVOID)MdlPages[((ULONG)Offset) / PAGE_SIZE]);
79 }
80
81 /*
82 * @unimplemented
83 */
VOID STDCALL
MmUnlockPages(PMDL Mdl)
/*
 * FUNCTION: Unlocks the physical pages described by a given MDL
 * ARGUMENTS:
 *      Mdl = MDL describing the buffer to be unlocked
 * NOTES: The memory described by the specified MDL must have been locked
 * previously by a call to MmProbeAndLockPages. As the pages are unlocked,
 * the MDL is updated (MDL_PAGES_LOCKED is cleared).
 */
{
  ULONG i;
  PULONG MdlPages;

  /*
   * Nonpaged-pool buffers and in-progress page reads were never locked
   * through MmProbeAndLockPages, so there is nothing to undo.
   * FIXME: I don't know whether this right, but it looks sensible
   */
  if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
      (Mdl->MdlFlags & MDL_IO_PAGE_READ))
    {
      return;
    }

  /*
   * Ignore MDLs whose pages were never locked.
   * FIXME: Seems sensible
   */
  if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
    {
      return;
    }

  /* The page array follows the MDL header; unlock and drop the reference
   * taken by MmProbeAndLockPages on every page spanned by the buffer. */
  MdlPages = (PULONG)(Mdl + 1);
  for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGE_SIZE); i++)
    {
      MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
      MmDereferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
    }
  Mdl->MdlFlags = Mdl->MdlFlags & (~MDL_PAGES_LOCKED);
}
123
124 /*
125 * @implemented
126 */
PVOID STDCALL
MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
/*
 * FUNCTION: Maps the physical pages described by a given MDL
 * ARGUMENTS:
 *      Mdl = Points to an MDL updated by MmProbeAndLockPages
 *      AccessMode = Specifies the portion of the address space to map the
 *      pages.
 * RETURNS: The base virtual address that maps the locked pages for the
 * range described by the MDL
 */
{
   PVOID Base;
   ULONG i;
   PULONG MdlPages;
   KIRQL oldIrql;
   ULONG RegionSize;
   ULONG StartingOffset;

   DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);

   /* Nonpaged pool is already mapped into system space; reuse that VA. */
   if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
     {
	return(Mdl->MappedSystemVa);
     }

   if (AccessMode == UserMode)
     {
	DPRINT1("MDL mapping to user-mode not yet handled.\n");
	KEBUGCHECK(0);
     }

   /* Calculate the number of pages required. */
   RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;

   /* Allocate that number of pages from the mdl mapping region. */
   KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);

   StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, RegionSize, MiMdlMappingRegionHint);

   if (StartingOffset == 0xffffffff)
     {
	/* No free run of RegionSize pages in the mapping region. */
	DPRINT1("Out of MDL mapping space\n");
	KEBUGCHECK(0);
     }

   Base = MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE;

   /* Advance the hint only when this allocation started exactly at it;
    * otherwise the hint still points at a possibly-free area below. */
   if (MiMdlMappingRegionHint == StartingOffset)
     {
	MiMdlMappingRegionHint +=RegionSize;
     }

   KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

   /* Set the virtual mappings for the MDL pages. */
   MdlPages = (PULONG)(Mdl + 1);
   for (i = 0; i < RegionSize; i++)
     {
	NTSTATUS Status;
	Status = MmCreateVirtualMapping(NULL,
					(PVOID)((ULONG)Base+(i*PAGE_SIZE)),
					PAGE_READWRITE,
					(LARGE_INTEGER)(LONGLONG)MdlPages[i],
					FALSE);
	if (!NT_SUCCESS(Status))
	  {
	     DbgPrint("Unable to create virtual mapping\n");
	     KEBUGCHECK(0);
	  }
     }

   /* Mark the MDL has having being mapped. */
   Mdl->MdlFlags = Mdl->MdlFlags | MDL_MAPPED_TO_SYSTEM_VA;
   Mdl->MappedSystemVa = Base + Mdl->ByteOffset;
   return(Base + Mdl->ByteOffset);
}
204
205 /*
206 * @implemented
207 */
VOID STDCALL
MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
/*
 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
 * ARGUMENTS:
 *         BaseAddress = Base virtual address to which the pages were mapped
 *         Mdl = MDL describing the mapped pages
 */
{
  KIRQL oldIrql;
  ULONG i;
  ULONG RegionSize;
  ULONG Base;

  DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress, Mdl);

  /*
   * In this case, the MDL has the same system address as the base address
   * so there is no need to free it
   */
  if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
    {
      return;
    }

  /* Calculate the number of pages we mapped. */
  RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
  /* MmMapLockedPages returned Base + ByteOffset; step back to the
   * page-aligned base of the mapping. */
  BaseAddress -= Mdl->ByteOffset;

  /* Unmap all the pages. */
  for (i = 0; i < RegionSize; i++)
    {
      MmDeleteVirtualMapping(NULL,
			     BaseAddress + (i * PAGE_SIZE),
			     FALSE,
			     NULL,
			     NULL);
    }

  KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
  /* Deallocate all the pages used. */
  Base = (ULONG)(BaseAddress - MiMdlMappingRegionBase) / PAGE_SIZE;

  RtlClearBits(&MiMdlMappingRegionAllocMap, Base, RegionSize);

  /* Pull the allocation hint back so freed space is found first. */
  MiMdlMappingRegionHint = min (MiMdlMappingRegionHint, Base);

  KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

  /* Reset the MDL state. */
  Mdl->MdlFlags = Mdl->MdlFlags & ~MDL_MAPPED_TO_SYSTEM_VA;
  Mdl->MappedSystemVa = NULL;
}
261
262
263 VOID
264 MmBuildMdlFromPages(PMDL Mdl, PULONG Pages)
265 {
266 ULONG i;
267 PULONG MdlPages;
268
269 Mdl->MdlFlags = Mdl->MdlFlags |
270 (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
271
272 MdlPages = (PULONG)(Mdl + 1);
273
274 for (i=0;i<(PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE);i++)
275 {
276 MdlPages[i] = Pages[i];
277 }
278 }
279
280 /*
281 * @unimplemented
282 */
VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
				  KPROCESSOR_MODE AccessMode,
				  LOCK_OPERATION Operation)
/*
 * FUNCTION: Probes the specified pages, makes them resident and locks them
 * ARGUMENTS:
 *      Mdl = MDL to probe
 *      AccessMode = Access at which to probe the buffer
 *      Operation = Operation to probe for
 */
{
   PULONG MdlPages;
   ULONG i, j;
   ULONG NrPages;
   NTSTATUS Status;
   KPROCESSOR_MODE Mode;
   PEPROCESS CurrentProcess = NULL;

   DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);

   /*
    * Already-locked MDLs are left alone.
    * FIXME: Check behaviour against NT
    */
   if (Mdl->MdlFlags & MDL_PAGES_LOCKED)
     {
	return;
     }



   /* Kernel-space buffers are probed in the current context; user-space
    * buffers must be probed in the context of the MDL's owning process. */
   if (Mdl->StartVa >= (PVOID)KERNEL_BASE)
     {
	Mode = KernelMode;
     }
   else
     {
	Mode = UserMode;
	CurrentProcess = PsGetCurrentProcess();
	if (Mdl->Process != CurrentProcess)
	  {
	     KeAttachProcess(Mdl->Process);
	  }
     }

   /*
    * Lock the pages
    */

   MmLockAddressSpace(&Mdl->Process->AddressSpace);
   MdlPages = (ULONG *)(Mdl + 1);
   NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
   for (i = 0; i < NrPages; i++)
     {
	PVOID Address;

	Address = Mdl->StartVa + (i*PAGE_SIZE);

	if (!MmIsPagePresent(NULL, Address))
	  {
	     /* Fault the page in (MmNotPresentFault with Locked==TRUE also
	      * locks it in place). */
	     Status = MmNotPresentFault(Mode, (ULONG)Address, TRUE);
	     if (!NT_SUCCESS(Status))
	       {
		  /* Undo the lock/reference taken on pages [0, i). */
		  for (j = 0; j < i; j++)
		    {
			MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
			MmDereferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
		    }
		  /* NOTE(review): this raises while still holding the
		   * address space lock and, for user buffers, while still
		   * attached to Mdl->Process — looks like a leak; verify. */
		  ExRaiseStatus(Status);
	       }
	  }
	else
	  {
	     MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
	  }
	/* For write/modify operations a read-only page must be made
	 * writable (e.g. copy-on-write) before the caller touches it. */
	if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
	    (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
	  {
	     Status = MmAccessFault(Mode, (ULONG)Address, TRUE);
	     if (!NT_SUCCESS(Status))
	       {
		  /* Undo pages [0, i); see the review note on the
		   * not-present path above — same concern applies here. */
		  for (j = 0; j < i; j++)
		    {
			MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
			MmDereferencePage(
					  (LARGE_INTEGER)(LONGLONG)MdlPages[j]);
		    }
		  ExRaiseStatus(Status);
	       }
	  }
	/* Record the physical page and take a reference so it cannot be
	 * freed while the MDL holds it. */
	MdlPages[i] = MmGetPhysicalAddressForProcess(NULL, Address).u.LowPart;
	MmReferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
     }
   MmUnlockAddressSpace(&Mdl->Process->AddressSpace);
   if (Mode == UserMode && Mdl->Process != CurrentProcess)
     {
	KeDetachProcess();
     }
   Mdl->MdlFlags = Mdl->MdlFlags | MDL_PAGES_LOCKED;
}
382
383
384 /*
385 * @implemented
386 */
387 ULONG STDCALL MmSizeOfMdl (PVOID Base,
388 ULONG Length)
389 /*
390 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
391 * the given address range
392 * ARGUMENTS:
393 * Base = base virtual address
394 * Length = number of bytes to map
395 */
396 {
397 ULONG len;
398
399 len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);
400
401 return(sizeof(MDL)+(len*sizeof(ULONG)));
402 }
403
404
405 /*
406 * @implemented
407 */
408 VOID STDCALL
409 MmBuildMdlForNonPagedPool (PMDL Mdl)
410 /*
411 * FUNCTION: Fills in the corresponding physical page array of a given
412 * MDL for a buffer in nonpaged system space
413 * ARGUMENTS:
414 * Mdl = Points to an MDL that supplies a virtual address,
415 * byte offset and length
416 */
417 {
418 ULONG va;
419 Mdl->MdlFlags = Mdl->MdlFlags |
420 (MDL_SOURCE_IS_NONPAGED_POOL | MDL_PAGES_LOCKED);
421 for (va=0; va < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); va++)
422 {
423 ((PULONG)(Mdl + 1))[va] =
424 (MmGetPhysicalAddress(Mdl->StartVa + (va * PAGE_SIZE))).u.LowPart;
425 }
426 Mdl->MappedSystemVa = Mdl->StartVa + Mdl->ByteOffset;
427 }
428
429
430 /*
431 * @implemented
432 */
433 PMDL STDCALL
434 MmCreateMdl (PMDL MemoryDescriptorList,
435 PVOID Base,
436 ULONG Length)
437 /*
438 * FUNCTION: Allocates and initalizes an MDL
439 * ARGUMENTS:
440 * MemoryDescriptorList = Points to MDL to initalize. If this is
441 * NULL then one is allocated
442 * Base = Base virtual address of the buffer
443 * Length = Length in bytes of the buffer
444 * RETURNS: A pointer to initalized MDL
445 */
446 {
447 if (MemoryDescriptorList == NULL)
448 {
449 ULONG Size;
450
451 Size = MmSizeOfMdl(Base,Length);
452 MemoryDescriptorList =
453 (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
454 if (MemoryDescriptorList == NULL)
455 {
456 return(NULL);
457 }
458 }
459
460 MmInitializeMdl(MemoryDescriptorList,Base,Length);
461
462 return(MemoryDescriptorList);
463 }
464
465 /*
466 * @unimplemented
467 */
VOID STDCALL
MmMapMemoryDumpMdl (PVOID Unknown0)
/*
 * FUNCTION: Stub. Maps an MDL during crash-dump processing.
 * FIXME: Has something to do with crash dumps. Do we want to implement
 * this?
 */
{
   UNIMPLEMENTED;
}
477
478 /* EOF */
479
480
481
482
483
484
485
486
487