Reverted latest changes.
[reactos.git] / reactos / ntoskrnl / mm / mdl.c
1 /* $Id: mdl.c,v 1.44 2002/09/08 10:23:34 chorns Exp $
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 * PROGRAMMER: David Welch (welch@cwcom.net)
8 * UPDATE HISTORY:
9 * 27/05/98: Created
10 */
11
12 /* INCLUDES ****************************************************************/
13
14 #include <ddk/ntddk.h>
15 #include <internal/mm.h>
16 #include <internal/ps.h>
17 #include <internal/pool.h>
18
19 #define NDEBUG
20 #include <internal/debug.h>
21
22 /* GLOBALS *******************************************************************/
23
24 #define TAG_MDL TAG('M', 'M', 'D', 'L')
25
26 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
27
28 static PVOID MiMdlMappingRegionBase = NULL;
29 static PULONG MiMdlMappingRegionAllocMap = NULL;
30 static ULONG MiMdlMappingRegionHighWaterMark = 0;
31 static KSPIN_LOCK MiMdlMappingRegionLock;
32
33 /* FUNCTIONS *****************************************************************/
34
35 VOID
36 MmInitializeMdlImplementation(VOID)
37 {
38 MEMORY_AREA* Result;
39 NTSTATUS Status;
40
41 MiMdlMappingRegionBase = NULL;
42
43 MmLockAddressSpace(MmGetKernelAddressSpace());
44 Status = MmCreateMemoryArea(NULL,
45 MmGetKernelAddressSpace(),
46 MEMORY_AREA_MDL_MAPPING,
47 &MiMdlMappingRegionBase,
48 MI_MDL_MAPPING_REGION_SIZE,
49 0,
50 &Result,
51 FALSE);
52 if (!NT_SUCCESS(Status))
53 {
54 MmUnlockAddressSpace(MmGetKernelAddressSpace());
55 KeBugCheck(0);
56 }
57 MmUnlockAddressSpace(MmGetKernelAddressSpace());
58
59 MiMdlMappingRegionAllocMap =
60 ExAllocatePool(NonPagedPool,
61 MI_MDL_MAPPING_REGION_SIZE / (PAGESIZE * 32));
62 MiMdlMappingRegionHighWaterMark = 0;
63 KeInitializeSpinLock(&MiMdlMappingRegionLock);
64 }
65
66 PVOID
67 MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
68 {
69 PULONG MdlPages;
70
71 MdlPages = (PULONG)(Mdl + 1);
72
73 return((PVOID)MdlPages[((ULONG)Offset) / PAGESIZE]);
74 }
75
76 VOID STDCALL
77 MmUnlockPages(PMDL Mdl)
78 /*
79 * FUNCTION: Unlocks the physical pages described by a given MDL
80 * ARGUMENTS:
81 * MemoryDescriptorList = MDL describing the buffer to be unlocked
82 * NOTES: The memory described by the specified MDL must have been locked
83 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
84 * MDL is updated
85 */
86 {
87 ULONG i;
88 PULONG MdlPages;
89
90 /*
91 * FIXME: I don't know whether this right, but it looks sensible
92 */
93 if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
94 (Mdl->MdlFlags & MDL_IO_PAGE_READ))
95 {
96 return;
97 }
98
99 /*
100 * FIXME: Seems sensible
101 */
102 if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
103 {
104 return;
105 }
106
107 MdlPages = (PULONG)(Mdl + 1);
108 for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGESIZE); i++)
109 {
110 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
111 MmDereferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
112 }
113 Mdl->MdlFlags = Mdl->MdlFlags & (~MDL_PAGES_LOCKED);
114 }
115
116 PVOID STDCALL
117 MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
118 /*
119 * FUNCTION: Maps the physical pages described by a given MDL
120 * ARGUMENTS:
121 * Mdl = Points to an MDL updated by MmProbeAndLockPages
122 * AccessMode = Specifies the portion of the address space to map the
123 * pages.
124 * RETURNS: The base virtual address that maps the locked pages for the
125 * range described by the MDL
126 */
127 {
128 PVOID Base;
129 ULONG i;
130 PULONG MdlPages;
131 KIRQL oldIrql;
132 ULONG RegionSize;
133
134 DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);
135
136 if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
137 {
138 return(Mdl->MappedSystemVa);
139 }
140
141 if (AccessMode == UserMode)
142 {
143 DPRINT1("MDL mapping to user-mode not yet handled.\n");
144 KeBugCheck(0);
145 }
146
147 /* Calculate the number of pages required. */
148 RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGESIZE;
149
150 /* Allocate that number of pages from the mdl mapping region. */
151 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
152 Base = MiMdlMappingRegionBase + MiMdlMappingRegionHighWaterMark * PAGESIZE;
153 for (i = 0; i < RegionSize; i++)
154 {
155 ULONG Offset = MiMdlMappingRegionHighWaterMark + i;
156 MiMdlMappingRegionAllocMap[Offset / 32] |= (1 << (Offset % 32));
157 }
158 MiMdlMappingRegionHighWaterMark += RegionSize;
159 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
160
161 /* Set the virtual mappings for the MDL pages. */
162 MdlPages = (PULONG)(Mdl + 1);
163 for (i = 0; i < RegionSize; i++)
164 {
165 NTSTATUS Status;
166 Status = MmCreateVirtualMapping(NULL,
167 (PVOID)((ULONG)Base+(i*PAGESIZE)),
168 PAGE_READWRITE,
169 (LARGE_INTEGER)(LONGLONG)MdlPages[i],
170 FALSE);
171 if (!NT_SUCCESS(Status))
172 {
173 DbgPrint("Unable to create virtual mapping\n");
174 KeBugCheck(0);
175 }
176 }
177
178 /* Mark the MDL has having being mapped. */
179 Mdl->MdlFlags = Mdl->MdlFlags | MDL_MAPPED_TO_SYSTEM_VA;
180 Mdl->MappedSystemVa = Base + Mdl->ByteOffset;
181 return(Base + Mdl->ByteOffset);
182 }
183
184 VOID STDCALL
185 MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
186 /*
187 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
188 * ARGUMENTS:
189 * BaseAddress = Base virtual address to which the pages were mapped
190 * MemoryDescriptorList = MDL describing the mapped pages
191 */
192 {
193 KIRQL oldIrql;
194 ULONG i;
195 ULONG RegionSize;
196 ULONG Base;
197
198 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", Mdl, BaseAddress);
199
200 /*
201 * In this case, the MDL has the same system address as the base address
202 * so there is no need to free it
203 */
204 if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL)
205 {
206 return;
207 }
208
209 /* Calculate the number of pages we mapped. */
210 RegionSize = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGESIZE;
211
212 /* Unmap all the pages. */
213 for (i = 0; i < RegionSize; i++)
214 {
215 MmDeleteVirtualMapping(NULL,
216 BaseAddress + (i * PAGESIZE),
217 FALSE,
218 NULL,
219 NULL);
220 }
221
222 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
223 /* Deallocate all the pages used. */
224 Base = (ULONG)(BaseAddress - MiMdlMappingRegionBase - Mdl->ByteOffset);
225 Base = Base / PAGESIZE;
226 for (i = 0; i < RegionSize; i++)
227 {
228 ULONG Offset = Base + i;
229 MiMdlMappingRegionAllocMap[Offset / 32] &= ~(1 << (Offset % 32));
230 }
231 /* If all the pages below the high-water mark are free then move it down. */
232 if ((Base + RegionSize) == MiMdlMappingRegionHighWaterMark)
233 {
234 MiMdlMappingRegionHighWaterMark = Base;
235 }
236 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
237
238 /* Reset the MDL state. */
239 Mdl->MdlFlags = Mdl->MdlFlags & ~MDL_MAPPED_TO_SYSTEM_VA;
240 Mdl->MappedSystemVa = NULL;
241 }
242
243
244 VOID
245 MmBuildMdlFromPages(PMDL Mdl, PULONG Pages)
246 {
247 ULONG i;
248 PULONG MdlPages;
249
250 Mdl->MdlFlags = Mdl->MdlFlags |
251 (MDL_PAGES_LOCKED | MDL_IO_PAGE_READ);
252
253 MdlPages = (PULONG)(Mdl + 1);
254
255 for (i=0;i<(PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGESIZE);i++)
256 {
257 MdlPages[i] = Pages[i];
258 }
259 }
260
261 VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
262 KPROCESSOR_MODE AccessMode,
263 LOCK_OPERATION Operation)
264 /*
265 * FUNCTION: Probes the specified pages, makes them resident and locks them
266 * ARGUMENTS:
267 * Mdl = MDL to probe
268 * AccessMode = Access at which to probe the buffer
269 * Operation = Operation to probe for
270 */
271 {
272 PULONG MdlPages;
273 ULONG i, j;
274 ULONG NrPages;
275 NTSTATUS Status;
276 KPROCESSOR_MODE Mode;
277 PEPROCESS CurrentProcess;
278
279 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);
280
281 /*
282 * FIXME: Check behaviour against NT
283 */
284 if (Mdl->MdlFlags & MDL_PAGES_LOCKED)
285 {
286 return;
287 }
288
289 CurrentProcess = PsGetCurrentProcess();
290
291 if (Mdl->Process != CurrentProcess)
292 {
293 KeAttachProcess(Mdl->Process);
294 }
295
296 if (Mdl->StartVa >= (PVOID)KERNEL_BASE)
297 {
298 Mode = KernelMode;
299 }
300 else
301 {
302 Mode = UserMode;
303 }
304
305 /*
306 * Lock the pages
307 */
308
309 MmLockAddressSpace(&Mdl->Process->AddressSpace);
310 MdlPages = (ULONG *)(Mdl + 1);
311 NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGESIZE;
312 for (i = 0; i < NrPages; i++)
313 {
314 PVOID Address;
315
316 Address = Mdl->StartVa + (i*PAGESIZE);
317
318 if (!MmIsPagePresent(NULL, Address))
319 {
320 Status = MmNotPresentFault(Mode, (ULONG)Address, TRUE);
321 if (!NT_SUCCESS(Status))
322 {
323 for (j = 0; j < i; j++)
324 {
325 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
326 MmDereferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
327 }
328 ExRaiseStatus(Status);
329 }
330 }
331 else
332 {
333 MmLockPage(MmGetPhysicalAddressForProcess(NULL, Address));
334 }
335 if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
336 (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
337 {
338 Status = MmAccessFault(Mode, (ULONG)Address, TRUE);
339 if (!NT_SUCCESS(Status))
340 {
341 for (j = 0; j < i; j++)
342 {
343 MmUnlockPage((LARGE_INTEGER)(LONGLONG)MdlPages[j]);
344 MmDereferencePage(
345 (LARGE_INTEGER)(LONGLONG)MdlPages[j]);
346 }
347 ExRaiseStatus(Status);
348 }
349 }
350 MdlPages[i] = MmGetPhysicalAddressForProcess(NULL, Address).u.LowPart;
351 MmReferencePage((LARGE_INTEGER)(LONGLONG)MdlPages[i]);
352 }
353 MmUnlockAddressSpace(&Mdl->Process->AddressSpace);
354 if (Mdl->Process != CurrentProcess)
355 {
356 KeDetachProcess();
357 }
358 Mdl->MdlFlags = Mdl->MdlFlags | MDL_PAGES_LOCKED;
359 }
360
361
362 ULONG STDCALL MmSizeOfMdl (PVOID Base,
363 ULONG Length)
364 /*
365 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
366 * the given address range
367 * ARGUMENTS:
368 * Base = base virtual address
369 * Length = number of bytes to map
370 */
371 {
372 ULONG len;
373
374 len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);
375
376 return(sizeof(MDL)+(len*sizeof(ULONG)));
377 }
378
379
380 VOID STDCALL
381 MmBuildMdlForNonPagedPool (PMDL Mdl)
382 /*
383 * FUNCTION: Fills in the corresponding physical page array of a given
384 * MDL for a buffer in nonpaged system space
385 * ARGUMENTS:
386 * Mdl = Points to an MDL that supplies a virtual address,
387 * byte offset and length
388 */
389 {
390 int va;
391 Mdl->MdlFlags = Mdl->MdlFlags |
392 (MDL_SOURCE_IS_NONPAGED_POOL | MDL_PAGES_LOCKED);
393 for (va=0; va < ((Mdl->Size - sizeof(MDL)) / sizeof(ULONG)); va++)
394 {
395 ((PULONG)(Mdl + 1))[va] =
396 (MmGetPhysicalAddress(Mdl->StartVa + (va * PAGESIZE))).u.LowPart;
397 }
398 Mdl->MappedSystemVa = Mdl->StartVa + Mdl->ByteOffset;
399 }
400
401
402 PMDL STDCALL
403 MmCreateMdl (PMDL MemoryDescriptorList,
404 PVOID Base,
405 ULONG Length)
406 /*
407 * FUNCTION: Allocates and initalizes an MDL
408 * ARGUMENTS:
409 * MemoryDescriptorList = Points to MDL to initalize. If this is
410 * NULL then one is allocated
411 * Base = Base virtual address of the buffer
412 * Length = Length in bytes of the buffer
413 * RETURNS: A pointer to initalized MDL
414 */
415 {
416 if (MemoryDescriptorList == NULL)
417 {
418 ULONG Size;
419
420 Size = MmSizeOfMdl(Base,Length);
421 MemoryDescriptorList =
422 (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
423 if (MemoryDescriptorList == NULL)
424 {
425 return(NULL);
426 }
427 }
428
429 MmInitializeMdl(MemoryDescriptorList,Base,Length);
430
431 return(MemoryDescriptorList);
432 }
433
VOID STDCALL
MmMapMemoryDumpMdl (PVOID Unknown0)
/*
 * FUNCTION: Not implemented; prints an UNIMPLEMENTED diagnostic only.
 * FIXME: Has something to do with crash dumps. Do we want to implement
 * this?
 */
{
   UNIMPLEMENTED;
}
443
444 /* EOF */
445
446
447
448
449
450
451
452
453