merge trunk head (37902)
[reactos.git] / reactos / ntoskrnl / mm / mdlsup.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/mdl.c
5 * PURPOSE: Manipulates MDLs
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 */
9
10 /* INCLUDES ****************************************************************/
11
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15
16 #if defined (ALLOC_PRAGMA)
17 #pragma alloc_text(INIT, MmInitializeMdlImplementation)
18 #endif
19
20 /* GLOBALS *******************************************************************/
21
22 #define TAG_MDL TAG('M', 'D', 'L', ' ')
23 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
24
25 PVOID MiMdlMappingRegionBase = NULL;
26 RTL_BITMAP MiMdlMappingRegionAllocMap;
27 ULONG MiMdlMappingRegionHint;
28 KSPIN_LOCK MiMdlMappingRegionLock;
29 extern ULONG MmPageArraySize;
30
31 /* PRIVATE FUNCTIONS **********************************************************/
32
33 VOID
34 INIT_FUNCTION
35 NTAPI
36 MmInitializeMdlImplementation(VOID)
37 {
38 MEMORY_AREA* Result;
39 NTSTATUS Status;
40 PVOID Buffer;
41 PHYSICAL_ADDRESS BoundaryAddressMultiple;
42
43 BoundaryAddressMultiple.QuadPart = 0;
44 MiMdlMappingRegionHint = 0;
45 MiMdlMappingRegionBase = NULL;
46
47 MmLockAddressSpace(MmGetKernelAddressSpace());
48 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
49 MEMORY_AREA_MDL_MAPPING,
50 &MiMdlMappingRegionBase,
51 MI_MDL_MAPPING_REGION_SIZE,
52 PAGE_READWRITE,
53 &Result,
54 FALSE,
55 0,
56 BoundaryAddressMultiple);
57 if (!NT_SUCCESS(Status))
58 {
59 MmUnlockAddressSpace(MmGetKernelAddressSpace());
60 ASSERT(FALSE);
61 }
62 MmUnlockAddressSpace(MmGetKernelAddressSpace());
63
64 Buffer = ExAllocatePoolWithTag(NonPagedPool,
65 MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8),
66 TAG_MDL);
67
68 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
69 RtlClearAllBits(&MiMdlMappingRegionAllocMap);
70
71 KeInitializeSpinLock(&MiMdlMappingRegionLock);
72 }
73
74 /* PUBLIC FUNCTIONS ***********************************************************/
75
76
77 /*
78 * @implemented
79 */
80 PMDL
81 NTAPI
82 MmCreateMdl(IN PMDL Mdl,
83 IN PVOID Base,
84 IN SIZE_T Length)
85 {
86 ULONG Size;
87
88 /* Check if we don't have an MDL built */
89 if (!Mdl)
90 {
91 /* Calcualte the size we'll need and allocate the MDL */
92 Size = MmSizeOfMdl(Base, Length);
93 Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
94 if (!Mdl) return NULL;
95 }
96
97 /* Initialize it */
98 MmInitializeMdl(Mdl, Base, Length);
99 DPRINT("Creating MDL: %p\n", Mdl);
100 DPRINT("Base: %p. Length: %lx\n", Base, Length);
101 return Mdl;
102 }
103
104 /*
105 * @implemented
106 */
107 ULONG
108 NTAPI
109 MmSizeOfMdl(IN PVOID Base,
110 IN SIZE_T Length)
111 {
112 /* Return the MDL size */
113 return sizeof(MDL) + (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
114 }
115
116 /*
117 * @implemented
118 */
119 VOID
120 NTAPI
121 MmBuildMdlForNonPagedPool(IN PMDL Mdl)
122 {
123 ULONG i;
124 ULONG PageCount;
125 PPFN_NUMBER MdlPages;
126 PVOID Base;
127 DPRINT("Building MDL: %p\n", Mdl);
128
129 /* Sanity checks */
130 ASSERT(Mdl->ByteCount != 0);
131 ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
132 MDL_MAPPED_TO_SYSTEM_VA |
133 MDL_SOURCE_IS_NONPAGED_POOL |
134 MDL_PARTIAL)) == 0);
135
136 /* We know the MDL isn't associated to a process now */
137 Mdl->Process = NULL;
138
139 /* Get page and VA information */
140 MdlPages = (PPFN_NUMBER)(Mdl + 1);
141 Base = Mdl->StartVa;
142
143 /* Set the system address and now get the page count */
144 Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
145 PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa, Mdl->ByteCount);
146 ASSERT(PageCount != 0);
147
148 /* Go through each page */
149 for (i = 0; i < PageCount; i++)
150 {
151 /* Map it */
152 *MdlPages++ = MmGetPfnForProcess(NULL,
153 (PVOID)((ULONG_PTR)Base + (i * PAGE_SIZE)));
154 }
155
156 /* Set the final flag */
157 Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
158 }
159
160 /*
161 * @implemented
162 */
163 VOID
164 NTAPI
165 MmFreePagesFromMdl(IN PMDL Mdl)
166 {
167 PVOID Base;
168 PPFN_NUMBER Pages;
169 LONG NumberOfPages;
170 DPRINT("Freeing MDL: %p\n", Mdl);
171
172 /* Sanity checks */
173 ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
174 ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
175 ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
176
177 /* Get address and page information */
178 Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
179 NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
180
181 /* Loop all the MDL pages */
182 Pages = (PPFN_NUMBER)(Mdl + 1);
183 while (--NumberOfPages >= 0)
184 {
185 /* Dereference each one of them */
186 MmDereferencePage(Pages[NumberOfPages]);
187 }
188
189 /* Remove the pages locked flag */
190 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
191 }
192
193 /*
194 * @implemented
195 */
196 PVOID
197 NTAPI
198 MmMapLockedPages(IN PMDL Mdl,
199 IN KPROCESSOR_MODE AccessMode)
200 {
201 /* Call the extended version */
202 return MmMapLockedPagesSpecifyCache(Mdl,
203 AccessMode,
204 MmCached,
205 NULL,
206 TRUE,
207 HighPagePriority);
208 }
209
210 /*
211 * @implemented
212 */
213 VOID
214 NTAPI
215 MmUnlockPages(IN PMDL Mdl)
216 {
217 ULONG i;
218 PPFN_NUMBER MdlPages;
219 PFN_NUMBER Page;
220 PEPROCESS Process;
221 PVOID Base;
222 ULONG Flags, PageCount;
223 DPRINT("Unlocking MDL: %p\n", Mdl);
224
225 /* Sanity checks */
226 ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
227 ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
228 ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
229 ASSERT(Mdl->ByteCount != 0);
230
231 /* Get the process associated and capture the flags which are volatile */
232 Process = Mdl->Process;
233 Flags = Mdl->MdlFlags;
234
235 /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
236 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
237 {
238 /* Unmap the pages from system spage */
239 MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
240 }
241
242 /* Get the page count */
243 MdlPages = (PPFN_NUMBER)(Mdl + 1);
244 Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
245 PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
246 ASSERT(PageCount != 0);
247
248 /* We don't support AWE */
249 if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);
250
251 /* Check if the buffer is mapped I/O space */
252 if (Flags & MDL_IO_SPACE)
253 {
254 /* Check if this was a wirte */
255 if (Flags & MDL_WRITE_OPERATION)
256 {
257 /* Windows keeps track of the modified bit */
258 }
259
260 /* Check if we have a process */
261 if (Process)
262 {
263 /* Handle the accounting of locked pages */
264 /* ASSERT(Process->NumberOfLockedPages >= 0); */ // always true
265 InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
266 -PageCount);
267 }
268
269 /* We're done */
270 Mdl->MdlFlags &= ~MDL_IO_SPACE;
271 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
272 return;
273 }
274
275 /* Check if we have a process */
276 if (Process)
277 {
278 /* Handle the accounting of locked pages */
279 /* ASSERT(Process->NumberOfLockedPages >= 0); */ // always true
280 InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
281 -PageCount);
282 }
283
284 /* Scan each page */
285 for (i = 0; i < PageCount; i++)
286 {
287 /* Get the page entry */
288
289 /* Unlock and dereference it */
290 Page = MdlPages[i];
291 MmUnlockPage(Page);
292 MmDereferencePage(Page);
293 }
294
295 /* We're done */
296 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
297 }
298
299 /*
300 * @implemented
301 */
302 VOID
303 NTAPI
304 MmUnmapLockedPages(IN PVOID BaseAddress,
305 IN PMDL Mdl)
306 {
307 KIRQL oldIrql;
308 ULONG i, PageCount;
309 ULONG Base;
310 MEMORY_AREA *MemoryArea;
311 DPRINT("Unmapping MDL: %p\n", Mdl);
312 DPRINT("Base: %p\n", BaseAddress);
313
314 /* Sanity check */
315 ASSERT(Mdl->ByteCount != 0);
316
317 /* Check if this is a kernel request */
318 if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
319 {
320 /* Get base and count information */
321 Base = (ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset;
322 PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
323
324 /* Sanity checks */
325 ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
326 ASSERT(PageCount != 0);
327 ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
328
329 /* ReactOS does not support this flag */
330 if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES) ASSERT(FALSE);
331
332 /* Remove flags */
333 Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
334 MDL_PARTIAL_HAS_BEEN_MAPPED |
335 MDL_FREE_EXTRA_PTES);
336
337 /* If we came from non-paged pool, on ReactOS, we can leave */
338 if (Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) return;
339
340 /* Loop each page */
341 BaseAddress = PAGE_ALIGN(BaseAddress);
342 for (i = 0; i < PageCount; i++)
343 {
344 /* Delete it */
345 MmDeleteVirtualMapping(NULL,
346 (PVOID)((ULONG_PTR)BaseAddress + (i * PAGE_SIZE)),
347 FALSE,
348 NULL,
349 NULL);
350 }
351
352 /* Lock the mapping region */
353 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
354
355 /* Deallocate all the pages used. */
356 Base = ((ULONG_PTR)BaseAddress - (ULONG_PTR)MiMdlMappingRegionBase) / PAGE_SIZE;
357 RtlClearBits(&MiMdlMappingRegionAllocMap, Base, PageCount);
358 MiMdlMappingRegionHint = min(MiMdlMappingRegionHint, Base);
359
360 /* Release the lock */
361 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
362 }
363 else
364 {
365 /* Sanity check */
366 ASSERT(Mdl->Process == PsGetCurrentProcess());
367
368 /* Find the memory area */
369 MemoryArea = MmLocateMemoryAreaByAddress(&Mdl->Process->VadRoot,
370 BaseAddress);
371 ASSERT(MemoryArea);
372
373 /* Free it */
374 MmFreeMemoryArea(&Mdl->Process->VadRoot,
375 MemoryArea,
376 NULL,
377 NULL);
378 }
379 }
380
/*
 * @implemented
 *
 * Probes the buffer described by the MDL, faults its pages in as needed,
 * locks them, and fills the MDL's PFN array. Raises
 * STATUS_ACCESS_VIOLATION if a page cannot be made resident/writable.
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_TYPE MdlPages;
    PVOID Base, Address;
    ULONG i, j;
    ULONG NrPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PFN_TYPE Page;
    PEPROCESS CurrentProcess;
    PETHREAD Thread;   /* NOTE(review): assigned below but never used afterwards */
    PMM_AVL_TABLE AddressSpace;
    KIRQL OldIrql = KeGetCurrentIrql();
    DPRINT("Probing MDL: %p\n", Mdl);

    /* Sanity checks: a non-empty, page-aligned, not-yet-locked/mapped MDL */
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    /* Get page and base information */
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)Mdl->StartVa;
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    NrPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(NrPages != 0);

    /* Check if this is an MDL in I/O Space: a system-range VA whose PFN lies
       beyond the PFN database (i.e. not RAM managed by us). */
    if (Mdl->StartVa >= MmSystemRangeStart &&
        MmGetPfnForProcess(NULL, Mdl->StartVa) >= MmPageArraySize)
    {
        /* Just record the PFN of each page; I/O space needs no locking */
        for (i = 0; i < NrPages; i++)
        {
            /* And map it */
            MdlPages[i] = MmGetPfnForProcess(NULL,
                                             (PVOID)((ULONG_PTR)Mdl->StartVa + (i * PAGE_SIZE)));
        }

        /* Set the flags and exit */
        Mdl->MdlFlags |= MDL_PAGES_LOCKED|MDL_IO_SPACE;
        return;
    }

    /* Get the thread and process */
    Thread = PsGetCurrentThread();
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        /* User-mode buffer: account against the current process */
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        /* Kernel-mode buffer: no process */
        CurrentProcess = NULL;
    }

    /* Record the access direction: anything but a pure read is a write */
    if (Operation != IoReadAccess)
    {
        /* Set the write flag */
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        /* Remove the write flag */
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    /* Check if this came from kernel mode.
       NOTE(review): this tests Base >= MM_HIGHEST_USER_ADDRESS while the
       process selection above tested Address <= MM_HIGHEST_USER_ADDRESS;
       an MDL starting exactly at the boundary sees inconsistent results —
       confirm the intended boundary handling. */
    if (Base >= MM_HIGHEST_USER_ADDRESS)
    {
        /* We should not have a process */
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;
        AddressSpace = MmGetKernelAddressSpace();
    }
    else
    {
        /* Sanity checks */
        ASSERT(NrPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        /* Track locked pages (undone in MmUnlockPages) */
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    NrPages);

        /* Save the process */
        Mdl->Process = CurrentProcess;

        /* Use the process lock */
        AddressSpace = &CurrentProcess->VadRoot;
    }


    /*
     * Lock the pages. At >= DISPATCH_LEVEL the address-space mutex cannot
     * be acquired, so the page-list spinlock is used instead.
     */
    if (OldIrql < DISPATCH_LEVEL)
        MmLockAddressSpace(AddressSpace);
    else
        MmAcquirePageListLock(&OldIrql);

    for (i = 0; i < NrPages; i++)
    {
        PVOID Address;   /* NOTE(review): shadows the outer Address local */

        Address = (char*)Mdl->StartVa + (i*PAGE_SIZE);

        if (!MmIsPagePresent(NULL, Address))
        {
            /* Fault the page in.
               NOTE(review): on failure here, pages locked in earlier
               iterations are not unlocked — unlike the rollback loop in
               the write-fault path below. Verify whether that is
               intentional. */
            Status = MmAccessFault(FALSE, Address, AccessMode, NULL);
            if (!NT_SUCCESS(Status))
            {
                goto cleanup;
            }
        }
        else
        {
            /* Already resident: pin it */
            MmLockPage(MmGetPfnForProcess(NULL, Address));
        }

        /* For write/modify operations, force the page writable via a
           write fault if its protection does not allow writing yet. */
        if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
            (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
        {
            Status = MmAccessFault(TRUE, Address, AccessMode, NULL);
            if (!NT_SUCCESS(Status))
            {
                /* Roll back the pages already locked and referenced */
                for (j = 0; j < i; j++)
                {
                    Page = MdlPages[j];
                    if (Page < MmPageArraySize)
                    {
                        MmUnlockPage(Page);
                        MmDereferencePage(Page);
                    }
                }
                goto cleanup;
            }
        }
        /* Record the PFN; pages beyond the PFN database are I/O space and
           are not reference-counted. */
        Page = MmGetPfnForProcess(NULL, Address);
        MdlPages[i] = Page;
        if (Page >= MmPageArraySize)
        {
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }
        else
        {
            MmReferencePage(Page);
        }
    }

cleanup:
    /* Release whichever lock was taken above */
    if (OldIrql < DISPATCH_LEVEL)
        MmUnlockAddressSpace(AddressSpace);
    else
        MmReleasePageListLock(OldIrql);

    /* Callers expect an exception on probe failure, per the NT contract */
    if (!NT_SUCCESS(Status))
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
    return;
}
556
557 /*
558 * @implemented
559 */
560 PMDL
561 NTAPI
562 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
563 IN PHYSICAL_ADDRESS HighAddress,
564 IN PHYSICAL_ADDRESS SkipBytes,
565 IN SIZE_T Totalbytes)
566 {
567 PMDL Mdl;
568 PPFN_TYPE Pages;
569 ULONG NumberOfPagesWanted, NumberOfPagesAllocated;
570 ULONG Ret;
571 DPRINT("Allocating pages: %p\n", LowAddress.LowPart);
572
573 /* SkipBytes must be a multiple of the page size */
574 if (BYTE_OFFSET(SkipBytes.LowPart)) return NULL;
575
576 /* Create the actual MDL */
577 Mdl = MmCreateMdl(NULL, NULL, Totalbytes);
578 if (!Mdl) return NULL;
579
580 /* Allocate pages into the MDL */
581 NumberOfPagesAllocated = 0;
582 NumberOfPagesWanted = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
583 Pages = (PPFN_TYPE)(Mdl + 1);
584 while (NumberOfPagesWanted > 0)
585 {
586 Ret = MmAllocPagesSpecifyRange(MC_NPPOOL,
587 LowAddress,
588 HighAddress,
589 NumberOfPagesWanted,
590 Pages + NumberOfPagesAllocated);
591 if (Ret == (ULONG)-1) break;
592
593 NumberOfPagesAllocated += Ret;
594 NumberOfPagesWanted -= Ret;
595
596 if (SkipBytes.QuadPart == 0) break;
597 LowAddress.QuadPart += SkipBytes.QuadPart;
598 HighAddress.QuadPart += SkipBytes.QuadPart;
599 }
600
601 /* If nothing was allocated, fail */
602 if (NumberOfPagesAllocated)
603 {
604 /* Free our MDL */
605 ExFreePool(Mdl);
606 return NULL;
607 }
608
609 /* Zero out the MDL pages */
610 //RtlZeroMemory(LowAddress.LowPart, NumberOfPagesAllocated * PAGE_SIZE);
611
612 /* Return the MDL */
613 Mdl->MdlFlags |= MDL_PAGES_LOCKED;
614 Mdl->ByteCount = (ULONG)(NumberOfPagesAllocated * PAGE_SIZE);
615 return Mdl;
616 }
617
/*
 * @unimplemented
 *
 * Extended variant of MmAllocatePagesForMdl that additionally takes a
 * caching type and allocation flags. Not implemented yet: always returns
 * NULL, which callers must treat as allocation failure.
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T Totalbytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    UNIMPLEMENTED;
    return NULL;
}
633
/*
 * @implemented
 *
 * Maps the pages described by a locked MDL into virtual address space.
 * KernelMode requests are satisfied from the dedicated MDL mapping region
 * (bitmap-allocated); UserMode requests create a memory area in the
 * current process. Returns the mapped VA (offset by ByteOffset), or NULL /
 * raises / bugchecks on failure depending on MDL_MAPPING_CAN_FAIL and
 * BugCheckOnFailure. Priority is accepted but not used here.
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PULONG MdlPages;
    KIRQL oldIrql;
    ULONG PageCount;
    ULONG StartingOffset;
    PEPROCESS CurrentProcess;
    NTSTATUS Status;
    ULONG Protect;
    MEMORY_AREA *Result;
    LARGE_INTEGER BoundaryAddressMultiple;
    DPRINT("Mapping MDL: %p\n", Mdl);
    DPRINT("Base: %p\n", BaseAddress);

    /* Sanity checks */
    ASSERT(Mdl->ByteCount != 0);

    /* Get the base */
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    /* Set default page protection; only MmNonCached affects it here —
       other caching types are treated as cached. */
    Protect = PAGE_READWRITE;
    if (CacheType == MmNonCached) Protect |= PAGE_NOCACHE;

    /* Handle kernel case first */
    if (AccessMode == KernelMode)
    {
        /* Get the list of pages and count */
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        /* Must not already be mapped/pool-backed, must be locked or partial */
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        /* Allocate that number of pages from the MDL mapping region bitmap,
           searching from the hint to reduce scan time. */
        KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
        StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap,
                                                PageCount,
                                                MiMdlMappingRegionHint);
        if (StartingOffset == 0xffffffff)
        {
            /* Region exhausted: fail softly if allowed, else bugcheck */
            KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
            DPRINT("Out of MDL mapping space\n");
            if ((Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) || !BugCheckOnFailure)
            {
                return NULL;
            }
            ASSERT(FALSE);
        }
        Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);

        /* Advance the hint only if we consumed the pages it pointed at */
        if (MiMdlMappingRegionHint == StartingOffset) MiMdlMappingRegionHint += PageCount;
        KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

        /* Set the virtual mappings for the MDL pages; I/O-space PFNs need
           the unchecked variant since they are outside the PFN database. */
        if (Mdl->MdlFlags & MDL_IO_SPACE)
        {
            /* Map the pages */
            Status = MmCreateVirtualMappingUnsafe(NULL,
                                                  Base,
                                                  Protect,
                                                  MdlPages,
                                                  PageCount);
        }
        else
        {
            /* Map the pages */
            Status = MmCreateVirtualMapping(NULL,
                                            Base,
                                            Protect,
                                            MdlPages,
                                            PageCount);
        }

        /* Check if the mapping succeeded.
           NOTE(review): on this failure path the bitmap bits set above are
           not cleared — confirm whether that leak is intentional. */
        if (!NT_SUCCESS(Status))
        {
            /* If it can fail, return NULL */
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            /* Should we bugcheck? */
            if (!BugCheckOnFailure) return NULL;

            /* Yes, crash the system */
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        /* Mark it as mapped */
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        /* Check if it was partial */
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            /* Write the appropriate flag here too */
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        /* Save the mapped address (adjusted by the buffer's page offset) */
        Base = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
        Mdl->MappedSystemVa = Base;
        return Base;
    }


    /* User-mode path: calculate the number of pages required. */
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;

    BoundaryAddressMultiple.QuadPart = 0;
    Base = BaseAddress;

    CurrentProcess = PsGetCurrentProcess();

    /* Reserve a memory area in the current process; the mapping is fixed
       (non-relocatable) only when the caller supplied a base address. */
    MmLockAddressSpace(&CurrentProcess->VadRoot);
    Status = MmCreateMemoryArea(&CurrentProcess->VadRoot,
                                MEMORY_AREA_MDL_MAPPING,
                                &Base,
                                PageCount * PAGE_SIZE,
                                Protect,
                                &Result,
                                (Base != NULL),
                                0,
                                BoundaryAddressMultiple);
    MmUnlockAddressSpace(&CurrentProcess->VadRoot);
    if (!NT_SUCCESS(Status))
    {
        if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
        {
            return NULL;
        }

        /* Throw exception */
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
        ASSERT(0);
    }

    /* Set the virtual mappings for the MDL pages. */
    if (Mdl->MdlFlags & MDL_IO_SPACE)
    {
        /* Map the pages */
        Status = MmCreateVirtualMappingUnsafe(CurrentProcess,
                                              Base,
                                              Protect,
                                              MdlPages,
                                              PageCount);
    }
    else
    {
        /* Map the pages */
        Status = MmCreateVirtualMapping(CurrentProcess,
                                        Base,
                                        Protect,
                                        MdlPages,
                                        PageCount);
    }

    /* Check if the mapping succeeded.
       NOTE(review): the memory area created above is not freed on this
       failure path — confirm whether that leak is intentional. */
    if (!NT_SUCCESS(Status))
    {
        /* If it can fail, return NULL */
        if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

        /* Throw exception */
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    /* Return the base (adjusted by the buffer's page offset) */
    Base = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    return Base;
}
818
/*
 * @unimplemented
 *
 * Should advance the MDL's start position by NumberOfBytes (trimming the
 * described buffer from the front). Not implemented: callers always get
 * STATUS_NOT_IMPLEMENTED.
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
830
831 /*
832 * @unimplemented
833 */
834 PVOID
835 NTAPI
836 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
837 IN ULONG PoolTag,
838 IN PMDL MemoryDescriptorList,
839 IN MEMORY_CACHING_TYPE CacheType)
840 {
841 UNIMPLEMENTED;
842 return 0;
843 }
844
/*
 * @unimplemented
 *
 * Should unmap pages previously mapped with
 * MmMapLockedPagesWithReservedMapping, leaving the reserved range intact.
 * Not implemented: currently a no-op.
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}
856
/*
 * @unimplemented
 *
 * Should prefetch the pages described by the given read lists. Not
 * implemented: callers always get STATUS_NOT_IMPLEMENTED.
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
868
/*
 * @unimplemented
 *
 * Should change the protection on the MDL's system-space mapping to
 * NewProtect. Not implemented: callers always get STATUS_NOT_IMPLEMENTED.
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
880
/*
 * @unimplemented
 *
 * Should probe and lock the MDL's pages in the context of the given
 * process (cf. MmProbeAndLockPages, which uses the current process).
 * Not implemented: currently a no-op.
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}
893
894
895 /*
896 * @unimplemented
897 */
898 VOID
899 NTAPI
900 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
901 IN LARGE_INTEGER PageList[],
902 IN KPROCESSOR_MODE AccessMode,
903 IN LOCK_OPERATION Operation)
904 {
905 UNIMPLEMENTED;
906 }
907
/*
 * @unimplemented
 *
 * Should map the MDL for use by the crash-dump path (which cannot rely on
 * normal memory-management services). Not implemented: currently a no-op.
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}
917
918 /* EOF */
919