/* $Id$
 *
 * COPYRIGHT:    See COPYING in the top level directory
 * PROJECT:      ReactOS kernel
 * FILE:         ntoskrnl/mm/mdl.c
 * PURPOSE:      Manipulates MDLs
 *
 * PROGRAMMERS:  David Welch (welch@cwcom.net)
 */

/* INCLUDES ****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <internal/debug.h>

/* GLOBALS *******************************************************************/

#define TAG_MDL    TAG('M', 'D', 'L', ' ')

#define MI_MDL_MAPPING_REGION_SIZE       (256*1024*1024)

static PVOID MiMdlMappingRegionBase = NULL;
static RTL_BITMAP MiMdlMappingRegionAllocMap;
static ULONG MiMdlMappingRegionHint;
static KSPIN_LOCK MiMdlMappingRegionLock;
extern ULONG MmPageArraySize;

/*
MDL Flags desc.

MDL_PAGES_LOCKED              MmProbeAndLockPages has been called for this mdl
MDL_SOURCE_IS_NONPAGED_POOL   mdl has been built by MmBuildMdlForNonPagedPool
MDL_PARTIAL                   mdl has been built by IoBuildPartialMdl
MDL_MAPPING_CAN_FAIL          in case of an error, MmMapLockedPages will return NULL instead of bugchecking
MDL_MAPPED_TO_SYSTEM_VA       mdl has been mapped into kernel space using MmMapLockedPages
MDL_PARTIAL_HAS_BEEN_MAPPED   mdl flagged MDL_PARTIAL has been mapped into kernel space using MmMapLockedPages
*/
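
/*
 * A quick illustration of how these flags combine in practice (a sketch,
 * not code from this file): callers that want an existing kernel mapping
 * reused rather than recreated typically test the flags first, e.g.
 *
 *    if (Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA | MDL_SOURCE_IS_NONPAGED_POOL))
 *       Va = Mdl->MappedSystemVa;                 // mapping already exists
 *    else
 *       Va = MmMapLockedPages(Mdl, KernelMode);   // create one
 */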

/* FUNCTIONS *****************************************************************/


/*
 * @unimplemented
 */
NTSTATUS
STDCALL
MmAdvanceMdl (
   IN PMDL Mdl,
   IN ULONG NumberOfBytes
   )
{
   UNIMPLEMENTED;
   return STATUS_NOT_IMPLEMENTED;
}


VOID INIT_FUNCTION
MmInitializeMdlImplementation(VOID)
{
   MEMORY_AREA* Result;
   NTSTATUS Status;
   PVOID Buffer;
   PHYSICAL_ADDRESS BoundaryAddressMultiple;

   BoundaryAddressMultiple.QuadPart = 0;
   MiMdlMappingRegionHint = 0;
   MiMdlMappingRegionBase = NULL;

   MmLockAddressSpace(MmGetKernelAddressSpace());
   Status = MmCreateMemoryArea(NULL,
                               MmGetKernelAddressSpace(),
                               MEMORY_AREA_MDL_MAPPING,
                               &MiMdlMappingRegionBase,
                               MI_MDL_MAPPING_REGION_SIZE,
                               0,
                               &Result,
                               FALSE,
                               FALSE,
                               BoundaryAddressMultiple);
   if (!NT_SUCCESS(Status))
   {
      MmUnlockAddressSpace(MmGetKernelAddressSpace());
      KEBUGCHECK(0);
   }
   MmUnlockAddressSpace(MmGetKernelAddressSpace());

   Buffer = ExAllocatePool(NonPagedPool, MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));

   RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
   RtlClearAllBits(&MiMdlMappingRegionAllocMap);

   KeInitializeSpinLock(&MiMdlMappingRegionLock);
}
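
/*
 * Sizing note for the bitmap above (worked out for x86 with 4 KB pages):
 * the 256 MB mapping region holds MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE =
 * 65536 mappable pages, so the bitmap needs 65536 bits, which is exactly
 * the MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8) = 8192 bytes allocated
 * for it: one bit per page of the region.
 */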


PVOID
MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
{
   PPFN_NUMBER MdlPages;

   MdlPages = (PPFN_NUMBER)(Mdl + 1);

   return((PVOID)MdlPages[((ULONG_PTR)Offset) / PAGE_SIZE]);
}


/*
 * @implemented
 */
VOID STDCALL
MmUnlockPages(PMDL Mdl)
/*
 * FUNCTION: Unlocks the physical pages described by a given MDL
 * ARGUMENTS:
 *      MemoryDescriptorList = MDL describing the buffer to be unlocked
 * NOTES: The memory described by the specified MDL must have been locked
 * previously by a call to MmProbeAndLockPages. As the pages are unlocked,
 * the MDL is updated.
 *
 * May be called in any process context.
 */
{
   ULONG i;
   PPFN_NUMBER MdlPages;
   PFN_NUMBER Page;

   /*
    * MmProbeAndLockPages MUST have been called to lock this mdl!
    *
    * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
    * locked with MmProbeAndLockPages, but (for now) we'll be more forgiving...
    */
   if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
   {
      DPRINT1("MmUnlockPages called for non-locked mdl!\n");
      return;
   }

   /* If the mdl buffer is mapped io space -> do nothing */
   if (Mdl->MdlFlags & MDL_IO_SPACE)
   {
      Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
      return;
   }

   /* Automagically undo any calls to MmGetSystemAddressForMdl for this mdl */
   if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
   {
      MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
   }

   /*
    * FIXME: I don't know whether this is right, but it looks sensible
    */
   if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
       (Mdl->MdlFlags & MDL_IO_PAGE_READ))
   {
      return;
   }


   MdlPages = (PPFN_NUMBER)(Mdl + 1);
   for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGE_SIZE); i++)
   {
      Page = MdlPages[i];
      MmUnlockPage(Page);
      MmDereferencePage(Page);
   }

   Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
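
/*
 * Typical pairing with MmProbeAndLockPages (a sketch; UserBuffer and
 * Length are hypothetical driver-supplied values):
 *
 *    Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
 *    if (Mdl != NULL)
 *    {
 *       MmProbeAndLockPages(Mdl, UserMode, IoReadAccess);
 *       ... use the locked pages ...
 *       MmUnlockPages(Mdl);
 *       IoFreeMdl(Mdl);
 *    }
 *
 * Since MmProbeAndLockPages raises an exception on failure, real callers
 * wrap the probe in SEH; see the sketch after MmProbeAndLockPages below.
 */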


/*
 * @unimplemented
 */
PVOID
STDCALL
MmMapLockedPagesWithReservedMapping (
   IN PVOID MappingAddress,
   IN ULONG PoolTag,
   IN PMDL MemoryDescriptorList,
   IN MEMORY_CACHING_TYPE CacheType
   )
{
   UNIMPLEMENTED;
   return 0;
}


/*
 * @implemented
 */
VOID STDCALL
MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
/*
 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
 * ARGUMENTS:
 *         BaseAddress = Base virtual address to which the pages were mapped
 *         MemoryDescriptorList = MDL describing the mapped pages
 *
 * User space unmappings _must_ be done from the original process context!
 */
{
   KIRQL oldIrql;
   ULONG i;
   ULONG PageCount;
   ULONG Base;

   DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress, Mdl);

   /*
    * In this case, the MDL has the same system address as the base address
    * so there is no need to free it
    */
   if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) &&
       ((ULONG_PTR)BaseAddress >= KERNEL_BASE))
   {
      return;
   }


   /* Calculate the number of pages we mapped. */
   PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;

   /*
    * The docs say that BaseAddress should be a _base_ address, but every
    * example I've seen passes the actual address. -Gunnar
    */
   BaseAddress = PAGE_ALIGN(BaseAddress);

   /* Unmap all the pages. */
   for (i = 0; i < PageCount; i++)
   {
      MmDeleteVirtualMapping(Mdl->Process,
                             (char*)BaseAddress + (i * PAGE_SIZE),
                             FALSE,
                             NULL,
                             NULL);
   }

   if ((ULONG_PTR)BaseAddress >= KERNEL_BASE)
   {
      ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

      KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
      /* Deallocate all the pages used. */
      Base = (ULONG)((char*)BaseAddress - (char*)MiMdlMappingRegionBase) / PAGE_SIZE;

      RtlClearBits(&MiMdlMappingRegionAllocMap, Base, PageCount);

      MiMdlMappingRegionHint = min(MiMdlMappingRegionHint, Base);

      KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

      /* Reset the MDL state. */
      Mdl->MdlFlags &= ~MDL_MAPPED_TO_SYSTEM_VA;
      Mdl->MappedSystemVa = NULL;

   }
   else
   {
      MEMORY_AREA *Marea;

      ASSERT(Mdl->Process == PsGetCurrentProcess());

      Marea = MmLocateMemoryAreaByAddress( &Mdl->Process->AddressSpace, BaseAddress );
      if (Marea == NULL)
      {
         DPRINT1( "Couldn't open memory area when unmapping user-space pages!\n" );
         KEBUGCHECK(0);
      }

      MmFreeMemoryArea( &Mdl->Process->AddressSpace, Marea, NULL, NULL );

      Mdl->Process = NULL;
   }

}


/*
 * @unimplemented
 */
VOID
STDCALL
MmUnmapReservedMapping (
   IN PVOID BaseAddress,
   IN ULONG PoolTag,
   IN PMDL MemoryDescriptorList
   )
{
   UNIMPLEMENTED;
}


VOID
MmBuildMdlFromPages(PMDL Mdl, PPFN_TYPE Pages)
{
   memcpy(Mdl + 1, Pages, sizeof(PFN_TYPE) * (PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE));

   /* FIXME: this flag should be set by the caller perhaps? */
   Mdl->MdlFlags |= MDL_IO_PAGE_READ;
}


/*
 * @unimplemented
 */
NTSTATUS
STDCALL
MmPrefetchPages (
   IN ULONG NumberOfLists,
   IN PREAD_LIST *ReadLists
   )
{
   UNIMPLEMENTED;
   return STATUS_NOT_IMPLEMENTED;
}


/*
 * @unimplemented
 */
NTSTATUS
STDCALL
MmProtectMdlSystemAddress (
   IN PMDL MemoryDescriptorList,
   IN ULONG NewProtect
   )
{
   UNIMPLEMENTED;
   return STATUS_NOT_IMPLEMENTED;
}

/*
 * @implemented
 */
VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
                                  KPROCESSOR_MODE AccessMode,
                                  LOCK_OPERATION Operation)
/*
 * FUNCTION: Probes the specified pages, makes them resident and locks them
 * ARGUMENTS:
 *      Mdl = MDL to probe
 *      AccessMode = Access at which to probe the buffer
 *      Operation = Operation to probe for
 *
 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool,
 * used in cases where you know that the mdl address is in paged memory or
 * where you don't know where the mdl address comes from. MmProbeAndLockPages
 * will work no matter what kind of mdl address you have.
 */
{
   PPFN_TYPE MdlPages;
   ULONG i, j;
   ULONG NrPages;
   NTSTATUS Status;
   KPROCESSOR_MODE Mode;
   PFN_TYPE Page;
   PEPROCESS CurrentProcess = PsGetCurrentProcess();
   PMADDRESS_SPACE AddressSpace;

   DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);

   ASSERT(!(Mdl->MdlFlags & (MDL_PAGES_LOCKED|MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL|
                             MDL_IO_SPACE|MDL_SOURCE_IS_NONPAGED_POOL)));

   MdlPages = (PPFN_TYPE)(Mdl + 1);
   NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;

   /* mdl must have enough page entries */
   ASSERT(NrPages <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));


   if (Mdl->StartVa >= (PVOID)KERNEL_BASE &&
       MmGetPfnForProcess(NULL, Mdl->StartVa) >= MmPageArraySize)
   {
      /* The physical address is not in physical memory, so this must be io memory */

      for (i = 0; i < NrPages; i++)
      {
         MdlPages[i] = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i*PAGE_SIZE));
      }

      Mdl->MdlFlags |= MDL_PAGES_LOCKED|MDL_IO_SPACE;
      return;
   }


   if (Mdl->StartVa >= (PVOID)KERNEL_BASE)
   {
      /* FIXME: why isn't AccessMode used? */
      Mode = KernelMode;
      Mdl->Process = NULL;
      AddressSpace = MmGetKernelAddressSpace();
   }
   else
   {
      /* FIXME: why isn't AccessMode used? */
      Mode = UserMode;
      Mdl->Process = CurrentProcess;
      AddressSpace = &CurrentProcess->AddressSpace;
   }


   /*
    * Lock the pages
    */
   MmLockAddressSpace(AddressSpace);

   for (i = 0; i < NrPages; i++)
   {
      PVOID Address;

      Address = (char*)Mdl->StartVa + (i*PAGE_SIZE);

      /*
       * FIXME: skip the probing/access stuff if the buffer is nonpaged kernel space?
       * -Gunnar
       */

      if (!MmIsPagePresent(NULL, Address))
      {
         Status = MmNotPresentFault(Mode, (ULONG_PTR)Address, TRUE);
         if (!NT_SUCCESS(Status))
         {
            for (j = 0; j < i; j++)
            {
               Page = MdlPages[j];
               if (Page < MmPageArraySize)
               {
                  MmUnlockPage(Page);
                  MmDereferencePage(Page);
               }
            }
            MmUnlockAddressSpace(AddressSpace);
            ExRaiseStatus(STATUS_ACCESS_VIOLATION);
         }
      }
      else
      {
         MmLockPage(MmGetPfnForProcess(NULL, Address));
      }

      if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
          (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
      {
         Status = MmAccessFault(Mode, (ULONG_PTR)Address, TRUE);
         if (!NT_SUCCESS(Status))
         {
            for (j = 0; j < i; j++)
            {
               Page = MdlPages[j];
               if (Page < MmPageArraySize)
               {
                  MmUnlockPage(Page);
                  MmDereferencePage(Page);
               }
            }
            MmUnlockAddressSpace(AddressSpace);
            ExRaiseStatus(STATUS_ACCESS_VIOLATION);
         }
      }
      Page = MmGetPfnForProcess(NULL, Address);
      MdlPages[i] = Page;
      if (Page >= MmPageArraySize)
         Mdl->MdlFlags |= MDL_IO_SPACE;
      else
         MmReferencePage(Page);
   }

   MmUnlockAddressSpace(AddressSpace);
   Mdl->MdlFlags |= MDL_PAGES_LOCKED;
}
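
/*
 * Because a failed probe raises STATUS_ACCESS_VIOLATION, callers that lock
 * user buffers guard the call with SEH. A minimal sketch (using the
 * compiler-level __try/__except form for brevity; the Mdl is assumed to
 * come from IoAllocateMdl):
 *
 *    __try
 *    {
 *       MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
 *    }
 *    __except (EXCEPTION_EXECUTE_HANDLER)
 *    {
 *       Status = GetExceptionCode();   // typically STATUS_ACCESS_VIOLATION
 *    }
 */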


/*
 * @unimplemented
 */
VOID
STDCALL
MmProbeAndLockProcessPages (
   IN OUT PMDL MemoryDescriptorList,
   IN PEPROCESS Process,
   IN KPROCESSOR_MODE AccessMode,
   IN LOCK_OPERATION Operation
   )
{
   UNIMPLEMENTED;
}


/*
 * @unimplemented
 */
VOID
STDCALL
MmProbeAndLockSelectedPages(
   IN OUT PMDL MemoryDescriptorList,
   IN LARGE_INTEGER PageList[],
   IN KPROCESSOR_MODE AccessMode,
   IN LOCK_OPERATION Operation
   )
{
   UNIMPLEMENTED;
}


/*
 * @implemented
 */
ULONG STDCALL MmSizeOfMdl (PVOID Base,
                           ULONG Length)
/*
 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
 * the given address range
 * ARGUMENTS:
 *         Base = base virtual address
 *         Length = number of bytes to map
 */
{
   ULONG len;

   len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);

   return(sizeof(MDL)+(len*sizeof(PFN_TYPE)));
}
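
/*
 * Worked example (assuming 32-bit x86 with 4 KB pages, sizeof(MDL) == 28
 * and sizeof(PFN_TYPE) == 4): a 10000-byte buffer starting 0xFF0 bytes
 * into a page spans ADDRESS_AND_SIZE_TO_SPAN_PAGES = (0xFF0 + 10000 +
 * 4095) / 4096 = 4 pages, so MmSizeOfMdl returns 28 + 4*4 = 44 bytes.
 */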


/*
 * @implemented
 */
VOID STDCALL
MmBuildMdlForNonPagedPool (PMDL Mdl)
/*
 * FUNCTION: Fills in the corresponding physical page array of a given
 * MDL for a buffer in nonpaged system space
 * ARGUMENTS:
 *        Mdl = Points to an MDL that supplies a virtual address,
 *              byte offset and length
 *
 * This function can be seen as a fast version of MmProbeAndLockPages in case
 * you _know_ that the mdl address is within nonpaged kernel space.
 */
{
   ULONG i;
   ULONG PageCount;
   PPFN_TYPE MdlPages;

   /*
    * The mdl buffer must (at least) be in kernel space, though this doesn't
    * necessarily mean that the buffer is within _nonpaged_ kernel space...
    */
   ASSERT((ULONG_PTR)Mdl->StartVa >= KERNEL_BASE);

   PageCount = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
   MdlPages = (PPFN_TYPE)(Mdl + 1);

   /* mdl must have enough page entries */
   ASSERT(PageCount <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));

   for (i=0; i < PageCount; i++)
   {
      *MdlPages++ = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i * PAGE_SIZE));
   }

   Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
   Mdl->Process = NULL;
   Mdl->MappedSystemVa = (char*)Mdl->StartVa + Mdl->ByteOffset;
}
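
/*
 * Sketch of the fast path this enables (Buffer and Length are hypothetical;
 * error handling elided): because the buffer is already resident, no
 * probing or locking is needed before the MDL is usable.
 *
 *    Buffer = ExAllocatePool(NonPagedPool, Length);
 *    Mdl = IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL);
 *    MmBuildMdlForNonPagedPool(Mdl);
 *    // the MDL can now be handed to a driver stack without
 *    // MmProbeAndLockPages / MmUnlockPages
 */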


/*
 * @implemented
 */
PMDL STDCALL
MmCreateMdl (PMDL Mdl,
             PVOID Base,
             ULONG Length)
/*
 * FUNCTION: Allocates and initializes an MDL
 * ARGUMENTS:
 *          MemoryDescriptorList = Points to MDL to initialize. If this is
 *                                 NULL then one is allocated
 *          Base = Base virtual address of the buffer
 *          Length = Length in bytes of the buffer
 * RETURNS: A pointer to the initialized MDL
 */
{
   if (Mdl == NULL)
   {
      ULONG Size;

      Size = MmSizeOfMdl(Base,Length);
      Mdl =
         (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
      if (Mdl == NULL)
      {
         return(NULL);
      }
   }

   MmInitializeMdl(Mdl, (char*)Base, Length);

   return(Mdl);
}
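
/*
 * Usage sketch (Buffer and Length hypothetical). Note that the DDK
 * documents MmCreateMdl as obsolete and recommends IoAllocateMdl for new
 * code; callers that do use it must still fill in the page array, e.g.:
 *
 *    Mdl = MmCreateMdl(NULL, Buffer, Length);
 *    if (Mdl == NULL)
 *       return STATUS_INSUFFICIENT_RESOURCES;
 *    MmBuildMdlForNonPagedPool(Mdl);   // or MmProbeAndLockPages
 */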


/*
 * @unimplemented
 */
VOID STDCALL
MmMapMemoryDumpMdl (PVOID Unknown0)
/*
 * FIXME: Has something to do with crash dumps. Do we want to implement
 * this?
 */
{
   UNIMPLEMENTED;
}


/*
 * @implemented
 */
PMDL STDCALL
MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T Totalbytes )
{
   /*
   MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory
   pages to an MDL. It searches the PFN database for free, zeroed or standby
   pages, allocates them and puts them in the MDL; it does not map the pages
   (that is the caller's responsibility). It is designed to be used by an
   AGP driver.

   LowAddress is the lowest acceptable physical address it wants to allocate
   and HighAddress is the highest. SkipBytes is the number of bytes that the
   kernel should keep free above LowAddress and below the address at which it
   starts to allocate physical memory. TotalBytes is the number of bytes that
   the driver wants to allocate. The return value of the function is an MDL
   that, if non-NULL, describes the physical memory the kernel has given the
   driver. To access portions of the memory the driver must create sub-MDLs
   from the returned MDL that describe appropriate portions of the physical
   memory. When a driver wants to access physical memory described by a
   sub-MDL it must map the sub-MDL using MmGetSystemAddressForMdlSafe.

   Konstantin Gusev
   */

   PMDL Mdl;
   PPFN_TYPE Pages;
   ULONG NumberOfPagesWanted, NumberOfPagesAllocated;
   ULONG Ret;

   DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
          "SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
          LowAddress.QuadPart, HighAddress.QuadPart,
          SkipBytes.QuadPart, Totalbytes);

   /* SkipBytes must be a multiple of the page size */
   if ((SkipBytes.QuadPart % PAGE_SIZE) != 0)
   {
      DPRINT1("Warning: SkipBytes is not a multiple of PAGE_SIZE\n");
      return NULL;
   }

   /* Allocate memory for the MDL */
   Mdl = MmCreateMdl(NULL, 0, Totalbytes);
   if (Mdl == NULL)
   {
      return NULL;
   }

   /* Allocate pages into the MDL */
   NumberOfPagesAllocated = 0;
   NumberOfPagesWanted = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
   Pages = (PPFN_TYPE)(Mdl + 1);
   while (NumberOfPagesWanted > 0)
   {
      Ret = MmAllocPagesSpecifyRange(
         MC_NPPOOL,
         LowAddress,
         HighAddress,
         NumberOfPagesWanted,
         Pages + NumberOfPagesAllocated);
      if (Ret == (ULONG)-1)
         break;

      NumberOfPagesAllocated += Ret;
      NumberOfPagesWanted -= Ret;

      if (SkipBytes.QuadPart == 0)
         break;
      LowAddress.QuadPart += SkipBytes.QuadPart;
      HighAddress.QuadPart += SkipBytes.QuadPart;
   }

   if (NumberOfPagesAllocated == 0)
   {
      ExFreePool(Mdl);
      Mdl = NULL;
   }
   else if (NumberOfPagesWanted > 0)
   {
      Mdl->ByteCount = (ULONG)(NumberOfPagesAllocated * PAGE_SIZE);
      /* FIXME: I don't know if Mdl->Size should also be changed -- blight */
   }
   return Mdl;
}


/*
 * @implemented
 */
VOID STDCALL
MmFreePagesFromMdl ( IN PMDL Mdl )
{
   /*
   Drivers use MmFreePagesFromMdl, the kernel-mode equivalent of
   FreeUserPhysicalPages, to free the physical memory they have allocated
   with MmAllocatePagesForMdl. This function is also prototyped in ntddk.h.

   Note that a driver is responsible for deallocating the MDL returned by
   MmAllocatePagesForMdl with a call to ExFreePool, since MmFreePagesFromMdl
   does not free the MDL.

   Konstantin Gusev

   */
   PPFN_TYPE Pages;
   LONG NumberOfPages;

   NumberOfPages = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
   Pages = (PPFN_TYPE)(Mdl + 1);

   while (--NumberOfPages >= 0)
   {
      MmDereferencePage(Pages[NumberOfPages]);
   }
}
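
/*
 * Life-cycle sketch tying the two routines above together (values are
 * hypothetical; error handling abbreviated):
 *
 *    LARGE_INTEGER Low, High, Skip;
 *    Low.QuadPart  = 0;
 *    High.QuadPart = 0xFFFFFFFF;      // any page below 4 GB
 *    Skip.QuadPart = 0;
 *    Mdl = MmAllocatePagesForMdl(Low, High, Skip, 64 * PAGE_SIZE);
 *    if (Mdl != NULL)
 *    {
 *       Base = MmMapLockedPagesSpecifyCache(Mdl, KernelMode, MmCached,
 *                                           NULL, FALSE, NormalPagePriority);
 *       ... use Base ...
 *       MmUnmapLockedPages(Base, Mdl);
 *       MmFreePagesFromMdl(Mdl);
 *       ExFreePool(Mdl);              // the MDL itself is not freed above
 *    }
 */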


/*
 * @implemented
 */
PVOID STDCALL
MmMapLockedPagesSpecifyCache ( IN PMDL Mdl,
                               IN KPROCESSOR_MODE AccessMode,
                               IN MEMORY_CACHING_TYPE CacheType,
                               IN PVOID BaseAddress,
                               IN ULONG BugCheckOnFailure,
                               IN MM_PAGE_PRIORITY Priority)
{
   PVOID Base;
   PULONG MdlPages;
   KIRQL oldIrql;
   ULONG PageCount;
   ULONG StartingOffset;
   PEPROCESS CurrentProcess;
   NTSTATUS Status;
   ULONG Protect;

   DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
          "BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
          Mdl, AccessMode, CacheType, BaseAddress, BugCheckOnFailure, Priority);

   /* FIXME: Implement Priority */
   (void) Priority;

   /* Calculate the number of pages required. */
   PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;

   if (AccessMode == UserMode)
   {
      MEMORY_AREA *Result;
      LARGE_INTEGER BoundaryAddressMultiple;
      NTSTATUS Status;

      /* pretty sure you can't map partial MDLs to user space */
      ASSERT(!(Mdl->MdlFlags & MDL_PARTIAL));

      BoundaryAddressMultiple.QuadPart = 0;
      Base = BaseAddress;

      CurrentProcess = PsGetCurrentProcess();

      MmLockAddressSpace(&CurrentProcess->AddressSpace);
      Status = MmCreateMemoryArea(CurrentProcess,
                                  &CurrentProcess->AddressSpace,
                                  MEMORY_AREA_MDL_MAPPING,
                                  &Base,
                                  PageCount * PAGE_SIZE,
                                  0, /* PAGE_READWRITE? */
                                  &Result,
                                  (Base != NULL),
                                  FALSE,
                                  BoundaryAddressMultiple);
      MmUnlockAddressSpace(&CurrentProcess->AddressSpace);
      if (!NT_SUCCESS(Status))
      {
         if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
         {
            return NULL;
         }

         /* Throw exception */
         ExRaiseStatus(STATUS_ACCESS_VIOLATION);
         ASSERT(0);
      }

      Mdl->Process = CurrentProcess;
   }
   else /* if (AccessMode == KernelMode) */
   {
      /* can't map an mdl twice */
      ASSERT(!(Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL_HAS_BEEN_MAPPED)));
      /* can't map an mdl built from nonpaged pool into kernel space */
      ASSERT(!(Mdl->MdlFlags & (MDL_SOURCE_IS_NONPAGED_POOL)));

      CurrentProcess = NULL;

      /* Allocate that number of pages from the mdl mapping region. */
      KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);

      StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, PageCount, MiMdlMappingRegionHint);

      if (StartingOffset == 0xffffffff)
      {
         KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

         DPRINT1("Out of MDL mapping space\n");

         if ((Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) || !BugCheckOnFailure)
         {
            return NULL;
         }

         KEBUGCHECK(0);
      }

      Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);

      if (MiMdlMappingRegionHint == StartingOffset)
      {
         MiMdlMappingRegionHint += PageCount;
      }

      KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

      Mdl->Process = NULL;
   }

   /* Set the virtual mappings for the MDL pages. */
   MdlPages = (PULONG)(Mdl + 1);

   Protect = PAGE_READWRITE;
   if (CacheType == MmNonCached)
      Protect |= PAGE_NOCACHE;
   else if (CacheType == MmWriteCombined)
      DPRINT("CacheType MmWriteCombined not supported!\n");
   if (Mdl->MdlFlags & MDL_IO_SPACE)
      Status = MmCreateVirtualMappingUnsafe(CurrentProcess,
                                            Base,
                                            Protect,
                                            MdlPages,
                                            PageCount);
   else
      Status = MmCreateVirtualMapping(CurrentProcess,
                                      Base,
                                      Protect,
                                      MdlPages,
                                      PageCount);
   if (!NT_SUCCESS(Status))
   {
      DbgPrint("Unable to create virtual mapping\n");
      if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
      {
         return NULL;
      }
      if (AccessMode == UserMode)
      {
         /* Throw exception */
         ExRaiseStatus(STATUS_ACCESS_VIOLATION);
         ASSERT(0);
      }
      else /* AccessMode == KernelMode */
      {
         if (!BugCheckOnFailure)
            return NULL;

         /* FIXME: Use some bugcheck code instead of 0 */
         KEBUGCHECK(0);
      }
   }

   /* Mark the MDL as having been mapped. */
   if (AccessMode == KernelMode)
   {
      if (Mdl->MdlFlags & MDL_PARTIAL)
      {
         Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
      }
      else
      {
         Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
      }
      Mdl->MappedSystemVa = (char*)Base + Mdl->ByteOffset;
   }
   else
      DPRINT1("UserMode mapping - returning 0x%x\n", (ULONG)Base + Mdl->ByteOffset);

   return((char*)Base + Mdl->ByteOffset);
}
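
/*
 * Sketch of a non-cached kernel mapping of an already locked MDL (error
 * handling abbreviated; with BugCheckOnFailure == FALSE the call above
 * returns NULL instead of bugchecking when the mapping region is full):
 *
 *    Va = MmMapLockedPagesSpecifyCache(Mdl, KernelMode, MmNonCached,
 *                                      NULL, FALSE, NormalPagePriority);
 *    if (Va == NULL)
 *       return STATUS_INSUFFICIENT_RESOURCES;
 *    ... access the buffer through Va ...
 *    MmUnmapLockedPages(Va, Mdl);
 */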


/*
 * @implemented
 */
PVOID STDCALL
MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
/*
 * FUNCTION: Maps the physical pages described by a given MDL
 * ARGUMENTS:
 *       Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
 *             MmAllocatePagesForMdl or IoBuildPartialMdl.
 *       AccessMode = Specifies the portion of the address space to map the
 *                    pages into.
 * RETURNS: The base virtual address that maps the locked pages for the
 * range described by the MDL
 *
 * If mapping into user space, the pages are mapped into the current address
 * space.
 */
{
   return MmMapLockedPagesSpecifyCache(Mdl,
                                       AccessMode,
                                       MmCached,
                                       NULL,
                                       TRUE,
                                       NormalPagePriority);
}
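
/*
 * For reference, the familiar MmGetSystemAddressForMdlSafe wrapper reduces
 * to a flags check plus a call into MmMapLockedPagesSpecifyCache above
 * (a sketch of the well-known macro, not a definition from this file):
 *
 *    SystemVa = (Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
 *                                 MDL_SOURCE_IS_NONPAGED_POOL)) ?
 *               Mdl->MappedSystemVa :
 *               MmMapLockedPagesSpecifyCache(Mdl, KernelMode, MmCached,
 *                                            NULL, FALSE, NormalPagePriority);
 */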


/* EOF */