/* $Id$
 *
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/mdl.c
 * PURPOSE:         Manipulates MDLs
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */

/* INCLUDES ****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <internal/debug.h>

/* GLOBALS *******************************************************************/

#define TAG_MDL    TAG('M', 'D', 'L', ' ')

#define MI_MDL_MAPPING_REGION_SIZE       (256*1024*1024)

static PVOID MiMdlMappingRegionBase = NULL;
static RTL_BITMAP MiMdlMappingRegionAllocMap;
static ULONG MiMdlMappingRegionHint;
static KSPIN_LOCK MiMdlMappingRegionLock;
extern ULONG MmPageArraySize;

/*
MDL flags description:

MDL_PAGES_LOCKED             MmProbeAndLockPages has been called for this mdl
MDL_SOURCE_IS_NONPAGED_POOL  mdl has been built by MmBuildMdlForNonPagedPool
MDL_PARTIAL                  mdl has been built by IoBuildPartialMdl
MDL_MAPPING_CAN_FAIL         in case of an error, MmMapLockedPages will return NULL instead of bugchecking
MDL_MAPPED_TO_SYSTEM_VA      mdl has been mapped into kernel space using MmMapLockedPages
MDL_PARTIAL_HAS_BEEN_MAPPED  mdl flagged MDL_PARTIAL has been mapped into kernel space using MmMapLockedPages
*/
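
/*
 * Typical flag lifecycle, as a hedged usage sketch (the driver-side calls
 * below are illustrative only and not part of this file; UserBuffer and
 * Length are hypothetical):
 *
 *    Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
 *    MmProbeAndLockPages(Mdl, UserMode, IoReadAccess);    -> MDL_PAGES_LOCKED
 *    Va = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
 *                                                  -> MDL_MAPPED_TO_SYSTEM_VA
 *    ... access the buffer through Va ...
 *    MmUnlockPages(Mdl);     unmaps if needed, clears MDL_PAGES_LOCKED
 *    IoFreeMdl(Mdl);
 */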

/* FUNCTIONS *****************************************************************/


/*
 * @unimplemented
 */
NTSTATUS
STDCALL
MmAdvanceMdl(
    IN PMDL Mdl,
    IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}


VOID
INIT_FUNCTION
NTAPI
MmInitializeMdlImplementation(VOID)
{
    MEMORY_AREA* Result;
    NTSTATUS Status;
    PVOID Buffer;
    PHYSICAL_ADDRESS BoundaryAddressMultiple;

    BoundaryAddressMultiple.QuadPart = 0;
    MiMdlMappingRegionHint = 0;
    MiMdlMappingRegionBase = NULL;

    MmLockAddressSpace(MmGetKernelAddressSpace());
    Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
                                MEMORY_AREA_MDL_MAPPING,
                                &MiMdlMappingRegionBase,
                                MI_MDL_MAPPING_REGION_SIZE,
                                PAGE_READWRITE,
                                &Result,
                                FALSE,
                                0,
                                BoundaryAddressMultiple);
    if (!NT_SUCCESS(Status))
    {
        MmUnlockAddressSpace(MmGetKernelAddressSpace());
        KEBUGCHECK(0);
    }
    MmUnlockAddressSpace(MmGetKernelAddressSpace());

    Buffer = ExAllocatePoolWithTag(NonPagedPool,
                                   MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8),
                                   TAG_MDL);

    RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
    RtlClearAllBits(&MiMdlMappingRegionAllocMap);

    KeInitializeSpinLock(&MiMdlMappingRegionLock);
}


PVOID
NTAPI
MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
{
    PPFN_NUMBER MdlPages;

    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    return ((PVOID)MdlPages[((ULONG_PTR)Offset) / PAGE_SIZE]);
}


/*
 * @implemented
 */
VOID STDCALL
MmUnlockPages(PMDL Mdl)
/*
 * FUNCTION: Unlocks the physical pages described by a given MDL
 * ARGUMENTS:
 *      MemoryDescriptorList = MDL describing the buffer to be unlocked
 * NOTES: The memory described by the specified MDL must have been locked
 * previously by a call to MmProbeAndLockPages. As the pages are unlocked,
 * the MDL is updated.
 *
 * May be called in any process context.
 */
{
    ULONG i;
    PPFN_NUMBER MdlPages;
    PFN_NUMBER Page;

    /*
     * MmProbeAndLockPages MUST have been called to lock this mdl!
     *
     * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
     * locked with MmProbeAndLockPages, but (for now) we'll be more forgiving...
     */
    if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
    {
        DPRINT1("MmUnlockPages called for non-locked mdl!\n");
        return;
    }

    /* If the mdl buffer is mapped I/O space -> do nothing */
    if (Mdl->MdlFlags & MDL_IO_SPACE)
    {
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    /* Automagically undo any calls to MmGetSystemAddressForMdl for this mdl */
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    /*
     * FIXME: I don't know whether this is right, but it looks sensible
     */
    if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
        (Mdl->MdlFlags & MDL_IO_PAGE_READ))
    {
        return;
    }


    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    for (i = 0; i < (PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE); i++)
    {
        Page = MdlPages[i];
        MmUnlockPage(Page);
        MmDereferencePage(Page);
    }

    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
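
/*
 * Caller-side cleanup sketch (hypothetical driver code, shown only to
 * illustrate the contract): unlock only when the probe actually succeeded,
 * then free the MDL itself separately.
 *
 *    if (Mdl->MdlFlags & MDL_PAGES_LOCKED)
 *    {
 *        MmUnlockPages(Mdl);
 *    }
 *    IoFreeMdl(Mdl);
 */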


/*
 * @unimplemented
 */
PVOID
STDCALL
MmMapLockedPagesWithReservedMapping(
    IN PVOID MappingAddress,
    IN ULONG PoolTag,
    IN PMDL MemoryDescriptorList,
    IN MEMORY_CACHING_TYPE CacheType)
{
    UNIMPLEMENTED;
    return 0;
}


/*
 * @implemented
 */
VOID STDCALL
MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
/*
 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
 * ARGUMENTS:
 *         BaseAddress = Base virtual address to which the pages were mapped
 *         MemoryDescriptorList = MDL describing the mapped pages
 *
 * User space unmappings _must_ be done from the original process context!
 */
{
    KIRQL oldIrql;
    ULONG i;
    ULONG PageCount;
    ULONG Base;

    DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress, Mdl);

    /*
     * In this case, the MDL has the same system address as the base address,
     * so there is no need to free it
     */
    if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) &&
        (BaseAddress >= MmSystemRangeStart))
    {
        return;
    }


    /* Calculate the number of pages we mapped. */
    PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;

    /*
     * The docs say that BaseAddress should be a _base_ address, but every
     * example I've seen passes the actual address. -Gunnar
     */
    BaseAddress = PAGE_ALIGN(BaseAddress);

    /* Unmap all the pages. */
    for (i = 0; i < PageCount; i++)
    {
        MmDeleteVirtualMapping(Mdl->Process,
                               (char*)BaseAddress + (i * PAGE_SIZE),
                               FALSE,
                               NULL,
                               NULL);
    }

    if (BaseAddress >= MmSystemRangeStart)
    {
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
        /* Deallocate all the pages used. */
        Base = (ULONG)((char*)BaseAddress - (char*)MiMdlMappingRegionBase) / PAGE_SIZE;

        RtlClearBits(&MiMdlMappingRegionAllocMap, Base, PageCount);

        MiMdlMappingRegionHint = min(MiMdlMappingRegionHint, Base);

        KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

        /* Reset the MDL state. */
        Mdl->MdlFlags &= ~MDL_MAPPED_TO_SYSTEM_VA;
        Mdl->MappedSystemVa = NULL;
    }
    else
    {
        MEMORY_AREA *Marea;

        ASSERT(Mdl->Process == PsGetCurrentProcess());

        Marea = MmLocateMemoryAreaByAddress(&Mdl->Process->AddressSpace, BaseAddress);
        if (Marea == NULL)
        {
            DPRINT1("Couldn't open memory area when unmapping user-space pages!\n");
            KEBUGCHECK(0);
        }

        MmFreeMemoryArea(&Mdl->Process->AddressSpace, Marea, NULL, NULL);

        Mdl->Process = NULL;
    }
}
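
/*
 * Map/unmap pairing sketch (illustrative; MyMdl is hypothetical). Note that,
 * per the comment above, the address handed back may be the one returned by
 * MmMapLockedPages rather than a page-aligned base:
 *
 *    PVOID Va = MmMapLockedPages(MyMdl, KernelMode);
 *    ... use Va ...
 *    MmUnmapLockedPages(Va, MyMdl);
 */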


/*
 * @unimplemented
 */
VOID
STDCALL
MmUnmapReservedMapping(
    IN PVOID BaseAddress,
    IN ULONG PoolTag,
    IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}


VOID
NTAPI
MmBuildMdlFromPages(PMDL Mdl, PPFN_TYPE Pages)
{
    memcpy(Mdl + 1, Pages, sizeof(PFN_TYPE) * (PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE));

    /* FIXME: this flag should be set by the caller perhaps? */
    Mdl->MdlFlags |= MDL_IO_PAGE_READ;
}


/*
 * @unimplemented
 */
NTSTATUS
STDCALL
MmPrefetchPages(
    IN ULONG NumberOfLists,
    IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}


/*
 * @unimplemented
 */
NTSTATUS
STDCALL
MmProtectMdlSystemAddress(
    IN PMDL MemoryDescriptorList,
    IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}


/*
 * @implemented
 */
VOID STDCALL MmProbeAndLockPages(PMDL Mdl,
                                 KPROCESSOR_MODE AccessMode,
                                 LOCK_OPERATION Operation)
/*
 * FUNCTION: Probes the specified pages, makes them resident and locks them
 * ARGUMENTS:
 *          Mdl = MDL to probe
 *          AccessMode = Access at which to probe the buffer
 *          Operation = Operation to probe for
 *
 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool,
 * to be used when you know that the mdl address is paged memory or when you
 * don't know where the mdl address comes from. MmProbeAndLockPages will
 * work no matter what kind of mdl address you have.
 */
{
    PPFN_TYPE MdlPages;
    ULONG i, j;
    ULONG NrPages;
    NTSTATUS Status;
    KPROCESSOR_MODE Mode;
    PFN_TYPE Page;
    PEPROCESS CurrentProcess = PsGetCurrentProcess();
    PMADDRESS_SPACE AddressSpace;

    DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);

    ASSERT(!(Mdl->MdlFlags & (MDL_PAGES_LOCKED|MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL|
                              MDL_IO_SPACE|MDL_SOURCE_IS_NONPAGED_POOL)));

    MdlPages = (PPFN_TYPE)(Mdl + 1);
    NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;

    /* The mdl must have enough page entries */
    ASSERT(NrPages <= (Mdl->Size - sizeof(MDL)) / sizeof(PFN_TYPE));


    if (Mdl->StartVa >= MmSystemRangeStart &&
        MmGetPfnForProcess(NULL, Mdl->StartVa) >= MmPageArraySize)
    {
        /* The physical address is not physical memory, so this must be I/O memory */

        for (i = 0; i < NrPages; i++)
        {
            MdlPages[i] = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i*PAGE_SIZE));
        }

        Mdl->MdlFlags |= MDL_PAGES_LOCKED|MDL_IO_SPACE;
        return;
    }


    if (Mdl->StartVa >= MmSystemRangeStart)
    {
        /* FIXME: why isn't AccessMode used? */
        Mode = KernelMode;
        Mdl->Process = NULL;
        AddressSpace = MmGetKernelAddressSpace();
    }
    else
    {
        /* FIXME: why isn't AccessMode used? */
        Mode = UserMode;
        Mdl->Process = CurrentProcess;
        AddressSpace = &CurrentProcess->AddressSpace;
    }


    /*
     * Lock the pages
     */
    MmLockAddressSpace(AddressSpace);

    for (i = 0; i < NrPages; i++)
    {
        PVOID Address;

        Address = (char*)Mdl->StartVa + (i*PAGE_SIZE);

        /*
         * FIXME: skip the probing/access stuff if buffer is nonpaged kernel space?
         * -Gunnar
         */

        if (!MmIsPagePresent(NULL, Address))
        {
            Status = MmNotPresentFault(Mode, (ULONG_PTR)Address, TRUE);
            if (!NT_SUCCESS(Status))
            {
                for (j = 0; j < i; j++)
                {
                    Page = MdlPages[j];
                    if (Page < MmPageArraySize)
                    {
                        MmUnlockPage(Page);
                        MmDereferencePage(Page);
                    }
                }
                MmUnlockAddressSpace(AddressSpace);
                ExRaiseStatus(STATUS_ACCESS_VIOLATION);
            }
        }
        else
        {
            MmLockPage(MmGetPfnForProcess(NULL, Address));
        }

        if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
            (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
        {
            Status = MmAccessFault(Mode, (ULONG_PTR)Address, TRUE);
            if (!NT_SUCCESS(Status))
            {
                for (j = 0; j < i; j++)
                {
                    Page = MdlPages[j];
                    if (Page < MmPageArraySize)
                    {
                        MmUnlockPage(Page);
                        MmDereferencePage(Page);
                    }
                }
                MmUnlockAddressSpace(AddressSpace);
                ExRaiseStatus(STATUS_ACCESS_VIOLATION);
            }
        }
        Page = MmGetPfnForProcess(NULL, Address);
        MdlPages[i] = Page;
        if (Page >= MmPageArraySize)
            Mdl->MdlFlags |= MDL_IO_SPACE;
        else
            MmReferencePage(Page);
    }

    MmUnlockAddressSpace(AddressSpace);
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;
}
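
/*
 * Canonical caller sketch for user buffers (hypothetical driver code):
 * because MmProbeAndLockPages raises STATUS_ACCESS_VIOLATION on failure,
 * callers are expected to wrap it in SEH:
 *
 *    Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
 *    __try
 *    {
 *        MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
 *    }
 *    __except (EXCEPTION_EXECUTE_HANDLER)
 *    {
 *        IoFreeMdl(Mdl);
 *        return GetExceptionCode();
 *    }
 */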


/*
 * @unimplemented
 */
VOID
STDCALL
MmProbeAndLockProcessPages(
    IN OUT PMDL MemoryDescriptorList,
    IN PEPROCESS Process,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}


/*
 * @unimplemented
 */
VOID
STDCALL
MmProbeAndLockSelectedPages(
    IN OUT PMDL MemoryDescriptorList,
    IN LARGE_INTEGER PageList[],
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}


/*
 * @implemented
 */
ULONG STDCALL
MmSizeOfMdl(PVOID Base,
            ULONG Length)
/*
 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
 * the given address range
 * ARGUMENTS:
 *         Base = base virtual address
 *         Length = number of bytes to map
 */
{
    ULONG len;

    len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length);

    return (sizeof(MDL) + (len * sizeof(PFN_TYPE)));
}
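
/*
 * Worked example (assuming 4kB pages): for Base = 0x12340FF0 and
 * Length = 0x2000, the span covers bytes 0xFF0 through 0x2FF0 relative to
 * the start of the first page, so ADDRESS_AND_SIZE_TO_SPAN_PAGES yields 3
 * and MmSizeOfMdl(Base, Length) == sizeof(MDL) + 3 * sizeof(PFN_TYPE).
 */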


/*
 * @implemented
 */
VOID STDCALL
MmBuildMdlForNonPagedPool(PMDL Mdl)
/*
 * FUNCTION: Fills in the corresponding physical page array of a given
 * MDL for a buffer in nonpaged system space
 * ARGUMENTS:
 *        Mdl = Points to an MDL that supplies a virtual address,
 *              byte offset and length
 *
 * This function can be seen as a fast version of MmProbeAndLockPages in case
 * you _know_ that the mdl address is within nonpaged kernel space.
 */
{
    ULONG i;
    ULONG PageCount;
    PPFN_TYPE MdlPages;

    /*
     * The mdl buffer must (at least) be in kernel space, though this doesn't
     * necessarily mean that the buffer is within _nonpaged_ kernel space...
     */
    ASSERT(Mdl->StartVa >= MmSystemRangeStart);

    PageCount = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
    MdlPages = (PPFN_TYPE)(Mdl + 1);

    /* The mdl must have enough page entries */
    ASSERT(PageCount <= (Mdl->Size - sizeof(MDL)) / sizeof(PFN_TYPE));

    for (i = 0; i < PageCount; i++)
    {
        *MdlPages++ = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i * PAGE_SIZE));
    }

    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
    Mdl->Process = NULL;
    Mdl->MappedSystemVa = (char*)Mdl->StartVa + Mdl->ByteOffset;
}
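
/*
 * Usage sketch (Buffer, Length and Tag are hypothetical): for memory known
 * to live in nonpaged pool, no probing, locking or later MmUnlockPages call
 * is needed:
 *
 *    Buffer = ExAllocatePoolWithTag(NonPagedPool, Length, Tag);
 *    Mdl = IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL);
 *    MmBuildMdlForNonPagedPool(Mdl);
 *    now Mdl->MappedSystemVa is already a valid system-space address
 */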


/*
 * @implemented
 */
PMDL STDCALL
MmCreateMdl(PMDL Mdl,
            PVOID Base,
            ULONG Length)
/*
 * FUNCTION: Allocates and initializes an MDL
 * ARGUMENTS:
 *          MemoryDescriptorList = Points to the MDL to initialize. If this is
 *          NULL then one is allocated
 *          Base = Base virtual address of the buffer
 *          Length = Length in bytes of the buffer
 * RETURNS: A pointer to the initialized MDL
 */
{
    if (Mdl == NULL)
    {
        ULONG Size;

        Size = MmSizeOfMdl(Base, Length);
        Mdl = (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (Mdl == NULL)
        {
            return (NULL);
        }
    }

    MmInitializeMdl(Mdl, (char*)Base, Length);

    return (Mdl);
}
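
/*
 * Usage sketch: either call below yields a comparable initialized (but not
 * yet probed or locked) MDL for Buffer/Length; newer code generally prefers
 * IoAllocateMdl, which can also attach the MDL to an IRP:
 *
 *    Mdl = MmCreateMdl(NULL, Buffer, Length);
 *    Mdl = IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL);
 */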


/*
 * @unimplemented
 */
VOID STDCALL
MmMapMemoryDumpMdl(PVOID Unknown0)
/*
 * FIXME: Has something to do with crash dumps. Do we want to implement
 * this?
 */
{
    UNIMPLEMENTED;
}


/*
 * @implemented
 */
PMDL STDCALL
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T Totalbytes)
{
    /*
       MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory
       pages for an MDL.

       It searches the PFN database for free, zeroed or standby pages,
       allocates them and puts them into the MDL. It does not map the pages;
       that is the caller's responsibility. It was designed to be used by an
       AGP driver.

       LowAddress is the lowest acceptable physical address to allocate from
       and HighAddress is the highest. SkipBytes is the number of bytes that
       the kernel should keep free above LowAddress and below the address at
       which it starts to allocate physical memory. TotalBytes is the number
       of bytes that the driver wants to allocate. The return value of the
       function is an MDL that, if non-NULL, describes the physical memory
       the kernel has given the driver. To access portions of the memory the
       driver must create sub-MDLs from the returned MDL that describe
       appropriate portions of the physical memory. When a driver wants to
       access physical memory described by a sub-MDL it must map the sub-MDL
       using MmGetSystemAddressForMdlSafe.

       Konstantin Gusev
    */

    PMDL Mdl;
    PPFN_TYPE Pages;
    ULONG NumberOfPagesWanted, NumberOfPagesAllocated;
    ULONG Ret;

    DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
           "SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
           LowAddress.QuadPart, HighAddress.QuadPart,
           SkipBytes.QuadPart, Totalbytes);

    /* SkipBytes must be a multiple of the page size */
    if ((SkipBytes.QuadPart % PAGE_SIZE) != 0)
    {
        DPRINT1("Warning: SkipBytes is not a multiple of PAGE_SIZE\n");
        return NULL;
    }

    /* Allocate memory for the MDL */
    Mdl = MmCreateMdl(NULL, 0, Totalbytes);
    if (Mdl == NULL)
    {
        return NULL;
    }

    /* Allocate pages into the MDL */
    NumberOfPagesAllocated = 0;
    NumberOfPagesWanted = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
    Pages = (PPFN_TYPE)(Mdl + 1);
    while (NumberOfPagesWanted > 0)
    {
        Ret = MmAllocPagesSpecifyRange(MC_NPPOOL,
                                       LowAddress,
                                       HighAddress,
                                       NumberOfPagesWanted,
                                       Pages + NumberOfPagesAllocated);
        if (Ret == (ULONG)-1)
            break;

        NumberOfPagesAllocated += Ret;
        NumberOfPagesWanted -= Ret;

        if (SkipBytes.QuadPart == 0)
            break;
        LowAddress.QuadPart += SkipBytes.QuadPart;
        HighAddress.QuadPart += SkipBytes.QuadPart;
    }

    if (NumberOfPagesAllocated == 0)
    {
        ExFreePool(Mdl);
        Mdl = NULL;
    }
    else if (NumberOfPagesWanted > 0)
    {
        Mdl->ByteCount = (ULONG)(NumberOfPagesAllocated * PAGE_SIZE);
        /* FIXME: I don't know if Mdl->Size should also be changed -- blight */
    }
    return Mdl;
}


/*
 * @implemented
 */
VOID STDCALL
MmFreePagesFromMdl(IN PMDL Mdl)
{
    /*
       Drivers use MmFreePagesFromMdl, the kernel-mode equivalent of
       FreeUserPhysicalPages, to free physical memory they have allocated
       with MmAllocatePagesForMdl. This function is also prototyped in
       ntddk.h.

       Note that a driver is responsible for deallocating the MDL returned
       by MmAllocatePagesForMdl with a call to ExFreePool, since
       MmFreePagesFromMdl does not free the MDL.

       Konstantin Gusev
    */
    PPFN_TYPE Pages;
    LONG NumberOfPages;

    NumberOfPages = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
    Pages = (PPFN_TYPE)(Mdl + 1);

    while (--NumberOfPages >= 0)
    {
        MmDereferencePage(Pages[NumberOfPages]);
    }
}
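
/*
 * Allocation lifecycle sketch (hedged; the values are hypothetical): the
 * caller frees the pages with MmFreePagesFromMdl and the MDL with ExFreePool:
 *
 *    PHYSICAL_ADDRESS Low, High, Skip;
 *    Low.QuadPart = 0; High.QuadPart = -1; Skip.QuadPart = 0;
 *    Mdl = MmAllocatePagesForMdl(Low, High, Skip, 64 * PAGE_SIZE);
 *    if (Mdl != NULL)
 *    {
 *        check Mdl->ByteCount: the request may be satisfied only partially
 *        Va = MmGetSystemAddressForMdlSafe(Mdl, NormalPagePriority);
 *        ... use Va, then MmUnmapLockedPages(Va, Mdl) ...
 *        MmFreePagesFromMdl(Mdl);
 *        ExFreePool(Mdl);     the MDL itself is not freed by the call above
 *    }
 */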


/*
 * @implemented
 */
PVOID STDCALL
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PULONG MdlPages;
    KIRQL oldIrql;
    ULONG PageCount;
    ULONG StartingOffset;
    PEPROCESS CurrentProcess;
    NTSTATUS Status;
    ULONG Protect;

    DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
           "BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
           Mdl, AccessMode, CacheType, BaseAddress, BugCheckOnFailure, Priority);

    /* FIXME: Implement Priority */
    (void) Priority;

    Protect = PAGE_READWRITE;
    if (CacheType == MmNonCached)
        Protect |= PAGE_NOCACHE;
    else if (CacheType == MmWriteCombined)
        DPRINT("CacheType MmWriteCombined not supported!\n");

    /* Calculate the number of pages required. */
    PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;

    if (AccessMode != KernelMode)
    {
        MEMORY_AREA *Result;
        LARGE_INTEGER BoundaryAddressMultiple;
        NTSTATUS Status;

        /* Partial MDLs cannot be mapped into user space */
        ASSERT(!(Mdl->MdlFlags & MDL_PARTIAL));

        BoundaryAddressMultiple.QuadPart = 0;
        Base = BaseAddress;

        CurrentProcess = PsGetCurrentProcess();

        MmLockAddressSpace(&CurrentProcess->AddressSpace);
        Status = MmCreateMemoryArea(&CurrentProcess->AddressSpace,
                                    MEMORY_AREA_MDL_MAPPING,
                                    &Base,
                                    PageCount * PAGE_SIZE,
                                    Protect,
                                    &Result,
                                    (Base != NULL),
                                    0,
                                    BoundaryAddressMultiple);
        MmUnlockAddressSpace(&CurrentProcess->AddressSpace);
        if (!NT_SUCCESS(Status))
        {
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
            {
                return NULL;
            }

            /* Throw exception */
            ExRaiseStatus(STATUS_ACCESS_VIOLATION);
            ASSERT(0);
        }

        Mdl->Process = CurrentProcess;
    }
    else /* if (AccessMode == KernelMode) */
    {
        /* Can't map an mdl twice */
        ASSERT(!(Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL_HAS_BEEN_MAPPED)));
        /* Can't map an mdl built from nonpaged pool into kernel space */
        ASSERT(!(Mdl->MdlFlags & (MDL_SOURCE_IS_NONPAGED_POOL)));

        CurrentProcess = NULL;

        /* Allocate that number of pages from the mdl mapping region. */
        KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);

        StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, PageCount, MiMdlMappingRegionHint);

        if (StartingOffset == 0xffffffff)
        {
            KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

            DPRINT1("Out of MDL mapping space\n");

            if ((Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) || !BugCheckOnFailure)
            {
                return NULL;
            }

            KEBUGCHECK(0);
        }

        Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);

        if (MiMdlMappingRegionHint == StartingOffset)
        {
            MiMdlMappingRegionHint += PageCount;
        }

        KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

        Mdl->Process = NULL;
    }

    /* Set the virtual mappings for the MDL pages. */
    MdlPages = (PULONG)(Mdl + 1);

    if (Mdl->MdlFlags & MDL_IO_SPACE)
        Status = MmCreateVirtualMappingUnsafe(CurrentProcess,
                                              Base,
                                              Protect,
                                              MdlPages,
                                              PageCount);
    else
        Status = MmCreateVirtualMapping(CurrentProcess,
                                        Base,
                                        Protect,
                                        MdlPages,
                                        PageCount);
    if (!NT_SUCCESS(Status))
    {
        DbgPrint("Unable to create virtual mapping\n");
        if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
        {
            return NULL;
        }
        if (AccessMode != KernelMode)
        {
            /* Throw exception */
            ExRaiseStatus(STATUS_ACCESS_VIOLATION);
            ASSERT(0);
        }
        else /* AccessMode == KernelMode */
        {
            if (!BugCheckOnFailure)
                return NULL;

            /* FIXME: Use some bugcheck code instead of 0 */
            KEBUGCHECK(0);
        }
    }

    /* Mark the MDL as having been mapped. */
    if (AccessMode == KernelMode)
    {
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }
        else
        {
            Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
        }
        Mdl->MappedSystemVa = (char*)Base + Mdl->ByteOffset;
    }
    else
        DPRINT1("UserMode mapping - returning 0x%x\n", (ULONG)Base + Mdl->ByteOffset);

    return ((char*)Base + Mdl->ByteOffset);
}
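
/*
 * Relationship to MmGetSystemAddressForMdlSafe (a sketch of the DDK macro's
 * effective expansion, not code from this file): it reuses an existing
 * system mapping when one exists, and otherwise calls this function with
 * BugCheckOnFailure = FALSE so that it can return NULL:
 *
 *    (Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA | MDL_SOURCE_IS_NONPAGED_POOL))
 *        ? Mdl->MappedSystemVa
 *        : MmMapLockedPagesSpecifyCache(Mdl, KernelMode, MmCached,
 *                                       NULL, FALSE, Priority)
 */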


/*
 * @implemented
 */
PVOID STDCALL
MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
/*
 * FUNCTION: Maps the physical pages described by a given MDL
 * ARGUMENTS:
 *       Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
 *             MmAllocatePagesForMdl or IoBuildPartialMdl.
 *       AccessMode = Specifies the portion of the address space into which
 *             to map the pages.
 * RETURNS: The base virtual address that maps the locked pages for the
 * range described by the MDL
 *
 * If mapping into user space, the pages are mapped into the current address space.
 */
{
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        NormalPagePriority);
}


/* EOF */