c97638118daa8f9b46d1947f6f716d65140c9fe7
[reactos.git] / reactos / ntoskrnl / mm / mdl.c
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 *
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <internal/debug.h>
16
17 /* GLOBALS *******************************************************************/
18
19 #define TAG_MDL TAG('M', 'D', 'L', ' ')
20
21 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
22
23 static PVOID MiMdlMappingRegionBase = NULL;
24 static RTL_BITMAP MiMdlMappingRegionAllocMap;
25 static ULONG MiMdlMappingRegionHint;
26 static KSPIN_LOCK MiMdlMappingRegionLock;
27 extern ULONG MmPageArraySize;
28
29 /*
30 MDL Flags desc.
31
32 MDL_PAGES_LOCKED MmProbeAndLockPages has been called for this mdl
33 MDL_SOURCE_IS_NONPAGED_POOL mdl has been built by MmBuildMdlForNonPagedPool
34 MDL_PARTIAL mdl has been built by IoBuildPartialMdl
35 MDL_MAPPING_CAN_FAIL in case of an error, MmMapLockedPages will return NULL instead of bugchecking
36 MDL_MAPPED_TO_SYSTEM_VA mdl has been mapped into kernel space using MmMapLockedPages
37 MDL_PARTIAL_HAS_BEEN_MAPPED mdl flagged MDL_PARTIAL has been mapped into kernel space using MmMapLockedPages
38 */
39
40 /* FUNCTIONS *****************************************************************/
41
42
43 /*
44 * @unimplemented
45 */
46 NTSTATUS
47 STDCALL
48 MmAdvanceMdl (
49 IN PMDL Mdl,
50 IN ULONG NumberOfBytes
51 )
52 {
53 UNIMPLEMENTED;
54 return STATUS_NOT_IMPLEMENTED;
55 }
56
57
58 VOID
59 INIT_FUNCTION
60 NTAPI
61 MmInitializeMdlImplementation(VOID)
62 {
63 MEMORY_AREA* Result;
64 NTSTATUS Status;
65 PVOID Buffer;
66 PHYSICAL_ADDRESS BoundaryAddressMultiple;
67
68 BoundaryAddressMultiple.QuadPart = 0;
69 MiMdlMappingRegionHint = 0;
70 MiMdlMappingRegionBase = NULL;
71
72 MmLockAddressSpace(MmGetKernelAddressSpace());
73 Status = MmCreateMemoryArea(NULL,
74 MmGetKernelAddressSpace(),
75 MEMORY_AREA_MDL_MAPPING,
76 &MiMdlMappingRegionBase,
77 MI_MDL_MAPPING_REGION_SIZE,
78 0,
79 &Result,
80 FALSE,
81 FALSE,
82 BoundaryAddressMultiple);
83 if (!NT_SUCCESS(Status))
84 {
85 MmUnlockAddressSpace(MmGetKernelAddressSpace());
86 KEBUGCHECK(0);
87 }
88 MmUnlockAddressSpace(MmGetKernelAddressSpace());
89
90 Buffer = ExAllocatePoolWithTag(NonPagedPool,
91 MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8),
92 TAG_MDL);
93
94 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
95 RtlClearAllBits(&MiMdlMappingRegionAllocMap);
96
97 KeInitializeSpinLock(&MiMdlMappingRegionLock);
98 }
99
100
PVOID
NTAPI
MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
/*
 * FUNCTION: Looks up the page-array entry covering the given byte offset
 * into the buffer described by the MDL.
 * NOTE(review): the array after the MDL header holds PFNs, so the value
 * returned is a page frame number cast to PVOID, not a virtual address;
 * Mdl->ByteOffset is also not folded into the lookup -- confirm this is
 * what callers expect.
 */
{
   PPFN_NUMBER MdlPages;

   /* The PFN array is stored immediately after the MDL header. */
   MdlPages = (PPFN_NUMBER)(Mdl + 1);

   return((PVOID)MdlPages[((ULONG_PTR)Offset) / PAGE_SIZE]);
}
111
112
113 /*
114 * @implemented
115 */
116 VOID STDCALL
117 MmUnlockPages(PMDL Mdl)
118 /*
119 * FUNCTION: Unlocks the physical pages described by a given MDL
120 * ARGUMENTS:
121 * MemoryDescriptorList = MDL describing the buffer to be unlocked
122 * NOTES: The memory described by the specified MDL must have been locked
123 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
124 * MDL is updated
125 *
126 * May be called in any process context.
127 */
128 {
129 ULONG i;
130 PPFN_NUMBER MdlPages;
131 PFN_NUMBER Page;
132
133 /*
134 * MmProbeAndLockPages MUST have been called to lock this mdl!
135 *
136 * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
137 * locked with MmLockAndProbePages, but (for now) we'll be more forgiving...
138 */
139 if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
140 {
141 DPRINT1("MmUnlockPages called for non-locked mdl!\n");
142 return;
143 }
144
145 /* If mdl buffer is mapped io space -> do nothing */
146 if (Mdl->MdlFlags & MDL_IO_SPACE)
147 {
148 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
149 return;
150 }
151
152 /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
153 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
154 {
155 MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
156 }
157
158 /*
159 * FIXME: I don't know whether this right, but it looks sensible
160 */
161 if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
162 (Mdl->MdlFlags & MDL_IO_PAGE_READ))
163 {
164 return;
165 }
166
167
168 MdlPages = (PPFN_NUMBER)(Mdl + 1);
169 for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGE_SIZE); i++)
170 {
171 Page = MdlPages[i];
172 MmUnlockPage(Page);
173 MmDereferencePage(Page);
174 }
175
176 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
177 }
178
179
180 /*
181 * @unimplemented
182 */
183 PVOID
184 STDCALL
185 MmMapLockedPagesWithReservedMapping (
186 IN PVOID MappingAddress,
187 IN ULONG PoolTag,
188 IN PMDL MemoryDescriptorList,
189 IN MEMORY_CACHING_TYPE CacheType
190 )
191 {
192 UNIMPLEMENTED;
193 return 0;
194 }
195
196
197 /*
198 * @implemented
199 */
200 VOID STDCALL
201 MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
202 /*
203 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
204 * ARGUMENTS:
205 * BaseAddress = Base virtual address to which the pages were mapped
206 * MemoryDescriptorList = MDL describing the mapped pages
207 *
208 * User space unmappings _must_ be done from the original process context!
209 */
210 {
211 KIRQL oldIrql;
212 ULONG i;
213 ULONG PageCount;
214 ULONG Base;
215
216 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress, Mdl);
217
218 /*
219 * In this case, the MDL has the same system address as the base address
220 * so there is no need to free it
221 */
222 if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) &&
223 (BaseAddress >= MmSystemRangeStart))
224 {
225 return;
226 }
227
228
229 /* Calculate the number of pages we mapped. */
230 PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
231
232 /*
233 * Docs says that BaseAddress should be a _base_ address, but every example
234 * I've seen pass the actual address. -Gunnar
235 */
236 BaseAddress = PAGE_ALIGN(BaseAddress);
237
238 /* Unmap all the pages. */
239 for (i = 0; i < PageCount; i++)
240 {
241 MmDeleteVirtualMapping(Mdl->Process,
242 (char*)BaseAddress + (i * PAGE_SIZE),
243 FALSE,
244 NULL,
245 NULL);
246 }
247
248 if (BaseAddress >= MmSystemRangeStart)
249 {
250 ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
251
252 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
253 /* Deallocate all the pages used. */
254 Base = (ULONG)((char*)BaseAddress - (char*)MiMdlMappingRegionBase) / PAGE_SIZE;
255
256 RtlClearBits(&MiMdlMappingRegionAllocMap, Base, PageCount);
257
258 MiMdlMappingRegionHint = min (MiMdlMappingRegionHint, Base);
259
260 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
261
262 /* Reset the MDL state. */
263 Mdl->MdlFlags &= ~MDL_MAPPED_TO_SYSTEM_VA;
264 Mdl->MappedSystemVa = NULL;
265
266 }
267 else
268 {
269 MEMORY_AREA *Marea;
270
271 ASSERT(Mdl->Process == PsGetCurrentProcess());
272
273 Marea = MmLocateMemoryAreaByAddress( &Mdl->Process->AddressSpace, BaseAddress );
274 if (Marea == NULL)
275 {
276 DPRINT1( "Couldn't open memory area when unmapping user-space pages!\n" );
277 KEBUGCHECK(0);
278 }
279
280 MmFreeMemoryArea( &Mdl->Process->AddressSpace, Marea, NULL, NULL );
281
282 Mdl->Process = NULL;
283 }
284
285 }
286
287
288 /*
289 * @unimplemented
290 */
291 VOID
292 STDCALL
293 MmUnmapReservedMapping (
294 IN PVOID BaseAddress,
295 IN ULONG PoolTag,
296 IN PMDL MemoryDescriptorList
297 )
298 {
299 UNIMPLEMENTED;
300 }
301
302
303 VOID
304 NTAPI
305 MmBuildMdlFromPages(PMDL Mdl, PPFN_TYPE Pages)
306 {
307 memcpy(Mdl + 1, Pages, sizeof(PFN_TYPE) * (PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE));
308
309 /* FIXME: this flag should be set by the caller perhaps? */
310 Mdl->MdlFlags |= MDL_IO_PAGE_READ;
311 }
312
313
314 /*
315 * @unimplemented
316 */
317 NTSTATUS
318 STDCALL
319 MmPrefetchPages (
320 IN ULONG NumberOfLists,
321 IN PREAD_LIST *ReadLists
322 )
323 {
324 UNIMPLEMENTED;
325 return STATUS_NOT_IMPLEMENTED;
326 }
327
328
329 /*
330 * @unimplemented
331 */
332 NTSTATUS
333 STDCALL
334 MmProtectMdlSystemAddress (
335 IN PMDL MemoryDescriptorList,
336 IN ULONG NewProtect
337 )
338 {
339 UNIMPLEMENTED;
340 return STATUS_NOT_IMPLEMENTED;
341 }
342
343
344 /*
345 * @implemented
346 */
347 VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
348 KPROCESSOR_MODE AccessMode,
349 LOCK_OPERATION Operation)
350 /*
351 * FUNCTION: Probes the specified pages, makes them resident and locks them
352 * ARGUMENTS:
353 * Mdl = MDL to probe
354 * AccessMode = Access at which to probe the buffer
355 * Operation = Operation to probe for
356 *
357 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool
358 * used in cases where you know that the mdl address is paged memory or
359 * you don't know where the mdl address comes from. MmProbeAndLockPages will
360 * work no matter what kind of mdl address you have.
361 */
362 {
363 PPFN_TYPE MdlPages;
364 ULONG i, j;
365 ULONG NrPages;
366 NTSTATUS Status;
367 KPROCESSOR_MODE Mode;
368 PFN_TYPE Page;
369 PEPROCESS CurrentProcess = PsGetCurrentProcess();
370 PMADDRESS_SPACE AddressSpace;
371
372 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);
373
374 ASSERT(!(Mdl->MdlFlags & (MDL_PAGES_LOCKED|MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL|
375 MDL_IO_SPACE|MDL_SOURCE_IS_NONPAGED_POOL)));
376
377 MdlPages = (PPFN_TYPE)(Mdl + 1);
378 NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
379
380 /* mdl must have enough page entries */
381 ASSERT(NrPages <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));
382
383
384 if (Mdl->StartVa >= MmSystemRangeStart &&
385 MmGetPfnForProcess(NULL, Mdl->StartVa) >= MmPageArraySize)
386 {
387 /* phys addr is not phys memory so this must be io memory */
388
389 for (i = 0; i < NrPages; i++)
390 {
391 MdlPages[i] = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i*PAGE_SIZE));
392 }
393
394 Mdl->MdlFlags |= MDL_PAGES_LOCKED|MDL_IO_SPACE;
395 return;
396 }
397
398
399 if (Mdl->StartVa >= MmSystemRangeStart)
400 {
401 /* FIXME: why isn't AccessMode used? */
402 Mode = KernelMode;
403 Mdl->Process = NULL;
404 AddressSpace = MmGetKernelAddressSpace();
405 }
406 else
407 {
408 /* FIXME: why isn't AccessMode used? */
409 Mode = UserMode;
410 Mdl->Process = CurrentProcess;
411 AddressSpace = &CurrentProcess->AddressSpace;
412 }
413
414
415 /*
416 * Lock the pages
417 */
418 MmLockAddressSpace(AddressSpace);
419
420 for (i = 0; i < NrPages; i++)
421 {
422 PVOID Address;
423
424 Address = (char*)Mdl->StartVa + (i*PAGE_SIZE);
425
426 /*
427 * FIXME: skip the probing/access stuff if buffer is nonpaged kernel space?
428 * -Gunnar
429 */
430
431 if (!MmIsPagePresent(NULL, Address))
432 {
433 Status = MmNotPresentFault(Mode, (ULONG_PTR)Address, TRUE);
434 if (!NT_SUCCESS(Status))
435 {
436 for (j = 0; j < i; j++)
437 {
438 Page = MdlPages[j];
439 if (Page < MmPageArraySize)
440 {
441 MmUnlockPage(Page);
442 MmDereferencePage(Page);
443 }
444 }
445 MmUnlockAddressSpace(AddressSpace);
446 ExRaiseStatus(STATUS_ACCESS_VIOLATION);
447 }
448 }
449 else
450 {
451 MmLockPage(MmGetPfnForProcess(NULL, Address));
452 }
453
454 if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
455 (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
456 {
457 Status = MmAccessFault(Mode, (ULONG_PTR)Address, TRUE);
458 if (!NT_SUCCESS(Status))
459 {
460 for (j = 0; j < i; j++)
461 {
462 Page = MdlPages[j];
463 if (Page < MmPageArraySize)
464 {
465 MmUnlockPage(Page);
466 MmDereferencePage(Page);
467 }
468 }
469 MmUnlockAddressSpace(AddressSpace);
470 ExRaiseStatus(STATUS_ACCESS_VIOLATION);
471 }
472 }
473 Page = MmGetPfnForProcess(NULL, Address);
474 MdlPages[i] = Page;
475 if (Page >= MmPageArraySize)
476 Mdl->MdlFlags |= MDL_IO_SPACE;
477 else
478 MmReferencePage(Page);
479 }
480
481 MmUnlockAddressSpace(AddressSpace);
482 Mdl->MdlFlags |= MDL_PAGES_LOCKED;
483 }
484
485
486 /*
487 * @unimplemented
488 */
489 VOID
490 STDCALL
491 MmProbeAndLockProcessPages (
492 IN OUT PMDL MemoryDescriptorList,
493 IN PEPROCESS Process,
494 IN KPROCESSOR_MODE AccessMode,
495 IN LOCK_OPERATION Operation
496 )
497 {
498 UNIMPLEMENTED;
499 }
500
501
502 /*
503 * @unimplemented
504 */
505 VOID
506 STDCALL
507 MmProbeAndLockSelectedPages(
508 IN OUT PMDL MemoryDescriptorList,
509 IN LARGE_INTEGER PageList[],
510 IN KPROCESSOR_MODE AccessMode,
511 IN LOCK_OPERATION Operation
512 )
513 {
514 UNIMPLEMENTED;
515 }
516
517
518 /*
519 * @implemented
520 */
521 ULONG STDCALL MmSizeOfMdl (PVOID Base,
522 ULONG Length)
523 /*
524 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
525 * the given address range
526 * ARGUMENTS:
527 * Base = base virtual address
528 * Length = number of bytes to map
529 */
530 {
531 ULONG len;
532
533 len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);
534
535 return(sizeof(MDL)+(len*sizeof(PFN_TYPE)));
536 }
537
538
539 /*
540 * @implemented
541 */
542 VOID STDCALL
543 MmBuildMdlForNonPagedPool (PMDL Mdl)
544 /*
545 * FUNCTION: Fills in the corresponding physical page array of a given
546 * MDL for a buffer in nonpaged system space
547 * ARGUMENTS:
548 * Mdl = Points to an MDL that supplies a virtual address,
549 * byte offset and length
550 *
551 * This function can be seen as a fast version of MmProbeAndLockPages in case
552 * you _know_ that the mdl address is within nonpaged kernel space.
553 */
554 {
555 ULONG i;
556 ULONG PageCount;
557 PPFN_TYPE MdlPages;
558
559 /*
560 * mdl buffer must (at least) be in kernel space, thou this doesn't
561 * necesarely mean that the buffer in within _nonpaged_ kernel space...
562 */
563 ASSERT(Mdl->StartVa >= MmSystemRangeStart);
564
565 PageCount = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
566 MdlPages = (PPFN_TYPE)(Mdl + 1);
567
568 /* mdl must have enough page entries */
569 ASSERT(PageCount <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));
570
571 for (i=0; i < PageCount; i++)
572 {
573 *MdlPages++ = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i * PAGE_SIZE));
574 }
575
576 Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
577 Mdl->Process = NULL;
578 Mdl->MappedSystemVa = (char*)Mdl->StartVa + Mdl->ByteOffset;
579 }
580
581
582 /*
583 * @implemented
584 */
585 PMDL STDCALL
586 MmCreateMdl (PMDL Mdl,
587 PVOID Base,
588 ULONG Length)
589 /*
590 * FUNCTION: Allocates and initalizes an MDL
591 * ARGUMENTS:
592 * MemoryDescriptorList = Points to MDL to initalize. If this is
593 * NULL then one is allocated
594 * Base = Base virtual address of the buffer
595 * Length = Length in bytes of the buffer
596 * RETURNS: A pointer to initalized MDL
597 */
598 {
599 if (Mdl == NULL)
600 {
601 ULONG Size;
602
603 Size = MmSizeOfMdl(Base,Length);
604 Mdl =
605 (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
606 if (Mdl == NULL)
607 {
608 return(NULL);
609 }
610 }
611
612 MmInitializeMdl(Mdl, (char*)Base, Length);
613
614 return(Mdl);
615 }
616
617
618 /*
619 * @unimplemented
620 */
621 VOID STDCALL
622 MmMapMemoryDumpMdl (PVOID Unknown0)
623 /*
624 * FIXME: Has something to do with crash dumps. Do we want to implement
625 * this?
626 */
627 {
628 UNIMPLEMENTED;
629 }
630
631
632 /*
633 * @implemented
634 */
635 PMDL STDCALL
636 MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress,
637 IN PHYSICAL_ADDRESS HighAddress,
638 IN PHYSICAL_ADDRESS SkipBytes,
639 IN SIZE_T Totalbytes )
640 {
641 /*
642 MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory pages to an MDL
643
644 MmAllocatePagesForMdlSearch the PFN database for free, zeroed or standby
645 pagesAllocates pages and puts in MDLDoes not map pages (caller responsibility)
646 Designed to be used by an AGP driver
647
648 LowAddress is the lowest acceptable physical address it wants to allocate
649 and HighAddress is the highest. SkipBytes are the number of bytes that the
650 kernel should keep free above LowAddress and below the address at which it
651 starts to allocate physical memory. TotalBytes are the number of bytes that
652 the driver wants to allocate. The return value of the function is a MDL
653 that if non-zero describes the physical memory the kernel has given the
654 driver. To access portions of the memory the driver must create sub-MDLs
655 from the returned MDL that describe appropriate portions of the physical
656 memory. When a driver wants to access physical memory described by a
657 sub-MDL it must map the sub-MDL using MmGetSystemAddressForMdlSafe.
658
659 Konstantin Gusev
660 */
661
662 PMDL Mdl;
663 PPFN_TYPE Pages;
664 ULONG NumberOfPagesWanted, NumberOfPagesAllocated;
665 ULONG Ret;
666
667 DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
668 "SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
669 LowAddress.QuadPart, HighAddress.QuadPart,
670 SkipBytes.QuadPart, Totalbytes);
671
672 /* SkipBytes must be a multiple of the page size */
673 if ((SkipBytes.QuadPart % PAGE_SIZE) != 0)
674 {
675 DPRINT1("Warning: SkipBytes is not a multiple of PAGE_SIZE\n");
676 return NULL;
677 }
678
679 /* Allocate memory for the MDL */
680 Mdl = MmCreateMdl(NULL, 0, Totalbytes);
681 if (Mdl == NULL)
682 {
683 return NULL;
684 }
685
686 /* Allocate pages into the MDL */
687 NumberOfPagesAllocated = 0;
688 NumberOfPagesWanted = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
689 Pages = (PPFN_TYPE)(Mdl + 1);
690 while (NumberOfPagesWanted > 0)
691 {
692 Ret = MmAllocPagesSpecifyRange(
693 MC_NPPOOL,
694 LowAddress,
695 HighAddress,
696 NumberOfPagesWanted,
697 Pages + NumberOfPagesAllocated);
698 if (Ret == (ULONG)-1)
699 break;
700
701 NumberOfPagesAllocated += Ret;
702 NumberOfPagesWanted -= Ret;
703
704 if (SkipBytes.QuadPart == 0)
705 break;
706 LowAddress.QuadPart += SkipBytes.QuadPart;
707 HighAddress.QuadPart += SkipBytes.QuadPart;
708 }
709
710 if (NumberOfPagesAllocated == 0)
711 {
712 ExFreePool(Mdl);
713 Mdl = NULL;
714 }
715 else if (NumberOfPagesWanted > 0)
716 {
717 Mdl->ByteCount = (ULONG)(NumberOfPagesAllocated * PAGE_SIZE);
718 /* FIXME: I don't know if Mdl->Size should also be changed -- blight */
719 }
720 return Mdl;
721 }
722
723
724 /*
725 * @implemented
726 */
727 VOID STDCALL
728 MmFreePagesFromMdl ( IN PMDL Mdl )
729 {
730 /*
731 Drivers use the MmFreePagesFromMdl, the kernel-mode equivalent of
732 FreeUserPhysicalPages, to free the physical memory it has allocated with
733 MmAllocatePagesForMdl. This function is also prototyped in ntddk.h:
734
735 Note that a driver is responsible for deallocating the MDL returned by
736 MmAllocatePagesForMdl with a call to ExFreePool, since MmFreePagesFromMdl
737 does not free the MDL.
738
739 Konstantin Gusev
740
741 */
742 PPFN_TYPE Pages;
743 LONG NumberOfPages;
744
745 NumberOfPages = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
746 Pages = (PPFN_TYPE)(Mdl + 1);
747
748 while (--NumberOfPages >= 0)
749 {
750 MmDereferencePage(Pages[NumberOfPages]);
751 }
752 }
753
754
755 /*
756 * @implemented
757 */
758 PVOID STDCALL
759 MmMapLockedPagesSpecifyCache ( IN PMDL Mdl,
760 IN KPROCESSOR_MODE AccessMode,
761 IN MEMORY_CACHING_TYPE CacheType,
762 IN PVOID BaseAddress,
763 IN ULONG BugCheckOnFailure,
764 IN MM_PAGE_PRIORITY Priority)
765 {
766 PVOID Base;
767 PULONG MdlPages;
768 KIRQL oldIrql;
769 ULONG PageCount;
770 ULONG StartingOffset;
771 PEPROCESS CurrentProcess;
772 NTSTATUS Status;
773 ULONG Protect;
774
775 DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
776 "BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
777 Mdl, AccessMode, CacheType, BaseAddress, BugCheckOnFailure, Priority);
778
779 /* FIXME: Implement Priority */
780 (void) Priority;
781
782 /* Calculate the number of pages required. */
783 PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
784
785 if (AccessMode != KernelMode)
786 {
787 MEMORY_AREA *Result;
788 LARGE_INTEGER BoundaryAddressMultiple;
789 NTSTATUS Status;
790
791 /* pretty sure you can't map partial mdl's to user space */
792 ASSERT(!(Mdl->MdlFlags & MDL_PARTIAL));
793
794 BoundaryAddressMultiple.QuadPart = 0;
795 Base = BaseAddress;
796
797 CurrentProcess = PsGetCurrentProcess();
798
799 MmLockAddressSpace(&CurrentProcess->AddressSpace);
800 Status = MmCreateMemoryArea(CurrentProcess,
801 &CurrentProcess->AddressSpace,
802 MEMORY_AREA_MDL_MAPPING,
803 &Base,
804 PageCount * PAGE_SIZE,
805 0, /* PAGE_READWRITE? */
806 &Result,
807 (Base != NULL),
808 FALSE,
809 BoundaryAddressMultiple);
810 MmUnlockAddressSpace(&CurrentProcess->AddressSpace);
811 if (!NT_SUCCESS(Status))
812 {
813 if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
814 {
815 return NULL;
816 }
817
818 /* Throw exception */
819 ExRaiseStatus(STATUS_ACCESS_VIOLATION);
820 ASSERT(0);
821 }
822
823 Mdl->Process = CurrentProcess;
824 }
825 else /* if (AccessMode == KernelMode) */
826 {
827 /* can't map mdl twice */
828 ASSERT(!(Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL_HAS_BEEN_MAPPED)));
829 /* can't map mdl buildt from non paged pool into kernel space */
830 ASSERT(!(Mdl->MdlFlags & (MDL_SOURCE_IS_NONPAGED_POOL)));
831
832 CurrentProcess = NULL;
833
834 /* Allocate that number of pages from the mdl mapping region. */
835 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
836
837 StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, PageCount, MiMdlMappingRegionHint);
838
839 if (StartingOffset == 0xffffffff)
840 {
841 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
842
843 DPRINT1("Out of MDL mapping space\n");
844
845 if ((Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) || !BugCheckOnFailure)
846 {
847 return NULL;
848 }
849
850 KEBUGCHECK(0);
851 }
852
853 Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);
854
855 if (MiMdlMappingRegionHint == StartingOffset)
856 {
857 MiMdlMappingRegionHint += PageCount;
858 }
859
860 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
861
862 Mdl->Process = NULL;
863 }
864
865 /* Set the virtual mappings for the MDL pages. */
866 MdlPages = (PULONG)(Mdl + 1);
867
868 Protect = PAGE_READWRITE;
869 if (CacheType == MmNonCached)
870 Protect |= PAGE_NOCACHE;
871 else if (CacheType == MmWriteCombined)
872 DPRINT("CacheType MmWriteCombined not supported!\n");
873 if (Mdl->MdlFlags & MDL_IO_SPACE)
874 Status = MmCreateVirtualMappingUnsafe(CurrentProcess,
875 Base,
876 Protect,
877 MdlPages,
878 PageCount);
879 else
880 Status = MmCreateVirtualMapping(CurrentProcess,
881 Base,
882 Protect,
883 MdlPages,
884 PageCount);
885 if (!NT_SUCCESS(Status))
886 {
887 DbgPrint("Unable to create virtual mapping\n");
888 if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
889 {
890 return NULL;
891 }
892 if (AccessMode != KernelMode)
893 {
894 /* Throw exception */
895 ExRaiseStatus(STATUS_ACCESS_VIOLATION);
896 ASSERT(0);
897 }
898 else /* AccessMode == KernelMode */
899 {
900 if (!BugCheckOnFailure)
901 return NULL;
902
903 /* FIXME: Use some bugcheck code instead of 0 */
904 KEBUGCHECK(0);
905 }
906 }
907
908 /* Mark the MDL has having being mapped. */
909 if (AccessMode == KernelMode)
910 {
911 if (Mdl->MdlFlags & MDL_PARTIAL)
912 {
913 Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
914 }
915 else
916 {
917 Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
918 }
919 Mdl->MappedSystemVa = (char*)Base + Mdl->ByteOffset;
920 }
921 else
922 DPRINT1("UserMode mapping - returning 0x%x\n", (ULONG)Base + Mdl->ByteOffset);
923
924 return((char*)Base + Mdl->ByteOffset);
925 }
926
927
928 /*
929 * @implemented
930 */
931 PVOID STDCALL
932 MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
933 /*
934 * FUNCTION: Maps the physical pages described by a given MDL
935 * ARGUMENTS:
936 * Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
937 * MmAllocatePagesForMdl or IoBuildPartialMdl.
938 * AccessMode = Specifies the portion of the address space to map the
939 * pages.
940 * RETURNS: The base virtual address that maps the locked pages for the
941 * range described by the MDL
942 *
943 * If mapping into user space, pages are mapped into current address space.
944 */
945 {
946 return MmMapLockedPagesSpecifyCache(Mdl,
947 AccessMode,
948 MmCached,
949 NULL,
950 TRUE,
951 NormalPagePriority);
952 }
953
954
955 /* EOF */
956
957
958
959
960
961
962
963
964