- use inlined probing macros for basic types
[reactos.git] / reactos / ntoskrnl / mm / mdl.c
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 *
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <internal/debug.h>
16
17 /* GLOBALS *******************************************************************/
18
19 #define TAG_MDL TAG('M', 'D', 'L', ' ')
20
21 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
22
23 static PVOID MiMdlMappingRegionBase = NULL;
24 static RTL_BITMAP MiMdlMappingRegionAllocMap;
25 static ULONG MiMdlMappingRegionHint;
26 static KSPIN_LOCK MiMdlMappingRegionLock;
27 extern ULONG MmPageArraySize;
28
29 /*
30 MDL Flags desc.
31
32 MDL_PAGES_LOCKED MmProbeAndLockPages has been called for this mdl
33 MDL_SOURCE_IS_NONPAGED_POOL mdl has been build by MmBuildMdlForNonPagedPool
34 MDL_PARTIAL mdl has been built by IoBuildPartialMdl
35 MDL_MAPPING_CAN_FAIL in case of an error, MmMapLockedPages will return NULL instead of to bugcheck
36 MDL_MAPPED_TO_SYSTEM_VA mdl has been mapped into kernel space using MmMapLockedPages
37 MDL_PARTIAL_HAS_BEEN_MAPPED mdl flagged MDL_PARTIAL has been mapped into kernel space using MmMapLockedPages
38 */
39
40 /* FUNCTIONS *****************************************************************/
41
42
/*
 * @unimplemented
 */
NTSTATUS
STDCALL
MmAdvanceMdl (
    IN PMDL Mdl,
    IN ULONG NumberOfBytes
    )
{
    /* Stub only: should advance the MDL's described range forward by
       NumberOfBytes, but nothing is implemented yet. */
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
56
57
58 VOID INIT_FUNCTION
59 MmInitializeMdlImplementation(VOID)
60 {
61 MEMORY_AREA* Result;
62 NTSTATUS Status;
63 PVOID Buffer;
64 PHYSICAL_ADDRESS BoundaryAddressMultiple;
65
66 BoundaryAddressMultiple.QuadPart = 0;
67 MiMdlMappingRegionHint = 0;
68 MiMdlMappingRegionBase = NULL;
69
70 MmLockAddressSpace(MmGetKernelAddressSpace());
71 Status = MmCreateMemoryArea(NULL,
72 MmGetKernelAddressSpace(),
73 MEMORY_AREA_MDL_MAPPING,
74 &MiMdlMappingRegionBase,
75 MI_MDL_MAPPING_REGION_SIZE,
76 0,
77 &Result,
78 FALSE,
79 FALSE,
80 BoundaryAddressMultiple);
81 if (!NT_SUCCESS(Status))
82 {
83 MmUnlockAddressSpace(MmGetKernelAddressSpace());
84 KEBUGCHECK(0);
85 }
86 MmUnlockAddressSpace(MmGetKernelAddressSpace());
87
88 Buffer = ExAllocatePoolWithTag(NonPagedPool,
89 MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8),
90 TAG_MDL);
91
92 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
93 RtlClearAllBits(&MiMdlMappingRegionAllocMap);
94
95 KeInitializeSpinLock(&MiMdlMappingRegionLock);
96 }
97
98
99 PVOID
100 MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
101 {
102 PPFN_NUMBER MdlPages;
103
104 MdlPages = (PPFN_NUMBER)(Mdl + 1);
105
106 return((PVOID)MdlPages[((ULONG_PTR)Offset) / PAGE_SIZE]);
107 }
108
109
/*
 * @implemented
 */
VOID STDCALL
MmUnlockPages(PMDL Mdl)
/*
 * FUNCTION: Unlocks the physical pages described by a given MDL
 * ARGUMENTS:
 * MemoryDescriptorList = MDL describing the buffer to be unlocked
 * NOTES: The memory described by the specified MDL must have been locked
 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
 * MDL is updated
 *
 * May be called in any process context.
 */
{
   ULONG i;
   PPFN_NUMBER MdlPages;
   PFN_NUMBER Page;

   /*
    * MmProbeAndLockPages MUST have been called to lock this mdl!
    *
    * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
    * locked with MmLockAndProbePages, but (for now) we'll be more forgiving...
    */
   if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
   {
      DPRINT1("MmUnlockPages called for non-locked mdl!\n");
      return;
   }

   /* If mdl buffer is mapped io space -> do nothing */
   /* IO-space pfns were never reference-counted by MmProbeAndLockPages,
      so only the flag needs clearing. */
   if (Mdl->MdlFlags & MDL_IO_SPACE)
   {
      Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
      return;
   }

   /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
   if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
   {
      MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
   }

   /*
    * FIXME: I don't know whether this right, but it looks sensible
    */
   /* NOTE(review): this early return leaves MDL_PAGES_LOCKED set for
      nonpaged-pool / page-read mdls -- confirm that is intentional. */
   if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
         (Mdl->MdlFlags & MDL_IO_PAGE_READ))
   {
      return;
   }


   /* Drop the per-page lock and the reference MmProbeAndLockPages took. */
   MdlPages = (PPFN_NUMBER)(Mdl + 1);
   for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGE_SIZE); i++)
   {
      Page = MdlPages[i];
      MmUnlockPage(Page);
      MmDereferencePage(Page);
   }

   Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
175
176
/*
 * @unimplemented
 */
PVOID
STDCALL
MmMapLockedPagesWithReservedMapping (
    IN PVOID MappingAddress,
    IN ULONG PoolTag,
    IN PMDL MemoryDescriptorList,
    IN MEMORY_CACHING_TYPE CacheType
    )
{
    /* Stub only: should map the MDL into a region previously reserved by
       MmAllocateMappingAddress; currently always returns NULL. */
    UNIMPLEMENTED;
    return 0;
}
192
193
/*
 * @implemented
 */
VOID STDCALL
MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
/*
 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
 * ARGUMENTS:
 * BaseAddress = Base virtual address to which the pages were mapped
 * MemoryDescriptorList = MDL describing the mapped pages
 *
 * User space unmappings _must_ be done from the original process context!
 */
{
   KIRQL oldIrql;
   ULONG i;
   ULONG PageCount;
   ULONG Base;

   DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress, Mdl);

   /*
    * In this case, the MDL has the same system address as the base address
    * so there is no need to free it
    */
   if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) &&
         (BaseAddress >= MmSystemRangeStart))
   {
      return;
   }


   /* Calculate the number of pages we mapped. */
   PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;

   /*
    * Docs says that BaseAddress should be a _base_ address, but every example
    * I've seen pass the actual address. -Gunnar
    */
   BaseAddress = PAGE_ALIGN(BaseAddress);

   /* Unmap all the pages. */
   for (i = 0; i < PageCount; i++)
   {
      MmDeleteVirtualMapping(Mdl->Process,
                             (char*)BaseAddress + (i * PAGE_SIZE),
                             FALSE,
                             NULL,
                             NULL);
   }

   if (BaseAddress >= MmSystemRangeStart)
   {
      /* Kernel mapping: return the pages to the MDL mapping region bitmap
         that MmMapLockedPagesSpecifyCache allocated them from. */
      ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

      KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
      /* Deallocate all the pages used. */
      Base = (ULONG)((char*)BaseAddress - (char*)MiMdlMappingRegionBase) / PAGE_SIZE;

      RtlClearBits(&MiMdlMappingRegionAllocMap, Base, PageCount);

      /* Move the search hint down so the freed slots are reused. */
      MiMdlMappingRegionHint = min (MiMdlMappingRegionHint, Base);

      KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

      /* Reset the MDL state. */
      Mdl->MdlFlags &= ~MDL_MAPPED_TO_SYSTEM_VA;
      Mdl->MappedSystemVa = NULL;

   }
   else
   {
      MEMORY_AREA *Marea;

      /* User mapping: must run in the process the pages were mapped into. */
      ASSERT(Mdl->Process == PsGetCurrentProcess());

      Marea = MmLocateMemoryAreaByAddress( &Mdl->Process->AddressSpace, BaseAddress );
      if (Marea == NULL)
      {
         DPRINT1( "Couldn't open memory area when unmapping user-space pages!\n" );
         KEBUGCHECK(0);
      }

      MmFreeMemoryArea( &Mdl->Process->AddressSpace, Marea, NULL, NULL );

      Mdl->Process = NULL;
   }

}
283
284
/*
 * @unimplemented
 */
VOID
STDCALL
MmUnmapReservedMapping (
    IN PVOID BaseAddress,
    IN ULONG PoolTag,
    IN PMDL MemoryDescriptorList
    )
{
    /* Stub only: counterpart of MmMapLockedPagesWithReservedMapping;
       nothing is implemented yet. */
    UNIMPLEMENTED;
}
298
299
300 VOID
301 MmBuildMdlFromPages(PMDL Mdl, PPFN_TYPE Pages)
302 {
303 memcpy(Mdl + 1, Pages, sizeof(PFN_TYPE) * (PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE));
304
305 /* FIXME: this flag should be set by the caller perhaps? */
306 Mdl->MdlFlags |= MDL_IO_PAGE_READ;
307 }
308
309
/*
 * @unimplemented
 */
NTSTATUS
STDCALL
MmPrefetchPages (
    IN ULONG NumberOfLists,
    IN PREAD_LIST *ReadLists
    )
{
    /* Stub only: always reports the prefetch request as unimplemented. */
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
323
324
/*
 * @unimplemented
 */
NTSTATUS
STDCALL
MmProtectMdlSystemAddress (
    IN PMDL MemoryDescriptorList,
    IN ULONG NewProtect
    )
{
    /* Stub only: should change the protection on the MDL's system-VA
       mapping; currently always fails. */
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
338
339
340 /*
341 * @implemented
342 */
343 VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
344 KPROCESSOR_MODE AccessMode,
345 LOCK_OPERATION Operation)
346 /*
347 * FUNCTION: Probes the specified pages, makes them resident and locks them
348 * ARGUMENTS:
349 * Mdl = MDL to probe
350 * AccessMode = Access at which to probe the buffer
351 * Operation = Operation to probe for
352 *
353 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool
354 * used in cases where you know that the mdl address is paged memory or
355 * you don't know where the mdl address comes from. MmProbeAndLockPages will
356 * work no matter what kind of mdl address you have.
357 */
358 {
359 PPFN_TYPE MdlPages;
360 ULONG i, j;
361 ULONG NrPages;
362 NTSTATUS Status;
363 KPROCESSOR_MODE Mode;
364 PFN_TYPE Page;
365 PEPROCESS CurrentProcess = PsGetCurrentProcess();
366 PMADDRESS_SPACE AddressSpace;
367
368 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);
369
370 ASSERT(!(Mdl->MdlFlags & (MDL_PAGES_LOCKED|MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL|
371 MDL_IO_SPACE|MDL_SOURCE_IS_NONPAGED_POOL)));
372
373 MdlPages = (PPFN_TYPE)(Mdl + 1);
374 NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
375
376 /* mdl must have enough page entries */
377 ASSERT(NrPages <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));
378
379
380 if (Mdl->StartVa >= MmSystemRangeStart &&
381 MmGetPfnForProcess(NULL, Mdl->StartVa) >= MmPageArraySize)
382 {
383 /* phys addr is not phys memory so this must be io memory */
384
385 for (i = 0; i < NrPages; i++)
386 {
387 MdlPages[i] = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i*PAGE_SIZE));
388 }
389
390 Mdl->MdlFlags |= MDL_PAGES_LOCKED|MDL_IO_SPACE;
391 return;
392 }
393
394
395 if (Mdl->StartVa >= MmSystemRangeStart)
396 {
397 /* FIXME: why isn't AccessMode used? */
398 Mode = KernelMode;
399 Mdl->Process = NULL;
400 AddressSpace = MmGetKernelAddressSpace();
401 }
402 else
403 {
404 /* FIXME: why isn't AccessMode used? */
405 Mode = UserMode;
406 Mdl->Process = CurrentProcess;
407 AddressSpace = &CurrentProcess->AddressSpace;
408 }
409
410
411 /*
412 * Lock the pages
413 */
414 MmLockAddressSpace(AddressSpace);
415
416 for (i = 0; i < NrPages; i++)
417 {
418 PVOID Address;
419
420 Address = (char*)Mdl->StartVa + (i*PAGE_SIZE);
421
422 /*
423 * FIXME: skip the probing/access stuff if buffer is nonpaged kernel space?
424 * -Gunnar
425 */
426
427 if (!MmIsPagePresent(NULL, Address))
428 {
429 Status = MmNotPresentFault(Mode, (ULONG_PTR)Address, TRUE);
430 if (!NT_SUCCESS(Status))
431 {
432 for (j = 0; j < i; j++)
433 {
434 Page = MdlPages[j];
435 if (Page < MmPageArraySize)
436 {
437 MmUnlockPage(Page);
438 MmDereferencePage(Page);
439 }
440 }
441 MmUnlockAddressSpace(AddressSpace);
442 ExRaiseStatus(STATUS_ACCESS_VIOLATION);
443 }
444 }
445 else
446 {
447 MmLockPage(MmGetPfnForProcess(NULL, Address));
448 }
449
450 if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
451 (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
452 {
453 Status = MmAccessFault(Mode, (ULONG_PTR)Address, TRUE);
454 if (!NT_SUCCESS(Status))
455 {
456 for (j = 0; j < i; j++)
457 {
458 Page = MdlPages[j];
459 if (Page < MmPageArraySize)
460 {
461 MmUnlockPage(Page);
462 MmDereferencePage(Page);
463 }
464 }
465 MmUnlockAddressSpace(AddressSpace);
466 ExRaiseStatus(STATUS_ACCESS_VIOLATION);
467 }
468 }
469 Page = MmGetPfnForProcess(NULL, Address);
470 MdlPages[i] = Page;
471 if (Page >= MmPageArraySize)
472 Mdl->MdlFlags |= MDL_IO_SPACE;
473 else
474 MmReferencePage(Page);
475 }
476
477 MmUnlockAddressSpace(AddressSpace);
478 Mdl->MdlFlags |= MDL_PAGES_LOCKED;
479 }
480
481
/*
 * @unimplemented
 */
VOID
STDCALL
MmProbeAndLockProcessPages (
    IN OUT PMDL MemoryDescriptorList,
    IN PEPROCESS Process,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )
{
    /* Stub only: should probe and lock pages in the context of the given
       process; nothing is implemented yet. */
    UNIMPLEMENTED;
}
496
497
/*
 * @unimplemented
 */
VOID
STDCALL
MmProbeAndLockSelectedPages(
    IN OUT PMDL MemoryDescriptorList,
    IN LARGE_INTEGER PageList[],
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )
{
    /* Stub only: should probe and lock only the pages named in PageList;
       nothing is implemented yet. */
    UNIMPLEMENTED;
}
512
513
514 /*
515 * @implemented
516 */
517 ULONG STDCALL MmSizeOfMdl (PVOID Base,
518 ULONG Length)
519 /*
520 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
521 * the given address range
522 * ARGUMENTS:
523 * Base = base virtual address
524 * Length = number of bytes to map
525 */
526 {
527 ULONG len;
528
529 len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);
530
531 return(sizeof(MDL)+(len*sizeof(PFN_TYPE)));
532 }
533
534
535 /*
536 * @implemented
537 */
538 VOID STDCALL
539 MmBuildMdlForNonPagedPool (PMDL Mdl)
540 /*
541 * FUNCTION: Fills in the corresponding physical page array of a given
542 * MDL for a buffer in nonpaged system space
543 * ARGUMENTS:
544 * Mdl = Points to an MDL that supplies a virtual address,
545 * byte offset and length
546 *
547 * This function can be seen as a fast version of MmProbeAndLockPages in case
548 * you _know_ that the mdl address is within nonpaged kernel space.
549 */
550 {
551 ULONG i;
552 ULONG PageCount;
553 PPFN_TYPE MdlPages;
554
555 /*
556 * mdl buffer must (at least) be in kernel space, thou this doesn't
557 * necesarely mean that the buffer in within _nonpaged_ kernel space...
558 */
559 ASSERT(Mdl->StartVa >= MmSystemRangeStart);
560
561 PageCount = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
562 MdlPages = (PPFN_TYPE)(Mdl + 1);
563
564 /* mdl must have enough page entries */
565 ASSERT(PageCount <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));
566
567 for (i=0; i < PageCount; i++)
568 {
569 *MdlPages++ = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i * PAGE_SIZE));
570 }
571
572 Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
573 Mdl->Process = NULL;
574 Mdl->MappedSystemVa = (char*)Mdl->StartVa + Mdl->ByteOffset;
575 }
576
577
578 /*
579 * @implemented
580 */
581 PMDL STDCALL
582 MmCreateMdl (PMDL Mdl,
583 PVOID Base,
584 ULONG Length)
585 /*
586 * FUNCTION: Allocates and initalizes an MDL
587 * ARGUMENTS:
588 * MemoryDescriptorList = Points to MDL to initalize. If this is
589 * NULL then one is allocated
590 * Base = Base virtual address of the buffer
591 * Length = Length in bytes of the buffer
592 * RETURNS: A pointer to initalized MDL
593 */
594 {
595 if (Mdl == NULL)
596 {
597 ULONG Size;
598
599 Size = MmSizeOfMdl(Base,Length);
600 Mdl =
601 (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
602 if (Mdl == NULL)
603 {
604 return(NULL);
605 }
606 }
607
608 MmInitializeMdl(Mdl, (char*)Base, Length);
609
610 return(Mdl);
611 }
612
613
/*
 * @unimplemented
 */
VOID STDCALL
MmMapMemoryDumpMdl (PVOID Unknown0)
/*
 * FIXME: Has something to do with crash dumps. Do we want to implement
 * this?
 */
{
    /* Stub only. */
    UNIMPLEMENTED;
}
626
627
/*
 * @implemented
 */
PMDL STDCALL
MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T Totalbytes )
{
   /*
   MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory pages to an MDL

   MmAllocatePagesForMdlSearch the PFN database for free, zeroed or standby
   pagesAllocates pages and puts in MDLDoes not map pages (caller responsibility)
   Designed to be used by an AGP driver

   LowAddress is the lowest acceptable physical address it wants to allocate
   and HighAddress is the highest. SkipBytes are the number of bytes that the
   kernel should keep free above LowAddress and below the address at which it
   starts to allocate physical memory. TotalBytes are the number of bytes that
   the driver wants to allocate. The return value of the function is a MDL
   that if non-zero describes the physical memory the kernel has given the
   driver. To access portions of the memory the driver must create sub-MDLs
   from the returned MDL that describe appropriate portions of the physical
   memory. When a driver wants to access physical memory described by a
   sub-MDL it must map the sub-MDL using MmGetSystemAddressForMdlSafe.

   Konstantin Gusev
   */

   PMDL Mdl;
   PPFN_TYPE Pages;
   ULONG NumberOfPagesWanted, NumberOfPagesAllocated;
   ULONG Ret;

   DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
          "SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
          LowAddress.QuadPart, HighAddress.QuadPart,
          SkipBytes.QuadPart, Totalbytes);

   /* SkipBytes must be a multiple of the page size */
   if ((SkipBytes.QuadPart % PAGE_SIZE) != 0)
   {
      DPRINT1("Warning: SkipBytes is not a multiple of PAGE_SIZE\n");
      return NULL;
   }

   /* Allocate memory for the MDL */
   Mdl = MmCreateMdl(NULL, 0, Totalbytes);
   if (Mdl == NULL)
   {
      return NULL;
   }

   /* Allocate pages into the MDL */
   /* Retry loop: each pass asks for the remaining pages within the current
      [LowAddress, HighAddress] window, then slides the window up by
      SkipBytes (if non-zero) and tries again for whatever is still missing. */
   NumberOfPagesAllocated = 0;
   NumberOfPagesWanted = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
   Pages = (PPFN_TYPE)(Mdl + 1);
   while (NumberOfPagesWanted > 0)
   {
      Ret = MmAllocPagesSpecifyRange(
               MC_NPPOOL,
               LowAddress,
               HighAddress,
               NumberOfPagesWanted,
               Pages + NumberOfPagesAllocated);
      if (Ret == (ULONG)-1)
         break;

      NumberOfPagesAllocated += Ret;
      NumberOfPagesWanted -= Ret;

      if (SkipBytes.QuadPart == 0)
         break;
      LowAddress.QuadPart += SkipBytes.QuadPart;
      HighAddress.QuadPart += SkipBytes.QuadPart;
   }

   if (NumberOfPagesAllocated == 0)
   {
      /* Nothing obtained at all: hand back no MDL. */
      ExFreePool(Mdl);
      Mdl = NULL;
   }
   else if (NumberOfPagesWanted > 0)
   {
      /* Partial success: shrink the MDL's byte count to what was really
         allocated (the API allows returning fewer pages than requested). */
      Mdl->ByteCount = (ULONG)(NumberOfPagesAllocated * PAGE_SIZE);
      /* FIXME: I don't know if Mdl->Size should also be changed -- blight */
   }
   return Mdl;
}
718
719
720 /*
721 * @implemented
722 */
723 VOID STDCALL
724 MmFreePagesFromMdl ( IN PMDL Mdl )
725 {
726 /*
727 Drivers use the MmFreePagesFromMdl, the kernel-mode equivalent of
728 FreeUserPhysicalPages, to free the physical memory it has allocated with
729 MmAllocatePagesForMdl. This function is also prototyped in ntddk.h:
730
731 Note that a driver is responsible for deallocating the MDL returned by
732 MmAllocatePagesForMdl with a call to ExFreePool, since MmFreePagesFromMdl
733 does not free the MDL.
734
735 Konstantin Gusev
736
737 */
738 PPFN_TYPE Pages;
739 LONG NumberOfPages;
740
741 NumberOfPages = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
742 Pages = (PPFN_TYPE)(Mdl + 1);
743
744 while (--NumberOfPages >= 0)
745 {
746 MmDereferencePage(Pages[NumberOfPages]);
747 }
748 }
749
750
/*
 * @implemented
 */
PVOID STDCALL
MmMapLockedPagesSpecifyCache ( IN PMDL Mdl,
                               IN KPROCESSOR_MODE AccessMode,
                               IN MEMORY_CACHING_TYPE CacheType,
                               IN PVOID BaseAddress,
                               IN ULONG BugCheckOnFailure,
                               IN MM_PAGE_PRIORITY Priority)
/*
 * FUNCTION: Maps the pages described by the MDL into virtual address space.
 * ARGUMENTS:
 * Mdl = MDL whose page array is to be mapped
 * AccessMode = KernelMode maps into the shared MDL mapping region;
 *              anything else maps into the current process
 * CacheType = MmNonCached adds PAGE_NOCACHE; MmWriteCombined is not
 *             supported and is silently treated as cached
 * BaseAddress = preferred base for user-mode mappings (NULL = any)
 * BugCheckOnFailure = if FALSE, failures return NULL instead of bugchecking
 * Priority = ignored (not implemented)
 * RETURNS: Virtual address of the mapped buffer (base + ByteOffset),
 * or NULL on failure when allowed to fail.
 */
{
   PVOID Base;
   PULONG MdlPages;
   KIRQL oldIrql;
   ULONG PageCount;
   ULONG StartingOffset;
   PEPROCESS CurrentProcess;
   NTSTATUS Status;
   ULONG Protect;

   DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
          "BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
          Mdl, AccessMode, CacheType, BaseAddress, BugCheckOnFailure, Priority);

   /* FIXME: Implement Priority */
   (void) Priority;

   /* Calculate the number of pages required. */
   PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;

   if (AccessMode != KernelMode)
   {
      /* User-mode mapping: carve a memory area out of the current
         process' address space. */
      MEMORY_AREA *Result;
      LARGE_INTEGER BoundaryAddressMultiple;
      /* NOTE(review): this inner Status shadows the outer declaration. */
      NTSTATUS Status;

      /* pretty sure you can't map partial mdl's to user space */
      ASSERT(!(Mdl->MdlFlags & MDL_PARTIAL));

      BoundaryAddressMultiple.QuadPart = 0;
      Base = BaseAddress;

      CurrentProcess = PsGetCurrentProcess();

      MmLockAddressSpace(&CurrentProcess->AddressSpace);
      Status = MmCreateMemoryArea(CurrentProcess,
                                  &CurrentProcess->AddressSpace,
                                  MEMORY_AREA_MDL_MAPPING,
                                  &Base,
                                  PageCount * PAGE_SIZE,
                                  0, /* PAGE_READWRITE? */
                                  &Result,
                                  (Base != NULL),
                                  FALSE,
                                  BoundaryAddressMultiple);
      MmUnlockAddressSpace(&CurrentProcess->AddressSpace);
      if (!NT_SUCCESS(Status))
      {
         if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
         {
            return NULL;
         }

         /* Throw exception */
         ExRaiseStatus(STATUS_ACCESS_VIOLATION);
         ASSERT(0);
      }

      Mdl->Process = CurrentProcess;
   }
   else /* if (AccessMode == KernelMode) */
   {
      /* can't map mdl twice */
      ASSERT(!(Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL_HAS_BEEN_MAPPED)));
      /* can't map mdl buildt from non paged pool into kernel space */
      ASSERT(!(Mdl->MdlFlags & (MDL_SOURCE_IS_NONPAGED_POOL)));

      CurrentProcess = NULL;

      /* Allocate that number of pages from the mdl mapping region. */
      KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);

      StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, PageCount, MiMdlMappingRegionHint);

      if (StartingOffset == 0xffffffff)
      {
         KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

         DPRINT1("Out of MDL mapping space\n");

         if ((Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) || !BugCheckOnFailure)
         {
            return NULL;
         }

         KEBUGCHECK(0);
      }

      Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);

      /* Advance the hint past the range we just took, but only if it was
         pointing exactly at it. */
      if (MiMdlMappingRegionHint == StartingOffset)
      {
         MiMdlMappingRegionHint += PageCount;
      }

      KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

      Mdl->Process = NULL;
   }

   /* Set the virtual mappings for the MDL pages. */
   MdlPages = (PULONG)(Mdl + 1);

   Protect = PAGE_READWRITE;
   if (CacheType == MmNonCached)
      Protect |= PAGE_NOCACHE;
   else if (CacheType == MmWriteCombined)
      DPRINT("CacheType MmWriteCombined not supported!\n");
   /* IO-space pfns have no PFN-database entry, so use the unsafe variant
      that skips the reference accounting. */
   if (Mdl->MdlFlags & MDL_IO_SPACE)
      Status = MmCreateVirtualMappingUnsafe(CurrentProcess,
                                            Base,
                                            Protect,
                                            MdlPages,
                                            PageCount);
   else
      Status = MmCreateVirtualMapping(CurrentProcess,
                                      Base,
                                      Protect,
                                      MdlPages,
                                      PageCount);
   if (!NT_SUCCESS(Status))
   {
      /* NOTE(review): these failure returns do not release the memory area
         (user case) or clear the bitmap bits (kernel case) allocated above --
         looks like a leak; confirm and fix separately. */
      DbgPrint("Unable to create virtual mapping\n");
      if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
      {
         return NULL;
      }
      if (AccessMode != KernelMode)
      {
         /* Throw exception */
         ExRaiseStatus(STATUS_ACCESS_VIOLATION);
         ASSERT(0);
      }
      else /* AccessMode == KernelMode */
      {
         if (!BugCheckOnFailure)
            return NULL;

         /* FIXME: Use some bugcheck code instead of 0 */
         KEBUGCHECK(0);
      }
   }

   /* Mark the MDL has having being mapped. */
   if (AccessMode == KernelMode)
   {
      if (Mdl->MdlFlags & MDL_PARTIAL)
      {
         Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
      }
      else
      {
         Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
      }
      Mdl->MappedSystemVa = (char*)Base + Mdl->ByteOffset;
   }
   else
      DPRINT1("UserMode mapping - returning 0x%x\n", (ULONG)Base + Mdl->ByteOffset);

   return((char*)Base + Mdl->ByteOffset);
}
922
923
/*
 * @implemented
 */
PVOID STDCALL
MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
/*
 * FUNCTION: Maps the physical pages described by a given MDL
 * ARGUMENTS:
 * Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
 * MmAllocatePagesForMdl or IoBuildPartialMdl.
 * AccessMode = Specifies the portion of the address space to map the
 * pages.
 * RETURNS: The base virtual address that maps the locked pages for the
 * range described by the MDL
 *
 * If mapping into user space, pages are mapped into current address space.
 *
 * Thin wrapper: delegates to MmMapLockedPagesSpecifyCache with cached
 * mapping, no preferred base, bugcheck-on-failure and normal priority.
 */
{
   return MmMapLockedPagesSpecifyCache(Mdl,
                                       AccessMode,
                                       MmCached,
                                       NULL,
                                       TRUE,
                                       NormalPagePriority);
}
949
950
951 /* EOF */
952
953
954
955
956
957
958
959
960