- Fix KiDispatchException to unmask KI_EXCEPTION_INTERNAL when setting the exception...
[reactos.git] / reactos / ntoskrnl / mm / mdl.c
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 *
8 * PROGRAMMERS: David Welch (welch@cwcom.net)
9 */
10
11 /* INCLUDES ****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <internal/debug.h>
16
17 #if defined (ALLOC_PRAGMA)
18 #pragma alloc_text(INIT, MmInitializeMdlImplementation)
19 #endif
20
21 /* GLOBALS *******************************************************************/
22
23 #define TAG_MDL TAG('M', 'D', 'L', ' ')
24
25 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
26
27 static PVOID MiMdlMappingRegionBase = NULL;
28 static RTL_BITMAP MiMdlMappingRegionAllocMap;
29 static ULONG MiMdlMappingRegionHint;
30 static KSPIN_LOCK MiMdlMappingRegionLock;
31 extern ULONG MmPageArraySize;
32
33 /*
34 MDL Flags desc.
35
36 MDL_PAGES_LOCKED MmProbeAndLockPages has been called for this mdl
37 MDL_SOURCE_IS_NONPAGED_POOL mdl has been build by MmBuildMdlForNonPagedPool
38 MDL_PARTIAL mdl has been built by IoBuildPartialMdl
39 MDL_MAPPING_CAN_FAIL in case of an error, MmMapLockedPages will return NULL instead of to bugcheck
40 MDL_MAPPED_TO_SYSTEM_VA mdl has been mapped into kernel space using MmMapLockedPages
41 MDL_PARTIAL_HAS_BEEN_MAPPED mdl flagged MDL_PARTIAL has been mapped into kernel space using MmMapLockedPages
42 */
43
44 /* FUNCTIONS *****************************************************************/
45
46
/*
 * @unimplemented
 *
 * Stub. Presumably should advance the MDL's described range by
 * NumberOfBytes, trimming leading pages no longer covered -- TODO confirm
 * against the DDK contract when implementing.
 */
NTSTATUS
STDCALL
MmAdvanceMdl (
    IN PMDL Mdl,
    IN ULONG NumberOfBytes
    )
{
   UNIMPLEMENTED;
   return STATUS_NOT_IMPLEMENTED;
}
60
61
62 VOID
63 INIT_FUNCTION
64 NTAPI
65 MmInitializeMdlImplementation(VOID)
66 {
67 MEMORY_AREA* Result;
68 NTSTATUS Status;
69 PVOID Buffer;
70 PHYSICAL_ADDRESS BoundaryAddressMultiple;
71
72 BoundaryAddressMultiple.QuadPart = 0;
73 MiMdlMappingRegionHint = 0;
74 MiMdlMappingRegionBase = NULL;
75
76 MmLockAddressSpace(MmGetKernelAddressSpace());
77 Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
78 MEMORY_AREA_MDL_MAPPING,
79 &MiMdlMappingRegionBase,
80 MI_MDL_MAPPING_REGION_SIZE,
81 PAGE_READWRITE,
82 &Result,
83 FALSE,
84 0,
85 BoundaryAddressMultiple);
86 if (!NT_SUCCESS(Status))
87 {
88 MmUnlockAddressSpace(MmGetKernelAddressSpace());
89 KEBUGCHECK(0);
90 }
91 MmUnlockAddressSpace(MmGetKernelAddressSpace());
92
93 Buffer = ExAllocatePoolWithTag(NonPagedPool,
94 MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8),
95 TAG_MDL);
96
97 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
98 RtlClearAllBits(&MiMdlMappingRegionAllocMap);
99
100 KeInitializeSpinLock(&MiMdlMappingRegionLock);
101 }
102
103
104 PVOID
105 NTAPI
106 MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
107 {
108 PPFN_NUMBER MdlPages;
109
110 MdlPages = (PPFN_NUMBER)(Mdl + 1);
111
112 return((PVOID)MdlPages[((ULONG_PTR)Offset) / PAGE_SIZE]);
113 }
114
115
116 /*
117 * @implemented
118 */
119 VOID STDCALL
120 MmUnlockPages(PMDL Mdl)
121 /*
122 * FUNCTION: Unlocks the physical pages described by a given MDL
123 * ARGUMENTS:
124 * MemoryDescriptorList = MDL describing the buffer to be unlocked
125 * NOTES: The memory described by the specified MDL must have been locked
126 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
127 * MDL is updated
128 *
129 * May be called in any process context.
130 */
131 {
132 ULONG i;
133 PPFN_NUMBER MdlPages;
134 PFN_NUMBER Page;
135
136 /*
137 * MmProbeAndLockPages MUST have been called to lock this mdl!
138 *
139 * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
140 * locked with MmLockAndProbePages, but (for now) we'll be more forgiving...
141 */
142 if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
143 {
144 DPRINT1("MmUnlockPages called for non-locked mdl!\n");
145 return;
146 }
147
148 /* If mdl buffer is mapped io space -> do nothing */
149 if (Mdl->MdlFlags & MDL_IO_SPACE)
150 {
151 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
152 return;
153 }
154
155 /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
156 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
157 {
158 MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
159 }
160
161 /*
162 * FIXME: I don't know whether this right, but it looks sensible
163 */
164 if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
165 (Mdl->MdlFlags & MDL_IO_PAGE_READ))
166 {
167 return;
168 }
169
170
171 MdlPages = (PPFN_NUMBER)(Mdl + 1);
172 for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGE_SIZE); i++)
173 {
174 Page = MdlPages[i];
175 MmUnlockPage(Page);
176 MmDereferencePage(Page);
177 }
178
179 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
180 }
181
182
/*
 * @unimplemented
 *
 * Stub. Presumably should map the MDL's pages into the virtual range
 * previously reserved under PoolTag -- TODO confirm against the DDK
 * contract when implementing.
 */
PVOID
STDCALL
MmMapLockedPagesWithReservedMapping (
    IN PVOID MappingAddress,
    IN ULONG PoolTag,
    IN PMDL MemoryDescriptorList,
    IN MEMORY_CACHING_TYPE CacheType
    )
{
   UNIMPLEMENTED;
   return 0;
}
198
199
/*
 * @implemented
 */
VOID STDCALL
MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
/*
 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
 * ARGUMENTS:
 *      BaseAddress = Base virtual address to which the pages were mapped
 *      MemoryDescriptorList = MDL describing the mapped pages
 *
 * User space unmappings _must_ be done from the original process context!
 */
{
   KIRQL oldIrql;
   ULONG i;
   ULONG PageCount;
   ULONG Base;

   DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress, Mdl);

   /*
    * In this case, the MDL has the same system address as the base address
    * so there is no need to free it
    */
   if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) &&
       (BaseAddress >= MmSystemRangeStart))
   {
      return;
   }

   /* Calculate the number of pages we mapped. */
   PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;

   /*
    * Docs says that BaseAddress should be a _base_ address, but every example
    * I've seen pass the actual address. -Gunnar
    */
   BaseAddress = PAGE_ALIGN(BaseAddress);

   /* Unmap all the pages. */
   for (i = 0; i < PageCount; i++)
   {
      MmDeleteVirtualMapping(Mdl->Process,
                             (char*)BaseAddress + (i * PAGE_SIZE),
                             FALSE,
                             NULL,
                             NULL);
   }

   if (BaseAddress >= MmSystemRangeStart)
   {
      /* Kernel-space mapping: it must have come from the MDL mapping
       * region, so give its pages back to the allocation bitmap. */
      ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

      KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
      /* Deallocate all the pages used. */
      Base = (ULONG)((char*)BaseAddress - (char*)MiMdlMappingRegionBase) / PAGE_SIZE;

      RtlClearBits(&MiMdlMappingRegionAllocMap, Base, PageCount);

      /* Pull the search hint back so the freed slots are reused. */
      MiMdlMappingRegionHint = min (MiMdlMappingRegionHint, Base);

      KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

      /* Reset the MDL state. */
      Mdl->MdlFlags &= ~MDL_MAPPED_TO_SYSTEM_VA;
      Mdl->MappedSystemVa = NULL;

   }
   else
   {
      MEMORY_AREA *Marea;

      /* User-space unmappings are only valid from the context of the
       * process the MDL was mapped into (see function header). */
      ASSERT(Mdl->Process == PsGetCurrentProcess());

      Marea = MmLocateMemoryAreaByAddress( (PMADDRESS_SPACE)&(Mdl->Process)->VadRoot, BaseAddress );
      if (Marea == NULL)
      {
         DPRINT1( "Couldn't open memory area when unmapping user-space pages!\n" );
         KEBUGCHECK(0);
      }

      MmFreeMemoryArea( (PMADDRESS_SPACE)&(Mdl->Process)->VadRoot, Marea, NULL, NULL );

      Mdl->Process = NULL;
   }

}
289
290
/*
 * @unimplemented
 *
 * Stub. Counterpart of MmMapLockedPagesWithReservedMapping -- presumably
 * should unmap the MDL from the reserved range without releasing the
 * reservation itself; TODO confirm when implementing.
 */
VOID
STDCALL
MmUnmapReservedMapping (
    IN PVOID BaseAddress,
    IN ULONG PoolTag,
    IN PMDL MemoryDescriptorList
    )
{
   UNIMPLEMENTED;
}
304
305
306 VOID
307 NTAPI
308 MmBuildMdlFromPages(PMDL Mdl, PPFN_TYPE Pages)
309 {
310 memcpy(Mdl + 1, Pages, sizeof(PFN_TYPE) * (PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE));
311
312 /* FIXME: this flag should be set by the caller perhaps? */
313 Mdl->MdlFlags |= MDL_IO_PAGE_READ;
314 }
315
316
/*
 * @unimplemented
 *
 * Stub. Presumably should prefetch the page runs described by the given
 * read lists -- TODO confirm against the DDK contract when implementing.
 */
NTSTATUS
STDCALL
MmPrefetchPages (
    IN ULONG NumberOfLists,
    IN PREAD_LIST *ReadLists
    )
{
   UNIMPLEMENTED;
   return STATUS_NOT_IMPLEMENTED;
}
330
331
/*
 * @unimplemented
 *
 * Stub. Presumably should change the page protection of the MDL's system
 * mapping to NewProtect -- TODO confirm when implementing.
 */
NTSTATUS
STDCALL
MmProtectMdlSystemAddress (
    IN PMDL MemoryDescriptorList,
    IN ULONG NewProtect
    )
{
   UNIMPLEMENTED;
   return STATUS_NOT_IMPLEMENTED;
}
345
346
347 /*
348 * @implemented
349 */
350 VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
351 KPROCESSOR_MODE AccessMode,
352 LOCK_OPERATION Operation)
353 /*
354 * FUNCTION: Probes the specified pages, makes them resident and locks them
355 * ARGUMENTS:
356 * Mdl = MDL to probe
357 * AccessMode = Access at which to probe the buffer
358 * Operation = Operation to probe for
359 *
360 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool
361 * used in cases where you know that the mdl address is paged memory or
362 * you don't know where the mdl address comes from. MmProbeAndLockPages will
363 * work no matter what kind of mdl address you have.
364 */
365 {
366 PPFN_TYPE MdlPages;
367 ULONG i, j;
368 ULONG NrPages;
369 NTSTATUS Status;
370 KPROCESSOR_MODE Mode;
371 PFN_TYPE Page;
372 PEPROCESS CurrentProcess = PsGetCurrentProcess();
373 PMADDRESS_SPACE AddressSpace;
374
375 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);
376
377 ASSERT(!(Mdl->MdlFlags & (MDL_PAGES_LOCKED|MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL|
378 MDL_IO_SPACE|MDL_SOURCE_IS_NONPAGED_POOL)));
379
380 MdlPages = (PPFN_TYPE)(Mdl + 1);
381 NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
382
383 /* mdl must have enough page entries */
384 ASSERT(NrPages <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));
385
386
387 if (Mdl->StartVa >= MmSystemRangeStart &&
388 MmGetPfnForProcess(NULL, Mdl->StartVa) >= MmPageArraySize)
389 {
390 /* phys addr is not phys memory so this must be io memory */
391
392 for (i = 0; i < NrPages; i++)
393 {
394 MdlPages[i] = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i*PAGE_SIZE));
395 }
396
397 Mdl->MdlFlags |= MDL_PAGES_LOCKED|MDL_IO_SPACE;
398 return;
399 }
400
401
402 if (Mdl->StartVa >= MmSystemRangeStart)
403 {
404 /* FIXME: why isn't AccessMode used? */
405 Mode = KernelMode;
406 Mdl->Process = NULL;
407 AddressSpace = MmGetKernelAddressSpace();
408 }
409 else
410 {
411 /* FIXME: why isn't AccessMode used? */
412 Mode = UserMode;
413 Mdl->Process = CurrentProcess;
414 AddressSpace = (PMADDRESS_SPACE)&(CurrentProcess)->VadRoot;
415 }
416
417
418 /*
419 * Lock the pages
420 */
421 MmLockAddressSpace(AddressSpace);
422
423 for (i = 0; i < NrPages; i++)
424 {
425 PVOID Address;
426
427 Address = (char*)Mdl->StartVa + (i*PAGE_SIZE);
428
429 /*
430 * FIXME: skip the probing/access stuff if buffer is nonpaged kernel space?
431 * -Gunnar
432 */
433
434 if (!MmIsPagePresent(NULL, Address))
435 {
436 Status = MmAccessFault(FALSE, Address, Mode, NULL);
437 if (!NT_SUCCESS(Status))
438 {
439 for (j = 0; j < i; j++)
440 {
441 Page = MdlPages[j];
442 if (Page < MmPageArraySize)
443 {
444 MmUnlockPage(Page);
445 MmDereferencePage(Page);
446 }
447 }
448 MmUnlockAddressSpace(AddressSpace);
449 ExRaiseStatus(STATUS_ACCESS_VIOLATION);
450 }
451 }
452 else
453 {
454 MmLockPage(MmGetPfnForProcess(NULL, Address));
455 }
456
457 if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
458 (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
459 {
460 Status = MmAccessFault(TRUE, Address, Mode, NULL);
461 if (!NT_SUCCESS(Status))
462 {
463 for (j = 0; j < i; j++)
464 {
465 Page = MdlPages[j];
466 if (Page < MmPageArraySize)
467 {
468 MmUnlockPage(Page);
469 MmDereferencePage(Page);
470 }
471 }
472 MmUnlockAddressSpace(AddressSpace);
473 ExRaiseStatus(STATUS_ACCESS_VIOLATION);
474 }
475 }
476 Page = MmGetPfnForProcess(NULL, Address);
477 MdlPages[i] = Page;
478 if (Page >= MmPageArraySize)
479 Mdl->MdlFlags |= MDL_IO_SPACE;
480 else
481 MmReferencePage(Page);
482 }
483
484 MmUnlockAddressSpace(AddressSpace);
485 Mdl->MdlFlags |= MDL_PAGES_LOCKED;
486 }
487
488
/*
 * @unimplemented
 *
 * Stub. Presumably the same operation as MmProbeAndLockPages but performed
 * in the context of the given process -- TODO confirm when implementing.
 */
VOID
STDCALL
MmProbeAndLockProcessPages (
    IN OUT PMDL MemoryDescriptorList,
    IN PEPROCESS Process,
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )
{
   UNIMPLEMENTED;
}
503
504
/*
 * @unimplemented
 *
 * Stub. Presumably should probe and lock only the pages named in PageList
 * rather than the MDL's whole range -- TODO confirm when implementing.
 */
VOID
STDCALL
MmProbeAndLockSelectedPages(
    IN OUT PMDL MemoryDescriptorList,
    IN LARGE_INTEGER PageList[],
    IN KPROCESSOR_MODE AccessMode,
    IN LOCK_OPERATION Operation
    )
{
   UNIMPLEMENTED;
}
519
520
521 /*
522 * @implemented
523 */
524 ULONG STDCALL MmSizeOfMdl (PVOID Base,
525 ULONG Length)
526 /*
527 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
528 * the given address range
529 * ARGUMENTS:
530 * Base = base virtual address
531 * Length = number of bytes to map
532 */
533 {
534 ULONG len;
535
536 len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);
537
538 return(sizeof(MDL)+(len*sizeof(PFN_TYPE)));
539 }
540
541
542 /*
543 * @implemented
544 */
545 VOID STDCALL
546 MmBuildMdlForNonPagedPool (PMDL Mdl)
547 /*
548 * FUNCTION: Fills in the corresponding physical page array of a given
549 * MDL for a buffer in nonpaged system space
550 * ARGUMENTS:
551 * Mdl = Points to an MDL that supplies a virtual address,
552 * byte offset and length
553 *
554 * This function can be seen as a fast version of MmProbeAndLockPages in case
555 * you _know_ that the mdl address is within nonpaged kernel space.
556 */
557 {
558 ULONG i;
559 ULONG PageCount;
560 PPFN_TYPE MdlPages;
561
562 /*
563 * mdl buffer must (at least) be in kernel space, thou this doesn't
564 * necesarely mean that the buffer in within _nonpaged_ kernel space...
565 */
566 ASSERT(Mdl->StartVa >= MmSystemRangeStart);
567
568 PageCount = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
569 MdlPages = (PPFN_TYPE)(Mdl + 1);
570
571 /* mdl must have enough page entries */
572 ASSERT(PageCount <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));
573
574 for (i=0; i < PageCount; i++)
575 {
576 *MdlPages++ = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i * PAGE_SIZE));
577 }
578
579 Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
580 Mdl->Process = NULL;
581 Mdl->MappedSystemVa = (char*)Mdl->StartVa + Mdl->ByteOffset;
582 }
583
584
585 /*
586 * @implemented
587 */
588 PMDL STDCALL
589 MmCreateMdl (PMDL Mdl,
590 PVOID Base,
591 ULONG Length)
592 /*
593 * FUNCTION: Allocates and initalizes an MDL
594 * ARGUMENTS:
595 * MemoryDescriptorList = Points to MDL to initalize. If this is
596 * NULL then one is allocated
597 * Base = Base virtual address of the buffer
598 * Length = Length in bytes of the buffer
599 * RETURNS: A pointer to initalized MDL
600 */
601 {
602 if (Mdl == NULL)
603 {
604 ULONG Size;
605
606 Size = MmSizeOfMdl(Base,Length);
607 Mdl =
608 (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
609 if (Mdl == NULL)
610 {
611 return(NULL);
612 }
613 }
614
615 MmInitializeMdl(Mdl, (char*)Base, Length);
616
617 return(Mdl);
618 }
619
620
/*
 * @unimplemented
 */
VOID STDCALL
MmMapMemoryDumpMdl (PVOID Unknown0)
/*
 * FIXME: Has something to do with crash dumps. Do we want to implement
 * this?
 */
{
   UNIMPLEMENTED;
}
633
634
635 /*
636 * @implemented
637 */
638 PMDL STDCALL
639 MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress,
640 IN PHYSICAL_ADDRESS HighAddress,
641 IN PHYSICAL_ADDRESS SkipBytes,
642 IN SIZE_T Totalbytes )
643 {
644 /*
645 MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory pages to an MDL
646
647 MmAllocatePagesForMdlSearch the PFN database for free, zeroed or standby
648 pagesAllocates pages and puts in MDLDoes not map pages (caller responsibility)
649 Designed to be used by an AGP driver
650
651 LowAddress is the lowest acceptable physical address it wants to allocate
652 and HighAddress is the highest. SkipBytes are the number of bytes that the
653 kernel should keep free above LowAddress and below the address at which it
654 starts to allocate physical memory. TotalBytes are the number of bytes that
655 the driver wants to allocate. The return value of the function is a MDL
656 that if non-zero describes the physical memory the kernel has given the
657 driver. To access portions of the memory the driver must create sub-MDLs
658 from the returned MDL that describe appropriate portions of the physical
659 memory. When a driver wants to access physical memory described by a
660 sub-MDL it must map the sub-MDL using MmGetSystemAddressForMdlSafe.
661
662 Konstantin Gusev
663 */
664
665 PMDL Mdl;
666 PPFN_TYPE Pages;
667 ULONG NumberOfPagesWanted, NumberOfPagesAllocated;
668 ULONG Ret;
669
670 DPRINT("MmAllocatePagesForMdl - LowAddress = 0x%I64x, HighAddress = 0x%I64x, "
671 "SkipBytes = 0x%I64x, Totalbytes = 0x%x\n",
672 LowAddress.QuadPart, HighAddress.QuadPart,
673 SkipBytes.QuadPart, Totalbytes);
674
675 /* SkipBytes must be a multiple of the page size */
676 if ((SkipBytes.QuadPart % PAGE_SIZE) != 0)
677 {
678 DPRINT1("Warning: SkipBytes is not a multiple of PAGE_SIZE\n");
679 return NULL;
680 }
681
682 /* Allocate memory for the MDL */
683 Mdl = MmCreateMdl(NULL, 0, Totalbytes);
684 if (Mdl == NULL)
685 {
686 return NULL;
687 }
688
689 /* Allocate pages into the MDL */
690 NumberOfPagesAllocated = 0;
691 NumberOfPagesWanted = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
692 Pages = (PPFN_TYPE)(Mdl + 1);
693 while (NumberOfPagesWanted > 0)
694 {
695 Ret = MmAllocPagesSpecifyRange(
696 MC_NPPOOL,
697 LowAddress,
698 HighAddress,
699 NumberOfPagesWanted,
700 Pages + NumberOfPagesAllocated);
701 if (Ret == (ULONG)-1)
702 break;
703
704 NumberOfPagesAllocated += Ret;
705 NumberOfPagesWanted -= Ret;
706
707 if (SkipBytes.QuadPart == 0)
708 break;
709 LowAddress.QuadPart += SkipBytes.QuadPart;
710 HighAddress.QuadPart += SkipBytes.QuadPart;
711 }
712
713 if (NumberOfPagesAllocated == 0)
714 {
715 ExFreePool(Mdl);
716 Mdl = NULL;
717 }
718 else if (NumberOfPagesWanted > 0)
719 {
720 Mdl->ByteCount = (ULONG)(NumberOfPagesAllocated * PAGE_SIZE);
721 /* FIXME: I don't know if Mdl->Size should also be changed -- blight */
722 }
723 return Mdl;
724 }
725
726
727 /*
728 * @implemented
729 */
730 VOID STDCALL
731 MmFreePagesFromMdl ( IN PMDL Mdl )
732 {
733 /*
734 Drivers use the MmFreePagesFromMdl, the kernel-mode equivalent of
735 FreeUserPhysicalPages, to free the physical memory it has allocated with
736 MmAllocatePagesForMdl. This function is also prototyped in ntddk.h:
737
738 Note that a driver is responsible for deallocating the MDL returned by
739 MmAllocatePagesForMdl with a call to ExFreePool, since MmFreePagesFromMdl
740 does not free the MDL.
741
742 Konstantin Gusev
743
744 */
745 PPFN_TYPE Pages;
746 LONG NumberOfPages;
747
748 NumberOfPages = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
749 Pages = (PPFN_TYPE)(Mdl + 1);
750
751 while (--NumberOfPages >= 0)
752 {
753 MmDereferencePage(Pages[NumberOfPages]);
754 }
755 }
756
757
/*
 * @implemented
 */
PVOID STDCALL
MmMapLockedPagesSpecifyCache ( IN PMDL Mdl,
                               IN KPROCESSOR_MODE AccessMode,
                               IN MEMORY_CACHING_TYPE CacheType,
                               IN PVOID BaseAddress,
                               IN ULONG BugCheckOnFailure,
                               IN MM_PAGE_PRIORITY Priority)
/*
 * FUNCTION: Maps the pages described by the MDL into either the current
 * process' address space (AccessMode != KernelMode) or the dedicated
 * kernel MDL mapping region (AccessMode == KernelMode), with the page
 * protection derived from CacheType.
 * RETURNS: Mapped address of the buffer (base + ByteOffset), or NULL on
 * failure when MDL_MAPPING_CAN_FAIL is set (or, for some kernel-mode
 * failures, when BugCheckOnFailure is FALSE); otherwise raises or
 * bugchecks on failure.
 */
{
   PVOID Base;
   PULONG MdlPages;
   KIRQL oldIrql;
   ULONG PageCount;
   ULONG StartingOffset;
   PEPROCESS CurrentProcess;
   NTSTATUS Status;
   ULONG Protect;

   DPRINT("MmMapLockedPagesSpecifyCache(Mdl 0x%x, AccessMode 0x%x, CacheType 0x%x, "
          "BaseAddress 0x%x, BugCheckOnFailure 0x%x, Priority 0x%x)\n",
          Mdl, AccessMode, CacheType, BaseAddress, BugCheckOnFailure, Priority);

   /* FIXME: Implement Priority */
   (void) Priority;

   /* Translate the caching type into page protection bits. */
   Protect = PAGE_READWRITE;
   if (CacheType == MmNonCached)
      Protect |= PAGE_NOCACHE;
   else if (CacheType == MmWriteCombined)
      DPRINT("CacheType MmWriteCombined not supported!\n");

   /* Calculate the number of pages required. */
   PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;

   if (AccessMode != KernelMode)
   {
      MEMORY_AREA *Result;
      LARGE_INTEGER BoundaryAddressMultiple;
      /* NOTE(review): this inner declaration shadows the outer Status;
       * harmless here since it is assigned and consumed within this
       * branch, but it should be removed. */
      NTSTATUS Status;

      /* pretty sure you can't map partial mdl's to user space */
      ASSERT(!(Mdl->MdlFlags & MDL_PARTIAL));

      BoundaryAddressMultiple.QuadPart = 0;
      Base = BaseAddress;

      CurrentProcess = PsGetCurrentProcess();

      /* Carve a memory area for the mapping out of the current process'
       * address space; honor the caller's BaseAddress if one was given. */
      MmLockAddressSpace((PMADDRESS_SPACE)&CurrentProcess->VadRoot);
      Status = MmCreateMemoryArea((PMADDRESS_SPACE)&CurrentProcess->VadRoot,
                                  MEMORY_AREA_MDL_MAPPING,
                                  &Base,
                                  PageCount * PAGE_SIZE,
                                  Protect,
                                  &Result,
                                  (Base != NULL),
                                  0,
                                  BoundaryAddressMultiple);
      MmUnlockAddressSpace((PMADDRESS_SPACE)&CurrentProcess->VadRoot);
      if (!NT_SUCCESS(Status))
      {
         if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
         {
            return NULL;
         }

         /* Throw exception */
         ExRaiseStatus(STATUS_ACCESS_VIOLATION);
         ASSERT(0);
      }

      Mdl->Process = (PEPROCESS)CurrentProcess;
   }
   else /* if (AccessMode == KernelMode) */
   {
      /* can't map mdl twice */
      ASSERT(!(Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL_HAS_BEEN_MAPPED)));
      /* can't map mdl buildt from non paged pool into kernel space */
      ASSERT(!(Mdl->MdlFlags & (MDL_SOURCE_IS_NONPAGED_POOL)));

      CurrentProcess = NULL;

      /* Allocate that number of pages from the mdl mapping region. */
      KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);

      StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, PageCount, MiMdlMappingRegionHint);

      if (StartingOffset == 0xffffffff)
      {
         KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

         DPRINT1("Out of MDL mapping space\n");

         if ((Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) || !BugCheckOnFailure)
         {
            return NULL;
         }

         KEBUGCHECK(0);
      }

      Base = (PVOID)((ULONG_PTR)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE);

      /* Only bump the hint if we allocated exactly at it, so lower freed
       * slots are still found (the hint is lowered again on unmap). */
      if (MiMdlMappingRegionHint == StartingOffset)
      {
         MiMdlMappingRegionHint += PageCount;
      }

      KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);

      Mdl->Process = NULL;
   }

   /* Set the virtual mappings for the MDL pages. */
   MdlPages = (PULONG)(Mdl + 1);

   /* I/O-space PFNs are not in the PFN database, so use the unsafe
    * variant that skips those checks. */
   if (Mdl->MdlFlags & MDL_IO_SPACE)
      Status = MmCreateVirtualMappingUnsafe(CurrentProcess,
                                            Base,
                                            Protect,
                                            MdlPages,
                                            PageCount);
   else
      Status = MmCreateVirtualMapping(CurrentProcess,
                                      Base,
                                      Protect,
                                      MdlPages,
                                      PageCount);
   if (!NT_SUCCESS(Status))
   {
      /* NOTE(review): on this failure path the address range claimed
       * above (memory area for user mode, bitmap bits for kernel mode)
       * is not released before returning NULL -- looks like a leak;
       * confirm and free it as MmUnmapLockedPages does. */
      DbgPrint("Unable to create virtual mapping\n");
      if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
      {
         return NULL;
      }
      if (AccessMode != KernelMode)
      {
         /* Throw exception */
         ExRaiseStatus(STATUS_ACCESS_VIOLATION);
         ASSERT(0);
      }
      else /* AccessMode == KernelMode */
      {
         if (!BugCheckOnFailure)
            return NULL;

         /* FIXME: Use some bugcheck code instead of 0 */
         KEBUGCHECK(0);
      }
   }

   /* Mark the MDL has having being mapped. */
   if (AccessMode == KernelMode)
   {
      if (Mdl->MdlFlags & MDL_PARTIAL)
      {
         Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
      }
      else
      {
         Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
      }
      Mdl->MappedSystemVa = (char*)Base + Mdl->ByteOffset;
   }
   else
      DPRINT1("UserMode mapping - returning 0x%x\n", (ULONG)Base + Mdl->ByteOffset);

   return((char*)Base + Mdl->ByteOffset);
}
929
930
/*
 * @implemented
 */
PVOID STDCALL
MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
/*
 * FUNCTION: Maps the physical pages described by a given MDL
 * ARGUMENTS:
 *       Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool,
 *             MmAllocatePagesForMdl or IoBuildPartialMdl.
 *       AccessMode = Specifies the portion of the address space to map the
 *                    pages.
 * RETURNS: The base virtual address that maps the locked pages for the
 * range described by the MDL
 *
 * If mapping into user space, pages are mapped into current address space.
 *
 * Thin wrapper: delegates with MmCached caching, no preferred base
 * address, and BugCheckOnFailure = TRUE.
 */
{
   return MmMapLockedPagesSpecifyCache(Mdl,
                                       AccessMode,
                                       MmCached,
                                       NULL,
                                       TRUE,
                                       NormalPagePriority);
}
956
957
958 /* EOF */
959
960
961
962
963
964
965
966
967
968