Use upper-case ASSERT macros.
[reactos.git] / reactos / ntoskrnl / mm / mdl.c
1 /* $Id: mdl.c,v 1.69 2004/10/22 20:38:22 ekohl Exp $
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/mdl.c
6 * PURPOSE: Manipulates MDLs
7 * PROGRAMMER: David Welch (welch@cwcom.net)
8 * UPDATE HISTORY:
9 * 27/05/98: Created
10 */
11
12 /* INCLUDES ****************************************************************/
13
14 #include <ntoskrnl.h>
15 #define NDEBUG
16 #include <internal/debug.h>
17
18 /* GLOBALS *******************************************************************/
19
20 #define TAG_MDL TAG('M', 'M', 'D', 'L')
21
22 #define MI_MDL_MAPPING_REGION_SIZE (256*1024*1024)
23
24 static PVOID MiMdlMappingRegionBase = NULL;
25 static RTL_BITMAP MiMdlMappingRegionAllocMap;
26 static ULONG MiMdlMappingRegionHint;
27 static KSPIN_LOCK MiMdlMappingRegionLock;
28 extern ULONG MmPageArraySize;
29
/*
MDL Flags desc.

MDL_PAGES_LOCKED              MmProbeAndLockPages has been called for this mdl
MDL_SOURCE_IS_NONPAGED_POOL   mdl has been built by MmBuildMdlForNonPagedPool
MDL_PARTIAL                   mdl has been built by IoBuildPartialMdl
MDL_MAPPING_CAN_FAIL          in case of an error, MmMapLockedPages will return NULL instead of bugchecking
MDL_MAPPED_TO_SYSTEM_VA       mdl has been mapped into kernel space using MmMapLockedPages
MDL_PARTIAL_HAS_BEEN_MAPPED   mdl flagged MDL_PARTIAL has been mapped into kernel space using MmMapLockedPages
*/
40
41 /* FUNCTIONS *****************************************************************/
42
43 /*
44 * @unimplemented
45 */
46 NTSTATUS
47 STDCALL
48 MmAdvanceMdl (
49 IN PMDL Mdl,
50 IN ULONG NumberOfBytes
51 )
52 {
53 UNIMPLEMENTED;
54 return STATUS_NOT_IMPLEMENTED;
55 }
56
57 VOID INIT_FUNCTION
58 MmInitializeMdlImplementation(VOID)
59 {
60 MEMORY_AREA* Result;
61 NTSTATUS Status;
62 PVOID Buffer;
63 PHYSICAL_ADDRESS BoundaryAddressMultiple;
64
65 BoundaryAddressMultiple.QuadPart = 0;
66 MiMdlMappingRegionHint = 0;
67 MiMdlMappingRegionBase = NULL;
68
69 MmLockAddressSpace(MmGetKernelAddressSpace());
70 Status = MmCreateMemoryArea(NULL,
71 MmGetKernelAddressSpace(),
72 MEMORY_AREA_MDL_MAPPING,
73 &MiMdlMappingRegionBase,
74 MI_MDL_MAPPING_REGION_SIZE,
75 0,
76 &Result,
77 FALSE,
78 FALSE,
79 BoundaryAddressMultiple);
80 if (!NT_SUCCESS(Status))
81 {
82 MmUnlockAddressSpace(MmGetKernelAddressSpace());
83 KEBUGCHECK(0);
84 }
85 MmUnlockAddressSpace(MmGetKernelAddressSpace());
86
87 Buffer = ExAllocatePool(NonPagedPool, MI_MDL_MAPPING_REGION_SIZE / (PAGE_SIZE * 8));
88
89 RtlInitializeBitMap(&MiMdlMappingRegionAllocMap, Buffer, MI_MDL_MAPPING_REGION_SIZE / PAGE_SIZE);
90 RtlClearAllBits(&MiMdlMappingRegionAllocMap);
91
92 KeInitializeSpinLock(&MiMdlMappingRegionLock);
93 }
94
95 PVOID
96 MmGetMdlPageAddress(PMDL Mdl, PVOID Offset)
97 {
98 PULONG MdlPages;
99
100 MdlPages = (PULONG)(Mdl + 1);
101
102 return((PVOID)MdlPages[((ULONG)Offset) / PAGE_SIZE]);
103 }
104
105
106 /*
107 * @implemented
108 */
109 VOID STDCALL
110 MmUnlockPages(PMDL Mdl)
111 /*
112 * FUNCTION: Unlocks the physical pages described by a given MDL
113 * ARGUMENTS:
114 * MemoryDescriptorList = MDL describing the buffer to be unlocked
115 * NOTES: The memory described by the specified MDL must have been locked
116 * previously by a call to MmProbeAndLockPages. As the pages unlocked, the
117 * MDL is updated
118 *
119 * May be called in any process context.
120 */
121 {
122 ULONG i;
123 PULONG MdlPages;
124 PFN_TYPE Page;
125
126 /*
127 * MmProbeAndLockPages MUST have been called to lock this mdl!
128 *
129 * Windows will bugcheck if you pass MmUnlockPages an mdl that hasn't been
130 * locked with MmLockAndProbePages, but (for now) we'll be more forgiving...
131 */
132 if (!(Mdl->MdlFlags & MDL_PAGES_LOCKED))
133 {
134 DPRINT1("MmUnlockPages called for non-locked mdl!\n");
135 return;
136 }
137
138 /* If mdl buffer is mapped io space -> do nothing */
139 if (Mdl->MdlFlags & MDL_IO_SPACE)
140 {
141 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
142 return;
143 }
144
145 /* Automagically undo any calls to MmGetSystemAddressForMdl's for this mdl */
146 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
147 {
148 MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
149 }
150
151 /*
152 * FIXME: I don't know whether this right, but it looks sensible
153 */
154 if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) ||
155 (Mdl->MdlFlags & MDL_IO_PAGE_READ))
156 {
157 return;
158 }
159
160
161 MdlPages = (PULONG)(Mdl + 1);
162 for (i=0; i<(PAGE_ROUND_UP(Mdl->ByteCount+Mdl->ByteOffset)/PAGE_SIZE); i++)
163 {
164 Page = MdlPages[i];
165 MmUnlockPage(Page);
166 MmDereferencePage(Page);
167 }
168
169 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
170 }
171
172
173
174 /*
175 * @implemented
176 */
177 PVOID STDCALL
178 MmMapLockedPages(PMDL Mdl, KPROCESSOR_MODE AccessMode)
179 /*
180 * FUNCTION: Maps the physical pages described by a given MDL
181 * ARGUMENTS:
182 * Mdl = Points to an MDL updated by MmProbeAndLockPages, MmBuildMdlForNonPagedPool
183 * or IoBuildPartialMdl.
184 * AccessMode = Specifies the portion of the address space to map the
185 * pages.
186 * RETURNS: The base virtual address that maps the locked pages for the
187 * range described by the MDL
188 *
189 * If mapping into user space, pages are mapped into current address space.
190 */
191 {
192 PVOID Base;
193 PULONG MdlPages;
194 KIRQL oldIrql;
195 ULONG PageCount;
196 ULONG StartingOffset;
197 PEPROCESS CurrentProcess;
198 NTSTATUS Status;
199
200 DPRINT("MmMapLockedPages(Mdl %x, AccessMode %x)\n", Mdl, AccessMode);
201
202 /* Calculate the number of pages required. */
203 PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
204
205 if (AccessMode == UserMode)
206 {
207 MEMORY_AREA *Result;
208 LARGE_INTEGER BoundaryAddressMultiple;
209 NTSTATUS Status;
210
211 /* pretty sure you can't map partial mdl's to user space */
212 ASSERT(!(Mdl->MdlFlags & MDL_PARTIAL));
213
214 BoundaryAddressMultiple.QuadPart = 0;
215 Base = NULL;
216
217 CurrentProcess = PsGetCurrentProcess();
218
219 MmLockAddressSpace(&CurrentProcess->AddressSpace);
220 Status = MmCreateMemoryArea(CurrentProcess,
221 &CurrentProcess->AddressSpace,
222 MEMORY_AREA_MDL_MAPPING,
223 &Base,
224 PageCount * PAGE_SIZE,
225 0, /* PAGE_READWRITE? */
226 &Result,
227 FALSE,
228 FALSE,
229 BoundaryAddressMultiple);
230 MmUnlockAddressSpace(&CurrentProcess->AddressSpace);
231 if (!NT_SUCCESS(Status))
232 {
233 if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
234 {
235 return NULL;
236 }
237
238 KEBUGCHECK(0);
239 /* FIXME: handle this? */
240 }
241
242 Mdl->Process = CurrentProcess;
243 }
244 else /* if (AccessMode == KernelMode) */
245 {
246 /* can't map mdl twice */
247 ASSERT(!(Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL_HAS_BEEN_MAPPED)));
248 /* can't map mdl buildt from non paged pool into kernel space */
249 ASSERT(!(Mdl->MdlFlags & (MDL_SOURCE_IS_NONPAGED_POOL)));
250
251 CurrentProcess = NULL;
252
253 /* Allocate that number of pages from the mdl mapping region. */
254 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
255
256 StartingOffset = RtlFindClearBitsAndSet(&MiMdlMappingRegionAllocMap, PageCount, MiMdlMappingRegionHint);
257
258 if (StartingOffset == 0xffffffff)
259 {
260 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
261
262 DPRINT1("Out of MDL mapping space\n");
263
264 if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
265 {
266 return NULL;
267 }
268
269 KEBUGCHECK(0);
270 }
271
272 Base = (char*)MiMdlMappingRegionBase + StartingOffset * PAGE_SIZE;
273
274 if (MiMdlMappingRegionHint == StartingOffset)
275 {
276 MiMdlMappingRegionHint += PageCount;
277 }
278
279 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
280 }
281
282
283
284 /* Set the virtual mappings for the MDL pages. */
285 MdlPages = (PULONG)(Mdl + 1);
286
287 Status = MmCreateVirtualMapping(CurrentProcess,
288 Base,
289 PAGE_READWRITE,
290 MdlPages,
291 PageCount);
292 if (!NT_SUCCESS(Status))
293 {
294 DbgPrint("Unable to create virtual mapping\n");
295 if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL)
296 {
297 return NULL;
298 }
299 KEBUGCHECK(0);
300 }
301
302 /* Mark the MDL has having being mapped. */
303 if (AccessMode == KernelMode)
304 {
305 if (Mdl->MdlFlags & MDL_PARTIAL)
306 {
307 Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
308 }
309 else
310 {
311 Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;
312 }
313 Mdl->MappedSystemVa = (char*)Base + Mdl->ByteOffset;
314 }
315
316 return((char*)Base + Mdl->ByteOffset);
317 }
318
319
320 /*
321 * @unimplemented
322 */
323 PVOID
324 STDCALL
325 MmMapLockedPagesWithReservedMapping (
326 IN PVOID MappingAddress,
327 IN ULONG PoolTag,
328 IN PMDL MemoryDescriptorList,
329 IN MEMORY_CACHING_TYPE CacheType
330 )
331 {
332 UNIMPLEMENTED;
333 return 0;
334 }
335
336 /*
337 * @implemented
338 */
339 VOID STDCALL
340 MmUnmapLockedPages(PVOID BaseAddress, PMDL Mdl)
341 /*
342 * FUNCTION: Releases a mapping set up by a preceding call to MmMapLockedPages
343 * ARGUMENTS:
344 * BaseAddress = Base virtual address to which the pages were mapped
345 * MemoryDescriptorList = MDL describing the mapped pages
346 *
347 * User space unmappings _must_ be done from the original process context!
348 */
349 {
350 KIRQL oldIrql;
351 ULONG i;
352 ULONG PageCount;
353 ULONG Base;
354
355 DPRINT("MmUnmapLockedPages(BaseAddress %x, Mdl %x)\n", BaseAddress, Mdl);
356
357 /*
358 * In this case, the MDL has the same system address as the base address
359 * so there is no need to free it
360 */
361 if ((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) &&
362 ((ULONG_PTR)BaseAddress >= KERNEL_BASE))
363 {
364 return;
365 }
366
367
368 /* Calculate the number of pages we mapped. */
369 PageCount = PAGE_ROUND_UP(Mdl->ByteCount + Mdl->ByteOffset) / PAGE_SIZE;
370
371 /*
372 * Docs says that BaseAddress should be a _base_ address, but every example
373 * I've seen pass the actual address. -Gunnar
374 */
375 BaseAddress = PAGE_ALIGN(BaseAddress);
376
377 /* Unmap all the pages. */
378 for (i = 0; i < PageCount; i++)
379 {
380 MmDeleteVirtualMapping(NULL,
381 (char*)BaseAddress + (i * PAGE_SIZE),
382 FALSE,
383 NULL,
384 NULL);
385 }
386
387 if ((DWORD)BaseAddress >= KERNEL_BASE)
388 {
389 ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);
390
391 KeAcquireSpinLock(&MiMdlMappingRegionLock, &oldIrql);
392 /* Deallocate all the pages used. */
393 Base = (ULONG)((char*)BaseAddress - (char*)MiMdlMappingRegionBase) / PAGE_SIZE;
394
395 RtlClearBits(&MiMdlMappingRegionAllocMap, Base, PageCount);
396
397 MiMdlMappingRegionHint = min (MiMdlMappingRegionHint, Base);
398
399 KeReleaseSpinLock(&MiMdlMappingRegionLock, oldIrql);
400
401 /* Reset the MDL state. */
402 Mdl->MdlFlags &= ~MDL_MAPPED_TO_SYSTEM_VA;
403 Mdl->MappedSystemVa = NULL;
404
405 }
406 else
407 {
408 MEMORY_AREA *Marea;
409
410 ASSERT(Mdl->Process == PsGetCurrentProcess());
411
412 Marea = MmOpenMemoryAreaByAddress( &Mdl->Process->AddressSpace, BaseAddress );
413 if (Marea == NULL)
414 {
415 DPRINT1( "Couldn't open memory area when unmapping user-space pages!\n" );
416 KEBUGCHECK(0);
417 }
418
419 MmFreeMemoryArea( &Mdl->Process->AddressSpace, Marea->BaseAddress, 0, NULL, NULL );
420
421 Mdl->Process = NULL;
422 }
423
424 }
425
426 /*
427 * @unimplemented
428 */
429 VOID
430 STDCALL
431 MmUnmapReservedMapping (
432 IN PVOID BaseAddress,
433 IN ULONG PoolTag,
434 IN PMDL MemoryDescriptorList
435 )
436 {
437 UNIMPLEMENTED;
438 }
439
440
441 VOID
442 MmBuildMdlFromPages(PMDL Mdl, PPFN_TYPE Pages)
443 {
444 memcpy(Mdl + 1, Pages, sizeof(PFN_TYPE) * (PAGE_ROUND_UP(Mdl->ByteOffset+Mdl->ByteCount)/PAGE_SIZE));
445
446 //FIXME: this flag should be set by the caller perhaps?
447 Mdl->MdlFlags |= MDL_IO_PAGE_READ;
448 }
449
450 /*
451 * @unimplemented
452 */
453 NTSTATUS
454 STDCALL
455 MmPrefetchPages (
456 IN ULONG NumberOfLists,
457 IN PREAD_LIST *ReadLists
458 )
459 {
460 UNIMPLEMENTED;
461 return STATUS_NOT_IMPLEMENTED;
462 }
463
464 /*
465 * @unimplemented
466 */
467 NTSTATUS
468 STDCALL
469 MmProtectMdlSystemAddress (
470 IN PMDL MemoryDescriptorList,
471 IN ULONG NewProtect
472 )
473 {
474 UNIMPLEMENTED;
475 return STATUS_NOT_IMPLEMENTED;
476 }
477
478
479 /*
480 * @unimplemented
481 */
482 VOID STDCALL MmProbeAndLockPages (PMDL Mdl,
483 KPROCESSOR_MODE AccessMode,
484 LOCK_OPERATION Operation)
485 /*
486 * FUNCTION: Probes the specified pages, makes them resident and locks them
487 * ARGUMENTS:
488 * Mdl = MDL to probe
489 * AccessMode = Access at which to probe the buffer
490 * Operation = Operation to probe for
491 *
492 * This function can be seen as a safe version of MmBuildMdlForNonPagedPool
493 * used in cases where you know that the mdl address is paged memory or
494 * you don't know where the mdl address comes from. MmProbeAndLockPages will
495 * work no matter what kind of mdl address you have.
496 */
497 {
498 PPFN_TYPE MdlPages;
499 ULONG i, j;
500 ULONG NrPages;
501 NTSTATUS Status;
502 KPROCESSOR_MODE Mode;
503 PFN_TYPE Page;
504 PEPROCESS CurrentProcess = PsGetCurrentProcess();
505
506 DPRINT("MmProbeAndLockPages(Mdl %x)\n", Mdl);
507
508 ASSERT(!(Mdl->MdlFlags & (MDL_PAGES_LOCKED|MDL_MAPPED_TO_SYSTEM_VA|MDL_PARTIAL|
509 MDL_IO_SPACE|MDL_SOURCE_IS_NONPAGED_POOL)));
510
511 MdlPages = (PPFN_TYPE)(Mdl + 1);
512 NrPages = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
513
514 /* mdl must have enough page entries */
515 ASSERT(NrPages <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));
516
517
518 if (Mdl->StartVa >= (PVOID)KERNEL_BASE &&
519 MmGetPfnForProcess(NULL, Mdl->StartVa) > MmPageArraySize)
520 {
521 /* phys addr is not phys memory so this must be io memory */
522
523 for (i = 0; i < NrPages; i++)
524 {
525 MdlPages[i] = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i*PAGE_SIZE));
526 }
527
528 Mdl->MdlFlags |= MDL_PAGES_LOCKED|MDL_IO_SPACE;
529 return;
530 }
531
532
533 if (Mdl->StartVa >= (PVOID)KERNEL_BASE)
534 {
535 //FIXME: why isn't AccessMode used?
536 Mode = KernelMode;
537 Mdl->Process = NULL;
538 }
539 else
540 {
541 //FIXME: why isn't AccessMode used?
542 Mode = UserMode;
543 Mdl->Process = CurrentProcess;
544 }
545
546
547 /*
548 * Lock the pages
549 */
550 MmLockAddressSpace(&CurrentProcess->AddressSpace);
551
552 for (i = 0; i < NrPages; i++)
553 {
554 PVOID Address;
555
556 Address = (char*)Mdl->StartVa + (i*PAGE_SIZE);
557
558 /*
559 * FIXME: skip the probing/access stuff if buffer is nonpaged kernel space?
560 * -Gunnar
561 */
562
563 if (!MmIsPagePresent(NULL, Address))
564 {
565 Status = MmNotPresentFault(Mode, (ULONG)Address, TRUE);
566 if (!NT_SUCCESS(Status))
567 {
568 for (j = 0; j < i; j++)
569 {
570 Page = MdlPages[j];
571 MmUnlockPage(Page);
572 MmDereferencePage(Page);
573 }
574 ExRaiseStatus(Status);
575 }
576 }
577 else
578 {
579 MmLockPage(MmGetPfnForProcess(NULL, Address));
580 }
581
582 if ((Operation == IoWriteAccess || Operation == IoModifyAccess) &&
583 (!(MmGetPageProtect(NULL, (PVOID)Address) & PAGE_READWRITE)))
584 {
585 Status = MmAccessFault(Mode, (ULONG)Address, TRUE);
586 if (!NT_SUCCESS(Status))
587 {
588 for (j = 0; j < i; j++)
589 {
590 Page = MdlPages[j];
591 MmUnlockPage(Page);
592 MmDereferencePage(Page);
593 }
594 ExRaiseStatus(Status);
595 }
596 }
597 Page = MmGetPfnForProcess(NULL, Address);
598 MdlPages[i] = Page;
599 MmReferencePage(Page);
600 }
601
602 MmUnlockAddressSpace(&CurrentProcess->AddressSpace);
603 Mdl->MdlFlags |= MDL_PAGES_LOCKED;
604 }
605
606 /*
607 * @unimplemented
608 */
609 VOID
610 STDCALL
611 MmProbeAndLockProcessPages (
612 IN OUT PMDL MemoryDescriptorList,
613 IN PEPROCESS Process,
614 IN KPROCESSOR_MODE AccessMode,
615 IN LOCK_OPERATION Operation
616 )
617 {
618 UNIMPLEMENTED;
619 }
620
621 /*
622 * @unimplemented
623 */
624 VOID
625 STDCALL
626 MmProbeAndLockSelectedPages(
627 IN OUT PMDL MemoryDescriptorList,
628 IN LARGE_INTEGER PageList[],
629 IN KPROCESSOR_MODE AccessMode,
630 IN LOCK_OPERATION Operation
631 )
632 {
633 UNIMPLEMENTED;
634 }
635
636 /*
637 * @implemented
638 */
639 ULONG STDCALL MmSizeOfMdl (PVOID Base,
640 ULONG Length)
641 /*
642 * FUNCTION: Returns the number of bytes to allocate for an MDL describing
643 * the given address range
644 * ARGUMENTS:
645 * Base = base virtual address
646 * Length = number of bytes to map
647 */
648 {
649 ULONG len;
650
651 len = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base,Length);
652
653 return(sizeof(MDL)+(len*sizeof(PFN_TYPE)));
654 }
655
656
657 /*
658 * @implemented
659 */
660 VOID STDCALL
661 MmBuildMdlForNonPagedPool (PMDL Mdl)
662 /*
663 * FUNCTION: Fills in the corresponding physical page array of a given
664 * MDL for a buffer in nonpaged system space
665 * ARGUMENTS:
666 * Mdl = Points to an MDL that supplies a virtual address,
667 * byte offset and length
668 *
669 * This function can be seen as a fast version of MmProbeAndLockPages in case
670 * you _know_ that the mdl address is within nonpaged kernel space.
671 */
672 {
673 ULONG i;
674 ULONG PageCount;
675 PPFN_TYPE MdlPages;
676
677 /*
678 * mdl buffer must (at least) be in kernel space, thou this doesn't
679 * necesarely mean that the buffer in within _nonpaged_ kernel space...
680 */
681 ASSERT((ULONG)Mdl->StartVa >= KERNEL_BASE);
682
683 PageCount = PAGE_ROUND_UP(Mdl->ByteOffset + Mdl->ByteCount) / PAGE_SIZE;
684 MdlPages = (PPFN_TYPE)(Mdl + 1);
685
686 /* mdl must have enough page entries */
687 ASSERT(PageCount <= (Mdl->Size - sizeof(MDL))/sizeof(PFN_TYPE));
688
689 for (i=0; i < PageCount; i++)
690 {
691 *MdlPages++ = MmGetPfnForProcess(NULL, (char*)Mdl->StartVa + (i * PAGE_SIZE));
692 }
693
694 Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;
695 Mdl->Process = NULL;
696 Mdl->MappedSystemVa = (char*)Mdl->StartVa + Mdl->ByteOffset;
697 }
698
699
700 /*
701 * @implemented
702 */
703 PMDL STDCALL
704 MmCreateMdl (PMDL Mdl,
705 PVOID Base,
706 ULONG Length)
707 /*
708 * FUNCTION: Allocates and initalizes an MDL
709 * ARGUMENTS:
710 * MemoryDescriptorList = Points to MDL to initalize. If this is
711 * NULL then one is allocated
712 * Base = Base virtual address of the buffer
713 * Length = Length in bytes of the buffer
714 * RETURNS: A pointer to initalized MDL
715 */
716 {
717 if (Mdl == NULL)
718 {
719 ULONG Size;
720
721 Size = MmSizeOfMdl(Base,Length);
722 Mdl =
723 (PMDL)ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
724 if (Mdl == NULL)
725 {
726 return(NULL);
727 }
728 }
729
730 MmInitializeMdl(Mdl, (char*)Base, Length);
731
732 return(Mdl);
733 }
734
735 /*
736 * @unimplemented
737 */
738 VOID STDCALL
739 MmMapMemoryDumpMdl (PVOID Unknown0)
740 /*
741 * FIXME: Has something to do with crash dumps. Do we want to implement
742 * this?
743 */
744 {
745 UNIMPLEMENTED;
746 }
747
748 /*
749 * @unimplemented
750 */
751 PMDL STDCALL
752 MmAllocatePagesForMdl ( IN PHYSICAL_ADDRESS LowAddress,
753 IN PHYSICAL_ADDRESS HighAddress,
754 IN PHYSICAL_ADDRESS SkipBytes,
755 IN SIZE_T Totalbytes )
756 {
757 /*
758 MmAllocatePagesForMdl allocates zero-filled, nonpaged, physical memory pages to an MDL
759
760 MmAllocatePagesForMdlSearch the PFN database for free, zeroed or standby
761 pagesAllocates pages and puts in MDLDoes not map pages (caller responsibility)
762 Designed to be used by an AGP driver
763
764 LowAddress is the lowest acceptable physical address it wants to allocate
765 and HighAddress is the highest. SkipBytes are the number of bytes that the
766 kernel should keep free above LowAddress and below the address at which it
767 starts to allocate physical memory. TotalBytes are the number of bytes that
768 the driver wants to allocate. The return value of the function is a MDL
769 that if non-zero describes the physical memory the kernel has given the
770 driver. To access portions of the memory the driver must create sub-MDLs
771 from the returned MDL that describe appropriate portions of the physical
772 memory. When a driver wants to access physical memory described by a
773 sub-MDL it must map the sub-MDL using MmGetSystemAddressForMdlSafe.
774
775 Konstantin Gusev
776
777 */
778
779 /* SkipBytes must be a multiple of the page size */
780 ASSERT((SkipBytes.QuadPart % PAGE_SIZE) == 0);
781
782 UNIMPLEMENTED;
783 return(NULL);
784 }
785
786 /*
787 * @unimplemented
788 */
789 VOID STDCALL
790 MmFreePagesFromMdl ( IN PMDL Mdl )
791 {
792 /*
793 Drivers use the MmFreePagesFromMdl, the kernel-mode equivalent of
794 FreeUserPhysicalPages, to free the physical memory it has allocated with
795 MmAllocatePagesForMdl. This function is also prototyped in ntddk.h:
796
797 Note that a driver is responsible for deallocating the MDL returned by
798 MmAllocatePagesForMdl with a call to ExFreePool, since MmFreePagesFromMdl
799 does not free the MDL.
800
801 Konstantin Gusev
802
803 */
804
805 UNIMPLEMENTED;
806 }
807
808 /*
809 * @unimplemented
810 */
811 PVOID STDCALL
812 MmMapLockedPagesSpecifyCache ( IN PMDL Mdl,
813 IN KPROCESSOR_MODE AccessMode,
814 IN MEMORY_CACHING_TYPE CacheType,
815 IN PVOID BaseAddress,
816 IN ULONG BugCheckOnFailure,
817 IN ULONG Priority )
818 {
819 UNIMPLEMENTED;
820 return MmMapLockedPages (Mdl, AccessMode);
821 }
822
823 /* EOF */
824
825
826
827
828
829
830
831
832