85bbfd18544ae20a4140dffa6a12698cd09a618a
[reactos.git] / ntoskrnl / mm / ARM3 / mdlsup.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/mdlsup.c
5 * PURPOSE: ARM Memory Manager Memory Descriptor List (MDL) Management
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
15 #define MODULE_INVOLVED_IN_ARM3
16 #include <mm/ARM3/miarm.h>
17
18 /* GLOBALS ********************************************************************/
19
20 BOOLEAN MmTrackPtes;
21 BOOLEAN MmTrackLockedPages;
22 SIZE_T MmSystemLockPagesCount;
23
24 /* INTERNAL FUNCTIONS *********************************************************/
/**
 * Maps the pages described by a locked MDL into the current process'
 * user-mode address space. A VadDevicePhysicalMemory VAD is inserted to
 * reserve the range, then one user PTE is written per MDL page.
 *
 * @param Mdl         MDL whose PFN array describes the pages to map.
 * @param StartVa     Virtual address the MDL describes; used only together
 *                    with the MDL byte count to compute the page span.
 * @param CacheType   Requested caching type; must be <= MmWriteCombined.
 * @param BaseAddress Optional fixed user address for the mapping. If NULL,
 *                    a free range is picked from the process VAD tree.
 *
 * @return User-mode address of the mapping with the MDL byte offset applied.
 *         On failure this routine does not return: it raises the NTSTATUS
 *         (callers are expected to wrap it in SEH).
 */
static
PVOID
NTAPI
MiMapLockedPagesInUserSpace(
    _In_ PMDL Mdl,
    _In_ PVOID StartVa,
    _In_ MEMORY_CACHING_TYPE CacheType,
    _In_opt_ PVOID BaseAddress)
{
    NTSTATUS Status;
    PEPROCESS Process = PsGetCurrentProcess();
    PETHREAD Thread = PsGetCurrentThread();
    TABLE_SEARCH_RESULT Result;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    BOOLEAN IsIoMapping;
    KIRQL OldIrql;
    ULONG_PTR StartingVa;
    ULONG_PTR EndingVa;
    PMMADDRESS_NODE Parent;
    PMMVAD_LONG Vad;
    ULONG NumberOfPages;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    MMPTE TempPte;
    PPFN_NUMBER MdlPages;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    BOOLEAN AddressSpaceLocked = FALSE;

    PAGED_CODE();

    DPRINT("MiMapLockedPagesInUserSpace(%p, %p, 0x%x, %p)\n",
           Mdl, StartVa, CacheType, BaseAddress);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa,
                                                   MmGetMdlByteCount(Mdl));
    MdlPages = MmGetMdlPfnArray(Mdl);

    ASSERT(CacheType <= MmWriteCombined);

    /* I/O-space MDLs use the platform's I/O row of the cache attribute table */
    IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

    /* Large pages are always cached, make sure we're not asking for those */
    if (CacheAttribute != MiCached)
    {
        DPRINT1("FIXME: Need to check for large pages\n");
    }

    /* Allocate a VAD for our mapped region */
    Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');
    if (Vad == NULL)
    {
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto Error;
    }

    /* Initialize PhysicalMemory VAD */
    RtlZeroMemory(Vad, sizeof(*Vad));
    Vad->u2.VadFlags2.LongVad = 1;
    Vad->u.VadFlags.VadType = VadDevicePhysicalMemory;
    Vad->u.VadFlags.Protection = MM_READWRITE;
    Vad->u.VadFlags.PrivateMemory = 1;

    /* Did the caller specify an address? */
    if (BaseAddress == NULL)
    {
        /* We get to pick the address */
        MmLockAddressSpace(&Process->Vm);
        AddressSpaceLocked = TRUE;
        if (Process->VmDeleted)
        {
            Status = STATUS_PROCESS_IS_TERMINATING;
            goto Error;
        }

        Result = MiFindEmptyAddressRangeInTree(NumberOfPages << PAGE_SHIFT,
                                               MM_VIRTMEM_GRANULARITY,
                                               &Process->VadRoot,
                                               &Parent,
                                               &StartingVa);
        if (Result == TableFoundNode)
        {
            /* No free range large enough in this address space */
            Status = STATUS_NO_MEMORY;
            goto Error;
        }
        EndingVa = StartingVa + NumberOfPages * PAGE_SIZE - 1;
        BaseAddress = (PVOID)StartingVa;
    }
    else
    {
        /* Caller specified a base address */
        StartingVa = (ULONG_PTR)BaseAddress;
        EndingVa = StartingVa + NumberOfPages * PAGE_SIZE - 1;

        /* Make sure it's page-aligned, doesn't wrap, and stays below the VAD limit */
        if (BYTE_OFFSET(StartingVa) != 0 ||
            EndingVa <= StartingVa ||
            EndingVa > (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS)
        {
            Status = STATUS_INVALID_ADDRESS;
            goto Error;
        }

        MmLockAddressSpace(&Process->Vm);
        AddressSpaceLocked = TRUE;
        if (Process->VmDeleted)
        {
            Status = STATUS_PROCESS_IS_TERMINATING;
            goto Error;
        }

        /* Check if it's already in use */
        Result = MiCheckForConflictingNode(StartingVa >> PAGE_SHIFT,
                                           EndingVa >> PAGE_SHIFT,
                                           &Process->VadRoot,
                                           &Parent);
        if (Result == TableFoundNode)
        {
            Status = STATUS_CONFLICTING_ADDRESSES;
            goto Error;
        }
    }

    Vad->StartingVpn = StartingVa >> PAGE_SHIFT;
    Vad->EndingVpn = EndingVa >> PAGE_SHIFT;

    /* Working set lock guards the PTE writes below; no gotos past this point */
    MiLockProcessWorkingSetUnsafe(Process, Thread);

    ASSERT(Vad->EndingVpn >= Vad->StartingVpn);

    MiInsertVad((PMMVAD)Vad, &Process->VadRoot);

    /* Check if this is uncached */
    if (CacheAttribute != MiCached)
    {
        /* Flush all caches so no stale cached data survives the uncached remap */
        KeFlushEntireTb(TRUE, TRUE);
        KeInvalidateAllCaches();
    }

    /* Walk the PFN array; a LIST_HEAD entry terminates a partial MDL early */
    PointerPte = MiAddressToPte(BaseAddress);
    while (NumberOfPages != 0 &&
           *MdlPages != LIST_HEAD)
    {
        PointerPde = MiPteToPde(PointerPte);
        MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
        ASSERT(PointerPte->u.Hard.Valid == 0);

        /* Add a PDE reference for each page */
        MiIncrementPageTableReferences(BaseAddress);

        /* Set up our basic user PTE */
        MI_MAKE_HARDWARE_PTE_USER(&TempPte,
                                  PointerPte,
                                  MM_READWRITE,
                                  *MdlPages);

        /* FIXME: We need to respect the PFN's caching information in some cases */
        Pfn2 = MiGetPfnEntry(*MdlPages);
        if (Pfn2 != NULL)
        {
            ASSERT(Pfn2->u3.e2.ReferenceCount != 0);

            if (Pfn2->u3.e1.CacheAttribute != CacheAttribute)
            {
                DPRINT1("FIXME: Using caller's cache attribute instead of PFN override\n");
            }

            /* We don't support AWE magic */
            ASSERT(Pfn2->u3.e1.CacheAttribute != MiNotMapped);
        }

        /* Configure caching */
        switch (CacheAttribute)
        {
            case MiNonCached:
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;
            case MiCached:
                break;
            case MiWriteCombined:
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;
            default:
                ASSERT(FALSE);
                break;
        }

        /* Make the page valid */
        MI_WRITE_VALID_PTE(PointerPte, TempPte);

        /* Bump the page table page's share count (needs the PFN lock) */
        Pfn1 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
        Pfn1->u2.ShareCount++;
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        /* Next page */
        MdlPages++;
        PointerPte++;
        NumberOfPages--;
        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
    }

    MiUnlockProcessWorkingSetUnsafe(Process, Thread);
    ASSERT(AddressSpaceLocked);
    MmUnlockAddressSpace(&Process->Vm);

    ASSERT(StartingVa != 0);
    return (PVOID)((ULONG_PTR)StartingVa + MmGetMdlByteOffset(Mdl));

Error:
    /* Failure path: release the address space lock and the (not yet
       inserted) VAD, then raise the status to the caller's SEH frame */
    if (AddressSpaceLocked)
    {
        MmUnlockAddressSpace(&Process->Vm);
    }
    if (Vad != NULL)
    {
        ExFreePoolWithTag(Vad, 'ldaV');
    }
    ExRaiseStatus(Status);
}
250
/**
 * Tears down a user-space mapping created by MiMapLockedPagesInUserSpace:
 * locates and removes the VadDevicePhysicalMemory VAD, erases the user
 * PTEs, drops the page table references, and frees the VAD.
 *
 * @param BaseAddress User-mode base address of the mapping.
 * @param Mdl         The MDL that was mapped at BaseAddress.
 */
static
VOID
NTAPI
MiUnmapLockedPagesInUserSpace(
    _In_ PVOID BaseAddress,
    _In_ PMDL Mdl)
{
    PEPROCESS Process = PsGetCurrentProcess();
    PETHREAD Thread = PsGetCurrentThread();
    PMMVAD Vad;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;
    ULONG NumberOfPages;
    PPFN_NUMBER MdlPages;
    PFN_NUMBER PageTablePage;

    DPRINT("MiUnmapLockedPagesInUserSpace(%p, %p)\n", BaseAddress, Mdl);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl),
                                                   MmGetMdlByteCount(Mdl));
    ASSERT(NumberOfPages != 0);
    MdlPages = MmGetMdlPfnArray(Mdl);

    /* Find the VAD; it must be the physical-memory VAD created at map time */
    MmLockAddressSpace(&Process->Vm);
    Vad = MiLocateAddress(BaseAddress);
    if (!Vad ||
        Vad->u.VadFlags.VadType != VadDevicePhysicalMemory)
    {
        /* Not one of our mappings: bail out without touching anything */
        DPRINT1("MiUnmapLockedPagesInUserSpace invalid for %p\n", BaseAddress);
        MmUnlockAddressSpace(&Process->Vm);
        return;
    }

    MiLockProcessWorkingSetUnsafe(Process, Thread);

    /* Remove it from the process VAD tree */
    ASSERT(Process->VadRoot.NumberGenericTableElements >= 1);
    MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot);

    /* MiRemoveNode should have removed us if we were the hint */
    ASSERT(Process->VadRoot.NodeHint != Vad);

    /* Erase each mapped PTE; a LIST_HEAD entry terminates a partial MDL */
    PointerPte = MiAddressToPte(BaseAddress);
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    while (NumberOfPages != 0 &&
           *MdlPages != LIST_HEAD)
    {
        /* Both the page table page and the mapping PTE must still be valid */
        ASSERT(MiAddressToPte(PointerPte)->u.Hard.Valid == 1);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        /* Dereference the page */
        MiDecrementPageTableReferences(BaseAddress);

        /* Invalidate it */
        MI_ERASE_PTE(PointerPte);

        /* We invalidated this PTE, so dereference the PDE */
        PointerPde = MiAddressToPde(BaseAddress);
        PageTablePage = PointerPde->u.Hard.PageFrameNumber;
        MiDecrementShareCount(MiGetPfnEntry(PageTablePage), PageTablePage);

        /* Next page */
        PointerPte++;
        NumberOfPages--;
        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
        MdlPages++;

        /* Moving to a new PDE? */
        if (PointerPde != MiAddressToPde(BaseAddress))
        {
            /* See if we should delete it */
            KeFlushProcessTb();
            PointerPde = MiPteToPde(PointerPte - 1);
            ASSERT(PointerPde->u.Hard.Valid == 1);
            /* NOTE(review): BaseAddress has already advanced into the NEXT
               PDE's range here, while PointerPde refers to the previous
               one — verify this queries the intended page table's refcount */
            if (MiQueryPageTableReferences(BaseAddress) == 0)
            {
                ASSERT(PointerPde->u.Long != 0);
                /* No references left: delete the page table itself */
                MiDeletePte(PointerPde,
                            MiPteToAddress(PointerPde),
                            Process,
                            NULL);
            }
        }
    }

    KeFlushProcessTb();
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    MiUnlockProcessWorkingSetUnsafe(Process, Thread);
    MmUnlockAddressSpace(&Process->Vm);
    ExFreePoolWithTag(Vad, 'ldaV');
}
344
345 /* PUBLIC FUNCTIONS ***********************************************************/
346
347 /*
348 * @implemented
349 */
350 PMDL
351 NTAPI
352 MmCreateMdl(IN PMDL Mdl,
353 IN PVOID Base,
354 IN SIZE_T Length)
355 {
356 SIZE_T Size;
357
358 //
359 // Check if we don't have an MDL built
360 //
361 if (!Mdl)
362 {
363 //
364 // Calculate the size we'll need and allocate the MDL
365 //
366 Size = MmSizeOfMdl(Base, Length);
367 Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
368 if (!Mdl) return NULL;
369 }
370
371 //
372 // Initialize it
373 //
374 MmInitializeMdl(Mdl, Base, Length);
375 return Mdl;
376 }
377
378 /*
379 * @implemented
380 */
381 SIZE_T
382 NTAPI
383 MmSizeOfMdl(IN PVOID Base,
384 IN SIZE_T Length)
385 {
386 //
387 // Return the MDL size
388 //
389 return sizeof(MDL) +
390 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
391 }
392
393 /*
394 * @implemented
395 */
/**
 * Fills in the PFN array of an MDL that describes a nonpaged system-space
 * buffer. No probing or locking is performed: the buffer is assumed
 * resident, so the PFNs are read straight out of the system PTEs.
 *
 * @param Mdl MDL whose StartVa/ByteOffset/ByteCount describe the buffer.
 *            On return MappedSystemVa is set, the PFN array is filled, and
 *            MDL_SOURCE_IS_NONPAGED_POOL (and possibly MDL_IO_SPACE) is set.
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks: the MDL must not already be locked, mapped, or partial
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated to a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information; the PFN array follows the MDL header
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs, copying each page frame number into the array
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // If the last PFN has no PFN database entry, this is an I/O mapping
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}
457
458 /*
459 * @implemented
460 */
461 PMDL
462 NTAPI
463 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
464 IN PHYSICAL_ADDRESS HighAddress,
465 IN PHYSICAL_ADDRESS SkipBytes,
466 IN SIZE_T TotalBytes)
467 {
468 //
469 // Call the internal routine
470 //
471 return MiAllocatePagesForMdl(LowAddress,
472 HighAddress,
473 SkipBytes,
474 TotalBytes,
475 MiNotMapped,
476 0);
477 }
478
479 /*
480 * @implemented
481 */
482 PMDL
483 NTAPI
484 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
485 IN PHYSICAL_ADDRESS HighAddress,
486 IN PHYSICAL_ADDRESS SkipBytes,
487 IN SIZE_T TotalBytes,
488 IN MEMORY_CACHING_TYPE CacheType,
489 IN ULONG Flags)
490 {
491 MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
492
493 //
494 // Check for invalid cache type
495 //
496 if (CacheType > MmWriteCombined)
497 {
498 //
499 // Normalize to default
500 //
501 CacheAttribute = MiNotMapped;
502 }
503 else
504 {
505 //
506 // Conver to internal caching attribute
507 //
508 CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
509 }
510
511 //
512 // Only these flags are allowed
513 //
514 if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
515 {
516 //
517 // Silently fail
518 //
519 return NULL;
520 }
521
522 //
523 // Call the internal routine
524 //
525 return MiAllocatePagesForMdl(LowAddress,
526 HighAddress,
527 SkipBytes,
528 TotalBytes,
529 CacheAttribute,
530 Flags);
531 }
532
533 /*
534 * @implemented
535 */
/*
 * @implemented
 *
 * Releases the physical pages described by an MDL that was built with
 * MmAllocatePagesForMdl(Ex). Each PFN entry is reset and dereferenced
 * under the PFN lock, the PFN slots are overwritten with LIST_HEAD, and
 * MDL_PAGES_LOCKED is cleared. Must be called at <= APC_LEVEL.
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks: real RAM only (no I/O space), page-aligned start
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // A LIST_HEAD slot marks the end of a partially-filled MDL
        //
        if (*Pages == LIST_HEAD) break;

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        /* 0x1FFEDCB appears to be a sentinel PteFrame stamped on MDL-allocated
           pages — presumably set by MiAllocatePagesForMdl; TODO confirm */
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
        }

        //
        // Clear the allocation markers and share count
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u3.e1.PageLocation = StandbyPageList;
        Pfn1->u2.ShareCount = 0;

        //
        // Dereference it
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        //
        // Clear this page and move on
        //
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
628
629 /*
630 * @implemented
631 */
/*
 * @implemented
 *
 * Maps the pages of a locked (or partial) MDL into virtual address space.
 * KernelMode requests are satisfied from the system PTE pool; UserMode
 * requests are forwarded to MiMapLockedPagesInUserSpace.
 *
 * @param Mdl               MDL with a populated PFN array.
 * @param AccessMode        KernelMode or UserMode mapping target.
 * @param CacheType         Requested MEMORY_CACHING_TYPE.
 * @param BaseAddress       Only used for user-mode mappings (preferred VA).
 * @param BugCheckOnFailure If set and system PTEs run out (and the MDL
 *                          doesn't allow failure), bugcheck instead of
 *                          returning NULL.
 * @param Priority          Page priority hint (currently unused here).
 *
 * @return Mapped virtual address (byte offset applied), or NULL if system
 *         PTE reservation fails and failure is permitted.
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN MM_PAGE_PRIORITY Priority)
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_COUNT PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks: not already mapped, but locked or partial
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type (I/O space has its own table row)
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template PTE and apply the caching attribute to it
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs; LIST_HEAD terminates a partially-filled MDL
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    /* User-mode request: map into the current process' address space */
    return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
}
786
787 /*
788 * @implemented
789 */
790 PVOID
791 NTAPI
792 MmMapLockedPages(IN PMDL Mdl,
793 IN KPROCESSOR_MODE AccessMode)
794 {
795 //
796 // Call the extended version
797 //
798 return MmMapLockedPagesSpecifyCache(Mdl,
799 AccessMode,
800 MmCached,
801 NULL,
802 TRUE,
803 HighPagePriority);
804 }
805
806 /*
807 * @implemented
808 */
/*
 * @implemented
 *
 * Undoes a mapping created by MmMapLockedPages(SpecifyCache). A system-space
 * address (above MM_HIGHEST_USER_ADDRESS) releases the reserved system PTEs,
 * accounting for any extra PTEs recorded after the PFN array when
 * MDL_FREE_EXTRA_PTES is set; a user-space address is forwarded to
 * MiUnmapLockedPagesInUserSpace.
 *
 * @param BaseAddress Mapped address previously returned by the map routine.
 * @param Mdl         The MDL that was mapped there.
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_COUNT PageCount, ExtraPageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free advanced pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = MmGetMdlPfnArray(Mdl);

            /* Number of extra pages stored after the PFN array */
            ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);

            //
            // Widen the release: the extra PTEs precede the mapping
            //
            PageCount += ExtraPageCount;
            PointerPte -= ExtraPageCount;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  (ExtraPageCount << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        /* User-space mapping: tear it down in the current process */
        MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
    }
}
899
900 /*
901 * @implemented
902 */
/*
 * @implemented
 *
 * Probes the buffer described by an MDL for accessibility, faults all of
 * its pages resident, and locks them in memory by referencing their PFN
 * entries. Fills in the MDL's PFN array.
 *
 * Locking model: system-space buffers are walked under the PFN lock;
 * user-space buffers are walked under the process working set lock. Either
 * lock is dropped around each MmAccessFault call and re-acquired after.
 *
 * @param Mdl        MDL describing the buffer (must not be locked/mapped).
 * @param AccessMode KernelMode or UserMode probing semantics.
 * @param Operation  IoReadAccess, IoWriteAccess or IoModifyAccess; anything
 *                   other than IoReadAccess also probes for write.
 *
 * Raises STATUS_ACCESS_VIOLATION (or the probe/fault status) on failure;
 * on the failure path the partially-locked pages are released via
 * MmUnlockPages before raising.
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
#if (_MI_PAGING_LEVELS >= 3)
    PMMPDE PointerPpe;
#endif
#if (_MI_PAGING_LEVELS == 4)
    PMMPDE PointerPxe;
#endif
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks: virgin MDL, sub-page offset, page-aligned StartVa
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access: user-mode buffers must stay below the probe
       limit and must not wrap around */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    //
    // A user-space buffer belongs to the current process; a system-space
    // buffer has no owning process
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure: mark this slot invalid until locked below
            //
            *MdlPages = LIST_HEAD;

            //
            // Touch the page for read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address: advance to the following page boundary
            //
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // The probe faulted: remember why
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE (and upper levels where they exist)
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    PointerPpe = MiAddressToPpe(StartAddress);
#endif
#if (_MI_PAGING_LEVELS == 4)
    PointerPxe = MiAddressToPxe(StartAddress);
#endif

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*, so the cleanup path can MmUnlockPages
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock for the PTE walk
        //
        UsePfnLock = TRUE;
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages in the owning process
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        /* Lock the process working set instead of the PFN lock */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and fault in any level of the hierarchy that is
        // not yet valid (PXE/PPE/PDE/PTE, top-down)
        //
        *MdlPages = LIST_HEAD;
        while (
#if (_MI_PAGING_LEVELS == 4)
               (PointerPxe->u.Hard.Valid == 0) ||
#endif
#if (_MI_PAGING_LEVELS >= 3)
               (PointerPpe->u.Hard.Valid == 0) ||
#endif
               (PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // Drop whichever lock we hold before faulting
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);

            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
            Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)0xBADBADA3);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // Re-acquire the same lock and re-test the hierarchy
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // Drop whichever lock we hold before faulting
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Fault it in for write so the COW copy is made
                        //

                        //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
                        Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)0xBADBADA3);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Start over with the same PTE (it changed under us)
                        //
                        continue;
                    }
                }

                //
                // Write to a non-writable, non-COW page: refuse
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            MiReferenceProbedPageAndBumpLockCount(Pfn1);
        }
        else
        {
            //
            // No PFN entry: this is an I/O address, just remember that
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
#if (_MI_PAGING_LEVELS >= 3)
        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
#endif
#if (_MI_PAGING_LEVELS == 4)
        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
#endif

    } while (PointerPte <= LastPte);

    //
    // All pages locked: release whichever lock we hold
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
1403
1404 /*
1405 * @implemented
1406 */
/*
 * Unlocks pages previously locked with MmProbeAndLockPages: undoes any
 * system-space mapping created through MmGetSystemAddressForMdl and drops
 * the PFN reference/lock counts taken when the pages were locked.
 *
 * The MDL must have MDL_PAGES_LOCKED set, must not describe nonpaged pool
 * (MDL_SOURCE_IS_NONPAGED_POOL) and must not be a partial MDL.
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks: pages must be locked, and this must be neither a
    // nonpaged-pool MDL nor a partial MDL
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the process associated and capture the flags which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count (the PFN array lives immediately after the MDL
    // header, per the MDL layout convention)
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // LIST_HEAD is the sentinel terminating a short PFN array;
            // stop when we hit it
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Check if this page is in the PFN database; pure device
            // (I/O) frames have no PFN entry and need no dereference
            //
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -(LONG_PTR)PageCount);
        }

        //
        // We're done: clear both the I/O-space and locked flags
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -(LONG_PTR)PageCount);
    }

    //
    // First pass, run WITHOUT the PFN lock: convert each PFN in place into
    // its PMMPFN pointer so the second (locked) pass does minimal work
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached (LIST_HEAD sentinel terminates the array)
        //
        if (*MdlPages == LIST_HEAD)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT(*MdlPages != 0);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer back to the start of the PFN array
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
    do
    {
        /* Get the cached PFN entry and drop its reference/lock count */
        Pfn1 = (PMMPFN)*MdlPages;
        MiDereferencePfnAndDropLockCount(Pfn1);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
1585
1586 /*
1587 * @unimplemented
1588 */
/*
 * Should advance the MDL's described range by NumberOfBytes (trimming
 * pages from the front) -- not yet implemented in ARM3.
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
1597
1598 /*
1599 * @unimplemented
1600 */
1601 PVOID
1602 NTAPI
1603 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress,
1604 IN ULONG PoolTag,
1605 IN PMDL MemoryDescriptorList,
1606 IN MEMORY_CACHING_TYPE CacheType)
1607 {
1608 UNIMPLEMENTED;
1609 return 0;
1610 }
1611
1612 /*
1613 * @unimplemented
1614 */
/*
 * Should undo a mapping made by MmMapLockedPagesWithReservedMapping for
 * the reserved range identified by BaseAddress/PoolTag -- not yet
 * implemented in ARM3.
 */
VOID
NTAPI
MmUnmapReservedMapping(IN PVOID BaseAddress,
                       IN ULONG PoolTag,
                       IN PMDL MemoryDescriptorList)
{
    UNIMPLEMENTED;
}
1623
1624 /*
1625 * @unimplemented
1626 */
/*
 * Should prefetch the pages described by the given READ_LIST array --
 * not yet implemented in ARM3.
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
1635
1636 /*
1637 * @unimplemented
1638 */
/*
 * Should change the protection on the MDL's system-space mapping to
 * NewProtect -- not yet implemented in ARM3.
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}
1647
1648 /*
1649 * @unimplemented
1650 */
/*
 * Should probe and lock the MDL's pages in the context of the given
 * process (rather than the current one) -- not yet implemented in ARM3.
 */
VOID
NTAPI
MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList,
                           IN PEPROCESS Process,
                           IN KPROCESSOR_MODE AccessMode,
                           IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}
1660
1661
1662 /*
1663 * @unimplemented
1664 */
/*
 * Should probe and lock only the pages selected by PageList within the
 * MDL's range -- not yet implemented in ARM3.
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}
1674
1675 /*
1676 * @unimplemented
1677 */
/*
 * Should map the MDL for crash-dump I/O using the reserved dump mapping
 * -- not yet implemented in ARM3.
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}
1684
1685 /* EOF */