/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::POOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
KGUARDED_MUTEX MmPagedPoolMutex;
MM_PAGED_POOL_INFO MmPagedPoolInfo;
SIZE_T MmAllocatedNonPagedPool;
ULONG MmSpecialPoolTag;

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiInitializeArmPool(VOID)
{
    ULONG i;
    PFN_NUMBER PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
    }

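    //
    // N.B. Only the first page of a free run is actually linked into one of
    // the free lists; the trailing pages just carry the Owner back-pointer,
    // which is what lets MiFreePoolPages walk from any page of a run back to
    // the run's descriptor when it coalesces neighboring free runs.
    //
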
    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge =
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}

PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER SizeInPages, PageFrameNumber;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    MMPTE TempPte;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = BYTES_TO_PAGES(SizeInBytes);

    //
    // Handle paged pool
    //
    if (PoolType == PagedPool)
    {
        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Compute how many page table pages (PDEs) we would need to expand
            // paged pool by to satisfy this request
            //
            i = ((SizeInPages - 1) / 1024) + 1;

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                SizeInPages = MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool) -
                              MmPagedPoolInfo.NextPdeForPagedPoolExpansion +
                              1;
                ASSERT(SizeInPages < i);
                i = SizeInPages;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                SizeInPages = i;
            }

            //
            // Get the template PTE we'll use to expand
            //
            TempPte = ValidKernelPte;

            //
            // Get the first PTE in expansion space
            //
            PointerPte = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPteToAddress(PointerPte);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPte->u.Hard.Valid == 0);

                //
                // Request a paged pool page and write the PFN for it
                //
                PageFrameNumber = MmAllocPage(MC_PPOOL, 0);
                TempPte.u.Hard.PageFrameNumber = PageFrameNumber;

                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPte & (PAGE_SIZE - 1)) /
                                 sizeof(MMPTE)] = TempPte;

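                //
                // (MmSystemPagePtes is the kernel's view of the PTEs backing
                // the system page directory; presumably keeping this copy in
                // sync is what allows processes whose page directories were
                // created before this expansion to pick up the new paged pool
                // PDE when they touch the expanded range.)
                //
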
                //
                // Write the actual PTE now
                //
                *PointerPte++ = TempPte;

                //
                // Move on to the next expansion address
                //
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
            } while (--i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         (MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                          MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                         1024,
                         SizeInPages * 1024);

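            //
            // (SizeInPages is still the number of PDEs we just expanded by at
            // this point, so the math above frees 1024 bitmap bits, one per
            // page, for every page table that was filled in.)
            //
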
            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += SizeInPages;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, SizeInPages * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            SizeInPages = BYTES_TO_PAGES(SizeInBytes);
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("OUT OF PAGED POOL!!!\n");
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, i + SizeInPages - 1);

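        //
        // (MiFreePoolPages relies on this bit: given only the starting VA, it
        // scans EndOfPagedPoolBitmap forward until it hits a set bit to find
        // out how many pages the allocation spans.)
        //
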
        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        //
        // Set up a demand-zero writable PTE
        //
        TempPte.u.Long = 0;
        MI_MAKE_WRITE_PAGE(&TempPte);

        //
        // Find the first PTE, compute the PTE just past the allocation, and
        // loop through the whole range
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            *PointerPte++ = TempPte;
        } while (PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

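    //
    // (So a 1-page request starts scanning at list 0, a 2-page request at
    // list 1, and anything of MI_MAX_FREE_PAGE_LISTS pages or more starts at
    // the last list, which also collects all of the large free runs.)
    //
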
    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                //
                // This is not a free page segment anymore
                //
                RemoveEntryList(&FreeEntry->List);

                //
                // However, check whether it still has space left
                //
                if (FreeEntry->Size != 0)
                {
                    //
                    // Insert it back into a different list, based on its pages
                    //
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
                    InsertTailList(&MmNonPagedPoolFreeListHead[i],
                                   &FreeEntry->List);
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for the allocation's first page
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        //
        // Allocate a page
        //
        PageFrameNumber = MmAllocPage(MC_NPPOOL, 0);

        //
        // Get the PFN entry for it
        //
        Pfn1 = MiGetPfnEntry(PageFrameNumber);

        //
        // Write the PTE for it
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        ASSERT(PointerPte->u.Hard.Valid == 0);
        ASSERT(TempPte.u.Hard.Valid == 1);
        *PointerPte++ = TempPte;
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

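    //
    // (These two PFN bits are the only record of the allocation's extent in
    // this path: MiFreePoolPages later walks forward PTE by PTE until it finds
    // a PFN with EndOfAllocation set in order to recover the page count.)
    //
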
    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}

ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_NUMBER FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        i = ((ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart) >> PAGE_SHIFT;
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans
        //
        NumberOfPages = End - i + 1;

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the end-of-allocation bit (it was set at the allocation's last
        // page) and then the allocation bitmap bits themselves
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }

    //
    // Get the first PTE and its corresponding PFN entry
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = PointerPte - StartPte + 1;

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PTEs as not part of an allocation anymore
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, our entire allocation must've fit within the initial
        // nonpaged pool or the expansion nonpaged pool, so get the PFN entry
        // of the next allocation
        //
        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check whether the page right after our allocation starts another
    // allocation
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // It doesn't, so it must belong to a free run: locate that run's
        // free entry descriptor
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Owner == FreeEntry);

        //
        // Consume this entry's pages, and remove it from its free list
        //
        FreePages += FreeEntry->Size;
        RemoveEntryList(&FreeEntry->List);
    }

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation
    // and then check if this page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        FreeEntry = FreeEntry->Owner;

        //
        // Check if the entry is small enough to be indexed on a free list
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
        {
            //
            // Remove the list from where it is now
            //
            RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = (ULONG)(FreeEntry->Size - 1);
            if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

            //
            // Do it
            //
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }

    //
    // Check if we were unable to do any backward compaction; if so, this
    // allocation becomes a free entry of its own
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages, at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = FreeEntry->Size - 1;
        if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

        //
        // And insert us
        //
        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Get all the pages between our allocation and its end. These will all now
    // become free page chunks.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}


BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    UNIMPLEMENTED;
}

/* EOF */