/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::POOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

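//
// Free nonpaged pool pages are tracked on MI_MAX_FREE_PAGE_LISTS (4) lists,
// bucketed by the length of the free run: list 0 holds 1-page runs, list 1
// holds 2-page runs, list 2 holds 3-page runs, and the last list holds every
// run of 4 or more pages. A run of Size pages maps to list index
// min(Size - 1, MI_MAX_FREE_PAGE_LISTS - 1).
//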
LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_NUMBER MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;

MM_PAGED_POOL_INFO MmPagedPoolInfo;

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiInitializeArmPool(VOID)
{
    ULONG i;
    PFN_NUMBER PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
    }
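
    //
    // Every page of the run now carries an Owner pointer back to the heading
    // entry; MiFreePoolPages relies on this to find a run's descriptor from
    // any neighboring page when it coalesces adjacent free runs.
    //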

    //
    // Validate and remember first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge =
        BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}

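//
// MiAllocatePoolPages satisfies a request in one of two ways: it first tries
// to carve the pages out of an existing run on the nonpaged pool free lists;
// if every list comes up empty, it grows the pool instead, reserving
// expansion system PTEs and backing them with freshly allocated pages.
//
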
PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER SizeInPages, PageFrameNumber;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    MMPTE TempPte;
    PMMPFN Pfn1;
    PVOID BaseVa;
    PMMFREE_POOL_ENTRY FreeEntry;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = BYTES_TO_PAGES(SizeInBytes);

    //
    // Allocations of less than 4 pages go into their individual buckets
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
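
    //
    // For example, a 1-page request starts the search at list 0, a 4-page
    // request at list 3, and a request of 5 or more pages is clamped to the
    // last list, which holds every free run of 4 or more pages.
    //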

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));
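
                //
                // Note that the pages are carved from the tail of the free
                // run, so the MMFREE_POOL_ENTRY header at the start of the
                // run stays in place and remains valid for the leftover pages.
                //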

                //
                // This is not a free page segment anymore
                //
                RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    //
                    // Insert it back into a different list, based on its pages
                    //
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
                    InsertTailList(&MmNonPagedPoolFreeListHead[i],
                                   &FreeEntry->List);
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for the first page
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;
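
                //
                // These StartOfAllocation/EndOfAllocation bits in the PFN
                // entries are the only record of the allocation's extent:
                // MiFreePoolPages later walks the PTEs until it finds the
                // EndOfAllocation bit to recover the page count.
                //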

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    //KeAcquireQueuedSpinLockAtDpcLevel(LockQueuePfnLock);

    //
    // Loop the pages
    //
    TempPte = HyperTemplatePte;
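
    //
    // HyperTemplatePte provides a template for a valid kernel PTE; only the
    // page frame number needs to be filled in for each newly allocated page
    // (note the Valid == 1 assertion on the template below).
    //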
    do
    {
        //
        // Allocate a page
        //
        PageFrameNumber = MmAllocPage(MC_NPPOOL, 0);

        //
        // Get the PFN entry for it
        //
        Pfn1 = MiGetPfnEntry(PageFrameNumber);

        //
        // Write the PTE for it
        //
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        ASSERT(PointerPte->u.Hard.Valid == 0);
        ASSERT(TempPte.u.Hard.Valid == 1);
        *PointerPte++ = TempPte;
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    //KeReleaseQueuedSpinLockFromDpcLevel(LockQueuePfnLock);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}

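//
// MiFreePoolPages returns an allocation's pages to the free lists, merging
// them with any free run that immediately follows or precedes the allocation
// so that neighboring free pages coalesce into a single larger run.
//
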
ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_NUMBER FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i;

    //
    // Get the first PTE and its corresponding PFN entry
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = PointerPte - StartPte + 1;
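
    //
    // For example, in a 3-page allocation only the third PFN entry has
    // EndOfAllocation set, so the walk above advances the PTE pointer twice
    // and NumberOfPages comes out as 3.
    //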

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PTEs as not part of an allocation anymore
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, our entire allocation must've fit within the initial
        // nonpaged pool, or the expansion nonpaged pool, so get the PFN entry
        // of the next allocation
        //
        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if this allocation actually exists
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // It doesn't, so we should actually locate a free entry descriptor
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Owner == FreeEntry);

        //
        // Consume this entry's pages, and remove it from its free list
        //
        FreePages += FreeEntry->Size;
        RemoveEntryList(&FreeEntry->List);
    }
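
    //
    // This is the forward merge: if the page right after the allocation
    // heads a free run, that run's pages are absorbed into the range being
    // freed. Freeing a 2-page allocation followed by a 5-page free run, for
    // instance, produces a single 7-page run.
    //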

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation
    // and then check if this page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        FreeEntry = FreeEntry->Owner;

        //
        // Check if the entry is small enough to be indexed on a free list.
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
        {
            //
            // Remove the list from where it is now
            //
            RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = (ULONG)(FreeEntry->Size - 1);
            if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

            //
            // Do it
            //
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }
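
    //
    // This is the backward merge: the Owner pointer stored in the page just
    // before the allocation leads to the heading entry of the preceding free
    // run, which simply grows to absorb the freed pages. When that happens,
    // FreeEntry no longer points at StartingVa, which is exactly what the
    // check below tests for.
    //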

    //
    // Check if we were unable to do any merging, in which case the freed
    // range becomes a brand new free entry of its own
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages, at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = FreeEntry->Size - 1;
        if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

        //
        // And insert us
        //
        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Get all the pages between our allocation and its end. These will all now
    // become free page chunks.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);
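
    //
    // Every page in the final merged run, including any pages absorbed from
    // a following free run, now carries an Owner pointer back to the single
    // heading entry, which keeps future backward merges correct.
    //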

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}

/* EOF */