/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/expool.c
 * PURPOSE:         ARM Memory Manager Executive Pool Manager
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::EXPOOL"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* GLOBALS ********************************************************************/

POOL_DESCRIPTOR NonPagedPoolDescriptor;
PPOOL_DESCRIPTOR PoolVector[2];
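
//
// PoolVector is indexed by the base pool type: NonPagedPool (0) or PagedPool
// (1), as extracted with BASE_POOL_TYPE_MASK. Only the nonpaged descriptor is
// set up by this file so far.
//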

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
ExInitializePoolDescriptor(IN PPOOL_DESCRIPTOR PoolDescriptor,
                           IN POOL_TYPE PoolType,
                           IN ULONG PoolIndex,
                           IN ULONG Threshold,
                           IN PVOID PoolLock)
{
    PLIST_ENTRY NextEntry, LastEntry;

    //
    // Setup the descriptor based on the caller's request
    //
    PoolDescriptor->PoolType = PoolType;
    PoolDescriptor->PoolIndex = PoolIndex;
    PoolDescriptor->Threshold = Threshold;
    PoolDescriptor->LockAddress = PoolLock;

    //
    // Initialize accounting data
    //
    PoolDescriptor->RunningAllocs = 0;
    PoolDescriptor->RunningDeAllocs = 0;
    PoolDescriptor->TotalPages = 0;
    PoolDescriptor->TotalBytes = 0;
    PoolDescriptor->TotalBigPages = 0;

    //
    // Nothing pending for now
    //
    PoolDescriptor->PendingFrees = NULL;
    PoolDescriptor->PendingFreeDepth = 0;

    //
    // Loop all the descriptor's allocation lists and initialize them
    //
    NextEntry = PoolDescriptor->ListHeads;
    LastEntry = NextEntry + POOL_LISTS_PER_PAGE;
    while (NextEntry < LastEntry) InitializeListHead(NextEntry++);
}

VOID
NTAPI
InitializePool(IN POOL_TYPE PoolType,
               IN ULONG Threshold)
{
    ASSERT(PoolType == NonPagedPool);

    //
    // Initialize the nonpaged pool descriptor
    //
    PoolVector[PoolType] = &NonPagedPoolDescriptor;
    ExInitializePoolDescriptor(PoolVector[PoolType],
                               PoolType,
                               0,
                               Threshold,
                               NULL);
}

PVOID
NTAPI
ExAllocateArmPoolWithTag(IN POOL_TYPE PoolType,
                         IN SIZE_T NumberOfBytes,
                         IN ULONG Tag)
{
    PPOOL_DESCRIPTOR PoolDesc;
    PLIST_ENTRY ListHead;
    PPOOL_HEADER Entry, NextEntry, FragmentEntry;
    KIRQL OldIrql;
    ULONG BlockSize, i;

    //
    // Some sanity checks
    //
    ASSERT(Tag != 0);
    ASSERT(Tag != ' GIB');
    ASSERT(NumberOfBytes != 0);

    //
    // Get the pool type and its corresponding vector for this request
    //
    PoolType = PoolType & BASE_POOL_TYPE_MASK;
    PoolDesc = PoolVector[PoolType];
    ASSERT(PoolDesc != NULL);

    //
    // Check if this is a big page allocation
    //
    if (NumberOfBytes > POOL_MAX_ALLOC)
    {
        //
        // Then just return the number of pages requested
        //
        return MiAllocatePoolPages(PoolType, NumberOfBytes);
    }

    //
    // Should never request 0 bytes from the pool, but since so many drivers do
    // it, we'll just assume they want 1 byte, based on NT's similar behavior
    //
    if (!NumberOfBytes) NumberOfBytes = 1;

    //
    // A pool allocation is defined by its data, a linked list to connect it to
    // the free list (if necessary), and a pool header to store accounting info.
    // Calculate this size, then convert it into a block size (units of pool
    // headers)
    //
    // Note that i cannot overflow (past POOL_LISTS_PER_PAGE) because any such
    // request would've been treated as a POOL_MAX_ALLOC earlier and resulted in
    // the direct allocation of pages.
    //
    i = (NumberOfBytes + sizeof(POOL_HEADER) + sizeof(LIST_ENTRY) - 1) /
        sizeof(POOL_HEADER);
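
    //
    // Worked example (illustrative only): on a 32-bit build, where
    // sizeof(POOL_HEADER) == sizeof(LIST_ENTRY) == 8, a 100-byte request
    // gives i = (100 + 8 + 8 - 1) / 8 = 14 block units, i.e. a 112-byte
    // chunk holding the 8-byte header plus the caller's data.
    //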

    //
    // Loop in the free lists looking for a block of this size. Start with the
    // list optimized for this kind of size lookup
    //
    ListHead = &PoolDesc->ListHeads[i];
    do
    {
        //
        // Are there any free entries available on this list?
        //
        if (!IsListEmpty(ListHead))
        {
            //
            // Acquire the nonpaged pool lock now
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);

            //
            // And make sure the list still has entries
            //
            if (IsListEmpty(ListHead))
            {
                //
                // Someone raced us (and won) before we had a chance to acquire
                // the lock.
                //
                // Try again!
                //
                KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
                ListHead++;
                continue;
            }

            //
            // Remove a free entry from the list
            // Note that due to the way we insert free blocks into multiple lists
            // there is a guarantee that any block on this list will either be
            // of the correct size, or perhaps larger.
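            //
            // (The free-list LIST_ENTRY lives immediately after the POOL_HEADER;
            // both structures are the same size, so stepping back one header
            // from the returned LIST_ENTRY recovers the header itself.)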
            //
            Entry = (PPOOL_HEADER)RemoveHeadList(ListHead) - 1;
            ASSERT(Entry->BlockSize >= i);
            ASSERT(Entry->PoolType == 0);

            //
            // Check if this block is larger than what we need. The block could
            // not possibly be smaller, due to the reason explained above (and
            // we would've asserted on a checked build if this was the case).
            //
            if (Entry->BlockSize != i)
            {
                //
                // Is there an entry before this one?
                //
                if (Entry->PreviousSize == 0)
                {
                    //
                    // There isn't anyone before us, so take the next block and
                    // turn it into a fragment that contains the leftover data
                    // that we don't need to satisfy the caller's request
                    //
                    FragmentEntry = Entry + i;
                    FragmentEntry->BlockSize = Entry->BlockSize - i;

                    //
                    // And make it point back to us
                    //
                    FragmentEntry->PreviousSize = i;

                    //
                    // Now get the block that follows the new fragment and check
                    // if it's still on the same page as us (and not at the end)
                    //
                    NextEntry = FragmentEntry + FragmentEntry->BlockSize;
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Adjust this next block to point to our newly created
                        // fragment block
                        //
                        NextEntry->PreviousSize = FragmentEntry->BlockSize;
                    }
                }
                else
                {
                    //
                    // There is a free entry before us, which we know is smaller
                    // so we'll make this entry the fragment instead
                    //
                    FragmentEntry = Entry;

                    //
                    // And then we'll remove from it the actual size required.
                    // Now the entry is a leftover free fragment
                    //
                    Entry->BlockSize -= i;

                    //
                    // Now let's go to the next entry after the fragment (which
                    // used to point to our original free entry) and make it
                    // reference the new fragment entry instead.
                    //
                    // This is the entry that will actually end up holding the
                    // allocation!
                    //
                    Entry += Entry->BlockSize;
                    Entry->PreviousSize = FragmentEntry->BlockSize;

                    //
                    // And now let's go to the entry after that one and check if
                    // it's still on the same page, and not at the end
                    //
                    NextEntry = Entry + i;
                    if (PAGE_ALIGN(NextEntry) != NextEntry)
                    {
                        //
                        // Make it reference the allocation entry
                        //
                        NextEntry->PreviousSize = i;
                    }
                }

                //
                // Now our (allocation) entry is the right size
                //
                Entry->BlockSize = i;

                //
                // And the next entry is now the free fragment which contains
                // the remaining difference between how big the original entry
                // was, and the actual size the caller needs/requested.
                //
                FragmentEntry->PoolType = 0;
                BlockSize = FragmentEntry->BlockSize;

                //
                // Now check if enough free bytes remained for us to have a
                // "full" entry, which contains enough bytes for a linked list
                // and thus can be used for allocations (up to 8 bytes...)
                //
                if (BlockSize != 1)
                {
                    //
                    // Insert the free entry into the free list for this size
                    //
                    InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
                                   (PLIST_ENTRY)FragmentEntry + 1);
                }
            }

            //
            // We have found an entry for this allocation, so set the pool type
            // and release the lock since we're done
            //
            Entry->PoolType = PoolType + 1;
            KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);

            //
            // Return the pool allocation
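            // (++Entry skips past the pool header, returning the usable data)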
            //
            Entry->PoolTag = Tag;
            return ++Entry;
        }
    } while (++ListHead != &PoolDesc->ListHeads[POOL_LISTS_PER_PAGE]);

    //
    // There were no free entries left, so we have to allocate a new fresh page
    //
    Entry = MiAllocatePoolPages(PoolType, PAGE_SIZE);
    Entry->Ulong1 = 0;
    Entry->BlockSize = i;
    Entry->PoolType = PoolType + 1;

    //
    // This page will have two entries -- one for the allocation (which we just
    // created above), and one for the remaining free bytes, which we're about
    // to create now. The free bytes are the whole page minus what was allocated
    // and then converted into units of block headers.
    //
    BlockSize = (PAGE_SIZE / sizeof(POOL_HEADER)) - i;
    FragmentEntry = Entry + i;
    FragmentEntry->Ulong1 = 0;
    FragmentEntry->BlockSize = BlockSize;
    FragmentEntry->PreviousSize = i;
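
    //
    // Worked example (illustrative only): with a 4096-byte page and an 8-byte
    // POOL_HEADER, the page holds 512 block units; an allocation of i = 14
    // units leaves a fragment of 498 units whose PreviousSize points back at
    // the allocation.
    //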

    //
    // Now check if enough free bytes remained for us to have a "full" entry,
    // which contains enough bytes for a linked list and thus can be used for
    // allocations (up to 8 bytes...)
    //
    if (FragmentEntry->BlockSize != 1)
    {
        //
        // Excellent -- acquire the nonpaged pool lock
        //
        OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);

        //
        // And insert the free entry into the free list for this block size
        //
        InsertTailList(&PoolDesc->ListHeads[BlockSize - 1],
                       (PLIST_ENTRY)FragmentEntry + 1);

        //
        // Release the nonpaged pool lock
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
    }

    //
    // And return the pool allocation
    //
    Entry->PoolTag = Tag;
    return ++Entry;
}

PVOID
NTAPI
ExAllocateArmPool(POOL_TYPE PoolType,
                  SIZE_T NumberOfBytes)
{
    //
    // Use a default tag of "None"
    //
    return ExAllocateArmPoolWithTag(PoolType, NumberOfBytes, 'enoN');
}

VOID
NTAPI
ExFreeArmPoolWithTag(IN PVOID P,
                     IN ULONG TagToFree)
{
    PPOOL_HEADER Entry, NextEntry;
    ULONG BlockSize;
    KIRQL OldIrql;
    POOL_TYPE PoolType;
    PPOOL_DESCRIPTOR PoolDesc;
    BOOLEAN Combined = FALSE;

    //
    // Quickly deal with big page allocations
    //
    if (PAGE_ALIGN(P) == P)
    {
        MiFreePoolPages(P);
        return;
    }

    //
    // Get the entry for this pool allocation
    // The pointer math here may look wrong or confusing, but it is quite right
    //
    Entry = P;
    Entry--;

    //
    // Get the size of the entry, and its pool type, then load the descriptor
    // for this pool type
    //
    BlockSize = Entry->BlockSize;
    PoolType = (Entry->PoolType & BASE_POOL_TYPE_MASK) - 1;
    PoolDesc = PoolVector[PoolType];

    //
    // Get the pointer to the next entry
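    // (BlockSize is in units of POOL_HEADER, so header-typed pointer
    // arithmetic lands directly on the following entry)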
    //
    NextEntry = Entry + BlockSize;

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueNonPagedPoolLock);

    //
    // Check if the next allocation is at the end of the page
    //
    if (PAGE_ALIGN(NextEntry) != NextEntry)
    {
        //
        // We may be able to combine the block if it's free
        //
        if (NextEntry->PoolType == 0)
        {
            //
            // The next block is free, so we'll do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header, so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
            }

            //
            // Our entry is now combined with the next entry
            //
            Entry->BlockSize = Entry->BlockSize + NextEntry->BlockSize;
        }
    }

    //
    // Now check if there was a previous entry on the same page as us
    //
    if (Entry->PreviousSize)
    {
        //
        // Great, grab that entry and check if it's free
        //
        NextEntry = Entry - Entry->PreviousSize;
        if (NextEntry->PoolType == 0)
        {
            //
            // It is, so we can do a combine
            //
            Combined = TRUE;

            //
            // Make sure there's actual data in the block -- anything smaller
            // than this means we only have the header so there's no linked list
            // for us to remove
            //
            if (NextEntry->BlockSize != 1)
            {
                //
                // The block is at least big enough to have a linked list, so go
                // ahead and remove it
                //
                RemoveEntryList((PLIST_ENTRY)NextEntry + 1);
            }

            //
            // Combine our original block (which might've already been combined
            // with the next block), into the previous block
            //
            NextEntry->BlockSize = NextEntry->BlockSize + Entry->BlockSize;

            //
            // And now we'll work with the previous block instead
            //
            Entry = NextEntry;
        }
    }

    //
    // By now, it may have been possible for our combined blocks to actually
    // have made up a full page (if there were only 2-3 allocations on the
    // page, they could've all been combined).
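    // If so, the freed block starts on a page boundary and its BlockSize
    // spans exactly to the next page boundary.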
    //
    if ((PAGE_ALIGN(Entry) == Entry) &&
        (PAGE_ALIGN(Entry + Entry->BlockSize) == Entry + Entry->BlockSize))
    {
        //
        // In this case, release the nonpaged pool lock, and free the page
        //
        KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
        MiFreePoolPages(Entry);
        return;
    }

    //
    // Otherwise, we now have a free block (or a combination of 2 or 3)
    //
    Entry->PoolType = 0;
    BlockSize = Entry->BlockSize;
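    //
    // A free block is never a bare header: the allocation size calculation
    // above always reserved room for a LIST_ENTRY, and combining blocks can
    // only grow it.
    //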
    ASSERT(BlockSize != 1);

    //
    // Check if we actually did combine it with anyone
    //
    if (Combined)
    {
        //
        // Get the first combined block (either our original to begin with, or
        // the one after the original, depending if we combined with the previous)
        //
        NextEntry = Entry + BlockSize;

        //
        // As long as the next block isn't on a page boundary, have it point
        // back to us
        //
        if (PAGE_ALIGN(NextEntry) != NextEntry) NextEntry->PreviousSize = BlockSize;
    }

    //
    // Insert this new free block, and release the nonpaged pool lock
    //
    InsertHeadList(&PoolDesc->ListHeads[BlockSize - 1], (PLIST_ENTRY)Entry + 1);
    KeReleaseQueuedSpinLock(LockQueueNonPagedPoolLock, OldIrql);
}

VOID
NTAPI
ExFreeArmPool(PVOID P)
{
    //
    // Just free without checking for the tag
    //
    ExFreeArmPoolWithTag(P, 0);
}

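//
// Illustrative usage (a sketch only; the 'Buffer' variable and the 'Xmpl'
// tag are hypothetical examples, not part of this file):
//
//     PVOID Buffer = ExAllocateArmPoolWithTag(NonPagedPool, 128, 'Xmpl');
//     if (Buffer) ExFreeArmPoolWithTag(Buffer, 'Xmpl');
//
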
/* EOF */