Merge from amd64 branch:
[reactos.git] / reactos / ntoskrnl / mm / rpoolmgr.h
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/RPoolMgr.h
5 * PURPOSE: A semi-generic reuseable Pool implementation
6 * PROGRAMMER: Royce Mitchell III
7 * UPDATE HISTORY:
8 */
9
#ifndef RPOOLMGR_H
#define RPOOLMGR_H

/* Convenience alias used for all pool bookkeeping fields. */
typedef unsigned long rulong;

/* Nonzero when ptr lies inside the pool's user area [UserBase, UserBase+UserSize). */
#define R_IS_POOL_PTR(pool,ptr) (((void*)(ULONG_PTR)(ptr) >= pool->UserBase) && ((ULONG_PTR)(ptr) < ((ULONG_PTR)pool->UserBase + pool->UserSize)))
#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
/* A valid block size must exceed a used header plus both redzones, hold at
 * least a free header, and fit within the pool's user area. */
#define R_ASSERT_SIZE(pool,sz) ASSERT( sz > (sizeof(R_USED)+2*R_RZ) && sz >= sizeof(R_FREE) && sz < pool->UserSize )

/* Round x up to a multiple of s (s is assumed to be a power of two). */
#ifndef R_ROUND_UP
#define R_ROUND_UP(x,s) ((PVOID)(((ULONG_PTR)(x)+(s)-1) & ~((ULONG_PTR)(s)-1)))
#endif//R_ROUND_UP

/* Round x down to a multiple of s (s is assumed to be a power of two). */
#ifndef R_ROUND_DOWN
#define R_ROUND_DOWN(x,s) ((PVOID)(((ULONG_PTR)(x)) & ~((ULONG_PTR)(s)-1)))
#endif//R_ROUND_DOWN

#ifndef R_QUEMIN
// R_QUEMIN is the minimum number of entries to keep in a que
#define R_QUEMIN 0
#endif//R_QUEMIN

#ifndef R_QUECOUNT
// number of look-aside ques; their size classes are 16, 32, 64, 128, 256, 512
#define R_QUECOUNT 6
#endif//R_QUECOUNT

#ifndef R_RZ
// R_RZ is the redzone size (guard bytes before and after each user block)
#define R_RZ 4
#endif//R_RZ

#ifndef R_RZ_LOVALUE
// fill byte for the redzone preceding the user data
#define R_RZ_LOVALUE 0x87
#endif//R_RZ_LOVALUE

#ifndef R_RZ_HIVALUE
// fill byte for the redzone following the user data
#define R_RZ_HIVALUE 0xA5
#endif//R_RZ_HIVALUE

#ifndef R_STACK
// R_STACK is the number of stack entries to store in blocks for debug purposes
#define R_STACK 6
#else // R_STACK
#if R_STACK > 0 && R_STACK < 6
/* Increase the frame depth to get a reasonable back trace */
#undef R_STACK
#define R_STACK 6
#endif // R_STACK > 0 && R_STACK < 6
#endif//R_STACK

#ifndef R_TAG
// R_TAG do we keep track of tags on a per-memory block basis?
#define R_TAG 0
#endif//R_TAG

/* Optional header magic values used to detect corruption and double-frees. */
#ifdef R_MAGIC
# ifndef R_FREE_MAGIC
# define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
# endif//R_FREE_MAGIC
# ifndef R_USED_MAGIC
# define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
# endif//R_USED_MAGIC
#endif//R_MAGIC
// **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
// in both the R_FREE and R_USED structures

/* Header of a block on the pool's main free list. */
typedef struct _R_FREE
{
#ifdef R_MAGIC
	rulong FreeMagic;          // integrity stamp (R_FREE_MAGIC)
#endif//R_MAGIC
	rulong PrevSize : 30;      // size of the physically preceding block; 0 = first block
	rulong Status : 2;         // 0 = free, 1 = allocated, 2 = parked in a que
	rulong Size;               // total size of this block, header included
#if R_STACK
	ULONG_PTR LastOwnerStack[R_STACK]; // back trace captured at init, for debugging
#endif//R_STACK
	struct _R_FREE* NextFree;  // doubly-linked free-list links
	struct _R_FREE* PrevFree;
}
R_FREE, *PR_FREE;
93
/* Header of an allocated (or que-cached) block. The Magic, PrevSize,
 * Status and Size fields must stay at the same offsets as in R_FREE. */
typedef struct _R_USED
{
#ifdef R_MAGIC
	rulong UsedMagic;          // integrity stamp (R_USED_MAGIC)
#endif//R_MAGIC
	rulong PrevSize : 30;      // size of the physically preceding block
	rulong Status : 2;         // 0 = free, 1 = allocated, 2 = parked in a que
	rulong Size;               // total block size, header and redzones included
#if R_STACK
	ULONG_PTR LastOwnerStack[R_STACK]; // back trace of the last owner, for debugging
#endif//R_STACK
	struct _R_USED* NextUsed;  // next entry while parked on a look-aside que
#if R_RZ
	rulong UserSize; // how many bytes the user actually asked for...
#endif//R_RZ
	rulong Tag;                // caller-supplied pool tag
}
R_USED, *PR_USED;
112
/* A look-aside que: a singly-linked FIFO of recently freed blocks
 * belonging to one size class and alignment class. */
typedef struct _R_QUE
{
	rulong Count;              // number of entries currently cached
	PR_USED First, Last;       // FIFO endpoints (linked via NextUsed)
}
R_QUE, *PR_QUE;
119
/* Control structure for one pool; lives at the start of the pool memory. */
typedef struct _R_POOL
{
	void* PoolBase;            // start of the whole region (== this struct)
	rulong PoolSize;           // total region size in bytes
	void* UserBase;            // first byte past this header; blocks start here
	rulong UserSize;           // PoolSize minus this header
	rulong Alignments[3];      // supported alignment classes (index 0..2)
	PR_FREE FirstFree, LastFree; // doubly-linked list of free blocks
	R_QUE Que[R_QUECOUNT][3];  // look-aside ques per size class x alignment class
	R_MUTEX Mutex;             // guards all pool state
}
R_POOL, *PR_POOL;
132
#if !R_STACK
/* Stack capture disabled: expands to nothing. */
#define RiPrintLastOwner(Block)
#else
/* Print the saved back trace of a block's last owner; slots still
 * holding the 0xDEADBEEF poison value are skipped. */
static void
RiPrintLastOwner ( PR_USED Block )
{
	int i;
	for ( i = 0; i < R_STACK; i++ )
	{
		if ( Block->LastOwnerStack[i] != 0xDEADBEEF )
		{
			R_DEBUG(" ");
			//if (!R_PRINT_ADDRESS ((PVOID)Block->LastOwnerStack[i]) )
			{
				R_DEBUG("<%X>", Block->LastOwnerStack[i] );
			}
		}
	}
}
#endif//R_STACK
153
154 static int
155 RQueWhich ( rulong size )
156 {
157 rulong que, quesize;
158 for ( que=0, quesize=16; que < R_QUECOUNT; que++, quesize<<=1 )
159 {
160 if ( quesize >= size )
161 {
162 return que;
163 }
164 }
165 return -1;
166 }
167
168 static void
169 RQueInit ( PR_QUE que )
170 {
171 que->Count = 0;
172 que->First = NULL;
173 que->Last = NULL;
174 }
175
176 static void
177 RQueAdd ( PR_QUE que, PR_USED Item )
178 {
179 ASSERT(Item);
180 Item->Status = 2;
181 Item->NextUsed = NULL;
182 ++que->Count;
183 if ( !que->Last )
184 {
185 que->First = que->Last = Item;
186 return;
187 }
188 ASSERT(!que->Last->NextUsed);
189 que->Last->NextUsed = Item;
190 que->Last = Item;
191 }
192
193 static PR_USED
194 RQueRemove ( PR_QUE que )
195 {
196 PR_USED Item;
197 #if R_QUEMIN
198 if ( que->count < R_QUEMIN )
199 return NULL;
200 #endif
201 if ( !que->First )
202 return NULL;
203 Item = que->First;
204 que->First = Item->NextUsed;
205 if ( !--que->Count )
206 {
207 ASSERT ( !que->First );
208 que->Last = NULL;
209 }
210 Item->Status = 0;
211 return Item;
212 }
213
214 static void
215 RPoolAddFree ( PR_POOL pool, PR_FREE Item )
216 {
217 ASSERT(pool);
218 ASSERT(Item);
219 if ( !pool->FirstFree )
220 {
221 pool->FirstFree = pool->LastFree = Item;
222 Item->NextFree = NULL;
223 }
224 else
225 {
226 pool->FirstFree->PrevFree = Item;
227 Item->NextFree = pool->FirstFree;
228 pool->FirstFree = Item;
229 }
230 Item->PrevFree = NULL;
231 }
232
233 static void
234 RPoolRemoveFree ( PR_POOL pool, PR_FREE Item )
235 {
236 ASSERT(pool);
237 ASSERT(Item);
238 if ( Item->NextFree )
239 Item->NextFree->PrevFree = Item->PrevFree;
240 else
241 {
242 ASSERT ( pool->LastFree == Item );
243 pool->LastFree = Item->PrevFree;
244 }
245 if ( Item->PrevFree )
246 Item->PrevFree->NextFree = Item->NextFree;
247 else
248 {
249 ASSERT ( pool->FirstFree == Item );
250 pool->FirstFree = Item->NextFree;
251 }
252 #if DBG
253 Item->NextFree = Item->PrevFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
254 #endif//DBG
255 }
256
#if !R_STACK
/* Stack capture disabled: both helpers expand to nothing. */
#define RFreeFillStack(free)
#define RUsedFillStack(used)
#else
/* Capture the caller's back trace into a free block's LastOwnerStack.
 * NOTE(review): the scratch array is ULONG while LastOwnerStack is
 * ULONG_PTR - on 64-bit targets this truncates frame addresses; verify
 * against the R_GET_STACK_FRAMES contract. */
static void
RFreeFillStack ( PR_FREE free )
{
	int i;
	ULONG stack[R_STACK+3]; // need to skip 3 known levels of stack trace
	memset ( stack, 0xCD, sizeof(stack) );
	R_GET_STACK_FRAMES ( stack, R_STACK+3 );
	for ( i = 0; i < R_STACK; i++ )
		free->LastOwnerStack[i] = stack[i+3];
}

/* Same idea for used blocks, which are reached through one less call
 * level, so only 2 frames are skipped. */
static void
RUsedFillStack ( PR_USED used )
{
	int i;
	ULONG stack[R_STACK+2]; // need to skip 2 known levels of stack trace
	memset ( stack, 0xCD, sizeof(stack) );
	R_GET_STACK_FRAMES ( stack, R_STACK+2 );
	for ( i = 0; i < R_STACK; i++ )
		used->LastOwnerStack[i] = stack[i+2];
}
#endif
283
284 static PR_FREE
285 RFreeInit ( void* memory )
286 {
287 PR_FREE block = (PR_FREE)memory;
288 #if R_FREEMAGIC
289 block->FreeMagic = R_FREE_MAGIC;
290 #endif//R_FREEMAGIC
291 block->Status = 0;
292 RFreeFillStack ( block );
293 #if DBG
294 block->PrevFree = block->NextFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
295 #endif//DBG
296 return block;
297 }
298
299 PR_POOL
300 RPoolInit ( void* PoolBase, rulong PoolSize, int align1, int align2, int align3 )
301 {
302 int align, que;
303 PR_POOL pool = (PR_POOL)PoolBase;
304
305 pool->PoolBase = PoolBase;
306 pool->PoolSize = PoolSize;
307 pool->UserBase = (char*)pool->PoolBase + sizeof(R_POOL);
308 pool->UserSize = PoolSize - sizeof(R_POOL);
309 pool->Alignments[0] = align1;
310 pool->Alignments[1] = align2;
311 pool->Alignments[2] = align3;
312 pool->FirstFree = pool->LastFree = NULL;
313
314 RPoolAddFree ( pool,
315 RFreeInit ( pool->UserBase ));
316
317 pool->FirstFree->PrevSize = 0;
318 pool->FirstFree->Size = pool->UserSize;
319
320 for ( que = 0; que < R_QUECOUNT; que++ )
321 {
322 for ( align = 0; align < 3; align++ )
323 {
324 RQueInit ( &pool->Que[que][align] );
325 }
326 }
327 return pool;
328 }
329
#if R_RZ
/*
 * Render the low 4 bytes of a pool tag into buf as printable text (buf
 * must hold at least 5 bytes); NUL bytes within the tag become spaces
 * so the result is always a 4-character NUL-terminated string.
 */
static const char*
RFormatTag ( rulong Tag, char* buf )
{
	int i;
	/* BUGFIX: copy exactly 4 bytes via memcpy instead of the previous
	 * '*(rulong*)&buf[0] = Tag' - the cast write violated strict
	 * aliasing and, where rulong is 8 bytes (LP64), overflowed the
	 * caller's 5-byte buffer. */
	memcpy ( buf, &Tag, 4 );
	buf[4] = 0;
	for ( i = 0; i < 4; i++ )
	{
		if ( !buf[i] )
			buf[i] = ' ';
	}
	return buf;
}
#endif
345
#if !R_RZ
/* Redzones disabled: the whole check compiles away. */
#define RUsedRedZoneCheck(pUsed,Addr,file,line, printzone)
#else//R_RZ
/*
 * Report a corrupt or invalid block and halt: dumps the header fields,
 * optionally both redzones, the saved owner back trace, and up to the
 * first 128 bytes of user data, then panics. Does not return.
 */
static void
RiBadBlock ( PR_USED pUsed, char* Addr, const char* violation, const char* file, int line, int printzone )
{
	char tag[5];
	unsigned int i;

	R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
		file, line, violation, Addr );

#ifdef R_MAGIC
	R_DEBUG ( "UsedMagic 0x%x, ", pUsed->UsedMagic );
#endif//R_MAGIC
	R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
		RFormatTag(pUsed->Tag,tag),
		pUsed->Tag,
		pUsed->Size,
		pUsed->UserSize );

	if ( printzone )
	{
		/* dump the raw bytes of both redzones */
		unsigned char* HiZone = (unsigned char*)Addr + pUsed->UserSize;
		unsigned char* LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
		R_DEBUG ( ", LoZone " );
		for ( i = 0; i < R_RZ; i++ )
			R_DEBUG ( "%02x", LoZone[i] );
		R_DEBUG ( ", HiZone " );
		for ( i = 0; i < R_RZ; i++ )
			R_DEBUG ( "%02x", HiZone[i] );
	}
	R_DEBUG ( "\n" );

	R_DEBUG ( "First few Stack Frames:" );
	RiPrintLastOwner ( pUsed );
	R_DEBUG ( "\n" );

	/* hex + ASCII dump: at most 8 rows of 16 bytes of user data */
	R_DEBUG ( "Contents of Block:\n" );
	for ( i = 0; i < 8*16 && i < pUsed->UserSize; i += 16 )
	{
		int j;
		R_DEBUG ( "%04X ", i );
		for ( j = 0; j < 16; j++ )
		{
			if ( i+j < pUsed->UserSize )
			{
				R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr[i+j] );
			}
			else
			{
				R_DEBUG ( "   " );
			}
		}
		R_DEBUG(" ");
		for ( j = 0; j < 16; j++ )
		{
			if ( i+j < pUsed->UserSize )
			{
				char c = Addr[i+j];
				if ( c < 0x20 || c > 0x7E )
					c = '.'; // show non-printable bytes as dots
				R_DEBUG ( "%c", c );
			}
			else
			{
				R_DEBUG ( " " );
			}
		}
		R_DEBUG("\n");
	}
	R_PANIC();
}
/*
 * Validate one allocated block: magic values, status, size sanity, and
 * both redzones. Any violation funnels into RiBadBlock, which panics
 * and does not return.
 */
static void
RUsedRedZoneCheck ( PR_POOL pool, PR_USED pUsed, char* Addr, const char* file, int line )
{
	int i;
	unsigned char *LoZone, *HiZone;
	int bLow = 1;
	int bHigh = 1;

	ASSERT ( Addr >= (char*)pool->UserBase && Addr < ((char*)pool->UserBase + pool->UserSize - 16) );
#ifdef R_MAGIC
	/* a free-magic stamp on a supposedly-used block means double free */
	if ( pUsed->UsedMagic == R_FREE_MAGIC )
	{
		pUsed->UserSize = 0; // just to keep from confusion, MmpBadBlock() doesn't return...
		RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
	}
	if ( pUsed->UsedMagic != R_USED_MAGIC )
	{
		RiBadBlock ( pUsed, Addr, "bad magic", file, line, 0 );
	}
#endif//R_MAGIC
	/* only Status 1 (allocated) is legal for a block being checked */
	switch ( pUsed->Status )
	{
	case 0: // freed into main pool
	case 2: // in ques
		RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
		// no need for break here - RiBadBlock doesn't return
	case 1: // allocated - this is okay
		break;
	default:
		RiBadBlock ( pUsed, Addr, "corrupt status", file, line, 0 );
	}
	/* NOTE(review): unreachable - every Status != 1 value was already
	 * rejected by the switch above, and RiBadBlock does not return */
	if ( pUsed->Status != 1 )
	{
		RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
	}
	if ( pUsed->Size > pool->PoolSize || pUsed->Size == 0 )
	{
		RiBadBlock ( pUsed, Addr, "invalid size", file, line, 0 );
	}
	if ( pUsed->UserSize > pool->PoolSize || pUsed->UserSize == 0 )
	{
		RiBadBlock ( pUsed, Addr, "invalid user size", file, line, 0 );
	}
	/* verify both redzones are still intact */
	HiZone = (unsigned char*)Addr + pUsed->UserSize;
	LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
	for ( i = 0; i < R_RZ && bLow && bHigh; i++ )
	{
		bLow = bLow && ( LoZone[i] == R_RZ_LOVALUE );
		bHigh = bHigh && ( HiZone[i] == R_RZ_HIVALUE );
	}
	if ( !bLow || !bHigh )
	{
		const char* violation = "High and Low-side redzone overwrite";
		if ( bHigh ) // high is okay, so it was just low failed
			violation = "Low-side redzone overwrite";
		else if ( bLow ) // low side is okay, so it was just high failed
			violation = "High-side redzone overwrite";
		RiBadBlock ( pUsed, Addr, violation, file, line, 1 );
	}
}
#endif//R_RZ
480
481 PR_FREE
482 RPreviousBlock ( PR_FREE Block )
483 {
484 if ( Block->PrevSize > 0 )
485 return (PR_FREE)( (char*)Block - Block->PrevSize );
486 return NULL;
487 }
488
489 PR_FREE
490 RNextBlock ( PR_POOL pool, PR_FREE Block )
491 {
492 PR_FREE NextBlock = (PR_FREE)( (char*)Block + Block->Size );
493 if ( (char*)NextBlock >= (char*)pool->UserBase + pool->UserSize )
494 NextBlock = NULL;
495 return NextBlock;
496 }
497
498 static __inline void*
499 RHdrToBody ( void* blk )
500 /*
501 * FUNCTION: Translate a block header address to the corresponding block
502 * address (internal)
503 */
504 {
505 return ( (void *) ((char*)blk + sizeof(R_USED) + R_RZ) );
506 }
507
508 static __inline PR_USED
509 RBodyToHdr ( void* addr )
510 {
511 return (PR_USED)
512 ( ((char*)addr) - sizeof(R_USED) - R_RZ );
513 }
514
515 static int
516 RiInFreeChain ( PR_POOL pool, PR_FREE Block )
517 {
518 PR_FREE Free;
519 Free = pool->FirstFree;
520 if ( Free == Block )
521 return 1;
522 while ( Free != Block )
523 {
524 Free = Free->NextFree;
525 if ( !Free )
526 return 0;
527 }
528 return 1;
529 }
530
/*
 * Full-pool consistency check: walks every block by size-jumping and
 * validates each according to its status, then verifies every entry on
 * the free list is reachable by that same size-jump walk.
 */
static void
RPoolRedZoneCheck ( PR_POOL pool, const char* file, int line )
{
	{
		/* pass 1: walk all physical blocks front to back */
		PR_USED Block = (PR_USED)pool->UserBase;
		PR_USED NextBlock;

		for ( ;; )
		{
			switch ( Block->Status )
			{
			case 0: // block is in chain
				ASSERT ( RiInFreeChain ( pool, (PR_FREE)Block ) );
				break;
			case 1: // block is allocated
				RUsedRedZoneCheck ( pool, Block, RHdrToBody(Block), file, line );
				break;
			case 2: // block is in que
				// nothing to verify here yet
				break;
			default:
				ASSERT ( !"invalid status in memory block found in pool!" );
			}
			NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
			if ( !NextBlock )
				break;
			/* adjacent headers must agree on each other's sizes */
			ASSERT ( NextBlock->PrevSize == Block->Size );
			Block = NextBlock;
		}
	}
	{
		// now let's step through the list of free pointers and verify
		// each one can be found by size-jumping...
		PR_FREE Free = (PR_FREE)pool->FirstFree;
		while ( Free )
		{
			PR_FREE NextFree = (PR_FREE)pool->UserBase;
			if ( Free != NextFree )
			{
				while ( NextFree != Free )
				{
					NextFree = RNextBlock ( pool, NextFree );
					ASSERT(NextFree);
				}
			}
			Free = Free->NextFree;
		}
	}
}
580
581 static void
582 RSetSize ( PR_POOL pool, PR_FREE Block, rulong NewSize, PR_FREE NextBlock )
583 {
584 R_ASSERT_PTR(pool,Block);
585 ASSERT ( NewSize < pool->UserSize );
586 ASSERT ( NewSize >= sizeof(R_FREE) );
587 Block->Size = NewSize;
588 if ( !NextBlock )
589 NextBlock = RNextBlock ( pool, Block );
590 if ( NextBlock )
591 NextBlock->PrevSize = NewSize;
592 }
593
/*
 * Split a free block in two at offset NewSize: Block keeps the first
 * NewSize bytes, the remainder becomes a fresh free block that is
 * initialized and pushed onto the free list. Returns the new block.
 */
static PR_FREE
RFreeSplit ( PR_POOL pool, PR_FREE Block, rulong NewSize )
{
	PR_FREE NewBlock = (PR_FREE)((char*)Block + NewSize);
	/* size the tail first (fixing its successor's PrevSize), then
	 * shrink the head, explicitly naming NewBlock as its successor */
	RSetSize ( pool, NewBlock, Block->Size - NewSize, NULL );
	RSetSize ( pool, Block, NewSize, NewBlock );
	RFreeInit ( NewBlock );
	RPoolAddFree ( pool, NewBlock );
	return NewBlock;
}
604
/*
 * Coalesce two physically adjacent free blocks: Second is removed
 * from the free list and absorbed into First.
 */
static void
RFreeMerge ( PR_POOL pool, PR_FREE First, PR_FREE Second )
{
	ASSERT ( RPreviousBlock(Second) == First );
	ASSERT ( First->Size == Second->PrevSize );
	RPoolRemoveFree ( pool, Second );
	RSetSize ( pool, First, First->Size + Second->Size, NULL );
}
613
/*
 * Return a block to the main free list, coalescing it with the
 * physically adjacent next and previous blocks when they are free.
 */
static void
RPoolReclaim ( PR_POOL pool, PR_FREE FreeBlock )
{
	PR_FREE NextBlock, PreviousBlock;

	RFreeInit ( FreeBlock );
	RPoolAddFree ( pool, FreeBlock );

	// TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...

	/*
	 * If the next block is immediately adjacent to the newly freed one then
	 * merge them.
	 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
	 */
	NextBlock = RNextBlock ( pool, FreeBlock );
	if ( NextBlock != NULL && !NextBlock->Status )
	{
		RFreeMerge ( pool, FreeBlock, NextBlock );
	}

	/*
	 * If the previous block is adjacent to the newly freed one then
	 * merge them.
	 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
	 */
	PreviousBlock = RPreviousBlock ( FreeBlock );
	if ( PreviousBlock != NULL && !PreviousBlock->Status )
	{
		RFreeMerge ( pool, PreviousBlock, FreeBlock );
	}
}
646
/* Stamp a block as allocated: Status 1, the caller's back trace, the
 * used magic (when enabled), and the caller's pool tag. */
static void
RiUsedInit ( PR_USED Block, rulong Tag )
{
	Block->Status = 1;
	RUsedFillStack ( Block );
#ifdef R_MAGIC
	Block->UsedMagic = R_USED_MAGIC;
#endif//R_MAGIC
	//ASSERT_SIZE ( Block->Size );

	// now add the block to the used block list
#if DBG
	/* there is no real used list; poison the link to catch stale use */
	Block->NextUsed = (PR_USED)(ULONG_PTR)0xDEADBEEF;
#endif//DBG

	Block->Tag = Tag;
}
664
#if !R_RZ
/* Redzones disabled: expands to nothing. */
#define RiUsedInitRedZone(Block,UserSize)
#else//R_RZ
/* Record the requested size and paint both redzones around the user
 * data; under DBG the user data itself is filled with 0xCD so reads
 * of uninitialized memory stand out. */
static void
RiUsedInitRedZone ( PR_USED Block, rulong UserSize )
{
	// write out buffer-overrun detection bytes
	char* Addr = (char*)RHdrToBody(Block);
	Block->UserSize = UserSize;
	memset ( Addr - R_RZ, R_RZ_LOVALUE, R_RZ );
	memset ( Addr + Block->UserSize, R_RZ_HIVALUE, R_RZ );
#if DBG
	memset ( Addr, 0xCD, UserSize );
#endif//DBG
}
#endif//R_RZ
681
/*
 * Allocate NumberOfBytes from the pool with the requested alignment
 * class (0..2, indexing pool->Alignments). Small requests are served
 * from the look-aside ques when possible; otherwise a best-fit search
 * of the free list is performed, reclaiming all ques once if nothing
 * fits. Returns a pointer to the user data, or NULL on failure.
 *
 * Debug back doors: NumberOfBytes == 0 runs a full-pool red zone
 * check; a NumberOfBytes that is itself a pool pointer red-zone-checks
 * that single block. Both return NULL.
 */
static void*
RPoolAlloc ( PR_POOL pool, rulong NumberOfBytes, rulong Tag, rulong align )
{
	PR_USED NewBlock;
	PR_FREE BestBlock,
		NextBlock,
		PreviousBlock,
		BestPreviousBlock,
		CurrentBlock;
	void* BestAlignedAddr;
	int que,
		queBytes = NumberOfBytes; /* NOTE(review): narrows rulong to int - assumes requests < 2^31; confirm */
	rulong BlockSize,
		Alignment;
	int que_reclaimed = 0; /* set after the one-shot que reclaim retry */

	ASSERT ( pool );
	ASSERT ( align < 3 );

	R_ACQUIRE_MUTEX(pool);

	if ( !NumberOfBytes )
	{
		/* debug back door: verify the whole pool instead of allocating */
		R_DEBUG("0 bytes requested - initiating pool verification\n");
		RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
		R_RELEASE_MUTEX(pool);
		return NULL;
	}
	if ( NumberOfBytes > pool->PoolSize )
	{
		if ( R_IS_POOL_PTR(pool,NumberOfBytes) )
		{
			/* debug back door: treat the "size" as a block address to check */
			R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes );
			RUsedRedZoneCheck(pool,RBodyToHdr((void*)(ULONG_PTR)NumberOfBytes), (char*)(ULONG_PTR)NumberOfBytes, __FILE__, __LINE__ );
			R_RELEASE_MUTEX(pool);
			return NULL;
		}
		R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes );
		R_RELEASE_MUTEX(pool);
		return NULL;
	}

	/* fast path: pop a cached block of the right size/alignment class */
	que = RQueWhich ( NumberOfBytes );
	if ( que >= 0 )
	{
		if ( (NewBlock = RQueRemove ( &pool->Que[que][align] )) )
		{
			RiUsedInit ( NewBlock, Tag );
			RiUsedInitRedZone ( NewBlock, NumberOfBytes );
			R_RELEASE_MUTEX(pool);
			return RHdrToBody(NewBlock);
		}
		/* round the request up to the que granularity for the search */
		queBytes = 16 << que;
	}

	/*
	 * Calculate the total number of bytes we will need.
	 */
	BlockSize = queBytes + sizeof(R_USED) + 2*R_RZ;
	if (BlockSize < sizeof(R_FREE))
	{
		/* At least we need the size of the free block header. */
		BlockSize = sizeof(R_FREE);
	}

try_again:
	/*
	 * Find the best-fitting block.
	 */
	BestBlock = NULL;
	Alignment = pool->Alignments[align];
	PreviousBlock = NULL;
	/* NOTE(review): BestPreviousBlock is written but never read */
	BestPreviousBlock = NULL,
	CurrentBlock = pool->FirstFree;
	BestAlignedAddr = NULL;

	while ( CurrentBlock != NULL )
	{
		PVOID Addr = RHdrToBody(CurrentBlock);
		PVOID CurrentBlockEnd = (char*)CurrentBlock + CurrentBlock->Size;
		/* calculate last size-aligned address available within this block */
		PVOID AlignedAddr = R_ROUND_DOWN((char*)CurrentBlockEnd-queBytes-R_RZ, Alignment);
		ASSERT ( (char*)AlignedAddr+queBytes+R_RZ <= (char*)CurrentBlockEnd );

		/* special case, this address is already size-aligned, and the right size */
		if ( Addr == AlignedAddr )
		{
			BestAlignedAddr = AlignedAddr;
			BestPreviousBlock = PreviousBlock;
			BestBlock = CurrentBlock;
			break;
		}
		// if we carve out a size-aligned block... is it still past the end of this
		// block's free header?
		else if ( (char*)RBodyToHdr(AlignedAddr)
			>= (char*)CurrentBlock+sizeof(R_FREE) )
		{
			/*
			 * there's enough room to allocate our size-aligned memory out
			 * of this block, see if it's a better choice than any previous
			 * finds
			 */
			if ( BestBlock == NULL
				|| BestBlock->Size > CurrentBlock->Size )
			{
				BestAlignedAddr = AlignedAddr;
				BestPreviousBlock = PreviousBlock;
				BestBlock = CurrentBlock;
			}
		}

		PreviousBlock = CurrentBlock;
		CurrentBlock = CurrentBlock->NextFree;
	}

	/*
	 * We didn't find anything suitable at all.
	 */
	if (BestBlock == NULL)
	{
		if ( !que_reclaimed )
		{
			// reclaim que: dump every cached block back into the free
			// list, then retry the best-fit search exactly once
			int i, j;
			for ( i = 0; i < R_QUECOUNT; i++ )
			{
				for ( j = 0; j < 3; j++ )
				{
					while ( (BestBlock = (PR_FREE)RQueRemove ( &pool->Que[i][j] )) )
					{
						RPoolReclaim ( pool, BestBlock );
					}
				}
			}

			que_reclaimed = 1;
			goto try_again;
		}
		DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
			queBytes );
		R_RELEASE_MUTEX(pool);
		return NULL;
	}
	/*
	 * we found a best block. If Addr isn't already aligned, we've pre-qualified that
	 * there's room at the beginning of the block for a free block...
	 */
	{
		void* Addr = RHdrToBody(BestBlock);
		if ( BestAlignedAddr != Addr )
		{
			/* carve off the unaligned prefix as its own free block */
			PR_FREE NewFreeBlock = RFreeSplit (
				pool,
				BestBlock,
				(char*)RBodyToHdr(BestAlignedAddr) - (char*)BestBlock );
			ASSERT ( BestAlignedAddr > Addr );

			//DPRINT ( "breaking off preceding bytes into their own block...\n" );
			/*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
				NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/

			/* we want the following code to use our size-aligned block */
			BestPreviousBlock = BestBlock;
			BestBlock = NewFreeBlock;

			//VerifyPagedPool();
		}
	}
	/*
	 * Is there enough space to create a second block from the unused portion.
	 */
	if ( (BestBlock->Size - BlockSize) > sizeof(R_FREE) )
	{
		/*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
			BestBlock, BestBlock->Size, BlockSize, NewSize );*/

		/*
		 * Create the new free block.
		 */
		NextBlock = RFreeSplit ( pool, BestBlock, BlockSize );
		//ASSERT_SIZE ( NextBlock->Size );
	}
	/*
	 * Remove the selected block from the list of free blocks.
	 */
	//DPRINT ( "Removing selected block from free block list\n" );
	RPoolRemoveFree ( pool, BestBlock );
	/*
	 * Create the new used block header.
	 */
	NewBlock = (PR_USED)BestBlock;
	RiUsedInit ( NewBlock, Tag );

	/* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/

	RiUsedInitRedZone ( NewBlock, NumberOfBytes );
	R_RELEASE_MUTEX(pool);

	return RHdrToBody(NewBlock);
}
882
883 static void
884 RPoolFree ( PR_POOL pool, void* Addr )
885 {
886 PR_USED UsedBlock;
887 rulong UsedSize;
888 PR_FREE FreeBlock;
889 rulong UserSize;
890 int que;
891
892 ASSERT(pool);
893 if ( !Addr )
894 {
895 R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
896 R_ACQUIRE_MUTEX(pool);
897 RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
898 R_RELEASE_MUTEX(pool);
899 return;
900 }
901 R_ASSERT_PTR(pool,Addr);
902
903 UsedBlock = RBodyToHdr(Addr);
904 UsedSize = UsedBlock->Size;
905 FreeBlock = (PR_FREE)UsedBlock;
906 #if R_RZ
907 UserSize = UsedBlock->UserSize;
908 #else
909 UserSize = UsedSize - sizeof(R_USED) - 2*R_RZ;
910 #endif//R_RZ
911
912 RUsedRedZoneCheck ( pool, UsedBlock, Addr, __FILE__, __LINE__ );
913
914 #if R_RZ
915 memset ( Addr, 0xCD, UsedBlock->UserSize );
916 #endif
917
918 que = RQueWhich ( UserSize );
919 if ( que >= 0 )
920 {
921 int queBytes = 16 << que;
922 ASSERT( (rulong)queBytes >= UserSize );
923 if ( que >= 0 )
924 {
925 int align = 0;
926 if ( R_ROUND_UP(Addr,pool->Alignments[2]) == Addr )
927 align = 2;
928 else if ( R_ROUND_UP(Addr,pool->Alignments[1]) == Addr )
929 align = 1;
930 R_ACQUIRE_MUTEX(pool);
931 RQueAdd ( &pool->Que[que][align], UsedBlock );
932 R_RELEASE_MUTEX(pool);
933 return;
934 }
935 }
936
937 R_ACQUIRE_MUTEX(pool);
938 RPoolReclaim ( pool, FreeBlock );
939 R_RELEASE_MUTEX(pool);
940 }
941
#if 0
/*
 * Debug helper (currently compiled out): list every allocated block
 * carrying the given tag and print a final count.
 */
static void
RPoolDumpByTag ( PR_POOL pool, rulong Tag )
{
	PR_USED Block = (PR_USED)pool->UserBase;
	PR_USED NextBlock;
	int count = 0;
	char tag[5];

	// TODO FIXME - should we validate params or ASSERT_IRQL?
	R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag,tag) );
	R_DEBUG ( " -BLOCK-- --SIZE--\n" );

	R_ACQUIRE_MUTEX(pool);
	/* walk every physical block by size-jumping */
	for ( ;; )
	{
		if ( Block->Status == 1 && Block->Tag == Tag )
		{
			R_DEBUG ( " %08X %08X\n", Block, Block->Size );
			++count;
		}
		NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
		if ( !NextBlock )
			break;
		ASSERT ( NextBlock->PrevSize == Block->Size );
		Block = NextBlock;
	}
	R_RELEASE_MUTEX(pool);

	R_DEBUG ( "Entries found for tag '%s': %i\n", tag, count );
}
#endif
974
975 rulong
976 RPoolQueryTag ( void* Addr )
977 {
978 PR_USED Block = RBodyToHdr(Addr);
979 // TODO FIXME - should we validate params?
980 #ifdef R_MAGIC
981 if ( Block->UsedMagic != R_USED_MAGIC )
982 return 0xDEADBEEF;
983 #endif//R_MAGIC
984 if ( Block->Status != 1 )
985 return 0xDEADBEEF;
986 return Block->Tag;
987 }
988
989 void
990 RPoolStats ( PR_POOL pool )
991 {
992 int free=0, used=0, qued=0;
993 PR_USED Block = (PR_USED)pool->UserBase;
994
995 R_ACQUIRE_MUTEX(pool);
996 while ( Block )
997 {
998 switch ( Block->Status )
999 {
1000 case 0:
1001 ++free;
1002 break;
1003 case 1:
1004 ++used;
1005 break;
1006 case 2:
1007 ++qued;
1008 break;
1009 default:
1010 ASSERT ( !"Invalid Status for Block in pool!" );
1011 }
1012 Block = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
1013 }
1014 R_RELEASE_MUTEX(pool);
1015
1016 R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free, used, qued, (free+used+qued) );
1017 }
1018
#ifdef R_LARGEST_ALLOC_POSSIBLE
/*
 * Scan all non-allocated blocks and report the largest user size an
 * RPoolAlloc with the given alignment class could currently satisfy,
 * accounting for header/redzone overhead and que-size rounding.
 */
static rulong
RPoolLargestAllocPossible ( PR_POOL pool, int align )
{
	int Alignment = pool->Alignments[align];
	rulong LargestUserSize = 0;
	PR_FREE Block = (PR_FREE)pool->UserBase;
	while ( Block )
	{
		if ( Block->Status != 1 )
		{
			void* Addr, *AlignedAddr;
			rulong BlockMaxUserSize;
			int cue, cueBytes;

			/* where would the user data land in this block? */
			Addr = (char*)Block + sizeof(R_USED) + R_RZ;
			AlignedAddr = R_ROUND_UP(Addr,Alignment);
			if ( Addr != AlignedAddr )
				/* leave room for the free block carved off the front */
				Addr = R_ROUND_UP((char*)Block + sizeof(R_FREE) + sizeof(R_USED) + R_RZ, Alignment );
			BlockMaxUserSize = (char*)Block + Block->Size - (char*)Addr - R_RZ;
			cue = RQueWhich ( BlockMaxUserSize );
			if ( cue >= 0 )
			{
				cueBytes = 16 << cue;
				/* BUGFIX: a stray ';' after this 'if' made the rounding
				 * body below run unconditionally, shrinking the reported
				 * size even when the block exactly fit its que class. */
				if ( (rulong)cueBytes > BlockMaxUserSize )
				{
					/* round down to the next-smaller que class */
					if ( !cue )
						BlockMaxUserSize = 0;
					else
						BlockMaxUserSize = 16 << (cue-1);
				}
			}
			if ( BlockMaxUserSize > LargestUserSize )
				LargestUserSize = BlockMaxUserSize;
		}
		Block = RNextBlock ( pool, Block );
	}
	return LargestUserSize;
}
#endif//R_LARGEST_ALLOC_POSSIBLE
1059
1060 #endif//RPOOLMGR_H