Copy wininet to branch
[reactos.git] / reactos / ntoskrnl / mm / RPoolMgr.h
1 /* $Id$
2 *
3 * COPYRIGHT: See COPYING in the top level directory
4 * PROJECT: ReactOS kernel
5 * FILE: ntoskrnl/mm/RPoolMgr.h
6 * PURPOSE: A semi-generic reuseable Pool implementation
7 * PROGRAMMER: Royce Mitchell III
8 * UPDATE HISTORY:
9 */
10
11 #ifndef RPOOLMGR_H
12 #define RPOOLMGR_H
13
typedef unsigned long rulong;

/* True if ptr lies inside the pool's user area.
 * BUGFIX: fully parenthesized (whole expansion and the pool argument) so the
 * macro composes safely inside larger expressions such as !R_IS_POOL_PTR(...)
 * or a && chain; the old expansion leaked an unparenthesized &&. */
#define R_IS_POOL_PTR(pool,ptr) ((void*)(ptr) >= (pool)->UserBase && (ULONG_PTR)(ptr) < ((ULONG_PTR)(pool)->UserBase + (pool)->UserSize))
#define R_ASSERT_PTR(pool,ptr) ASSERT( R_IS_POOL_PTR(pool,ptr) )
/* BUGFIX: sz parenthesized - an expression argument like a+b previously bound
 * incorrectly against the comparison operators. */
#define R_ASSERT_SIZE(pool,sz) ASSERT( (sz) > (sizeof(R_USED)+2*R_RZ) && (sz) >= sizeof(R_FREE) && (sz) < (pool)->UserSize )

#ifndef R_ROUND_UP
#define R_ROUND_UP(x,s)    ((PVOID)(((ULONG_PTR)(x)+(s)-1) & ~((ULONG_PTR)(s)-1)))
#endif//R_ROUND_UP

#ifndef R_ROUND_DOWN
#define R_ROUND_DOWN(x,s)  ((PVOID)(((ULONG_PTR)(x)) & ~((ULONG_PTR)(s)-1)))
#endif//R_ROUND_DOWN

#ifndef R_QUEMIN
// R_QUEMIN is the minimum number of entries to keep in a que
#define R_QUEMIN 0
#endif//R_QUEMIN

#ifndef R_QUECOUNT
// 16, 32, 64, 128, 256, 512
#define R_QUECOUNT 6
#endif//R_QUECOUNT

#ifndef R_RZ
// R_RZ is the redzone size
#define R_RZ 4
#endif//R_RZ

#ifndef R_RZ_LOVALUE
#define R_RZ_LOVALUE 0x87
#endif//R_RZ_LOVALUE

#ifndef R_RZ_HIVALUE
#define R_RZ_HIVALUE 0xA5
#endif//R_RZ_HIVALUE

#ifndef R_STACK
// R_STACK is the number of stack entries to store in blocks for debug purposes
#define R_STACK 3
#endif//R_STACK

#ifndef R_TAG
// R_TAG do we keep track of tags on a per-memory block basis?
#define R_TAG 0
#endif//R_TAG

#ifdef R_MAGIC
# ifndef R_FREE_MAGIC
#  define R_FREE_MAGIC (rulong)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
# endif//R_FREE_MAGIC
# ifndef R_USED_MAGIC
#  define R_USED_MAGIC (rulong)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
# endif//R_USED_MAGIC
#endif//R_MAGIC
69
70 // **IMPORTANT NOTE** Magic, PrevSize, Size and Status must be at same offset
71 // in both the R_FREE and R_USED structures
72
// Header of a free block. Per the note above, Magic, PrevSize, Size and
// Status must stay at the same offsets as in R_USED so a block can be
// inspected before its type is known.
typedef struct _R_FREE
{
#ifdef R_MAGIC
	rulong FreeMagic;          // R_FREE_MAGIC while the block is free
#endif//R_MAGIC
	rulong PrevSize : 30;      // size of the physically preceding block; 0 = first block in pool
	rulong Status : 2;         // 0 = free (in chain), 1 = allocated, 2 = parked in a que
	rulong Size;               // total size of this block, header included
#if R_STACK
	ULONG_PTR LastOwnerStack[R_STACK]; // debug aid: stack frames of the last owner
#endif//R_STACK
	struct _R_FREE* NextFree;  // free-chain links (doubly linked, see RPoolAddFree)
	struct _R_FREE* PrevFree;
}
R_FREE, *PR_FREE;
88
// Header of an allocated (or que-parked) block. The leading fields must
// mirror R_FREE exactly (see the note above the struct definitions).
typedef struct _R_USED
{
#ifdef R_MAGIC
	rulong UsedMagic;          // R_USED_MAGIC while the block is allocated
#endif//R_MAGIC
	rulong PrevSize : 30;      // size of the physically preceding block; 0 = first block in pool
	rulong Status : 2;         // 0 = free (in chain), 1 = allocated, 2 = parked in a que
	rulong Size;               // total size of this block, header included
#if R_STACK
	ULONG_PTR LastOwnerStack[R_STACK]; // debug aid: stack frames of the allocator
#endif//R_STACK
	struct _R_USED* NextUsed;  // next block in the same que (meaningful while Status == 2)
#if R_RZ
	rulong UserSize; // how many bytes the user actually asked for...
#endif//R_RZ
	rulong Tag;                // 4-byte allocation tag (see RFormatTag)
}
R_USED, *PR_USED;
107
// A FIFO of recently freed blocks of one size class kept aside for quick
// reuse (pushed at Last by RQueAdd, popped at First by RQueRemove).
typedef struct _R_QUE
{
	rulong Count;              // number of blocks currently parked here
	PR_USED First, Last;       // head (pop side) and tail (push side)
}
R_QUE, *PR_QUE;
114
// Top-level pool descriptor; RPoolInit places it at the very start of the
// pool memory, with the user area immediately after it.
typedef struct _R_POOL
{
	void* PoolBase;            // start of the whole pool (== address of this header)
	rulong PoolSize;           // total pool size in bytes, header included
	void* UserBase;            // first byte usable for blocks (just past this header)
	rulong UserSize;           // bytes available for blocks
	rulong Alignments[3];      // selectable alignment granularities (index 0..2)
	PR_FREE FirstFree, LastFree; // doubly-linked chain of free blocks
	R_QUE Que[R_QUECOUNT][3];  // quick-reuse ques per size class and alignment
	R_MUTEX Mutex;             // guards pool mutations via R_ACQUIRE/RELEASE_MUTEX
}
R_POOL, *PR_POOL;
127
#if !R_STACK
#define RiPrintLastOwner(Block)
#else
/*
 * Print the stack frames recorded in a block's LastOwnerStack. Frames
 * still holding the 0xDEADBEEF fill pattern are skipped; when
 * R_PRINT_ADDRESS cannot render a frame, the raw address is printed
 * as "<addr>" instead.
 */
static void
RiPrintLastOwner ( PR_USED Block )
{
	int frame;
	for ( frame = 0; frame < R_STACK; frame++ )
	{
		ULONG_PTR addr = Block->LastOwnerStack[frame];
		if ( addr == 0xDEADBEEF )
			continue;
		R_DEBUG(" ");
		if ( !R_PRINT_ADDRESS ((PVOID)addr) )
			R_DEBUG("<%X>", addr );
	}
}
#endif//R_STACK
148
149 static int
150 RQueWhich ( rulong size )
151 {
152 rulong que, quesize;
153 for ( que=0, quesize=16; que < R_QUECOUNT; que++, quesize<<=1 )
154 {
155 if ( quesize >= size )
156 {
157 return que;
158 }
159 }
160 return -1;
161 }
162
163 static void
164 RQueInit ( PR_QUE que )
165 {
166 que->Count = 0;
167 que->First = NULL;
168 que->Last = NULL;
169 }
170
171 static void
172 RQueAdd ( PR_QUE que, PR_USED Item )
173 {
174 ASSERT(Item);
175 Item->Status = 2;
176 Item->NextUsed = NULL;
177 ++que->Count;
178 if ( !que->Last )
179 {
180 que->First = que->Last = Item;
181 return;
182 }
183 ASSERT(!que->Last->NextUsed);
184 que->Last->NextUsed = Item;
185 que->Last = Item;
186 }
187
188 static PR_USED
189 RQueRemove ( PR_QUE que )
190 {
191 PR_USED Item;
192 #if R_QUEMIN
193 if ( que->count < R_QUEMIN )
194 return NULL;
195 #endif
196 if ( !que->First )
197 return NULL;
198 Item = que->First;
199 que->First = Item->NextUsed;
200 if ( !--que->Count )
201 {
202 ASSERT ( !que->First );
203 que->Last = NULL;
204 }
205 Item->Status = 0;
206 return Item;
207 }
208
209 static void
210 RPoolAddFree ( PR_POOL pool, PR_FREE Item )
211 {
212 ASSERT(pool);
213 ASSERT(Item);
214 if ( !pool->FirstFree )
215 {
216 pool->FirstFree = pool->LastFree = Item;
217 Item->NextFree = NULL;
218 }
219 else
220 {
221 pool->FirstFree->PrevFree = Item;
222 Item->NextFree = pool->FirstFree;
223 pool->FirstFree = Item;
224 }
225 Item->PrevFree = NULL;
226 }
227
228 static void
229 RPoolRemoveFree ( PR_POOL pool, PR_FREE Item )
230 {
231 ASSERT(pool);
232 ASSERT(Item);
233 if ( Item->NextFree )
234 Item->NextFree->PrevFree = Item->PrevFree;
235 else
236 {
237 ASSERT ( pool->LastFree == Item );
238 pool->LastFree = Item->PrevFree;
239 }
240 if ( Item->PrevFree )
241 Item->PrevFree->NextFree = Item->NextFree;
242 else
243 {
244 ASSERT ( pool->FirstFree == Item );
245 pool->FirstFree = Item->NextFree;
246 }
247 #if defined(DBG) || defined(KDBG)
248 Item->NextFree = Item->PrevFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
249 #endif//DBG || KDBG
250 }
251
/*
 * Record the caller's stack frames in a free block header for debugging,
 * skipping the 3 innermost frames (this helper and its immediate callers).
 * NOTE(review): the loop copies R_EXTRA_STACK_UP entries (macro defined
 * outside this file) into LastOwnerStack[], which only holds R_STACK
 * entries - confirm R_EXTRA_STACK_UP <= R_STACK or this overruns the header.
 */
static void
RFreeFillStack ( PR_FREE free )
{
	int i;
	ULONG stack[R_EXTRA_STACK_UP+3]; // need to skip 3 known levels of stack trace
	memset ( stack, 0xCD, sizeof(stack) );
	R_GET_STACK_FRAMES ( stack, R_EXTRA_STACK_UP+3 );
	for ( i = 0; i < R_EXTRA_STACK_UP; i++ )
		free->LastOwnerStack[i] = stack[i+3];
}
262
/*
 * Record the caller's stack frames in a used block header for debugging,
 * skipping the 2 innermost frames (this helper and its immediate caller).
 * NOTE(review): same R_EXTRA_STACK_UP vs R_STACK capacity concern as in
 * RFreeFillStack - confirm the constants agree.
 */
static void
RUsedFillStack ( PR_USED used )
{
	int i;
	ULONG stack[R_EXTRA_STACK_UP+2]; // need to skip 2 known levels of stack trace
	memset ( stack, 0xCD, sizeof(stack) );
	R_GET_STACK_FRAMES ( stack, R_EXTRA_STACK_UP+2 );
	for ( i = 0; i < R_EXTRA_STACK_UP; i++ )
		used->LastOwnerStack[i] = stack[i+2];
}
273
274 static PR_FREE
275 RFreeInit ( void* memory )
276 {
277 PR_FREE block = (PR_FREE)memory;
278 #if R_FREEMAGIC
279 block->FreeMagic = R_FREE_MAGIC;
280 #endif//R_FREEMAGIC
281 block->Status = 0;
282 RFreeFillStack ( block );
283 #if defined(DBG) || defined(KDBG)
284 block->PrevFree = block->NextFree = (PR_FREE)(ULONG_PTR)0xDEADBEEF;
285 #endif//DBG || KDBG
286 return block;
287 }
288
/*
 * Carve a pool out of raw memory: the R_POOL header occupies the start of
 * PoolBase, and the remainder becomes a single big free block. align1..3
 * are the per-allocation alignment granularities (selected by index 0..2).
 * Returns the pool header (same address as PoolBase).
 * NOTE(review): pool->Mutex is not initialized here - presumably the
 * includer's R_ACQUIRE_MUTEX machinery handles that; confirm.
 */
PR_POOL
RPoolInit ( void* PoolBase, rulong PoolSize, int align1, int align2, int align3 )
{
	int align, que;
	PR_POOL pool = (PR_POOL)PoolBase;

	pool->PoolBase = PoolBase;
	pool->PoolSize = PoolSize;
	// user area starts immediately after this header
	pool->UserBase = (char*)pool->PoolBase + sizeof(R_POOL);
	pool->UserSize = PoolSize - sizeof(R_POOL);
	pool->Alignments[0] = align1;
	pool->Alignments[1] = align2;
	pool->Alignments[2] = align3;
	pool->FirstFree = pool->LastFree = NULL;

	// the whole user area starts life as one free block
	RPoolAddFree ( pool,
		RFreeInit ( pool->UserBase ));

	pool->FirstFree->PrevSize = 0;
	pool->FirstFree->Size = pool->UserSize;

	// all quick-reuse ques start empty
	for ( que = 0; que < R_QUECOUNT; que++ )
	{
		for ( align = 0; align < 3; align++ )
		{
			RQueInit ( &pool->Que[que][align] );
		}
	}
	return pool;
}
319
320 static const char*
321 RFormatTag ( rulong Tag, char* buf )
322 {
323 int i;
324 *(rulong*)&buf[0] = Tag;
325 buf[4] = 0;
326 for ( i = 0; i < 4; i++ )
327 {
328 if ( !buf[i] )
329 buf[i] = ' ';
330 }
331 return buf;
332 }
333
#if !R_RZ
#define RUsedRedZoneCheck(pUsed,Addr,file,line)
#else//R_RZ
/*
 * Report a corrupt or misused block: prints the violation, the block's
 * header fields, optionally both redzones, the recorded owner stack, and
 * a hex/ASCII dump of (at most) the first 128 user bytes - then panics.
 * This function does not return (R_PANIC).
 */
static void
RiBadBlock ( PR_USED pUsed, char* Addr, const char* violation, const char* file, int line, int printzone )
{
	char tag[5];
	int i;

	R_DEBUG("%s(%i): %s detected for paged pool address 0x%x\n",
		file, line, violation, Addr );

#ifdef R_MAGIC
	R_DEBUG ( "UsedMagic 0x%x, ", pUsed->UsedMagic );
#endif//R_MAGIC
	R_DEBUG ( "Tag %s(%X), Size %i, UserSize %i",
		RFormatTag(pUsed->Tag,tag),
		pUsed->Tag,
		pUsed->Size,
		pUsed->UserSize );

	if ( printzone )
	{
		// dump both redzones so the overwritten bytes are visible
		unsigned char* HiZone = (unsigned char*)Addr + pUsed->UserSize;
		unsigned char* LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
		R_DEBUG ( ", LoZone " );
		for ( i = 0; i < R_RZ; i++ )
			R_DEBUG ( "%02x", LoZone[i] );
		R_DEBUG ( ", HiZone " );
		for ( i = 0; i < R_RZ; i++ )
			R_DEBUG ( "%02x", HiZone[i] );
	}
	R_DEBUG ( "\n" );

	R_DEBUG ( "First few Stack Frames:" );
	RiPrintLastOwner ( pUsed );
	R_DEBUG ( "\n" );

	// hex + ASCII dump, 16 bytes per row, at most 8 rows
	R_DEBUG ( "Contents of Block:\n" );
	for ( i = 0; i < 8*16 && i < pUsed->UserSize; i += 16 )
	{
		int j;
		R_DEBUG ( "%04X ", i );
		for ( j = 0; j < 16; j++ )
		{
			if ( i+j < pUsed->UserSize )
			{
				R_DEBUG ( "%02X ", (unsigned)(unsigned char)Addr[i+j] );
			}
			else
			{
				R_DEBUG ( " " );
			}
		}
		R_DEBUG(" ");
		for ( j = 0; j < 16; j++ )
		{
			if ( i+j < pUsed->UserSize )
			{
				char c = Addr[i+j];
				if ( c < 0x20 || c > 0x7E )
					c = '.';
				R_DEBUG ( "%c", c );
			}
			else
			{
				R_DEBUG ( " " );
			}
		}
		R_DEBUG("\n");
	}
	R_PANIC();
}
407 static void
408 RUsedRedZoneCheck ( PR_POOL pool, PR_USED pUsed, char* Addr, const char* file, int line )
409 {
410 int i;
411 unsigned char *LoZone, *HiZone;
412 int bLow = 1;
413 int bHigh = 1;
414
415 ASSERT ( Addr >= (char*)pool->UserBase && Addr < ((char*)pool->UserBase + pool->UserSize - 16) );
416 #ifdef R_MAGIC
417 if ( pUsed->UsedMagic == MM_PPOOL_FREEMAGIC )
418 {
419 pUsed->UserSize = 0; // just to keep from confusion, MmpBadBlock() doesn't return...
420 RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
421 }
422 if ( pUsed->UsedMagic != MM_PPOOL_USEDMAGIC )
423 {
424 RiBadBlock ( pUsed, Addr, "bad magic", file, line, 0 );
425 }
426 #endif//R_MAGIC
427 switch ( pUsed->Status )
428 {
429 case 0: // freed into main pool
430 case 2: // in ques
431 RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
432 // no need for break here - RiBadBlock doesn't return
433 case 1: // allocated - this is okay
434 break;
435 default:
436 RiBadBlock ( pUsed, Addr, "corrupt status", file, line, 0 );
437 }
438 if ( pUsed->Status != 1 )
439 {
440 RiBadBlock ( pUsed, Addr, "double-free", file, line, 0 );
441 }
442 if ( pUsed->Size > pool->PoolSize || pUsed->Size == 0 )
443 {
444 RiBadBlock ( pUsed, Addr, "invalid size", file, line, 0 );
445 }
446 if ( pUsed->UserSize > pool->PoolSize || pUsed->UserSize == 0 )
447 {
448 RiBadBlock ( pUsed, Addr, "invalid user size", file, line, 0 );
449 }
450 HiZone = (unsigned char*)Addr + pUsed->UserSize;
451 LoZone = (unsigned char*)Addr - R_RZ; // this is to simplify indexing below...
452 for ( i = 0; i < R_RZ && bLow && bHigh; i++ )
453 {
454 bLow = bLow && ( LoZone[i] == R_RZ_LOVALUE );
455 bHigh = bHigh && ( HiZone[i] == R_RZ_HIVALUE );
456 }
457 if ( !bLow || !bHigh )
458 {
459 const char* violation = "High and Low-side redzone overwrite";
460 if ( bHigh ) // high is okay, so it was just low failed
461 violation = "Low-side redzone overwrite";
462 else if ( bLow ) // low side is okay, so it was just high failed
463 violation = "High-side redzone overwrite";
464 RiBadBlock ( pUsed, Addr, violation, file, line, 1 );
465 }
466 }
467 #endif//R_RZ
468
469 PR_FREE
470 RPreviousBlock ( PR_FREE Block )
471 {
472 if ( Block->PrevSize > 0 )
473 return (PR_FREE)( (char*)Block - Block->PrevSize );
474 return NULL;
475 }
476
477 PR_FREE
478 RNextBlock ( PR_POOL pool, PR_FREE Block )
479 {
480 PR_FREE NextBlock = (PR_FREE)( (char*)Block + Block->Size );
481 if ( (char*)NextBlock >= (char*)pool->UserBase + pool->UserSize )
482 NextBlock = NULL;
483 return NextBlock;
484 }
485
486 inline static void*
487 RHdrToBody ( void* blk )
488 /*
489 * FUNCTION: Translate a block header address to the corresponding block
490 * address (internal)
491 */
492 {
493 return ( (void *) ((char*)blk + sizeof(R_USED) + R_RZ) );
494 }
495
496 inline static PR_USED
497 RBodyToHdr ( void* addr )
498 {
499 return (PR_USED)
500 ( ((char*)addr) - sizeof(R_USED) - R_RZ );
501 }
502
503 static int
504 RiInFreeChain ( PR_POOL pool, PR_FREE Block )
505 {
506 PR_FREE Free;
507 Free = pool->FirstFree;
508 if ( Free == Block )
509 return 1;
510 while ( Free != Block )
511 {
512 Free = Free->NextFree;
513 if ( !Free )
514 return 0;
515 }
516 return 1;
517 }
518
/*
 * Full-pool consistency sweep. Pass 1 walks every block physically (by
 * size-jumping) and verifies its status: free blocks must be in the free
 * chain, allocated blocks must pass the redzone check, and neighbor
 * PrevSize links must agree. Pass 2 verifies every free-chain entry is
 * reachable by the same physical walk. Caller holds the pool mutex.
 */
static void
RPoolRedZoneCheck ( PR_POOL pool, const char* file, int line )
{
	{
		// pass 1: physical walk over all blocks
		PR_USED Block = (PR_USED)pool->UserBase;
		PR_USED NextBlock;

		for ( ;; )
		{
			switch ( Block->Status )
			{
			case 0: // block is in chain
				ASSERT ( RiInFreeChain ( pool, (PR_FREE)Block ) );
				break;
			case 1: // block is allocated
				RUsedRedZoneCheck ( pool, Block, RHdrToBody(Block), file, line );
				break;
			case 2: // block is in que
				// nothing to verify here yet
				break;
			default:
				ASSERT ( !"invalid status in memory block found in pool!" );
			}
			NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
			if ( !NextBlock )
				break;
			ASSERT ( NextBlock->PrevSize == Block->Size );
			Block = NextBlock;
		}
	}
	{
		// now let's step through the list of free pointers and verify
		// each one can be found by size-jumping...
		PR_FREE Free = (PR_FREE)pool->FirstFree;
		while ( Free )
		{
			PR_FREE NextFree = (PR_FREE)pool->UserBase;
			if ( Free != NextFree )
			{
				while ( NextFree != Free )
				{
					NextFree = RNextBlock ( pool, NextFree );
					ASSERT(NextFree);
				}
			}
			Free = Free->NextFree;
		}
	}
}
568
569 static void
570 RSetSize ( PR_POOL pool, PR_FREE Block, rulong NewSize, PR_FREE NextBlock )
571 {
572 R_ASSERT_PTR(pool,Block);
573 ASSERT ( NewSize < pool->UserSize );
574 ASSERT ( NewSize >= sizeof(R_FREE) );
575 Block->Size = NewSize;
576 if ( !NextBlock )
577 NextBlock = RNextBlock ( pool, Block );
578 if ( NextBlock )
579 NextBlock->PrevSize = NewSize;
580 }
581
582 static PR_FREE
583 RFreeSplit ( PR_POOL pool, PR_FREE Block, rulong NewSize )
584 {
585 PR_FREE NewBlock = (PR_FREE)((char*)Block + NewSize);
586 RSetSize ( pool, NewBlock, Block->Size - NewSize, NULL );
587 RSetSize ( pool, Block, NewSize, NewBlock );
588 RFreeInit ( NewBlock );
589 RPoolAddFree ( pool, NewBlock );
590 return NewBlock;
591 }
592
/*
 * Coalesce two physically adjacent free blocks: Second is unlinked from
 * the free chain and absorbed into First, which must immediately precede
 * it in memory.
 */
static void
RFreeMerge ( PR_POOL pool, PR_FREE First, PR_FREE Second )
{
	ASSERT ( RPreviousBlock(Second) == First );
	ASSERT ( First->Size == Second->PrevSize );
	RPoolRemoveFree ( pool, Second );
	RSetSize ( pool, First, First->Size + Second->Size, NULL );
}
601
/*
 * Return a block to the main free pool: re-initialize its header, link it
 * into the free chain, then coalesce with physically adjacent free
 * neighbors (the successor first, then the predecessor).
 */
static void
RPoolReclaim ( PR_POOL pool, PR_FREE FreeBlock )
{
	PR_FREE NextBlock, PreviousBlock;

	RFreeInit ( FreeBlock );
	RPoolAddFree ( pool, FreeBlock );

	// TODO FIXME - don't merge and always insert freed blocks at the end for debugging purposes...

	/*
	 * If the next block is immediately adjacent to the newly freed one then
	 * merge them.
	 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
	 */
	NextBlock = RNextBlock ( pool, FreeBlock );
	if ( NextBlock != NULL && !NextBlock->Status )
	{
		RFreeMerge ( pool, FreeBlock, NextBlock );
	}

	/*
	 * If the previous block is adjacent to the newly freed one then
	 * merge them.
	 * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
	 */
	PreviousBlock = RPreviousBlock ( FreeBlock );
	if ( PreviousBlock != NULL && !PreviousBlock->Status )
	{
		RFreeMerge ( pool, PreviousBlock, FreeBlock );
	}
}
634
635 static void
636 RiUsedInit ( PR_USED Block, rulong Tag )
637 {
638 Block->Status = 1;
639 RUsedFillStack ( Block );
640 #if R_MAGIC
641 Block->UsedMagic = R_USED_MAGIC;
642 #endif//R_MAGIC
643 //ASSERT_SIZE ( Block->Size );
644
645 // now add the block to the used block list
646 #if defined(DBG) || defined(KDBG)
647 Block->NextUsed = (PR_USED)(ULONG_PTR)0xDEADBEEF;
648 #endif//R_USED_LIST
649
650 Block->Tag = Tag;
651 }
652
#if !R_RZ
#define RiUsedInitRedZone(Block,UserSize)
#else//R_RZ
/*
 * Arm the overrun detection for a fresh allocation: record the exact
 * user-requested size, stamp the low/high redzone patterns around the
 * user area, and (debug) fill the user area with 0xCD so reads of
 * uninitialized memory are recognizable.
 */
static void
RiUsedInitRedZone ( PR_USED Block, rulong UserSize )
{
	// write out buffer-overrun detection bytes
	char* Addr = (char*)RHdrToBody(Block);
	Block->UserSize = UserSize;
	memset ( Addr - R_RZ, R_RZ_LOVALUE, R_RZ );
	memset ( Addr + Block->UserSize, R_RZ_HIVALUE, R_RZ );
#if defined(DBG) || defined(KDBG)
	memset ( Addr, 0xCD, UserSize );
#endif//DBG || KDBG
}
#endif//R_RZ
669
/*
 * Allocate NumberOfBytes from the pool, stamped with Tag, aligned per
 * pool->Alignments[align] (align is 0..2). Returns a pointer to the user
 * area, or NULL on failure.
 *
 * Debug back-doors: NumberOfBytes == 0 triggers a full red-zone sweep;
 * a NumberOfBytes that is itself a pool pointer red-zone-checks that
 * single block. Both return NULL.
 *
 * Strategy: first try the quick-reuse que for the size class; otherwise
 * best-fit search the free chain for a block that can hold a properly
 * aligned body, reclaiming all que'd blocks and retrying once if nothing
 * fits.
 */
static void*
RPoolAlloc ( PR_POOL pool, rulong NumberOfBytes, rulong Tag, rulong align )
{
	PR_USED NewBlock;
	PR_FREE BestBlock,
		NextBlock,
		PreviousBlock,
		BestPreviousBlock,
		CurrentBlock;
	void* BestAlignedAddr;
	int que,
		queBytes = NumberOfBytes;
	rulong BlockSize,
		Alignment;
	int que_reclaimed = 0;

	ASSERT ( pool );
	ASSERT ( align < 3 );

	R_ACQUIRE_MUTEX(pool);

	if ( !NumberOfBytes )
	{
		// debug back-door: verify the whole pool instead of allocating
		R_DEBUG("0 bytes requested - initiating pool verification\n");
		RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
		R_RELEASE_MUTEX(pool);
		return NULL;
	}
	if ( NumberOfBytes > pool->PoolSize )
	{
		if ( R_IS_POOL_PTR(pool,NumberOfBytes) )
		{
			// debug back-door: the "size" is really a pool pointer - check it
			R_DEBUG("red zone verification requested for block 0x%X\n", NumberOfBytes );
			RUsedRedZoneCheck(pool,RBodyToHdr((void*)NumberOfBytes), (char*)NumberOfBytes, __FILE__, __LINE__ );
			R_RELEASE_MUTEX(pool);
			return NULL;
		}
		R_DEBUG("Invalid allocation request: %i bytes\n", NumberOfBytes );
		R_RELEASE_MUTEX(pool);
		return NULL;
	}

	// fast path: pop a pre-sized block off the matching que if one is parked
	que = RQueWhich ( NumberOfBytes );
	if ( que >= 0 )
	{
		if ( (NewBlock = RQueRemove ( &pool->Que[que][align] )) )
		{
			R_RELEASE_MUTEX(pool);
			RiUsedInit ( NewBlock, Tag );
			RiUsedInitRedZone ( NewBlock, NumberOfBytes );
			return RHdrToBody(NewBlock);
		}
		// round the request up to the que's size class so the block is
		// que-reusable when freed
		queBytes = 16 << que;
	}

	/*
	 * Calculate the total number of bytes we will need.
	 */
	BlockSize = queBytes + sizeof(R_USED) + 2*R_RZ;
	if (BlockSize < sizeof(R_FREE))
	{
		/* At least we need the size of the free block header. */
		BlockSize = sizeof(R_FREE);
	}

try_again:
	/*
	 * Find the best-fitting block.
	 */
	BestBlock = NULL;
	Alignment = pool->Alignments[align];
	PreviousBlock = NULL;
	BestPreviousBlock = NULL,
	CurrentBlock = pool->FirstFree;
	BestAlignedAddr = NULL;

	while ( CurrentBlock != NULL )
	{
		PVOID Addr = RHdrToBody(CurrentBlock);
		PVOID CurrentBlockEnd = (char*)CurrentBlock + CurrentBlock->Size;
		/* calculate last size-aligned address available within this block */
		PVOID AlignedAddr = R_ROUND_DOWN((char*)CurrentBlockEnd-queBytes-R_RZ, Alignment);
		ASSERT ( (char*)AlignedAddr+queBytes+R_RZ <= (char*)CurrentBlockEnd );

		/* special case, this address is already size-aligned, and the right size */
		if ( Addr == AlignedAddr )
		{
			BestAlignedAddr = AlignedAddr;
			BestPreviousBlock = PreviousBlock;
			BestBlock = CurrentBlock;
			break;
		}
		// if we carve out a size-aligned block... is it still past the end of this
		// block's free header?
		else if ( (char*)RBodyToHdr(AlignedAddr)
			>= (char*)CurrentBlock+sizeof(R_FREE) )
		{
			/*
			 * there's enough room to allocate our size-aligned memory out
			 * of this block, see if it's a better choice than any previous
			 * finds
			 */
			if ( BestBlock == NULL
				|| BestBlock->Size > CurrentBlock->Size )
			{
				BestAlignedAddr = AlignedAddr;
				BestPreviousBlock = PreviousBlock;
				BestBlock = CurrentBlock;
			}
		}

		PreviousBlock = CurrentBlock;
		CurrentBlock = CurrentBlock->NextFree;
	}

	/*
	 * We didn't find anything suitable at all.
	 */
	if (BestBlock == NULL)
	{
		if ( !que_reclaimed )
		{
			// reclaim que: dump every parked block back into the free pool
			// (merging neighbors) and retry the search once
			int i, j;
			for ( i = 0; i < R_QUECOUNT; i++ )
			{
				for ( j = 0; j < 3; j++ )
				{
					while ( (BestBlock = (PR_FREE)RQueRemove ( &pool->Que[i][j] )) )
					{
						RPoolReclaim ( pool, BestBlock );
					}
				}
			}

			que_reclaimed = 1;
			goto try_again;
		}
		DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
			queBytes );
		R_RELEASE_MUTEX(pool);
		return NULL;
	}
	/*
	 * we found a best block. If Addr isn't already aligned, we've pre-qualified that
	 * there's room at the beginning of the block for a free block...
	 */
	{
		void* Addr = RHdrToBody(BestBlock);
		if ( BestAlignedAddr != Addr )
		{
			PR_FREE NewFreeBlock = RFreeSplit (
				pool,
				BestBlock,
				(char*)RBodyToHdr(BestAlignedAddr) - (char*)BestBlock );
			ASSERT ( BestAlignedAddr > Addr );

			//DPRINT ( "breaking off preceding bytes into their own block...\n" );
			/*DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
				NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );*/

			/* we want the following code to use our size-aligned block */
			BestPreviousBlock = BestBlock;
			BestBlock = NewFreeBlock;

			//VerifyPagedPool();
		}
	}
	/*
	 * Is there enough space to create a second block from the unused portion.
	 */
	if ( (BestBlock->Size - BlockSize) > sizeof(R_FREE) )
	{
		/*DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
			BestBlock, BestBlock->Size, BlockSize, NewSize );*/

		/*
		 * Create the new free block.
		 */
		NextBlock = RFreeSplit ( pool, BestBlock, BlockSize );
		//ASSERT_SIZE ( NextBlock->Size );
	}
	/*
	 * Remove the selected block from the list of free blocks.
	 */
	//DPRINT ( "Removing selected block from free block list\n" );
	RPoolRemoveFree ( pool, BestBlock );
	/*
	 * Create the new used block header.
	 */
	NewBlock = (PR_USED)BestBlock;
	RiUsedInit ( NewBlock, Tag );

	R_RELEASE_MUTEX(pool);

	/* RtlZeroMemory(RHdrToBody(NewBlock), NumberOfBytes);*/

	RiUsedInitRedZone ( NewBlock, NumberOfBytes );

	return RHdrToBody(NewBlock);
}
871
872 static void
873 RPoolFree ( PR_POOL pool, void* Addr )
874 {
875 PR_USED UsedBlock;
876 rulong UsedSize;
877 PR_FREE FreeBlock;
878 rulong UserSize;
879 int que;
880
881 ASSERT(pool);
882 if ( !Addr )
883 {
884 R_DEBUG("Attempt to free NULL ptr, initiating Red Zone Check\n" );
885 R_ACQUIRE_MUTEX(pool);
886 RPoolRedZoneCheck ( pool, __FILE__, __LINE__ );
887 R_RELEASE_MUTEX(pool);
888 return;
889 }
890 R_ASSERT_PTR(pool,Addr);
891
892 UsedBlock = RBodyToHdr(Addr);
893 UsedSize = UsedBlock->Size;
894 FreeBlock = (PR_FREE)UsedBlock;
895 #if R_RZ
896 UserSize = UsedBlock->UserSize;
897 #else
898 UserSize = UsedSize - sizeof(R_USED) - 2*R_RZ;
899 #endif//R_RZ
900
901 RUsedRedZoneCheck ( pool, UsedBlock, Addr, __FILE__, __LINE__ );
902
903 #if R_RZ
904 memset ( Addr, 0xCD, UsedBlock->UserSize );
905 #endif
906
907 que = RQueWhich ( UserSize );
908 if ( que >= 0 )
909 {
910 int queBytes = 16 << que;
911 ASSERT( queBytes >= UserSize );
912 if ( que >= 0 )
913 {
914 int align = 0;
915 if ( R_ROUND_UP(Addr,pool->Alignments[2]) == Addr )
916 align = 2;
917 else if ( R_ROUND_UP(Addr,pool->Alignments[1]) == Addr )
918 align = 1;
919 R_ACQUIRE_MUTEX(pool);
920 RQueAdd ( &pool->Que[que][align], UsedBlock );
921 R_RELEASE_MUTEX(pool);
922 return;
923 }
924 }
925
926 R_ACQUIRE_MUTEX(pool);
927 RPoolReclaim ( pool, FreeBlock );
928 R_RELEASE_MUTEX(pool);
929 }
930
931 static void
932 RPoolDumpByTag ( PR_POOL pool, rulong Tag )
933 {
934 PR_USED Block = (PR_USED)pool->UserBase;
935 PR_USED NextBlock;
936 int count = 0;
937 char tag[5];
938
939 // TODO FIXME - should we validate params or ASSERT_IRQL?
940 R_DEBUG ( "PagedPool Dump by tag '%s'\n", RFormatTag(Tag,tag) );
941 R_DEBUG ( " -BLOCK-- --SIZE--\n" );
942
943 R_ACQUIRE_MUTEX(pool);
944 for ( ;; )
945 {
946 if ( Block->Status == 1 && Block->Tag == Tag )
947 {
948 R_DEBUG ( " %08X %08X\n", Block, Block->Size );
949 ++count;
950 }
951 NextBlock = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
952 if ( !NextBlock )
953 break;
954 ASSERT ( NextBlock->PrevSize == Block->Size );
955 Block = NextBlock;
956 }
957 R_RELEASE_MUTEX(pool);
958
959 R_DEBUG ( "Entries found for tag '%s': %i\n", tag, count );
960 }
961
962 rulong
963 RPoolQueryTag ( void* Addr )
964 {
965 PR_USED Block = RBodyToHdr(Addr);
966 // TODO FIXME - should we validate params?
967 #if R_MAGIC
968 if ( Block->UsedMagic != R_USED_MAGIC )
969 return 0xDEADBEEF;
970 #endif//R_MAGIC
971 if ( Block->Status != 1 )
972 return 0xDEADBEEF;
973 return Block->Tag;
974 }
975
976 void
977 RPoolStats ( PR_POOL pool )
978 {
979 int free=0, used=0, qued=0;
980 PR_USED Block = (PR_USED)pool->UserBase;
981
982 R_ACQUIRE_MUTEX(pool);
983 while ( Block )
984 {
985 switch ( Block->Status )
986 {
987 case 0:
988 ++free;
989 break;
990 case 1:
991 ++used;
992 break;
993 case 2:
994 ++qued;
995 break;
996 default:
997 ASSERT ( !"Invalid Status for Block in pool!" );
998 }
999 Block = (PR_USED)RNextBlock(pool,(PR_FREE)Block);
1000 }
1001 R_RELEASE_MUTEX(pool);
1002
1003 R_DEBUG ( "Pool Stats: Free=%i, Used=%i, Qued=%i, Total=%i\n", free, used, qued, (free+used+qued) );
1004 }
1005
#ifdef R_LARGEST_ALLOC_POSSIBLE
/*
 * Compute the largest user size a single allocation with the given
 * alignment selector could currently satisfy, accounting for the header,
 * redzones and the que size-class rounding RPoolAlloc applies.
 */
static rulong
RPoolLargestAllocPossible ( PR_POOL pool, int align )
{
	int Alignment = pool->Alignments[align];
	rulong LargestUserSize = 0;
	PR_FREE Block = (PR_FREE)pool->UserBase;
	while ( Block )
	{
		if ( Block->Status != 1 )
		{
			void* Addr, *AlignedAddr;
			rulong BlockMaxUserSize;
			int cue, cueBytes;

			Addr = (char*)Block + sizeof(R_USED) + R_RZ;
			AlignedAddr = R_ROUND_UP(Addr,Alignment);
			if ( Addr != AlignedAddr )
				Addr = R_ROUND_UP((char*)Block + sizeof(R_FREE) + sizeof(R_USED) + R_RZ, Alignment );
			BlockMaxUserSize = (char*)Block + Block->Size - (char*)Addr - R_RZ;
			cue = RQueWhich ( BlockMaxUserSize );
			if ( cue >= 0 )
			{
				cueBytes = 16 << cue;
				/* BUGFIX: a stray ';' after this 'if' made the rounding
				 * below unconditional, understating the result whenever
				 * the block fit its size class exactly. Also compare in
				 * unsigned to match BlockMaxUserSize. */
				if ( (rulong)cueBytes > BlockMaxUserSize )
				{
					if ( !cue )
						BlockMaxUserSize = 0;
					else
						BlockMaxUserSize = 16 << (cue-1);
				}
			}
			if ( BlockMaxUserSize > LargestUserSize )
				LargestUserSize = BlockMaxUserSize;
		}
		Block = RNextBlock ( pool, Block );
	}
	return LargestUserSize;
}
#endif//R_LARGEST_ALLOC_POSSIBLE
1046
1047 #endif//RPOOLMGR_H