/* $Id: ppool.c,v 1.36 2004/12/13 20:11:08 arty Exp $
 *
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/ppool.c
 * PURPOSE:         Implements the paged pool
 * PROGRAMMER:      David Welch (welch@mcmail.com)
 * UPDATE HISTORY:
 *                  Created 22/05/98
 */

/* INCLUDES *****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <internal/debug.h>

/* GLOBALS *******************************************************************/

/* Define to enable strict checking of the paged pool on every allocation */
/* #define ENABLE_VALIDATE_POOL */

#undef ASSERT
#define ASSERT(x) if (!(x)) {DbgPrint("Assertion "#x" failed at %s:%d\n", __FILE__,__LINE__); KeBugCheck(0); }
#define ASSERT_SIZE(n) ASSERT ( (n) <= MmPagedPoolSize && (n) > 0 )
#define IS_PPOOL_PTR(p) (((size_t)(p)) >= ((size_t)MmPagedPoolBase) && ((size_t)(p)) < ((size_t)MmPagedPoolBase + MmPagedPoolSize))
#define ASSERT_PTR(p) ASSERT ( IS_PPOOL_PTR(p) )

// to disable buffer over/under-run detection, set the following macro to 0
#if !defined(DBG) && !defined(KDBG)
#define MM_PPOOL_REDZONE_BYTES 0
#else
#define MM_PPOOL_REDZONE_BYTES 4
#define MM_PPOOL_REDZONE_LOVALUE 0x87
#define MM_PPOOL_REDZONE_HIVALUE 0xA5
#define MM_PPOOL_FREEMAGIC (ULONG)(('F'<<0) + ('r'<<8) + ('E'<<16) + ('e'<<24))
#define MM_PPOOL_USEDMAGIC (ULONG)(('u'<<0) + ('S'<<8) + ('e'<<16) + ('D'<<24))
#define MM_PPOOL_LASTOWNER_ENTRIES 3
#endif

typedef struct _MM_PPOOL_FREE_BLOCK_HEADER
{
#if MM_PPOOL_REDZONE_BYTES
   ULONG FreeMagic;
#endif//MM_PPOOL_REDZONE_BYTES
   ULONG Size;
   struct _MM_PPOOL_FREE_BLOCK_HEADER* NextFree;
#if MM_PPOOL_REDZONE_BYTES
   ULONG LastOwnerStack[MM_PPOOL_LASTOWNER_ENTRIES];
#endif//MM_PPOOL_REDZONE_BYTES
}
MM_PPOOL_FREE_BLOCK_HEADER, *PMM_PPOOL_FREE_BLOCK_HEADER;

typedef struct _MM_PPOOL_USED_BLOCK_HEADER
{
#if MM_PPOOL_REDZONE_BYTES
   ULONG UsedMagic;
#endif//MM_PPOOL_REDZONE_BYTES
   ULONG Size;
#if MM_PPOOL_REDZONE_BYTES
   ULONG UserSize; // how many bytes the user actually asked for...
#endif//MM_PPOOL_REDZONE_BYTES
   struct _MM_PPOOL_USED_BLOCK_HEADER* NextUsed;
   ULONG Tag;
#if MM_PPOOL_REDZONE_BYTES
   ULONG LastOwnerStack[MM_PPOOL_LASTOWNER_ENTRIES];
#endif//MM_PPOOL_REDZONE_BYTES
}
MM_PPOOL_USED_BLOCK_HEADER, *PMM_PPOOL_USED_BLOCK_HEADER;

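/*
 * Block layout implied by block_to_address()/address_to_block() and the
 * BlockSize computation in ExAllocatePagedPoolWithTag (redzones enabled):
 *
 *   [MM_PPOOL_USED_BLOCK_HEADER][LO redzone][user data][HI redzone]
 *
 * Block->Size covers the whole extent starting at the header (it may exceed
 * header + redzones + UserSize when a block is not split); UserSize records
 * only the bytes the caller asked for. With MM_PPOOL_REDZONE_BYTES defined
 * to 0 the redzones and the magic/owner-stack fields vanish entirely.
 */
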
PVOID MmPagedPoolBase;
ULONG MmPagedPoolSize;
ULONG MmTotalPagedPoolQuota = 0;
static FAST_MUTEX MmPagedPoolLock;
static PMM_PPOOL_FREE_BLOCK_HEADER MmPagedPoolFirstFreeBlock;
static PMM_PPOOL_USED_BLOCK_HEADER MmPagedPoolFirstUsedBlock;
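
/*
 * The free list is kept sorted by address so that ExFreePagedPool can merge
 * a freed block with adjacent neighbours; the used list is unordered and new
 * allocations are simply pushed at its head.
 */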

/* FUNCTIONS *****************************************************************/

inline static void* block_to_address ( PVOID blk )
/*
 * FUNCTION: Translate a block header address to the corresponding block
 * address (internal)
 */
{
   return ( (void *) ((char*)blk + sizeof(MM_PPOOL_USED_BLOCK_HEADER) + MM_PPOOL_REDZONE_BYTES) );
}

inline static PMM_PPOOL_USED_BLOCK_HEADER address_to_block(PVOID addr)
{
   return (PMM_PPOOL_USED_BLOCK_HEADER)
          ( ((char*)addr) - sizeof(MM_PPOOL_USED_BLOCK_HEADER) - MM_PPOOL_REDZONE_BYTES );
}

VOID INIT_FUNCTION
MmInitializePagedPool(VOID)
{
   MmPagedPoolFirstFreeBlock = (PMM_PPOOL_FREE_BLOCK_HEADER)MmPagedPoolBase;
   /*
    * We are still at a high IRQL level at this point so explicitly commit
    * the first page of the paged pool before writing the first block header.
    */
   MmCommitPagedPoolAddress((PVOID)MmPagedPoolFirstFreeBlock, FALSE);
   MmPagedPoolFirstFreeBlock->Size = MmPagedPoolSize;
   MmPagedPoolFirstFreeBlock->NextFree = NULL;

#if MM_PPOOL_REDZONE_BYTES
   MmPagedPoolFirstFreeBlock->FreeMagic = MM_PPOOL_FREEMAGIC;
   {
      int i;
      for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
         MmPagedPoolFirstFreeBlock->LastOwnerStack[i] = 0;
   }

   MmPagedPoolFirstUsedBlock = NULL;
#endif//MM_PPOOL_REDZONE_BYTES

   ExInitializeFastMutex(&MmPagedPoolLock);
}
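
/*
 * After MmInitializePagedPool the pool consists of a single free block
 * spanning MmPagedPoolSize bytes; the allocation and free paths below split
 * and coalesce it as required.
 */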

#ifdef ENABLE_VALIDATE_POOL
static void VerifyPagedPool ( int line )
{
   PMM_PPOOL_FREE_BLOCK_HEADER p = MmPagedPoolFirstFreeBlock;
   int count = 0;
   DPRINT ( "VerifyPagedPool(%i):\n", line );
   while ( p )
   {
      DPRINT ( "  0x%x: %lu bytes (next 0x%x)\n", p, p->Size, p->NextFree );
#if MM_PPOOL_REDZONE_BYTES
      ASSERT ( p->FreeMagic == MM_PPOOL_FREEMAGIC );
#endif//MM_PPOOL_REDZONE_BYTES
      ASSERT_PTR(p);
      ASSERT_SIZE(p->Size);
      count++;
      p = p->NextFree;
   }
   DPRINT ( "VerifyPagedPool(%i): (%i blocks)\n", line, count );
}
#define VerifyPagedPool() VerifyPagedPool(__LINE__)
#else
#define VerifyPagedPool()
#endif

BOOLEAN STDCALL
KeRosPrintAddress(PVOID address);

#if !MM_PPOOL_REDZONE_BYTES
#define MmpRedZoneCheck(pUsed,Addr,file,line)
#else//MM_PPOOL_REDZONE_BYTES
static VOID FASTCALL
MmpRedZoneCheck ( PMM_PPOOL_USED_BLOCK_HEADER pUsed, PUCHAR Addr, const char* file, int line )
{
   int i;
   PUCHAR AddrEnd = Addr + pUsed->UserSize;
   BOOL bLow = TRUE;
   BOOL bHigh = TRUE;

   ASSERT_PTR(Addr);
   if ( pUsed->UsedMagic == MM_PPOOL_FREEMAGIC )
   {
      PMM_PPOOL_FREE_BLOCK_HEADER pFree = (PMM_PPOOL_FREE_BLOCK_HEADER)pUsed;
      DPRINT1 ( "Double-free detected for Block 0x%x (kthread=0x%x)!\n", Addr, KeGetCurrentThread() );
      DbgPrint ( "First Free Stack Frames:" );
      for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
      {
         if ( pFree->LastOwnerStack[i] != 0xDEADBEEF )
         {
            DbgPrint(" ");
            if (!KeRosPrintAddress ((PVOID)pFree->LastOwnerStack[i]) )
            {
               DbgPrint("<%X>", pFree->LastOwnerStack[i] );
            }
         }
      }
      DbgPrint ( "\n" );
      KEBUGCHECK(BAD_POOL_HEADER);
   }
   if ( pUsed->UsedMagic != MM_PPOOL_USEDMAGIC )
   {
      DPRINT1 ( "Bad magic in Block 0x%x!\n", Addr );
      KEBUGCHECK(BAD_POOL_HEADER);
   }
   ASSERT_SIZE(pUsed->Size);
   ASSERT_SIZE(pUsed->UserSize);
   ASSERT_PTR(AddrEnd);
   Addr -= MM_PPOOL_REDZONE_BYTES; // this is to simplify indexing below...
   for ( i = 0; i < MM_PPOOL_REDZONE_BYTES && bLow && bHigh; i++ )
   {
      bLow = bLow && ( Addr[i] == MM_PPOOL_REDZONE_LOVALUE );
      bHigh = bHigh && ( AddrEnd[i] == MM_PPOOL_REDZONE_HIVALUE );
   }
   if ( !bLow || !bHigh )
   {
      const char* violation = "High and Low-side";
      if ( bHigh ) // high is okay, so it was just low failed
         violation = "Low-side";
      else if ( bLow ) // low side is okay, so it was just high failed
         violation = "High-side";
      DbgPrint("%s(%i): %s redzone violation detected for paged pool address 0x%x\n",
               file, line, violation, Addr );

      DbgPrint ( "UsedMagic 0x%x, Tag 0x%x, LoZone ",
                 pUsed->UsedMagic,
                 pUsed->Tag);

      for ( i = 0; i < MM_PPOOL_REDZONE_BYTES; i++ )
         DbgPrint ( "%02x", Addr[i] );
      DbgPrint ( ", HiZone " );
      for ( i = 0; i < MM_PPOOL_REDZONE_BYTES; i++ )
         DbgPrint ( "%02x", AddrEnd[i] );
      DbgPrint ( "\n" );

      DbgPrint ( "Last Owner Stack Frames:" );
      for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
      {
         if ( pUsed->LastOwnerStack[i] != 0xDEADBEEF )
         {
            DbgPrint(" ");
            if (!KeRosPrintAddress ((PVOID)pUsed->LastOwnerStack[i]) )
            {
               DbgPrint("<%X>", pUsed->LastOwnerStack[i] );
            }
         }
      }
      DbgPrint ( "\n" );

      KEBUGCHECK(BAD_POOL_HEADER);
   }
}
#endif//MM_PPOOL_REDZONE_BYTES

VOID STDCALL
MmDbgPagedPoolRedZoneCheck ( const char* file, int line )
{
#if MM_PPOOL_REDZONE_BYTES
   PMM_PPOOL_USED_BLOCK_HEADER pUsed = MmPagedPoolFirstUsedBlock;

   while ( pUsed )
   {
      MmpRedZoneCheck ( pUsed, block_to_address(pUsed), file, line );
      pUsed = pUsed->NextUsed;
   }
#endif//MM_PPOOL_REDZONE_BYTES
}
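
/*
 * Debugging sweep: calling MmDbgPagedPoolRedZoneCheck(__FILE__, __LINE__)
 * from a suspect code path checks every live paged-pool allocation for
 * redzone damage and double frees; with MM_PPOOL_REDZONE_BYTES set to 0 the
 * sweep is a no-op.
 */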

/**********************************************************************
 * NAME                                                 INTERNAL
 *      ExAllocatePagedPoolWithTag@12
 *
 * DESCRIPTION
 *      Allocates a block of paged pool using a best-fit search over the
 *      free list, splitting the chosen free block when the leftover space
 *      is large enough to remain useful.
 *
 * ARGUMENTS
 *      PoolType:      PagedPool or PagedPoolCacheAligned.
 *      NumberOfBytes: Number of bytes to allocate.
 *      Tag:           Four-character tag recorded with the allocation.
 *
 * RETURN VALUE
 *      The address of the allocated memory, or NULL if no suitable free
 *      block was found.
 */
PVOID STDCALL
ExAllocatePagedPoolWithTag (IN POOL_TYPE PoolType,
                            IN ULONG NumberOfBytes,
                            IN ULONG Tag)
{
   PMM_PPOOL_FREE_BLOCK_HEADER BestBlock;
   PMM_PPOOL_FREE_BLOCK_HEADER CurrentBlock;
   ULONG BlockSize;
   PMM_PPOOL_USED_BLOCK_HEADER NewBlock;
   PMM_PPOOL_FREE_BLOCK_HEADER NextBlock;
   PMM_PPOOL_FREE_BLOCK_HEADER PreviousBlock;
   PMM_PPOOL_FREE_BLOCK_HEADER BestPreviousBlock;
   PVOID BlockAddress;
   ULONG Alignment;

   ASSERT_IRQL(APC_LEVEL);

   ExAcquireFastMutex(&MmPagedPoolLock);

   /*
    * Don't bother allocating anything for a zero-byte block.
    */
   if (NumberOfBytes == 0)
   {
      MmDbgPagedPoolRedZoneCheck(__FILE__,__LINE__);
      ExReleaseFastMutex(&MmPagedPoolLock);
      return(NULL);
   }

   DPRINT ( "ExAllocatePagedPoolWithTag(%i,%lu,%lu)\n", PoolType, NumberOfBytes, Tag );
   VerifyPagedPool();

   if (NumberOfBytes >= PAGE_SIZE)
   {
      Alignment = PAGE_SIZE;
   }
   else if (PoolType == PagedPoolCacheAligned)
   {
      Alignment = MM_CACHE_LINE_SIZE;
   }
   else
   {
      Alignment = MM_POOL_ALIGNMENT;
   }

   /*
    * Calculate the total number of bytes we will need.
    */
   BlockSize = NumberOfBytes + sizeof(MM_PPOOL_USED_BLOCK_HEADER) + 2*MM_PPOOL_REDZONE_BYTES;
   if (BlockSize < sizeof(MM_PPOOL_FREE_BLOCK_HEADER))
   {
      /* At least we need the size of the free block header. */
      BlockSize = sizeof(MM_PPOOL_FREE_BLOCK_HEADER);
   }


   /*
    * Find the best-fitting block.
    */
   PreviousBlock = NULL;
   BestPreviousBlock = BestBlock = NULL;
   CurrentBlock = MmPagedPoolFirstFreeBlock;
   if ( Alignment > 0 )
   {
      PVOID BestAlignedAddr = NULL;
      while ( CurrentBlock != NULL )
      {
         PVOID Addr = block_to_address(CurrentBlock);
         PVOID CurrentBlockEnd = (char*)CurrentBlock + CurrentBlock->Size;
         /* calculate last size-aligned address available within this block */
         PVOID AlignedAddr = MM_ROUND_DOWN((char*)CurrentBlockEnd-NumberOfBytes-MM_PPOOL_REDZONE_BYTES, Alignment);
         ASSERT ( (char*)AlignedAddr+NumberOfBytes+MM_PPOOL_REDZONE_BYTES <= (char*)CurrentBlockEnd );

         /* special case, this address is already size-aligned, and the right size */
         if ( Addr == AlignedAddr )
         {
            BestAlignedAddr = AlignedAddr;
            BestPreviousBlock = PreviousBlock;
            BestBlock = CurrentBlock;
            break;
         }
         else if ( Addr < (PVOID)address_to_block(AlignedAddr) )
         {
            /*
             * there's enough room to allocate our size-aligned memory out
             * of this block, see if it's a better choice than any previous
             * finds
             */
            if ( BestBlock == NULL || BestBlock->Size > CurrentBlock->Size )
            {
               BestAlignedAddr = AlignedAddr;
               BestPreviousBlock = PreviousBlock;
               BestBlock = CurrentBlock;
            }
         }

         PreviousBlock = CurrentBlock;
         CurrentBlock = CurrentBlock->NextFree;
      }

      /*
       * We found a best block; can/should we chop a few bytes off the
       * beginning into a separate memory block?
       */
      if ( BestBlock != NULL )
      {
         PVOID Addr = block_to_address(BestBlock);
         if ( BestAlignedAddr != Addr )
         {
            PMM_PPOOL_FREE_BLOCK_HEADER NewFreeBlock =
               (PMM_PPOOL_FREE_BLOCK_HEADER)address_to_block(BestAlignedAddr);
            ASSERT ( BestAlignedAddr > Addr );
            NewFreeBlock->Size = (char*)Addr + BestBlock->Size - (char*)BestAlignedAddr;
#if MM_PPOOL_REDZONE_BYTES
            NewFreeBlock->FreeMagic = MM_PPOOL_FREEMAGIC;
#endif//MM_PPOOL_REDZONE_BYTES
            ASSERT_SIZE(NewFreeBlock->Size);
            /* a block's Size spans its whole extent, starting at its header */
            BestBlock->Size = (size_t)NewFreeBlock - (size_t)BestBlock;
            ASSERT_SIZE(BestBlock->Size);

            DPRINT ( "breaking off preceding bytes into their own block...\n" );
            DPRINT ( "NewFreeBlock 0x%x Size %lu (Old Block's new size %lu) NextFree 0x%x\n",
                     NewFreeBlock, NewFreeBlock->Size, BestBlock->Size, BestBlock->NextFree );

            /* insert the new block into the chain */
            NewFreeBlock->NextFree = BestBlock->NextFree;
            BestBlock->NextFree = NewFreeBlock;

            /* we want the following code to use our size-aligned block */
            BestPreviousBlock = BestBlock;
            BestBlock = NewFreeBlock;

            //VerifyPagedPool();
         }
      }
   }
   /*
    * non-size-aligned block search
    */
   else
      while ( CurrentBlock != NULL )
      {
         if ( CurrentBlock->Size >= BlockSize
              && ( BestBlock == NULL || BestBlock->Size > CurrentBlock->Size )
            )
         {
            BestPreviousBlock = PreviousBlock;
            BestBlock = CurrentBlock;
         }

         PreviousBlock = CurrentBlock;
         CurrentBlock = CurrentBlock->NextFree;
      }

   /*
    * We didn't find anything suitable at all.
    */
   if (BestBlock == NULL)
   {
      DPRINT1("Trying to allocate %lu bytes from paged pool - nothing suitable found, returning NULL\n",
              NumberOfBytes );
      ExReleaseFastMutex(&MmPagedPoolLock);
      return(NULL);
   }

   DPRINT("BestBlock 0x%x NextFree 0x%x\n", BestBlock, BestBlock->NextFree );

   //VerifyPagedPool();

   /*
    * Is there enough space to create a second block from the unused portion?
    */
   if ( BestBlock->Size > BlockSize
        && (BestBlock->Size - BlockSize) > sizeof(MM_PPOOL_FREE_BLOCK_HEADER)
      )
   {
      ULONG NewSize = BestBlock->Size - BlockSize;
      ASSERT_SIZE ( NewSize );

      //DPRINT("creating 2nd block from unused portion\n");
      DPRINT("BestBlock 0x%x Size 0x%x BlockSize 0x%x NewSize 0x%x\n",
             BestBlock, BestBlock->Size, BlockSize, NewSize );

      /*
       * Create the new free block.
       */
      //DPRINT("creating the new free block");
      NextBlock = (PMM_PPOOL_FREE_BLOCK_HEADER)((char*)BestBlock + BlockSize);
      //DPRINT(".");
      NextBlock->Size = NewSize;
#if MM_PPOOL_REDZONE_BYTES
      NextBlock->FreeMagic = MM_PPOOL_FREEMAGIC;
#endif//MM_PPOOL_REDZONE_BYTES
      ASSERT_SIZE ( NextBlock->Size );
      //DPRINT(".");
      NextBlock->NextFree = BestBlock->NextFree;
      //DPRINT(".\n");

      /*
       * Replace the old free block with it.
       */
      //DPRINT("replacing old free block with it");
      if (BestPreviousBlock == NULL)
      {
         //DPRINT("(from beginning)");
         MmPagedPoolFirstFreeBlock = NextBlock;
      }
      else
      {
         //DPRINT("(from previous)");
         BestPreviousBlock->NextFree = NextBlock;
      }
      //DPRINT(".\n");

      /*
       * Create the new used block header.
       */
      //DPRINT("create new used block header");
      NewBlock = (PMM_PPOOL_USED_BLOCK_HEADER)BestBlock;
      //DPRINT(".");
      NewBlock->Size = BlockSize;
#if MM_PPOOL_REDZONE_BYTES
      {
         PULONG Frame;
         int i;
#if defined __GNUC__
         __asm__("mov %%ebp, %%ebx" : "=b" (Frame) : );
#elif defined(_MSC_VER)
         __asm mov [Frame], ebp
#endif

         NewBlock->UsedMagic = MM_PPOOL_USEDMAGIC;

         Frame = (PULONG)Frame[0]; // step out of ExAllocatePagedPoolWithTag
         for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
         {
            if ( Frame == 0 || (ULONG)Frame == 0xDEADBEEF )
               NewBlock->LastOwnerStack[i] = 0xDEADBEEF;
            else
            {
               //DbgPrint ( " 0x%x", Frame[1] );
               NewBlock->LastOwnerStack[i] = Frame[1];
               Frame = (PULONG)Frame[0];
            }
         }
      }
#endif//MM_PPOOL_REDZONE_BYTES
      ASSERT_SIZE ( NewBlock->Size );
      //DPRINT(".\n");
   }
   else
   {
      ULONG NewSize = BestBlock->Size;

      /*
       * Remove the selected block from the list of free blocks.
       */
      //DPRINT ( "Removing selected block from free block list\n" );
      if (BestPreviousBlock == NULL)
      {
         MmPagedPoolFirstFreeBlock = BestBlock->NextFree;
      }
      else
      {
         BestPreviousBlock->NextFree = BestBlock->NextFree;
      }

      /*
       * Set up the header of the new block
       */
      NewBlock = (PMM_PPOOL_USED_BLOCK_HEADER)BestBlock;
      NewBlock->Size = NewSize;
#if MM_PPOOL_REDZONE_BYTES
      {
         PULONG Frame;
         int i;
#if defined __GNUC__
         __asm__("mov %%ebp, %%ebx" : "=b" (Frame) : );
#elif defined(_MSC_VER)
         __asm mov [Frame], ebp
#endif

         NewBlock->UsedMagic = MM_PPOOL_USEDMAGIC;

         Frame = (PULONG)Frame[0]; // step out of ExAllocatePagedPoolWithTag
         for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
         {
            if ( Frame == 0 || (ULONG)Frame == 0xDEADBEEF )
               NewBlock->LastOwnerStack[i] = 0xDEADBEEF;
            else
            {
               //DbgPrint ( " 0x%x", Frame[1] );
               NewBlock->LastOwnerStack[i] = Frame[1];
               Frame = (PULONG)Frame[0];
            }
         }
      }
#endif//MM_PPOOL_REDZONE_BYTES
      ASSERT_SIZE ( NewBlock->Size );
   }

   // now add the block to the used block list
   NewBlock->NextUsed = MmPagedPoolFirstUsedBlock;
   MmPagedPoolFirstUsedBlock = NewBlock;

   NewBlock->Tag = Tag;

   VerifyPagedPool();

   ExReleaseFastMutex(&MmPagedPoolLock);

   BlockAddress = block_to_address ( NewBlock );
   /*   RtlZeroMemory(BlockAddress, NumberOfBytes);*/

#if MM_PPOOL_REDZONE_BYTES

   NewBlock->UserSize = NumberOfBytes;
   // write out buffer-overrun detection bytes
   {
      PUCHAR Addr = (PUCHAR)BlockAddress;
      //DbgPrint ( "writing buffer-overrun detection bytes" );
      memset ( Addr - MM_PPOOL_REDZONE_BYTES,
               MM_PPOOL_REDZONE_LOVALUE, MM_PPOOL_REDZONE_BYTES );
      memset ( Addr + NewBlock->UserSize, MM_PPOOL_REDZONE_HIVALUE,
               MM_PPOOL_REDZONE_BYTES );
   }

#endif//MM_PPOOL_REDZONE_BYTES

   return(BlockAddress);
}

VOID STDCALL
ExFreePagedPool(IN PVOID Block)
{
   PMM_PPOOL_FREE_BLOCK_HEADER PreviousBlock;
   PMM_PPOOL_USED_BLOCK_HEADER UsedBlock = address_to_block(Block);
   ULONG UsedSize = UsedBlock->Size;
   PMM_PPOOL_FREE_BLOCK_HEADER FreeBlock =
      (PMM_PPOOL_FREE_BLOCK_HEADER)UsedBlock;
   PMM_PPOOL_FREE_BLOCK_HEADER NextBlock;
   PMM_PPOOL_FREE_BLOCK_HEADER NextNextBlock;

   ASSERT_IRQL(APC_LEVEL);

   MmpRedZoneCheck ( UsedBlock, Block, __FILE__, __LINE__ );

#if MM_PPOOL_REDZONE_BYTES
   memset ( Block, 0xCD, UsedBlock->UserSize );
#endif

   ExAcquireFastMutex(&MmPagedPoolLock);

   // remove from used list...
   {
      PMM_PPOOL_USED_BLOCK_HEADER pPrev = MmPagedPoolFirstUsedBlock;
      if ( pPrev == UsedBlock )
      {
         // special-case, our freeing block is first in list...
         MmPagedPoolFirstUsedBlock = pPrev->NextUsed;
      }
      else
      {
         while ( pPrev && pPrev->NextUsed != UsedBlock )
            pPrev = pPrev->NextUsed;
         // if this assert fails - memory has been corrupted
         // ( or I have a logic error...! )
         ASSERT ( pPrev && pPrev->NextUsed == UsedBlock );
         pPrev->NextUsed = UsedBlock->NextUsed;
      }
   }

   /*
    * Begin setting up the newly freed block's header.
    */
   FreeBlock->Size = UsedSize;
#if MM_PPOOL_REDZONE_BYTES
   FreeBlock->FreeMagic = MM_PPOOL_FREEMAGIC;
   {
      PULONG Frame;
      int i;
#if defined __GNUC__
      __asm__("mov %%ebp, %%ebx" : "=b" (Frame) : );
#elif defined(_MSC_VER)
      __asm mov [Frame], ebp
#endif
      //DbgPrint ( "Stack Frames for Free Block 0x%x:", Block );
      Frame = (PULONG)Frame[0]; // step out of ExFreePagedPool
      for ( i = 0; i < MM_PPOOL_LASTOWNER_ENTRIES; i++ )
      {
         if ( Frame == 0 || (ULONG)Frame == 0xDEADBEEF )
            FreeBlock->LastOwnerStack[i] = 0xDEADBEEF;
         else
         {
            //DbgPrint ( " 0x%x", Frame[1] );
            FreeBlock->LastOwnerStack[i] = Frame[1];
            Frame = (PULONG)Frame[0];
         }
      }
      //DbgPrint ( "\n" );
      //KeRosDumpStackFrames ( NULL, 4 );
   }
#endif//MM_PPOOL_REDZONE_BYTES
   ASSERT_SIZE ( FreeBlock->Size );

   /*
    * Find the blocks immediately before and after the newly freed block on the free list.
    */
   PreviousBlock = NULL;
   NextBlock = MmPagedPoolFirstFreeBlock;
   while (NextBlock != NULL && NextBlock < FreeBlock)
   {
      PreviousBlock = NextBlock;
      NextBlock = NextBlock->NextFree;
   }

   /*
    * Insert the freed block on the free list.
    */
   if (PreviousBlock == NULL)
   {
      FreeBlock->NextFree = MmPagedPoolFirstFreeBlock;
      MmPagedPoolFirstFreeBlock = FreeBlock;
   }
   else
   {
      PreviousBlock->NextFree = FreeBlock;
      FreeBlock->NextFree = NextBlock;
   }

   /*
    * If the next block is immediately adjacent to the newly freed one then
    * merge them.
    * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
    */
   if (NextBlock != NULL &&
       ((char*)FreeBlock + FreeBlock->Size) == (char*)NextBlock)
   {
      FreeBlock->Size = FreeBlock->Size + NextBlock->Size;
      ASSERT_SIZE ( FreeBlock->Size );
      FreeBlock->NextFree = NextBlock->NextFree;
      NextNextBlock = NextBlock->NextFree;
   }
   else
   {
      NextNextBlock = NextBlock;
   }

   /*
    * If the previous block is adjacent to the newly freed one then
    * merge them.
    * PLEASE DO NOT WIPE OUT 'MAGIC' OR 'LASTOWNER' DATA FOR MERGED FREE BLOCKS
    */
   if (PreviousBlock != NULL &&
       ((char*)PreviousBlock + PreviousBlock->Size) == (char*)FreeBlock)
   {
      PreviousBlock->Size = PreviousBlock->Size + FreeBlock->Size;
      ASSERT_SIZE ( PreviousBlock->Size );
      PreviousBlock->NextFree = NextNextBlock;
   }

   VerifyPagedPool();

   ExReleaseFastMutex(&MmPagedPoolLock);
}

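/*
 * Debugging helper: walk the used-block list and print every live paged pool
 * allocation carrying the given tag, then report how many were found.
 */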
VOID STDCALL
ExRosDumpPagedPoolByTag ( ULONG Tag )
{
   PMM_PPOOL_USED_BLOCK_HEADER UsedBlock = MmPagedPoolFirstUsedBlock;
   int count = 0;
   char tag[5];

   // TODO FIXME - should we validate params or ASSERT_IRQL?
   *(ULONG*)&tag[0] = Tag;
   tag[4] = 0;
   DbgPrint ( "PagedPool Dump by tag '%s'\n", tag );
   DbgPrint ( "  -BLOCK-- --SIZE--\n" );
   while ( IS_PPOOL_PTR(UsedBlock) )
   {
      if ( UsedBlock->Tag == Tag )
      {
         DbgPrint ( "  %08X %08X\n", UsedBlock, UsedBlock->Size );
         ++count;
      }
      UsedBlock = UsedBlock->NextUsed;
   }
   if ( UsedBlock && !IS_PPOOL_PTR(UsedBlock) )
   {
      DPRINT1 ( "!!NextUsed took me to lala land: 0x%08X\n", UsedBlock );
   }
   DbgPrint ( "Entries found for tag '%s': %i\n", tag, count );
}

ULONG STDCALL
ExRosQueryPagedPoolTag ( PVOID Block )
{
   PMM_PPOOL_USED_BLOCK_HEADER UsedBlock = address_to_block(Block);
   // TODO FIXME - should we validate params or ASSERT_IRQL?
   return UsedBlock->Tag;
}

/* EOF */