+ PVOID PoolToFree;
+ PEX_RUNDOWN_REF RunRef;
+ ULONG RunRefSize, Count, Offset;
+ PEX_RUNDOWN_REF_CACHE_AWARE RunRefCacheAware;
+
+ /* Pageable code path: both pool allocations below may fault */
+ PAGED_CODE();
+
+ /* Allocate the master structure (caller's pool type and tag).
+  * Ownership: on success the caller frees it via the matching release
+  * routine; on failure everything allocated here is cleaned up locally. */
+ RunRefCacheAware = ExAllocatePoolWithTag(PoolType, sizeof(EX_RUNDOWN_REF_CACHE_AWARE), Tag);
+ if (RunRefCacheAware == NULL)
+ {
+     return NULL;
+ }
+
+ /* Compute the size of each runref: one slot per processor, captured at
+  * allocation time. On UP a bare EX_RUNDOWN_REF suffices; on SMP each
+  * slot is padded to the recommended shared-data alignment (presumably
+  * the largest cache-line size — see KeGetRecommendedSharedDataAlignment
+  * docs) so per-CPU refs don't false-share a cache line. */
+ RunRefCacheAware->Number = KeNumberProcessors;
+ if (KeNumberProcessors <= 1)
+ {
+     RunRefSize = sizeof(EX_RUNDOWN_REF);
+ }
+ else
+ {
+     RunRefSize = KeGetRecommendedSharedDataAlignment();
+     /* Alignment must be a power of two for the padding math to be valid */
+     ASSERT((RunRefSize & (RunRefSize - 1)) == 0);
+ }
+
+ /* It must at least hold a EX_RUNDOWN_REF structure */
+ ASSERT(sizeof(EX_RUNDOWN_REF) <= RunRefSize);
+ RunRefCacheAware->RunRefSize = RunRefSize;
+
+ /* Allocate our runref pool (one padded slot per processor).
+  * NOTE(review): RunRefSize * Number is ULONG arithmetic; overflow is
+  * implausible for realistic processor counts but unchecked here. */
+ PoolToFree = ExAllocatePoolWithTag(PoolType, RunRefSize * RunRefCacheAware->Number, Tag);
+ if (PoolToFree == NULL)
+ {
+     /* Undo the master allocation before bailing out */
+     ExFreePoolWithTag(RunRefCacheAware, Tag);
+     return NULL;
+ }
+
+ /* On SMP, check for alignment.
+  * NOTE(review): until this is implemented, the pool block is only
+  * guaranteed the allocator's natural alignment — presumably less than
+  * the shared-data alignment for small blocks — so the padded slots may
+  * still straddle cache lines. Confirm against the pool allocator's
+  * alignment guarantees when fixing. */
+ if (RunRefCacheAware->Number > 1)
+ {
+     /* FIXME: properly align run refs */
+     UNIMPLEMENTED;
+ }
+
+ /* RunRefs points at the (to-be-aligned) slot array; PoolToFree keeps the
+  * raw allocation base so the release routine can free it even if RunRefs
+  * is later offset for alignment. */
+ RunRefCacheAware->RunRefs = PoolToFree;
+ RunRefCacheAware->PoolToFree = PoolToFree;
+
+ /* And initialize runref: Count = 0 marks each per-CPU ref inactive
+  * (no outstanding references, rundown not started).
+  * NOTE(review): the Number != 0 guard is defensive — KeNumberProcessors
+  * is at least 1 on a running system, so the loop always executes. */
+ if (RunRefCacheAware->Number != 0)
+ {
+     for (Count = 0; Count < RunRefCacheAware->Number; ++Count)
+     {
+         Offset = RunRefCacheAware->RunRefSize * Count;
+         RunRef = (PEX_RUNDOWN_REF)((ULONG_PTR)RunRefCacheAware->RunRefs + Offset);
+         RunRef->Count = 0;
+     }
+ }
+
+ return RunRefCacheAware;