*/
if (current->Valid)
{
- TempLength = min(Bcb->CacheSegmentSize, Length);
+ TempLength = min(VACB_MAPPING_GRANULARITY, Length);
memcpy(Buffer, current->BaseAddress, TempLength);
Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);
while ((current2 != NULL) && !current2->Valid && (current_size < MAX_RW_LENGTH))
{
current2 = current2->NextInChain;
- current_size += Bcb->CacheSegmentSize;
+ current_size += VACB_MAPPING_GRANULARITY;
}
/*
while ((current2 != NULL) && !current2->Valid && (current_size < MAX_RW_LENGTH))
{
PVOID address = current2->BaseAddress;
- for (i = 0; i < (Bcb->CacheSegmentSize / PAGE_SIZE); i++, address = RVA(address, PAGE_SIZE))
+ for (i = 0; i < (VACB_MAPPING_GRANULARITY / PAGE_SIZE); i++, address = RVA(address, PAGE_SIZE))
{
*MdlPages++ = MmGetPfnForProcess(NULL, address);
}
current2 = current2->NextInChain;
- current_size += Bcb->CacheSegmentSize;
+ current_size += VACB_MAPPING_GRANULARITY;
}
/*
{
previous = current;
current = current->NextInChain;
- TempLength = min(Bcb->CacheSegmentSize, Length);
+ TempLength = min(VACB_MAPPING_GRANULARITY, Length);
memcpy(Buffer, previous->BaseAddress, TempLength);
Buffer = (PVOID)((ULONG_PTR)Buffer + TempLength);
Length = Length - TempLength;
CcRosReleaseCacheSegment(Bcb, previous, TRUE, FALSE, FALSE);
- current_size += Bcb->CacheSegmentSize;
+ current_size += VACB_MAPPING_GRANULARITY;
}
}
}
SegOffset.QuadPart = CacheSeg->FileOffset;
Size = (ULONG)(CacheSeg->Bcb->AllocationSize.QuadPart - CacheSeg->FileOffset);
- if (Size > CacheSeg->Bcb->CacheSegmentSize)
+ if (Size > VACB_MAPPING_GRANULARITY)
{
- Size = CacheSeg->Bcb->CacheSegmentSize;
+ Size = VACB_MAPPING_GRANULARITY;
}
Mdl = IoAllocateMdl(CacheSeg->BaseAddress, Size, FALSE, FALSE, NULL);
return Status;
}
- if (CacheSeg->Bcb->CacheSegmentSize > Size)
+ if (VACB_MAPPING_GRANULARITY > Size)
{
RtlZeroMemory((char*)CacheSeg->BaseAddress + Size,
- CacheSeg->Bcb->CacheSegmentSize - Size);
+ VACB_MAPPING_GRANULARITY - Size);
}
return STATUS_SUCCESS;
CacheSeg->Dirty = FALSE;
SegOffset.QuadPart = CacheSeg->FileOffset;
Size = (ULONG)(CacheSeg->Bcb->AllocationSize.QuadPart - CacheSeg->FileOffset);
- if (Size > CacheSeg->Bcb->CacheSegmentSize)
+ if (Size > VACB_MAPPING_GRANULARITY)
{
- Size = CacheSeg->Bcb->CacheSegmentSize;
+ Size = VACB_MAPPING_GRANULARITY;
}
//
// Nonpaged pool PDEs in ReactOS must actually be synchronized between the
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
if (!current->Valid &&
- DoSegmentsIntersect(current->FileOffset, Bcb->CacheSegmentSize,
+ DoSegmentsIntersect(current->FileOffset, VACB_MAPPING_GRANULARITY,
ReadOffset, Length))
{
KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
}
- TempLength = ReadOffset % Bcb->CacheSegmentSize;
+ TempLength = ReadOffset % VACB_MAPPING_GRANULARITY;
if (TempLength != 0)
{
- TempLength = min (Length, Bcb->CacheSegmentSize - TempLength);
+ TempLength = min (Length, VACB_MAPPING_GRANULARITY - TempLength);
Status = CcRosRequestCacheSegment(Bcb,
ROUND_DOWN(ReadOffset,
- Bcb->CacheSegmentSize),
+ VACB_MAPPING_GRANULARITY),
&BaseAddress, &Valid, &CacheSeg);
if (!NT_SUCCESS(Status))
{
return FALSE;
}
}
- memcpy (Buffer, (char*)BaseAddress + ReadOffset % Bcb->CacheSegmentSize,
+ memcpy (Buffer, (char*)BaseAddress + ReadOffset % VACB_MAPPING_GRANULARITY,
TempLength);
CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, FALSE, FALSE);
ReadLength += TempLength;
while (Length > 0)
{
- TempLength = min(max(Bcb->CacheSegmentSize, MAX_RW_LENGTH), Length);
+ TempLength = min(max(VACB_MAPPING_GRANULARITY, MAX_RW_LENGTH), Length);
Status = ReadCacheSegmentChain(Bcb, ReadOffset, TempLength, Buffer);
if (!NT_SUCCESS(Status))
{
CacheSeg = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
if (!CacheSeg->Valid &&
- DoSegmentsIntersect(CacheSeg->FileOffset, Bcb->CacheSegmentSize,
+ DoSegmentsIntersect(CacheSeg->FileOffset, VACB_MAPPING_GRANULARITY,
WriteOffset, Length))
{
KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
}
- TempLength = WriteOffset % Bcb->CacheSegmentSize;
+ TempLength = WriteOffset % VACB_MAPPING_GRANULARITY;
if (TempLength != 0)
{
ULONG ROffset;
- ROffset = ROUND_DOWN(WriteOffset, Bcb->CacheSegmentSize);
- TempLength = min (Length, Bcb->CacheSegmentSize - TempLength);
+ ROffset = ROUND_DOWN(WriteOffset, VACB_MAPPING_GRANULARITY);
+ TempLength = min (Length, VACB_MAPPING_GRANULARITY - TempLength);
Status = CcRosRequestCacheSegment(Bcb, ROffset,
&BaseAddress, &Valid, &CacheSeg);
if (!NT_SUCCESS(Status))
return FALSE;
}
}
- memcpy ((char*)BaseAddress + WriteOffset % Bcb->CacheSegmentSize,
+ memcpy ((char*)BaseAddress + WriteOffset % VACB_MAPPING_GRANULARITY,
Buffer, TempLength);
CcRosReleaseCacheSegment(Bcb, CacheSeg, TRUE, TRUE, FALSE);
while (Length > 0)
{
- TempLength = min (Bcb->CacheSegmentSize, Length);
+ TempLength = min (VACB_MAPPING_GRANULARITY, Length);
Status = CcRosRequestCacheSegment(Bcb,
WriteOffset,
&BaseAddress,
{
return FALSE;
}
- if (!Valid && TempLength < Bcb->CacheSegmentSize)
+ if (!Valid && TempLength < VACB_MAPPING_GRANULARITY)
{
if (!NT_SUCCESS(ReadCacheSegment(CacheSeg)))
{
CacheSeg = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
if (!CacheSeg->Valid &&
- DoSegmentsIntersect(CacheSeg->FileOffset, Bcb->CacheSegmentSize,
+ DoSegmentsIntersect(CacheSeg->FileOffset, VACB_MAPPING_GRANULARITY,
WriteOffset.u.LowPart, Length))
{
KeReleaseSpinLock(&Bcb->BcbLock, oldirql);
while (Length > 0)
{
ULONG Offset;
- Offset = WriteOffset.u.LowPart % Bcb->CacheSegmentSize;
+ Offset = WriteOffset.u.LowPart % VACB_MAPPING_GRANULARITY;
if (Length + Offset > MAX_ZERO_LENGTH)
{
CurrentLength = MAX_ZERO_LENGTH - Offset;
while (current != NULL)
{
- Offset = WriteOffset.u.LowPart % Bcb->CacheSegmentSize;
+ Offset = WriteOffset.u.LowPart % VACB_MAPPING_GRANULARITY;
if ((Offset != 0) ||
- (Offset + CurrentLength < Bcb->CacheSegmentSize))
+ (Offset + CurrentLength < VACB_MAPPING_GRANULARITY))
{
if (!current->Valid)
{
Status);
}
}
- TempLength = min (CurrentLength, Bcb->CacheSegmentSize - Offset);
+ TempLength = min (CurrentLength, VACB_MAPPING_GRANULARITY - Offset);
}
else
{
- TempLength = Bcb->CacheSegmentSize;
+ TempLength = VACB_MAPPING_GRANULARITY;
}
memset ((PUCHAR)current->BaseAddress + Offset, 0, TempLength);
CacheSegment->Dirty = FALSE;
RemoveEntryList(&CacheSegment->DirtySegmentListEntry);
- DirtyPageCount -= CacheSegment->Bcb->CacheSegmentSize / PAGE_SIZE;
+ DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
CcRosCacheSegmentDecRefCount(CacheSegment);
KeReleaseSpinLock(&CacheSegment->Bcb->BcbLock, oldIrql);
continue;
}
- PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
+ PagesPerSegment = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
KeReleaseGuardedMutex(&ViewLock);
KeReleaseGuardedMutex(&ViewLock);
/* Page out the segment */
- for (i = 0; i < current->Bcb->CacheSegmentSize / PAGE_SIZE; i++)
+ for (i = 0; i < VACB_MAPPING_GRANULARITY / PAGE_SIZE; i++)
{
Page = (PFN_NUMBER)(MmGetPhysicalAddress((PUCHAR)current->BaseAddress + (i * PAGE_SIZE)).QuadPart >> PAGE_SHIFT);
InsertHeadList(&FreeList, &current->BcbSegmentListEntry);
/* Calculate how many pages we freed for Mm */
- PagesPerSegment = current->Bcb->CacheSegmentSize / PAGE_SIZE;
+ PagesPerSegment = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
PagesFreed = min(PagesPerSegment, Target);
Target -= PagesFreed;
(*NrFreed) += PagesFreed;
if (!WasDirty && CacheSeg->Dirty)
{
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
- DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+ DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
}
if (Mapped)
{
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
- if (IsPointInSegment(current->FileOffset, Bcb->CacheSegmentSize,
+ if (IsPointInSegment(current->FileOffset, VACB_MAPPING_GRANULARITY,
FileOffset))
{
CcRosCacheSegmentIncRefCount(current);
if (!CacheSeg->Dirty)
{
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
- DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+ DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
}
else
{
if (!WasDirty && NowDirty)
{
InsertTailList(&DirtySegmentListHead, &CacheSeg->DirtySegmentListEntry);
- DirtyPageCount += Bcb->CacheSegmentSize / PAGE_SIZE;
+ DirtyPageCount += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
}
CcRosCacheSegmentDecRefCount(CacheSeg);
current->Valid = FALSE;
current->Dirty = FALSE;
current->PageOut = FALSE;
- current->FileOffset = ROUND_DOWN(FileOffset, Bcb->CacheSegmentSize);
+ current->FileOffset = ROUND_DOWN(FileOffset, VACB_MAPPING_GRANULARITY);
current->Bcb = Bcb;
#if DBG
if ( Bcb->Trace )
{
current = CONTAINING_RECORD(current_entry, CACHE_SEGMENT,
BcbSegmentListEntry);
- if (IsPointInSegment(current->FileOffset, Bcb->CacheSegmentSize,
+ if (IsPointInSegment(current->FileOffset, VACB_MAPPING_GRANULARITY,
FileOffset))
{
CcRosCacheSegmentIncRefCount(current);
#ifdef CACHE_BITMAP
KeAcquireSpinLock(&CiCacheSegMappingRegionLock, &oldIrql);
- StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, Bcb->CacheSegmentSize / PAGE_SIZE, CiCacheSegMappingRegionHint);
+ StartingOffset = RtlFindClearBitsAndSet(&CiCacheSegMappingRegionAllocMap, VACB_MAPPING_GRANULARITY / PAGE_SIZE, CiCacheSegMappingRegionHint);
if (StartingOffset == 0xffffffff)
{
if (CiCacheSegMappingRegionHint == StartingOffset)
{
- CiCacheSegMappingRegionHint += Bcb->CacheSegmentSize / PAGE_SIZE;
+ CiCacheSegMappingRegionHint += VACB_MAPPING_GRANULARITY / PAGE_SIZE;
}
KeReleaseSpinLock(&CiCacheSegMappingRegionLock, oldIrql);
Status = MmCreateMemoryArea(MmGetKernelAddressSpace(),
0, // nothing checks for cache_segment mareas, so set to 0
&current->BaseAddress,
- Bcb->CacheSegmentSize,
+ VACB_MAPPING_GRANULARITY,
PAGE_READWRITE,
(PMEMORY_AREA*)&current->MemoryArea,
FALSE,
}
#endif
- MmMapMemoryArea(current->BaseAddress, Bcb->CacheSegmentSize,
+ MmMapMemoryArea(current->BaseAddress, VACB_MAPPING_GRANULARITY,
MC_CACHE, PAGE_READWRITE);
return(STATUS_SUCCESS);
DPRINT("CcRosGetCacheSegmentChain()\n");
- Length = ROUND_UP(Length, Bcb->CacheSegmentSize);
+ Length = ROUND_UP(Length, VACB_MAPPING_GRANULARITY);
CacheSegList = _alloca(sizeof(PCACHE_SEGMENT) *
- (Length / Bcb->CacheSegmentSize));
+ (Length / VACB_MAPPING_GRANULARITY));
/*
* Look for a cache segment already mapping the same data.
*/
- for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
+ for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
{
- ULONG CurrentOffset = FileOffset + (i * Bcb->CacheSegmentSize);
+ ULONG CurrentOffset = FileOffset + (i * VACB_MAPPING_GRANULARITY);
current = CcRosLookupCacheSegment(Bcb, CurrentOffset);
if (current != NULL)
{
}
}
- for (i = 0; i < (Length / Bcb->CacheSegmentSize); i++)
+ for (i = 0; i < (Length / VACB_MAPPING_GRANULARITY); i++)
{
if (i == 0)
{
ASSERT(Bcb);
- if ((FileOffset % Bcb->CacheSegmentSize) != 0)
+ if ((FileOffset % VACB_MAPPING_GRANULARITY) != 0)
{
DPRINT1("Bad fileoffset %x should be multiple of %x",
- FileOffset, Bcb->CacheSegmentSize);
+ FileOffset, VACB_MAPPING_GRANULARITY);
KeBugCheck(CACHE_MANAGER);
}
}
#endif
#ifdef CACHE_BITMAP
- RegionSize = CacheSeg->Bcb->CacheSegmentSize / PAGE_SIZE;
+ RegionSize = VACB_MAPPING_GRANULARITY / PAGE_SIZE;
/* Unmap all the pages. */
for (i = 0; i < RegionSize; i++)
KeReleaseGuardedMutex(&ViewLock);
}
- Offset.QuadPart += Bcb->CacheSegmentSize;
- if (Length > Bcb->CacheSegmentSize)
+ Offset.QuadPart += VACB_MAPPING_GRANULARITY;
+ if (Length > VACB_MAPPING_GRANULARITY)
{
- Length -= Bcb->CacheSegmentSize;
+ Length -= VACB_MAPPING_GRANULARITY;
}
else
{
if (current->Dirty)
{
RemoveEntryList(&current->DirtySegmentListEntry);
- DirtyPageCount -= Bcb->CacheSegmentSize / PAGE_SIZE;
+ DirtyPageCount -= VACB_MAPPING_GRANULARITY / PAGE_SIZE;
DPRINT1("Freeing dirty segment\n");
}
InsertHeadList(&FreeList, &current->BcbSegmentListEntry);