ULONG CcFastReadNoWait;
ULONG CcFastReadResourceMiss;
+/* Counters:
+ * - Number of pages flushed to disk
+ * - Number of flush operations
+ */
+ULONG CcDataPages = 0;
+ULONG CcDataFlushes = 0;
+
/* FUNCTIONS *****************************************************************/
VOID
CcReadVirtualAddress (
PROS_VACB Vacb)
{
- ULONG Size, Pages;
+ ULONG Size;
PMDL Mdl;
NTSTATUS Status;
IO_STATUS_BLOCK IoStatus;
KEVENT Event;
+ ULARGE_INTEGER LargeSize;
- Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
- if (Size > VACB_MAPPING_GRANULARITY)
+ LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
+ if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
{
- Size = VACB_MAPPING_GRANULARITY;
+ LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
}
+ Size = LargeSize.LowPart;
- Pages = BYTES_TO_PAGES(Size);
- ASSERT(Pages * PAGE_SIZE <= VACB_MAPPING_GRANULARITY);
+ Size = ROUND_TO_PAGES(Size);
+ ASSERT(Size <= VACB_MAPPING_GRANULARITY);
+ ASSERT(Size > 0);
- Mdl = IoAllocateMdl(Vacb->BaseAddress, Pages * PAGE_SIZE, FALSE, FALSE, NULL);
+ Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
if (!Mdl)
{
return STATUS_INSUFFICIENT_RESOURCES;
_SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
{
Status = _SEH2_GetExceptionCode();
+ DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
KeBugCheck(CACHE_MANAGER);
} _SEH2_END;
NTSTATUS Status;
IO_STATUS_BLOCK IoStatus;
KEVENT Event;
+ ULARGE_INTEGER LargeSize;
- Size = (ULONG)(Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart);
- if (Size > VACB_MAPPING_GRANULARITY)
+ LargeSize.QuadPart = Vacb->SharedCacheMap->SectionSize.QuadPart - Vacb->FileOffset.QuadPart;
+ if (LargeSize.QuadPart > VACB_MAPPING_GRANULARITY)
{
- Size = VACB_MAPPING_GRANULARITY;
+ LargeSize.QuadPart = VACB_MAPPING_GRANULARITY;
}
+ Size = LargeSize.LowPart;
//
// Nonpaged pool PDEs in ReactOS must actually be synchronized between the
// MmGlobalPageDirectory and the real system PDE directory. What a mess...
} while (++i < (Size >> PAGE_SHIFT));
}
+ Size = ROUND_TO_PAGES(Size);
+ ASSERT(Size <= VACB_MAPPING_GRANULARITY);
+ ASSERT(Size > 0);
+
Mdl = IoAllocateMdl(Vacb->BaseAddress, Size, FALSE, FALSE, NULL);
if (!Mdl)
{
_SEH2_EXCEPT (EXCEPTION_EXECUTE_HANDLER)
{
Status = _SEH2_GetExceptionCode();
+ DPRINT1("MmProbeAndLockPages failed with: %lx for %p (%p, %p)\n", Status, Mdl, Vacb, Vacb->BaseAddress);
KeBugCheck(CACHE_MANAGER);
} _SEH2_END;
/* If that was a successful sync read operation, let's handle read ahead */
if (Operation == CcOperationRead && Length == 0 && Wait)
{
- /* If file isn't random access, schedule next read */
- if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS))
+    /* If the file isn't random access and the next read may cross a
+     * VACB boundary, schedule the next read
+     */
+ if (!BooleanFlagOn(FileObject->Flags, FO_RANDOM_ACCESS) &&
+ (CurrentOffset - 1) / VACB_MAPPING_GRANULARITY != (CurrentOffset + BytesCopied - 1) / VACB_MAPPING_GRANULARITY)
{
CcScheduleReadAhead(FileObject, (PLARGE_INTEGER)&FileOffset, BytesCopied);
}
/* Remember it's locked */
Locked = TRUE;
+ /* Don't read past the end of the file */
+ if (CurrentOffset >= SharedCacheMap->FileSize.QuadPart)
+ {
+ goto Clear;
+ }
+ if (CurrentOffset + Length > SharedCacheMap->FileSize.QuadPart)
+ {
+ Length = SharedCacheMap->FileSize.QuadPart - CurrentOffset;
+ }
+
    /* The rest of the algorithm will look like CcCopyData, with the slight
     * difference that we don't copy data back to a user-backed buffer.
     * We just bring data into Cc
Length = BytesToWrite;
}
- /* Convert it to pages count */
- Pages = (Length + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ Pages = BYTES_TO_PAGES(Length);
/* By default, assume limits per file won't be hit */
PerFileDefer = FALSE;
&CcDeferredWriteSpinLock);
}
+ DPRINT1("Actively deferring write for: %p\n", FileObject);
/* Now, we'll loop until our event is set. When it is set, it means that caller
* can immediately write, and has to
*/