[NTOS:MM] Use inline functions to acquire/release the PFN lock.
[reactos.git] / ntoskrnl / mm / balance.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/mm/balance.c
5 * PURPOSE: kernel memory management functions
6 *
7 * PROGRAMMERS: David Welch (welch@cwcom.net)
8 * Cameron Gutman (cameron.gutman@reactos.org)
9 */
10
11 /* INCLUDES *****************************************************************/
12
13 #include <ntoskrnl.h>
14 #define NDEBUG
15 #include <debug.h>
16
17 #include "ARM3/miarm.h"
18
19 #if defined (ALLOC_PRAGMA)
20 #pragma alloc_text(INIT, MmInitializeBalancer)
21 #pragma alloc_text(INIT, MmInitializeMemoryConsumer)
22 #pragma alloc_text(INIT, MiInitBalancerThread)
23 #endif
24
25
26 /* TYPES ********************************************************************/
/* A pending page-allocation request. Queued on AllocationListHead by a
 * caller of MmRequestPageMemoryConsumer that must wait for the balancer
 * to free a page on its behalf. */
typedef struct _MM_ALLOCATION_REQUEST
{
    PFN_NUMBER Page;      /* Filled in by MmRosNotifyAvailablePage; 0 until satisfied */
    LIST_ENTRY ListEntry; /* Link in AllocationListHead (AllocationListLock) */
    KEVENT Event;         /* Signaled once Page has been provided */
}
MM_ALLOCATION_REQUEST, *PMM_ALLOCATION_REQUEST;
/* GLOBALS ******************************************************************/

/* Per-consumer bookkeeping: pages used, page target, trim callback. */
MM_MEMORY_CONSUMER MiMemoryConsumers[MC_MAXIMUM];
static ULONG MiMinimumAvailablePages;   /* global low-water mark for MmAvailablePages */
static ULONG MiNrTotalPages;            /* available page count recorded at init time */
static LIST_ENTRY AllocationListHead;   /* waiters blocked in MmRequestPageMemoryConsumer */
static KSPIN_LOCK AllocationListLock;   /* protects AllocationListHead */
static ULONG MiMinimumPagesPerRun;      /* minimum pages to trim in one balancer pass */

static CLIENT_ID MiBalancerThreadId;            /* used by MiIsBalancerThread */
static HANDLE MiBalancerThreadHandle = NULL;    /* NULL until MiInitBalancerThread ran */
static KEVENT MiBalancerEvent;                  /* explicit kick from MmRebalanceMemoryConsumers */
static KTIMER MiBalancerTimer;                  /* periodic (2 s) balancer wakeup */
47
48 /* FUNCTIONS ****************************************************************/
49
50 VOID
51 INIT_FUNCTION
52 NTAPI
53 MmInitializeBalancer(ULONG NrAvailablePages, ULONG NrSystemPages)
54 {
55 memset(MiMemoryConsumers, 0, sizeof(MiMemoryConsumers));
56 InitializeListHead(&AllocationListHead);
57 KeInitializeSpinLock(&AllocationListLock);
58
59 MiNrTotalPages = NrAvailablePages;
60
61 /* Set up targets. */
62 MiMinimumAvailablePages = 256;
63 MiMinimumPagesPerRun = 256;
64 if ((NrAvailablePages + NrSystemPages) >= 8192)
65 {
66 MiMemoryConsumers[MC_CACHE].PagesTarget = NrAvailablePages / 4 * 3;
67 }
68 else if ((NrAvailablePages + NrSystemPages) >= 4096)
69 {
70 MiMemoryConsumers[MC_CACHE].PagesTarget = NrAvailablePages / 3 * 2;
71 }
72 else
73 {
74 MiMemoryConsumers[MC_CACHE].PagesTarget = NrAvailablePages / 8;
75 }
76 MiMemoryConsumers[MC_USER].PagesTarget = NrAvailablePages - MiMinimumAvailablePages;
77 }
78
/* Register the trim callback invoked when the given consumer must give
 * pages back to the system (see MiTrimMemoryConsumer). */
VOID
INIT_FUNCTION
NTAPI
MmInitializeMemoryConsumer(
    ULONG Consumer,
    NTSTATUS (*Trim)(ULONG Target, ULONG Priority, PULONG NrFreed))
{
    MiMemoryConsumers[Consumer].Trim = Trim;
}
88
/* Forward declaration for the page-zeroing helper used by
 * MmRosNotifyAvailablePage below (defined elsewhere in Mm). */
VOID
NTAPI
MiZeroPhysicalPage(
    IN PFN_NUMBER PageFrameIndex
);
94
95 NTSTATUS
96 NTAPI
97 MmReleasePageMemoryConsumer(ULONG Consumer, PFN_NUMBER Page)
98 {
99 if (Page == 0)
100 {
101 DPRINT1("Tried to release page zero.\n");
102 KeBugCheck(MEMORY_MANAGEMENT);
103 }
104
105 if (MmGetReferenceCountPage(Page) == 1)
106 {
107 if(Consumer == MC_USER) MmRemoveLRUUserPage(Page);
108 (void)InterlockedDecrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
109 }
110
111 MmDereferencePage(Page);
112
113 return(STATUS_SUCCESS);
114 }
115
/* Ask one consumer to give pages back.
 *
 * InitialTarget is the page debt carried over from consumers trimmed
 * earlier in the same balancer pass. The target is raised if this consumer
 * exceeds its own quota or if the system is globally short of pages, the
 * consumer's Trim callback is invoked, and the number of pages still
 * outstanding is returned so the next consumer can try to cover it.
 * Bugchecks if the Trim callback itself fails. */
ULONG
NTAPI
MiTrimMemoryConsumer(ULONG Consumer, ULONG InitialTarget)
{
    ULONG Target = InitialTarget;
    ULONG NrFreedPages = 0;
    NTSTATUS Status;

    /* Make sure we can trim this consumer */
    if (!MiMemoryConsumers[Consumer].Trim)
    {
        /* Return the unmodified initial target */
        return InitialTarget;
    }

    if (MiMemoryConsumers[Consumer].PagesUsed > MiMemoryConsumers[Consumer].PagesTarget)
    {
        /* Consumer page limit exceeded */
        Target = max(Target, MiMemoryConsumers[Consumer].PagesUsed - MiMemoryConsumers[Consumer].PagesTarget);
    }
    if (MmAvailablePages < MiMinimumAvailablePages)
    {
        /* Global page limit exceeded */
        Target = (ULONG)max(Target, MiMinimumAvailablePages - MmAvailablePages);
    }

    if (Target)
    {
        if (!InitialTarget)
        {
            /* If there was no initial target,
             * swap at least MiMinimumPagesPerRun */
            Target = max(Target, MiMinimumPagesPerRun);
        }

        /* Now swap the pages out */
        Status = MiMemoryConsumers[Consumer].Trim(Target, 0, &NrFreedPages);

        DPRINT("Trimming consumer %lu: Freed %lu pages with a target of %lu pages\n", Consumer, NrFreedPages, Target);

        if (!NT_SUCCESS(Status))
        {
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        /* Update the target */
        if (NrFreedPages < Target)
            Target -= NrFreedPages;
        else
            Target = 0;

        /* Return the remaining pages needed to meet the target */
        return Target;
    }
    else
    {
        /* Initial target is zero and we don't have anything else to add */
        return 0;
    }
}
176
177 NTSTATUS
178 MmTrimUserMemory(ULONG Target, ULONG Priority, PULONG NrFreedPages)
179 {
180 PFN_NUMBER CurrentPage;
181 PFN_NUMBER NextPage;
182 NTSTATUS Status;
183
184 (*NrFreedPages) = 0;
185
186 CurrentPage = MmGetLRUFirstUserPage();
187 while (CurrentPage != 0 && Target > 0)
188 {
189 Status = MmPageOutPhysicalAddress(CurrentPage);
190 if (NT_SUCCESS(Status))
191 {
192 DPRINT("Succeeded\n");
193 Target--;
194 (*NrFreedPages)++;
195 }
196
197 NextPage = MmGetLRUNextUserPage(CurrentPage);
198 if (NextPage <= CurrentPage)
199 {
200 /* We wrapped around, so we're done */
201 break;
202 }
203 CurrentPage = NextPage;
204 }
205
206 return STATUS_SUCCESS;
207 }
208
209 static BOOLEAN
210 MiIsBalancerThread(VOID)
211 {
212 return (MiBalancerThreadHandle != NULL) &&
213 (PsGetCurrentThreadId() == MiBalancerThreadId.UniqueThread);
214 }
215
216 VOID
217 NTAPI
218 MmRebalanceMemoryConsumers(VOID)
219 {
220 if (MiBalancerThreadHandle != NULL &&
221 !MiIsBalancerThread())
222 {
223 KeSetEvent(&MiBalancerEvent, IO_NO_INCREMENT, FALSE);
224 }
225 }
226
/* Allocate one physical page on behalf of a consumer.
 *
 * The page is charged to MiMemoryConsumers[Consumer] and the balancer is
 * kicked when the consumer or the whole system exceeds its page target.
 * MC_SYSTEM requests are always satisfied immediately. Otherwise, when
 * memory is tight and CanWait is TRUE the caller blocks on an allocation
 * request until MmRosNotifyAvailablePage hands it a freed page; with
 * CanWait == FALSE the charge is rolled back and STATUS_NO_MEMORY is
 * returned. Bugchecks if no page can be obtained at all. */
NTSTATUS
NTAPI
MmRequestPageMemoryConsumer(ULONG Consumer, BOOLEAN CanWait,
                            PPFN_NUMBER AllocatedPage)
{
    ULONG PagesUsed;
    PFN_NUMBER Page;

    /*
     * Make sure we don't exceed our individual target.
     */
    PagesUsed = InterlockedIncrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
    if (PagesUsed > MiMemoryConsumers[Consumer].PagesTarget &&
            !MiIsBalancerThread())
    {
        MmRebalanceMemoryConsumers();
    }

    /*
     * Allocate always memory for the non paged pool and for the pager thread.
     */
    if ((Consumer == MC_SYSTEM) /* || MiIsBalancerThread() */)
    {
        Page = MmAllocPage(Consumer);
        if (Page == 0)
        {
            KeBugCheck(NO_PAGES_AVAILABLE);
        }
        /* NOTE(review): dead while the MiIsBalancerThread() alternative above
         * stays commented out -- Consumer is always MC_SYSTEM on this path. */
        if (Consumer == MC_USER) MmInsertLRULastUserPage(Page);
        *AllocatedPage = Page;
        if (MmAvailablePages < MiMinimumAvailablePages)
            MmRebalanceMemoryConsumers();
        return(STATUS_SUCCESS);
    }

    /*
     * Make sure we don't exceed global targets.
     */
    if (((MmAvailablePages < MiMinimumAvailablePages) && !MiIsBalancerThread())
            || (MmAvailablePages < (MiMinimumAvailablePages / 2)))
    {
        MM_ALLOCATION_REQUEST Request;

        if (!CanWait)
        {
            /* Undo the usage charge taken above before failing. */
            (void)InterlockedDecrementUL(&MiMemoryConsumers[Consumer].PagesUsed);
            MmRebalanceMemoryConsumers();
            return(STATUS_NO_MEMORY);
        }

        /* Insert an allocation request. */
        Request.Page = 0;
        KeInitializeEvent(&Request.Event, NotificationEvent, FALSE);

        ExInterlockedInsertTailList(&AllocationListHead, &Request.ListEntry, &AllocationListLock);
        MmRebalanceMemoryConsumers();

        /* Block until MmRosNotifyAvailablePage fills in Request.Page and
         * signals the event. */
        KeWaitForSingleObject(&Request.Event,
                              0,
                              KernelMode,
                              FALSE,
                              NULL);

        Page = Request.Page;
        if (Page == 0)
        {
            KeBugCheck(NO_PAGES_AVAILABLE);
        }

        if(Consumer == MC_USER) MmInsertLRULastUserPage(Page);
        *AllocatedPage = Page;

        if (MmAvailablePages < MiMinimumAvailablePages)
        {
            MmRebalanceMemoryConsumers();
        }

        return(STATUS_SUCCESS);
    }

    /*
     * Actually allocate the page.
     */
    Page = MmAllocPage(Consumer);
    if (Page == 0)
    {
        KeBugCheck(NO_PAGES_AVAILABLE);
    }
    if(Consumer == MC_USER) MmInsertLRULastUserPage(Page);
    *AllocatedPage = Page;

    if (MmAvailablePages < MiMinimumAvailablePages)
    {
        MmRebalanceMemoryConsumers();
    }

    return(STATUS_SUCCESS);
}
325
326
/* The memory balancer thread. Wakes up on MiBalancerEvent (explicit kick
 * from MmRebalanceMemoryConsumers) or MiBalancerTimer (every 2 seconds)
 * and repeatedly trims all consumers until no page debt remains; bugchecks
 * if a full pass over the consumers frees nothing while pages are still
 * owed. */
VOID NTAPI
MiBalancerThread(PVOID Unused)
{
    PVOID WaitObjects[2];
    NTSTATUS Status;
    ULONG i;

    WaitObjects[0] = &MiBalancerEvent;
    WaitObjects[1] = &MiBalancerTimer;

    while (1)
    {
        Status = KeWaitForMultipleObjects(2,
                                          WaitObjects,
                                          WaitAny,
                                          Executive,
                                          KernelMode,
                                          FALSE,
                                          NULL,
                                          NULL);

        if (Status == STATUS_WAIT_0 || Status == STATUS_WAIT_1)
        {
            ULONG InitialTarget = 0;

#if (_MI_PAGING_LEVELS == 2)
            /* NOTE(review): this guard looks inverted -- we ARE the balancer
             * thread here, so MiIsBalancerThread() should be TRUE and the PDE
             * cleanup below would never run; confirm the intended condition. */
            if (!MiIsBalancerThread())
            {
                /* Clean up the unused PDEs */
                ULONG_PTR Address;
                PEPROCESS Process = PsGetCurrentProcess();

                /* Acquire PFN lock */
                KIRQL OldIrql = MiAcquirePfnLock();
                PMMPDE pointerPde;
                for (Address = (ULONG_PTR)MI_LOWEST_VAD_ADDRESS;
                        Address < (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS;
                        Address += (PAGE_SIZE * PTE_COUNT))
                {
                    /* A page table with zero references can be deleted. */
                    if (MiQueryPageTableReferences((PVOID)Address) == 0)
                    {
                        pointerPde = MiAddressToPde(Address);
                        if (pointerPde->u.Hard.Valid)
                            MiDeletePte(pointerPde, MiPdeToPte(pointerPde), Process, NULL);
                        ASSERT(pointerPde->u.Hard.Valid == 0);
                    }
                }
                /* Release lock */
                MiReleasePfnLock(OldIrql);
            }
#endif
            do
            {
                ULONG OldTarget = InitialTarget;

                /* Trim each consumer */
                for (i = 0; i < MC_MAXIMUM; i++)
                {
                    InitialTarget = MiTrimMemoryConsumer(i, InitialTarget);
                }

                /* No pages left to swap! */
                if (InitialTarget != 0 &&
                        InitialTarget == OldTarget)
                {
                    /* Game over */
                    KeBugCheck(NO_PAGES_AVAILABLE);
                }
            }
            while (InitialTarget != 0);
        }
        else
        {
            DPRINT1("KeWaitForMultipleObjects failed, status = %x\n", Status);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }
}
405
/* Hand a freed page to the oldest waiter queued by
 * MmRequestPageMemoryConsumer. Must be called with the PFN lock held.
 *
 * Returns TRUE when a waiter consumed the page: the page is zeroed, its
 * PFN entry is re-initialized with a single reference, and the waiter's
 * event is signaled. Returns FALSE when nobody is waiting or the balancer
 * has not been initialized yet. */
BOOLEAN MmRosNotifyAvailablePage(PFN_NUMBER Page)
{
    PLIST_ENTRY Entry;
    PMM_ALLOCATION_REQUEST Request;
    PMMPFN Pfn1;

    /* Make sure the PFN lock is held */
    MI_ASSERT_PFN_LOCK_HELD();

    if (!MiMinimumAvailablePages)
    {
        /* Dirty way to know if we were initialized. */
        return FALSE;
    }

    Entry = ExInterlockedRemoveHeadList(&AllocationListHead, &AllocationListLock);
    if (!Entry)
        return FALSE;

    Request = CONTAINING_RECORD(Entry, MM_ALLOCATION_REQUEST, ListEntry);
    MiZeroPhysicalPage(Page);
    Request->Page = Page;

    /* Re-initialize the PFN entry: exactly one reference, active and valid. */
    Pfn1 = MiGetPfnEntry(Page);
    ASSERT(Pfn1->u3.e2.ReferenceCount == 0);
    Pfn1->u3.e2.ReferenceCount = 1;
    Pfn1->u3.e1.PageLocation = ActiveAndValid;

    /* This marks the PFN as a ReactOS PFN */
    Pfn1->u4.AweAllocation = TRUE;

    /* Allocate the extra ReactOS Data and zero it out */
    Pfn1->u1.SwapEntry = 0;
    Pfn1->RmapListHead = NULL;

    /* Wake the waiter blocked in MmRequestPageMemoryConsumer. */
    KeSetEvent(&Request->Event, IO_NO_INCREMENT, FALSE);

    return TRUE;
}
445
446 VOID
447 INIT_FUNCTION
448 NTAPI
449 MiInitBalancerThread(VOID)
450 {
451 KPRIORITY Priority;
452 NTSTATUS Status;
453 #if !defined(__GNUC__)
454
455 LARGE_INTEGER dummyJunkNeeded;
456 dummyJunkNeeded.QuadPart = -20000000; /* 2 sec */
457 ;
458 #endif
459
460
461 KeInitializeEvent(&MiBalancerEvent, SynchronizationEvent, FALSE);
462 KeInitializeTimerEx(&MiBalancerTimer, SynchronizationTimer);
463 KeSetTimerEx(&MiBalancerTimer,
464 #if defined(__GNUC__)
465 (LARGE_INTEGER)(LONGLONG)-20000000LL, /* 2 sec */
466 #else
467 dummyJunkNeeded,
468 #endif
469 2000, /* 2 sec */
470 NULL);
471
472 Status = PsCreateSystemThread(&MiBalancerThreadHandle,
473 THREAD_ALL_ACCESS,
474 NULL,
475 NULL,
476 &MiBalancerThreadId,
477 MiBalancerThread,
478 NULL);
479 if (!NT_SUCCESS(Status))
480 {
481 KeBugCheck(MEMORY_MANAGEMENT);
482 }
483
484 Priority = LOW_REALTIME_PRIORITY + 1;
485 NtSetInformationThread(MiBalancerThreadHandle,
486 ThreadPriority,
487 &Priority,
488 sizeof(Priority));
489
490 }
491
492
493 /* EOF */