/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/contmem.c
 * PURPOSE:         ARM Memory Manager Contiguous Memory Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#line 15 "ARM³::CONTMEM"
#define MODULE_INVOLVED_IN_ARM3
#include "../ARM3/miarm.h"

/* PRIVATE FUNCTIONS **********************************************************/

PVOID
NTAPI
MiCheckForContiguousMemory(IN PVOID BaseAddress,
                           IN PFN_NUMBER BaseAddressPages,
                           IN PFN_NUMBER SizeInPages,
                           IN PFN_NUMBER LowestPfn,
                           IN PFN_NUMBER HighestPfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute)
{
    PMMPTE StartPte, EndPte;
    PFN_NUMBER PreviousPage = 0, Page, HighPage, BoundaryMask, Pages = 0;

    //
    // Okay, first of all check if the PFNs match our restrictions
    //
    if (LowestPfn > HighestPfn) return NULL;
    if (LowestPfn + SizeInPages <= LowestPfn) return NULL;
    if (LowestPfn + SizeInPages - 1 > HighestPfn) return NULL;
    if (BaseAddressPages < SizeInPages) return NULL;

    //
    // This is the last page we need to get to and the boundary requested
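    // (the mask arithmetic below only works when BoundaryPfn is zero or a power of two)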
    //
    HighPage = HighestPfn + 1 - SizeInPages;
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // And here are the PTEs for this allocation. Let's go scan them.
    //
    StartPte = MiAddressToPte(BaseAddress);
    EndPte = StartPte + BaseAddressPages;
    while (StartPte < EndPte)
    {
        //
        // Get this PTE's page number
        //
        ASSERT(StartPte->u.Hard.Valid == 1);
        Page = PFN_FROM_PTE(StartPte);

        //
        // Is this the beginning of our adventure?
        //
        if (!Pages)
        {
            //
            // Check if this PFN is within our range
            //
            if ((Page >= LowestPfn) && (Page <= HighPage))
            {
                //
                // It is! Do you care about boundary (alignment)?
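                // (the XOR below checks that the first and last page of the run fall in the
                //  same BoundaryPfn-aligned chunk, i.e. that the run doesn't cross a boundary)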
                //
                if (!(BoundaryPfn) ||
                    (!((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
                {
                    //
                    // You don't care, or you do care but we deliver
                    //
                    Pages++;
                }
            }

            //
            // Have we found all the pages we need by now?
            // Incidentally, this means you only wanted one page
            //
            if (Pages == SizeInPages)
            {
                //
                // Mission complete
                //
                return MiPteToAddress(StartPte);
            }
        }
        else
        {
            //
            // Have we found a page that doesn't seem to be contiguous?
            //
            if (Page != (PreviousPage + 1))
            {
                //
                // Ah crap, we have to start over
                //
                Pages = 0;
                continue;
            }

            //
            // Otherwise, we're still in the game. Do we have all our pages?
            //
            if (++Pages == SizeInPages)
            {
                //
                // We do! This entire range was contiguous, so we'll return it!
                //
                return MiPteToAddress(StartPte - Pages + 1);
            }
        }

        //
        // Try with the next PTE, remember this PFN
        //
        PreviousPage = Page;
        StartPte++;
        continue;
    }

    //
    // All good returns are within the loop...
    //
    return NULL;
}

PVOID
NTAPI
MiFindContiguousMemory(IN PFN_NUMBER LowestPfn,
                       IN PFN_NUMBER HighestPfn,
                       IN PFN_NUMBER BoundaryPfn,
                       IN PFN_NUMBER SizeInPages,
                       IN MEMORY_CACHING_TYPE CacheType)
{
    PFN_NUMBER Page;
    PHYSICAL_ADDRESS PhysicalAddress;
    PAGED_CODE();
    ASSERT(SizeInPages != 0);

    //
    // Our last hope is to scan the free page list for contiguous pages
    //
    Page = MiFindContiguousPages(LowestPfn,
                                 HighestPfn,
                                 BoundaryPfn,
                                 SizeInPages,
                                 CacheType);
    if (!Page) return NULL;

    //
    // We'll just piggyback on the I/O memory mapper
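    // (shift the PFN as a 64-bit value so addresses above 4GB aren't truncated on 32-bit builds)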
    //
    PhysicalAddress.QuadPart = (ULONGLONG)Page << PAGE_SHIFT;
    return MmMapIoSpace(PhysicalAddress, SizeInPages << PAGE_SHIFT, CacheType);
}

PVOID
NTAPI
MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
                           IN PFN_NUMBER LowestAcceptablePfn,
                           IN PFN_NUMBER HighestAcceptablePfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MEMORY_CACHING_TYPE CacheType)
{
    PVOID BaseAddress;
    PFN_NUMBER SizeInPages;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    ASSERT(NumberOfBytes != 0);

    //
    // Compute size requested
    //
    SizeInPages = BYTES_TO_PAGES(NumberOfBytes);

    //
    // Convert the cache attribute and check for cached requests
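    // (the first index of MiPlatformCacheAttributes appears to distinguish
    //  I/O mappings from plain RAM; FALSE selects the RAM set here)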
    //
    CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    if (CacheAttribute == MiCached)
    {
        //
        // Because initial nonpaged pool is supposed to be contiguous, go ahead
        // and try making a nonpaged pool allocation first.
        //
        BaseAddress = ExAllocatePoolWithTag(NonPagedPoolCacheAligned,
                                            NumberOfBytes,
                                            'mCmM');
        if (BaseAddress)
        {
            //
            // Now make sure it's actually contiguous (if it came from expansion
            // it might not be).
            //
            if (MiCheckForContiguousMemory(BaseAddress,
                                           SizeInPages,
                                           SizeInPages,
                                           LowestAcceptablePfn,
                                           HighestAcceptablePfn,
                                           BoundaryPfn,
                                           CacheAttribute))
            {
                //
                // Sweet, we're in business!
                //
                return BaseAddress;
            }

            //
            // No such luck
            //
            ExFreePool(BaseAddress);
        }
    }

    //
    // According to MSDN, the system won't try anything else if you're higher
    // than APC level.
    //
    if (KeGetCurrentIrql() > APC_LEVEL) return NULL;

    //
    // Otherwise, we'll go try to find some
    //
    return MiFindContiguousMemory(LowestAcceptablePfn,
                                  HighestAcceptablePfn,
                                  BoundaryPfn,
                                  SizeInPages,
                                  CacheType);
}

VOID
NTAPI
MiFreeContiguousMemory(IN PVOID BaseAddress)
{
    KIRQL OldIrql;
    PFN_NUMBER PageFrameIndex, LastPage, PageCount;
    PMMPFN Pfn1, StartPfn;
    PAGED_CODE();

    //
    // First, check if the memory came from initial nonpaged pool, or expansion
    //
    if (((BaseAddress >= MmNonPagedPoolStart) &&
         (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                MmSizeOfNonPagedPoolInBytes))) ||
        ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
         (BaseAddress < MmNonPagedPoolEnd)))
    {
        //
        // It did, so just use the pool to free this
        //
        ExFreePool(BaseAddress);
        return;
    }

    //
    // Otherwise, get the PTE and page number for the allocation
    //
    PageFrameIndex = PFN_FROM_PTE(MiAddressToPte(BaseAddress));

    //
    // Now get the PFN entry for this, and make sure it's the correct one
    //
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    if (Pfn1->u3.e1.StartOfAllocation == 0)
    {
        //
        // This probably means you did a free on an address in the middle of an allocation
        //
        KeBugCheckEx(BAD_POOL_CALLER,
                     0x60,
                     (ULONG_PTR)BaseAddress,
                     0,
                     0);
    }

    //
    // Now this PFN isn't the start of any allocation anymore, it's going out
    //
    StartPfn = Pfn1;
    Pfn1->u3.e1.StartOfAllocation = 0;

    //
    // Loop the PFNs
    //
    do
    {
        //
        // Until we find the one that marks the end of the allocation
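        // (the allocator brackets each contiguous run with the StartOfAllocation
        //  and EndOfAllocation bits in the PFN database, which is what we scan for)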
        //
    } while (Pfn1++->u3.e1.EndOfAllocation == 0);

    //
    // Found it, unmark it
    //
    Pfn1--;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Now compute how many pages this represents
    //
    PageCount = (ULONG)(Pfn1 - StartPfn + 1);

    //
    // So we can know how much to unmap (recall we piggyback on I/O mappings)
    //
    MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the pages
    //
    LastPage = PageFrameIndex + PageCount;
    do
    {
        //
        // Free each one, and move on
        //
        MmReleasePageMemoryConsumer(MC_NPPOOL, PageFrameIndex);
    } while (++PageFrameIndex < LastPage);

    //
    // Release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes,
                                       IN PHYSICAL_ADDRESS LowestAcceptableAddress OPTIONAL,
                                       IN PHYSICAL_ADDRESS HighestAcceptableAddress,
                                       IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL,
                                       IN MEMORY_CACHING_TYPE CacheType OPTIONAL)
{
    PFN_NUMBER LowestPfn, HighestPfn, BoundaryPfn;
    ASSERT(NumberOfBytes != 0);

    //
    // Convert the lowest address into a PFN
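    // (round up, so an unaligned lowest address doesn't admit a page that starts below it)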
    //
    LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) LowestPfn++;

    //
    // Convert and validate the boundary address into a PFN
    //
    if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) return NULL;
    BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);

    //
    // Convert the highest address into a PFN
    //
    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;

    //
    // Validate the PFN bounds
    //
    if (LowestPfn > HighestPfn) return NULL;

    //
    // Let the contiguous memory allocator handle it
    //
    return MiAllocateContiguousMemory(NumberOfBytes,
                                      LowestPfn,
                                      HighestPfn,
                                      BoundaryPfn,
                                      CacheType);
}
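
/*
 * A minimal usage sketch (illustration only, not part of this file): a driver
 * that wants a buffer below 16MB which must not cross a 64KB boundary could
 * call the routine above roughly like this, and later release the buffer with
 * MmFreeContiguousMemory:
 *
 *     PHYSICAL_ADDRESS Low, High, Boundary;
 *     PVOID Buffer;
 *
 *     Low.QuadPart = 0;
 *     High.QuadPart = 0x00FFFFFF;   // stay below 16MB
 *     Boundary.QuadPart = 0x10000;  // never cross a 64KB line
 *     Buffer = MmAllocateContiguousMemorySpecifyCache(8 * PAGE_SIZE,
 *                                                     Low,
 *                                                     High,
 *                                                     Boundary,
 *                                                     MmNonCached);
 *     if (Buffer) { ... use it ... ; MmFreeContiguousMemory(Buffer); }
 */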

/*
 * @implemented
 */
PVOID
NTAPI
MmAllocateContiguousMemory(IN ULONG NumberOfBytes,
                           IN PHYSICAL_ADDRESS HighestAcceptableAddress)
{
    PFN_NUMBER HighestPfn;

    //
    // Convert and normalize the highest address into a PFN
    //
    HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
    if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;

    //
    // Let the contiguous memory allocator handle it
    //
    return MiAllocateContiguousMemory(NumberOfBytes, 0, HighestPfn, 0, MmCached);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreeContiguousMemory(IN PVOID BaseAddress)
{
    //
    // Let the contiguous memory allocator handle it
    //
    MiFreeContiguousMemory(BaseAddress);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreeContiguousMemorySpecifyCache(IN PVOID BaseAddress,
                                   IN ULONG NumberOfBytes,
                                   IN MEMORY_CACHING_TYPE CacheType)
{
    //
    // Just call the non-cached version (there are no cache issues when freeing)
    //
    MiFreeContiguousMemory(BaseAddress);
}

/* EOF */