Merge trunk head (r43756)
[reactos.git] / reactos / ntoskrnl / mm / ARM3 / contmem.c
1 /*
2 * PROJECT: ReactOS Kernel
3 * LICENSE: BSD - See COPYING.ARM in the top level directory
4 * FILE: ntoskrnl/mm/ARM3/contmem.c
5 * PURPOSE: ARM Memory Manager Contiguous Memory Allocator
6 * PROGRAMMERS: ReactOS Portable Systems Group
7 */
8
9 /* INCLUDES *******************************************************************/
10
11 #include <ntoskrnl.h>
12 #define NDEBUG
13 #include <debug.h>
14
#line 15 "ARM³::CONTMEM"
16 #define MODULE_INVOLVED_IN_ARM3
17 #include "../ARM3/miarm.h"
18
19 /* PRIVATE FUNCTIONS **********************************************************/
20
/*
 * MiCheckForContiguousMemory
 *
 * Scans the valid PTEs that map BaseAddress (BaseAddressPages of them) for
 * a run of SizeInPages physically contiguous pages whose PFNs lie within
 * [LowestPfn, HighestPfn] and, when BoundaryPfn is nonzero, do not cross a
 * BoundaryPfn-aligned physical boundary.
 *
 * Returns the virtual address of the first page of the qualifying run, or
 * NULL if no such run exists within the mapped range.
 *
 * NOTE(review): CacheAttribute is accepted but never consulted by this
 * implementation -- confirm whether a cache-attribute check was intended.
 */
PVOID
NTAPI
MiCheckForContiguousMemory(IN PVOID BaseAddress,
                           IN PFN_NUMBER BaseAddressPages,
                           IN PFN_NUMBER SizeInPages,
                           IN PFN_NUMBER LowestPfn,
                           IN PFN_NUMBER HighestPfn,
                           IN PFN_NUMBER BoundaryPfn,
                           IN MI_PFN_CACHE_ATTRIBUTE CacheAttribute)
{
    PMMPTE StartPte, EndPte;
    PFN_NUMBER PreviousPage = 0, Page, HighPage, BoundaryMask, Pages = 0;

    //
    // Okay, first of all check if the PFNs match our restrictions: the range
    // must be ordered, the size must not wrap past it (second test catches
    // arithmetic overflow), and the caller must have mapped enough pages.
    //
    if (LowestPfn > HighestPfn) return NULL;
    if (LowestPfn + SizeInPages <= LowestPfn) return NULL;
    if (LowestPfn + SizeInPages - 1 > HighestPfn) return NULL;
    if (BaseAddressPages < SizeInPages) return NULL;

    //
    // This is the last page we need to get to and the boundary requested.
    // HighPage is the highest PFN at which a run can START and still end at
    // or below HighestPfn.
    //
    HighPage = HighestPfn + 1 - SizeInPages;
    BoundaryMask = ~(BoundaryPfn - 1);

    //
    // And here's the PTEs for this allocation. Let's go scan them.
    //
    StartPte = MiAddressToPte(BaseAddress);
    EndPte = StartPte + BaseAddressPages;
    while (StartPte < EndPte)
    {
        //
        // Get this PTE's page number (every PTE in the range must be valid)
        //
        ASSERT (StartPte->u.Hard.Valid == 1);
        Page = PFN_FROM_PTE(StartPte);

        //
        // Is this the beginning of our adventure?
        //
        if (!Pages)
        {
            //
            // Check if this PFN is within our range
            //
            if ((Page >= LowestPfn) && (Page <= HighPage))
            {
                //
                // It is! Do you care about boundary (alignment)? A run stays
                // inside one boundary block exactly when the first and last
                // PFN agree in all bits above the boundary alignment.
                //
                if (!(BoundaryPfn) ||
                    (!((Page ^ (Page + SizeInPages - 1)) & BoundaryMask)))
                {
                    //
                    // You don't care, or you do care but we deliver
                    //
                    Pages++;
                }
            }

            //
            // Have we found all the pages we need by now?
            // Incidently, this means you only wanted one page
            //
            if (Pages == SizeInPages)
            {
                //
                // Mission complete
                //
                return MiPteToAddress(StartPte);
            }
        }
        else
        {
            //
            // Have we found a page that doesn't seem to be contiguous?
            //
            if (Page != (PreviousPage + 1))
            {
                //
                // Ah crap, we have to start over. Note we continue WITHOUT
                // advancing StartPte: this same PTE is re-examined as the
                // potential start of a new run.
                //
                Pages = 0;
                continue;
            }

            //
            // Otherwise, we're still in the game. Do we have all our pages?
            //
            if (++Pages == SizeInPages)
            {
                //
                // We do! This entire range was contiguous, so we'll return it!
                // (StartPte points at the LAST page of the run here, so back
                // up to its first page.)
                //
                return MiPteToAddress(StartPte - Pages + 1);
            }
        }

        //
        // Try with the next PTE, remember this PFN
        //
        PreviousPage = Page;
        StartPte++;
        continue;
    }

    //
    // All good returns are within the loop...
    //
    return NULL;
}
135
136 PVOID
137 NTAPI
138 MiFindContiguousMemory(IN PFN_NUMBER LowestPfn,
139 IN PFN_NUMBER HighestPfn,
140 IN PFN_NUMBER BoundaryPfn,
141 IN PFN_NUMBER SizeInPages,
142 IN MEMORY_CACHING_TYPE CacheType)
143 {
144 PFN_NUMBER Page;
145 PHYSICAL_ADDRESS PhysicalAddress;
146 PAGED_CODE ();
147 ASSERT(SizeInPages != 0);
148
149 //
150 // Our last hope is to scan the free page list for contiguous pages
151 //
152 Page = MiFindContiguousPages(LowestPfn,
153 HighestPfn,
154 BoundaryPfn,
155 SizeInPages,
156 CacheType);
157 if (!Page) return NULL;
158
159 //
160 // We'll just piggyback on the I/O memory mapper
161 //
162 PhysicalAddress.QuadPart = Page << PAGE_SHIFT;
163 return MmMapIoSpace(PhysicalAddress, SizeInPages << PAGE_SHIFT, CacheType);
164 }
165
166 PVOID
167 NTAPI
168 MiAllocateContiguousMemory(IN SIZE_T NumberOfBytes,
169 IN PFN_NUMBER LowestAcceptablePfn,
170 IN PFN_NUMBER HighestAcceptablePfn,
171 IN PFN_NUMBER BoundaryPfn,
172 IN MEMORY_CACHING_TYPE CacheType)
173 {
174 PVOID BaseAddress;
175 PFN_NUMBER SizeInPages;
176 MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
177
178 //
179 // Verify count and cache type
180 //
181 ASSERT(NumberOfBytes != 0);
182 ASSERT(CacheType <= MmWriteCombined);
183
184 //
185 // Compute size requested
186 //
187 SizeInPages = BYTES_TO_PAGES(NumberOfBytes);
188
189 //
190 // Convert the cache attribute and check for cached requests
191 //
192 CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
193 if (CacheAttribute == MiCached)
194 {
195 //
196 // Because initial nonpaged pool is supposed to be contiguous, go ahead
197 // and try making a nonpaged pool allocation first.
198 //
199 BaseAddress = ExAllocatePoolWithTag(NonPagedPoolCacheAligned,
200 NumberOfBytes,
201 'mCmM');
202 if (BaseAddress)
203 {
204 //
205 // Now make sure it's actually contiguous (if it came from expansion
206 // it might not be).
207 //
208 if (MiCheckForContiguousMemory(BaseAddress,
209 SizeInPages,
210 SizeInPages,
211 LowestAcceptablePfn,
212 HighestAcceptablePfn,
213 BoundaryPfn,
214 CacheAttribute))
215 {
216 //
217 // Sweet, we're in business!
218 //
219 return BaseAddress;
220 }
221
222 //
223 // No such luck
224 //
225 ExFreePool(BaseAddress);
226 }
227 }
228
229 //
230 // According to MSDN, the system won't try anything else if you're higher
231 // than APC level.
232 //
233 if (KeGetCurrentIrql() > APC_LEVEL) return NULL;
234
235 //
236 // Otherwise, we'll go try to find some
237 //
238 return MiFindContiguousMemory(LowestAcceptablePfn,
239 HighestAcceptablePfn,
240 BoundaryPfn,
241 SizeInPages,
242 CacheType);
243 }
244
/*
 * MiFreeContiguousMemory
 *
 * Releases memory obtained from the contiguous memory allocator. Blocks
 * that came from nonpaged pool (initial pool or expansion) are returned to
 * the pool; otherwise the block was mapped via MmMapIoSpace, so its PFN run
 * is delimited using the Start/EndOfAllocation PFN bits, the mapping is
 * torn down, and each physical page is released back to the nonpaged pool
 * memory consumer. Bugchecks (BAD_POOL_CALLER, 0x60) if BaseAddress does
 * not point at the start of an allocation.
 */
VOID
NTAPI
MiFreeContiguousMemory(IN PVOID BaseAddress)
{
    KIRQL OldIrql;
    PFN_NUMBER PageFrameIndex, LastPage, PageCount;
    PMMPFN Pfn1, StartPfn;
    PAGED_CODE();

    //
    // First, check if the memory came from initial nonpaged pool, or expansion
    //
    if (((BaseAddress >= MmNonPagedPoolStart) &&
         (BaseAddress < (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                MmSizeOfNonPagedPoolInBytes))) ||
        ((BaseAddress >= MmNonPagedPoolExpansionStart) &&
         (BaseAddress < MmNonPagedPoolEnd)))
    {
        //
        // It did, so just use the pool to free this
        //
        ExFreePool(BaseAddress);
        return;
    }

    //
    // Otherwise, get the PTE and page number for the allocation
    //
    PageFrameIndex = PFN_FROM_PTE(MiAddressToPte(BaseAddress));

    //
    // Now get the PFN entry for this, and make sure it's the correct one
    //
    Pfn1 = MiGetPfnEntry(PageFrameIndex);
    if (Pfn1->u3.e1.StartOfAllocation == 0)
    {
        //
        // This probably means you did a free on an address that was in between
        //
        KeBugCheckEx (BAD_POOL_CALLER,
                      0x60,
                      (ULONG_PTR)BaseAddress,
                      0,
                      0);
    }

    //
    // Now this PFN isn't the start of any allocation anymore, it's going out.
    // NOTE(review): the Start/EndOfAllocation bits are cleared here BEFORE
    // the PFN lock is acquired below -- confirm this ordering is intended.
    //
    StartPfn = Pfn1;
    Pfn1->u3.e1.StartOfAllocation = 0;

    //
    // Walk the PFN entries forward...
    //
    do
    {
        //
        // ...until we find the one that marks the end of the allocation
        // (the condition post-increments Pfn1, so it ends up one PAST it)
        //
    } while (Pfn1++->u3.e1.EndOfAllocation == 0);

    //
    // Found it (back up to the entry that matched), unmark it
    //
    Pfn1--;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Now compute how many pages this represents
    //
    PageCount = (ULONG)(Pfn1 - StartPfn + 1);

    //
    // So we can know how much to unmap (recall we piggyback on I/O mappings)
    //
    MmUnmapIoSpace(BaseAddress, PageCount << PAGE_SHIFT);

    //
    // Lock the PFN database
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);

    //
    // Loop all the pages
    //
    LastPage = PageFrameIndex + PageCount;
    do
    {
        //
        // Free each one, and move on
        //
        MmReleasePageMemoryConsumer(MC_NPPOOL, PageFrameIndex);
    } while (++PageFrameIndex < LastPage);

    //
    // Release the PFN lock
    //
    KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);
}
345
346 /* PUBLIC FUNCTIONS ***********************************************************/
347
348 /*
349 * @implemented
350 */
351 PVOID
352 NTAPI
353 MmAllocateContiguousMemorySpecifyCache(IN SIZE_T NumberOfBytes,
354 IN PHYSICAL_ADDRESS LowestAcceptableAddress OPTIONAL,
355 IN PHYSICAL_ADDRESS HighestAcceptableAddress,
356 IN PHYSICAL_ADDRESS BoundaryAddressMultiple OPTIONAL,
357 IN MEMORY_CACHING_TYPE CacheType OPTIONAL)
358 {
359 PFN_NUMBER LowestPfn, HighestPfn, BoundaryPfn;
360
361 //
362 // Verify count and cache type
363 //
364 ASSERT(NumberOfBytes != 0);
365 ASSERT(CacheType <= MmWriteCombined);
366
367 //
368 // Convert the lowest address into a PFN
369 //
370 LowestPfn = (PFN_NUMBER)(LowestAcceptableAddress.QuadPart >> PAGE_SHIFT);
371 if (BYTE_OFFSET(LowestAcceptableAddress.LowPart)) LowestPfn++;
372
373 //
374 // Convert and validate the boundary address into a PFN
375 //
376 if (BYTE_OFFSET(BoundaryAddressMultiple.LowPart)) return NULL;
377 BoundaryPfn = (PFN_NUMBER)(BoundaryAddressMultiple.QuadPart >> PAGE_SHIFT);
378
379 //
380 // Convert the highest address into a PFN
381 //
382 HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
383 if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;
384
385 //
386 // Validate the PFN bounds
387 //
388 if (LowestPfn > HighestPfn) return NULL;
389
390 //
391 // Let the contiguous memory allocator handle it
392 //
393 return MiAllocateContiguousMemory(NumberOfBytes,
394 LowestPfn,
395 HighestPfn,
396 BoundaryPfn,
397 CacheType);
398 }
399
400 /*
401 * @implemented
402 */
403 PVOID
404 NTAPI
405 MmAllocateContiguousMemory(IN ULONG NumberOfBytes,
406 IN PHYSICAL_ADDRESS HighestAcceptableAddress)
407 {
408 PFN_NUMBER HighestPfn;
409
410 //
411 // Verify byte count
412 //
413 ASSERT(NumberOfBytes != 0);
414
415 //
416 // Convert and normalize the highest address into a PFN
417 //
418 HighestPfn = (PFN_NUMBER)(HighestAcceptableAddress.QuadPart >> PAGE_SHIFT);
419 if (HighestPfn > MmHighestPhysicalPage) HighestPfn = MmHighestPhysicalPage;
420
421 //
422 // Let the contiguous memory allocator handle it
423 //
424 return MiAllocateContiguousMemory(NumberOfBytes, 0, HighestPfn, 0, MmCached);
425 }
426
427 /*
428 * @implemented
429 */
430 VOID
431 NTAPI
432 MmFreeContiguousMemory(IN PVOID BaseAddress)
433 {
434 //
435 // Let the contiguous memory allocator handle it
436 //
437 MiFreeContiguousMemory(BaseAddress);
438 }
439
440 /*
441 * @implemented
442 */
443 VOID
444 NTAPI
445 MmFreeContiguousMemorySpecifyCache(IN PVOID BaseAddress,
446 IN SIZE_T NumberOfBytes,
447 IN MEMORY_CACHING_TYPE CacheType)
448 {
449 //
450 // Just call the non-cached version (there's no cache issues for freeing)
451 //
452 MiFreeContiguousMemory(BaseAddress);
453 }
454
455 /* EOF */