"[00:08] Stefan100: 5) Alex_Ionescu will have your head"
/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/rtl/libsupp.c
 * PURPOSE:         RTL Support Routines
 * PROGRAMMERS:     Alex Ionescu (alex@relsoft.net)
 *                  Gunnar Dalsnes
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define TAG_ATMT TAG('A', 't', 'o', 'T') /* Atom table */

extern ULONG NtGlobalFlag;

typedef struct _RTL_RANGE_ENTRY
{
    LIST_ENTRY Entry;
    RTL_RANGE Range;
} RTL_RANGE_ENTRY, *PRTL_RANGE_ENTRY;

PAGED_LOOKASIDE_LIST RtlpRangeListEntryLookasideList;
SIZE_T RtlpAllocDeallocQueryBufferSize = 128;

/* FUNCTIONS *****************************************************************/

PVOID
NTAPI
RtlpLookupModuleBase(
    PVOID Address)
{
    PLDR_DATA_TABLE_ENTRY LdrEntry;
    BOOLEAN InSystem;
    PVOID p;

    /* Get the base for this file */
    if ((ULONG_PTR)Address > (ULONG_PTR)MmHighestUserAddress)
    {
        /* We are in kernel */
        p = KiPcToFileHeader(Address, &LdrEntry, FALSE, &InSystem);
    }
    else
    {
        /* We are in user land */
        p = KiRosPcToUserFileHeader(Address, &LdrEntry);
    }

    return p;
}

VOID
NTAPI
RtlInitializeRangeListPackage(VOID)
{
    /* Setup the lookaside list for allocations (not used yet) */
    ExInitializePagedLookasideList(&RtlpRangeListEntryLookasideList,
                                   NULL,
                                   NULL,
                                   POOL_COLD_ALLOCATION,
                                   sizeof(RTL_RANGE_ENTRY),
                                   TAG('R', 'R', 'l', 'e'),
                                   16);
}
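
#if 0
/*
 * Illustrative sketch (not built): one way entries could be carved out of
 * RtlpRangeListEntryLookasideList once the range-list code starts using it.
 * RtlpExampleAllocateRangeEntry, RtlpExampleFreeRangeEntry and the List
 * parameter are hypothetical names, not part of this file.
 */
static PRTL_RANGE_ENTRY
RtlpExampleAllocateRangeEntry(IN PLIST_ENTRY List)
{
    PRTL_RANGE_ENTRY RangeEntry;

    /* Grab an entry from the lookaside list (it falls back to paged pool) */
    RangeEntry = ExAllocateFromPagedLookasideList(&RtlpRangeListEntryLookasideList);
    if (!RangeEntry) return NULL;

    /* Initialize it and link it into the caller's list */
    RtlZeroMemory(&RangeEntry->Range, sizeof(RTL_RANGE));
    InsertTailList(List, &RangeEntry->Entry);
    return RangeEntry;
}

static VOID
RtlpExampleFreeRangeEntry(IN PRTL_RANGE_ENTRY RangeEntry)
{
    /* Unlink the entry and hand it back to the lookaside list */
    RemoveEntryList(&RangeEntry->Entry);
    ExFreeToPagedLookasideList(&RtlpRangeListEntryLookasideList, RangeEntry);
}
#endif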

BOOLEAN
NTAPI
RtlpCheckForActiveDebugger(BOOLEAN Type)
{
    /* This check is meaningless in kernel-mode */
    return Type;
}

BOOLEAN
NTAPI
RtlpSetInDbgPrint(IN BOOLEAN NewValue)
{
    /* This check is meaningless in kernel-mode */
    return FALSE;
}

KPROCESSOR_MODE
STDCALL
RtlpGetMode(VOID)
{
    return KernelMode;
}

PVOID
STDCALL
RtlpAllocateMemory(ULONG Bytes,
                   ULONG Tag)
{
    return ExAllocatePoolWithTag(PagedPool,
                                 (SIZE_T)Bytes,
                                 Tag);
}


#define TAG_USTR TAG('U', 'S', 'T', 'R')
#define TAG_ASTR TAG('A', 'S', 'T', 'R')
#define TAG_OSTR TAG('O', 'S', 'T', 'R')
VOID
STDCALL
RtlpFreeMemory(PVOID Mem,
               ULONG Tag)
{
    if (Tag == TAG_ASTR || Tag == TAG_OSTR || Tag == TAG_USTR)
        ExFreePool(Mem);
    else
        ExFreePoolWithTag(Mem, Tag);
}
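
#if 0
/*
 * Illustrative sketch (not built): RtlpAllocateMemory/RtlpFreeMemory are the
 * pool-backed allocator pair that the generic RTL code calls into; whatever
 * tag was used for the allocation is handed back on the free side. The
 * 'Xmpl' tag and buffer size below are made-up example values.
 */
static VOID
RtlpExampleAllocFree(VOID)
{
    PVOID Buffer = RtlpAllocateMemory(0x100, TAG('X', 'm', 'p', 'l'));
    if (Buffer)
    {
        RtlpFreeMemory(Buffer, TAG('X', 'm', 'p', 'l'));
    }
}
#endif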

/*
 * @implemented
 */
VOID STDCALL
RtlAcquirePebLock(VOID)
{

}

/*
 * @implemented
 */
VOID STDCALL
RtlReleasePebLock(VOID)
{

}

NTSTATUS
STDCALL
LdrShutdownThread(VOID)
{
    return STATUS_SUCCESS;
}


PPEB
STDCALL
RtlGetCurrentPeb(VOID)
{
    return ((PEPROCESS)(KeGetCurrentThread()->ApcState.Process))->Peb;
}

NTSTATUS
STDCALL
RtlDeleteHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}

NTSTATUS
STDCALL
RtlEnterHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}

NTSTATUS
STDCALL
RtlInitializeHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}

NTSTATUS
STDCALL
RtlLeaveHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}

#ifdef DBG
VOID FASTCALL
CHECK_PAGED_CODE_RTL(char *file, int line)
{
    if (KeGetCurrentIrql() > APC_LEVEL)
    {
        DbgPrint("%s:%i: Pageable code called at IRQL > APC_LEVEL (%d)\n", file, line, KeGetCurrentIrql());
        ASSERT(FALSE);
    }
}
#endif

VOID
NTAPI
RtlpCheckLogException(IN PEXCEPTION_RECORD ExceptionRecord,
                      IN PCONTEXT ContextRecord,
                      IN PVOID ContextData,
                      IN ULONG Size)
{
    /* Check the global flag */
    if (NtGlobalFlag & FLG_ENABLE_EXCEPTION_LOGGING)
    {
        /* FIXME: Log this exception */
    }
}

BOOLEAN
NTAPI
RtlpHandleDpcStackException(IN PEXCEPTION_REGISTRATION_RECORD RegistrationFrame,
                            IN ULONG_PTR RegistrationFrameEnd,
                            IN OUT PULONG_PTR StackLow,
                            IN OUT PULONG_PTR StackHigh)
{
    PKPRCB Prcb;
    ULONG_PTR DpcStack;

    /* Check if we are at DISPATCH or higher */
    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        /* Get the PRCB and DPC Stack */
        Prcb = KeGetCurrentPrcb();
        DpcStack = (ULONG_PTR)Prcb->DpcStack;

        /* Check if we are in a DPC and the stack matches */
        if ((Prcb->DpcRoutineActive) &&
            (RegistrationFrameEnd <= DpcStack) &&
            ((ULONG_PTR)RegistrationFrame >= DpcStack - KERNEL_STACK_SIZE))
        {
            /* Update the limits to the DPC Stack's */
            *StackHigh = DpcStack;
            *StackLow = DpcStack - KERNEL_STACK_SIZE;
            return TRUE;
        }
    }

    /* Not in DPC stack */
    return FALSE;
}
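
#if 0
/*
 * Illustrative sketch (not built): how an exception dispatcher could fall back
 * to RtlpHandleDpcStackException when a registration frame lies outside the
 * thread's normal stack limits. RtlpExampleValidateFrame and its parameters
 * are hypothetical; the real caller is the SEH dispatch code.
 */
static BOOLEAN
RtlpExampleValidateFrame(IN PEXCEPTION_REGISTRATION_RECORD RegistrationFrame,
                         IN OUT PULONG_PTR StackLow,
                         IN OUT PULONG_PTR StackHigh)
{
    ULONG_PTR RegistrationFrameEnd;

    /* The registration record must fit entirely within the stack limits */
    RegistrationFrameEnd = (ULONG_PTR)RegistrationFrame +
                           sizeof(EXCEPTION_REGISTRATION_RECORD);
    if (((ULONG_PTR)RegistrationFrame >= *StackLow) &&
        (RegistrationFrameEnd <= *StackHigh))
    {
        return TRUE;
    }

    /* Out of bounds: perhaps we are dispatching on the DPC stack instead */
    return RtlpHandleDpcStackException(RegistrationFrame,
                                       RegistrationFrameEnd,
                                       StackLow,
                                       StackHigh);
}
#endif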

#if !defined(_ARM_) && !defined(_AMD64_)

BOOLEAN
NTAPI
RtlpCaptureStackLimits(IN ULONG_PTR Ebp,
                       IN ULONG_PTR *StackBegin,
                       IN ULONG_PTR *StackEnd)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Don't even try at ISR level or later */
    if (KeGetCurrentIrql() > DISPATCH_LEVEL) return FALSE;

    /* Start with defaults */
    *StackBegin = Thread->StackLimit;
    *StackEnd = (ULONG_PTR)Thread->StackBase;

    /* Check if EBP is inside the stack */
    if ((*StackBegin <= Ebp) && (Ebp <= *StackEnd))
    {
        /* Then make the stack start at EBP */
        *StackBegin = Ebp;
    }
    else
    {
        /* Now we're going to assume we're on the DPC stack */
        *StackEnd = (ULONG_PTR)(KeGetPcr()->Prcb->DpcStack);
        *StackBegin = *StackEnd - KERNEL_STACK_SIZE;

        /* Check if we seem to be on the DPC stack */
        if ((*StackEnd) && (*StackBegin < Ebp) && (Ebp <= *StackEnd))
        {
            /* We're on the DPC stack */
            *StackBegin = Ebp;
        }
        else
        {
            /* We're somewhere else entirely... use EBP for safety */
            *StackBegin = Ebp;
            *StackEnd = (ULONG_PTR)PAGE_ALIGN(*StackBegin);
        }
    }

    /* Return success */
    return TRUE;
}

/*
 * @implemented
 */
ULONG
NTAPI
RtlWalkFrameChain(OUT PVOID *Callers,
                  IN ULONG Count,
                  IN ULONG Flags)
{
    ULONG_PTR Stack, NewStack, StackBegin, StackEnd = 0;
    ULONG Eip;
    BOOLEAN Result, StopSearch = FALSE;
    ULONG i = 0;
    PKTHREAD Thread = KeGetCurrentThread();
    PTEB Teb;
    PKTRAP_FRAME TrapFrame;

    /* Get current EBP */
#if defined(_M_IX86)
#if defined __GNUC__
    __asm__("mov %%ebp, %0" : "=r" (Stack) : );
#elif defined(_MSC_VER)
    __asm mov Stack, ebp
#endif
#elif defined(_M_MIPS)
    __asm__("move %0, $sp" : "=r" (Stack) : );
#elif defined(_M_PPC)
    __asm__("mr %0,1" : "=r" (Stack) : );
#elif defined(_M_ARM)
    __asm__("mov %0, sp" : "=r"(Stack) : );
#else
#error Unknown architecture
#endif

    /* Set it as the stack begin limit as well */
    StackBegin = (ULONG_PTR)Stack;

    /* Check if we're called for non-logging mode */
    if (!Flags)
    {
        /* Get the actual safe limits */
        Result = RtlpCaptureStackLimits((ULONG_PTR)Stack,
                                        &StackBegin,
                                        &StackEnd);
        if (!Result) return 0;
    }

    /* Use a SEH block for maximum protection */
    _SEH_TRY
    {
        /* Check if we want the user-mode stack frame */
        if (Flags == 1)
        {
            /* Get the trap frame and TEB */
            TrapFrame = Thread->TrapFrame;
            Teb = Thread->Teb;

            /* Make sure we can trust the TEB and trap frame */
            if (!(Teb) ||
                !((PVOID)((ULONG_PTR)TrapFrame & 0x80000000)) ||
                ((PVOID)TrapFrame <= (PVOID)Thread->StackLimit) ||
                ((PVOID)TrapFrame >= (PVOID)Thread->StackBase) ||
                (KeIsAttachedProcess()) ||
                (KeGetCurrentIrql() >= DISPATCH_LEVEL))
            {
                /* Invalid or unsafe attempt to get the stack */
                return 0;
            }

            /* Get the stack limits */
            StackBegin = (ULONG_PTR)Teb->Tib.StackLimit;
            StackEnd = (ULONG_PTR)Teb->Tib.StackBase;
#ifdef _M_IX86
            Stack = TrapFrame->Ebp;
#elif defined(_M_PPC)
            Stack = TrapFrame->Gpr1;
#endif

            /* Validate them */
            if (StackEnd <= StackBegin) return 0;
            ProbeForRead((PVOID)StackBegin,
                         StackEnd - StackBegin,
                         sizeof(CHAR));
        }

        /* Loop the frames */
        for (i = 0; i < Count; i++)
        {
            /*
             * Leave if we're past the stack,
             * if we're before the stack,
             * or if we've reached ourselves.
             */
            if ((Stack >= StackEnd) ||
                (!i ? (Stack < StackBegin) : (Stack <= StackBegin)) ||
                ((StackEnd - Stack) < (2 * sizeof(ULONG_PTR))))
            {
                /* We're done or hit a bad address */
                break;
            }

            /* Get new stack and EIP */
            NewStack = *(PULONG_PTR)Stack;
            Eip = *(PULONG_PTR)(Stack + sizeof(ULONG_PTR));

            /* Check that the new frame pointer is above the old one and still below the stack end */
            if (!((Stack < NewStack) && (NewStack < StackEnd)))
            {
                /* Stop searching after this entry */
                StopSearch = TRUE;
            }

            /* Also make sure that the EIP isn't a stack address */
            if ((StackBegin < Eip) && (Eip < StackEnd)) break;

            /* Check if we reached a user-mode address */
            if (!(Flags) && !(Eip & 0x80000000)) break;

            /* Save this frame */
            Callers[i] = (PVOID)Eip;

            /* Check if we should continue */
            if (StopSearch)
            {
                /* Return the next index */
                i++;
                break;
            }

            /* Move to the next stack */
            Stack = NewStack;
        }
    }
    _SEH_HANDLE
    {
        /* No index */
        i = 0;
    }
    _SEH_END;

    /* Return frames parsed */
    return i;
}

#endif
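
#if 0
/*
 * Illustrative sketch (not built): capturing a small kernel-mode backtrace
 * with RtlWalkFrameChain. Flags == 0 walks the current kernel stack; the
 * return value is the number of Callers slots that were filled in.
 * RtlpExampleDumpBacktrace is a hypothetical name.
 */
static VOID
RtlpExampleDumpBacktrace(VOID)
{
    PVOID Callers[8];
    ULONG Count, i;

    Count = RtlWalkFrameChain(Callers, sizeof(Callers) / sizeof(Callers[0]), 0);
    for (i = 0; i < Count; i++)
    {
        DbgPrint("Frame %lu: %p\n", i, Callers[i]);
    }
}
#endif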

#ifdef _AMD64_
VOID
NTAPI
RtlpGetStackLimits(
    OUT PULONG_PTR LowLimit,
    OUT PULONG_PTR HighLimit)
{
    PKTHREAD CurrentThread = KeGetCurrentThread();
    *HighLimit = (ULONG_PTR)CurrentThread->InitialStack;
    *LowLimit = (ULONG_PTR)CurrentThread->StackLimit;
}
#endif

/* RTL Atom Tables ************************************************************/

NTSTATUS
RtlpInitAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
    ExInitializeFastMutex(&AtomTable->FastMutex);

    return STATUS_SUCCESS;
}


VOID
RtlpDestroyAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
}


BOOLEAN
RtlpLockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExAcquireFastMutex(&AtomTable->FastMutex);
    return TRUE;
}

VOID
RtlpUnlockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExReleaseFastMutex(&AtomTable->FastMutex);
}

BOOLEAN
RtlpCreateAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    AtomTable->ExHandleTable = ExCreateHandleTable(NULL);
    return (AtomTable->ExHandleTable != NULL);
}

VOID
RtlpDestroyAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    if (AtomTable->ExHandleTable)
    {
        ExSweepHandleTable(AtomTable->ExHandleTable,
                           NULL,
                           NULL);
        ExDestroyHandleTable(AtomTable->ExHandleTable, NULL);
        AtomTable->ExHandleTable = NULL;
    }
}

PRTL_ATOM_TABLE
RtlpAllocAtomTable(ULONG Size)
{
    PRTL_ATOM_TABLE Table = ExAllocatePool(NonPagedPool,
                                           Size);
    if (Table != NULL)
    {
        RtlZeroMemory(Table,
                      Size);
    }

    return Table;
}

VOID
RtlpFreeAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExFreePool(AtomTable);
}

PRTL_ATOM_TABLE_ENTRY
RtlpAllocAtomTableEntry(ULONG Size)
{
    PRTL_ATOM_TABLE_ENTRY Entry;

    Entry = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_ATMT);
    if (Entry != NULL)
    {
        RtlZeroMemory(Entry, Size);
    }

    return Entry;
}

VOID
RtlpFreeAtomTableEntry(PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExFreePoolWithTag(Entry, TAG_ATMT);
}

VOID
RtlpFreeAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExDestroyHandle(AtomTable->ExHandleTable,
                    (HANDLE)((ULONG_PTR)Entry->HandleIndex << 2),
                    NULL);
}

BOOLEAN
RtlpCreateAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
    HANDLE_TABLE_ENTRY ExEntry;
    HANDLE Handle;
    USHORT HandleIndex;

    ExEntry.Object = Entry;
    ExEntry.GrantedAccess = 0x1; /* FIXME - valid handle */

    Handle = ExCreateHandle(AtomTable->ExHandleTable,
                            &ExEntry);
    if (Handle != NULL)
    {
        HandleIndex = (USHORT)((ULONG_PTR)Handle >> 2);
        /* FIXME - Handle Indexes >= 0xC000 ?! */
        if (HandleIndex < 0xC000)
        {
            Entry->HandleIndex = HandleIndex;
            Entry->Atom = 0xC000 + HandleIndex;

            return TRUE;
        }
        else
            ExDestroyHandle(AtomTable->ExHandleTable,
                            Handle,
                            NULL);
    }

    return FALSE;
}
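
#if 0
/*
 * Illustrative sketch (not built): the mapping used above. An atom handed out
 * by this table is 0xC000 plus the executive handle index, and the executive
 * handle itself is that index shifted left by two bits. These hypothetical
 * helpers simply restate the conversions done by RtlpCreateAtomHandle,
 * RtlpFreeAtomHandle and RtlpGetAtomEntry.
 */
static USHORT
RtlpExampleAtomToHandleIndex(IN RTL_ATOM Atom)
{
    ASSERT(Atom >= 0xC000);
    return (USHORT)(Atom - 0xC000);
}

static HANDLE
RtlpExampleHandleIndexToExHandle(IN USHORT HandleIndex)
{
    return (HANDLE)((ULONG_PTR)HandleIndex << 2);
}
#endif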

PRTL_ATOM_TABLE_ENTRY
RtlpGetAtomEntry(PRTL_ATOM_TABLE AtomTable, ULONG Index)
{
    PHANDLE_TABLE_ENTRY ExEntry;
    PRTL_ATOM_TABLE_ENTRY Entry = NULL;

    /* NOTE: There's no need to explicitly enter a critical region because it's
       guaranteed that we're in a critical region right now (as we hold
       the atom table lock) */

    ExEntry = ExMapHandleToPointer(AtomTable->ExHandleTable,
                                   (HANDLE)((ULONG_PTR)Index << 2));
    if (ExEntry != NULL)
    {
        Entry = ExEntry->Object;

        ExUnlockHandleTableEntry(AtomTable->ExHandleTable,
                                 ExEntry);
    }

    return Entry;
}

/*
 * Ldr Resource support code
 */

IMAGE_RESOURCE_DIRECTORY *find_entry_by_name( IMAGE_RESOURCE_DIRECTORY *dir,
                                              LPCWSTR name, void *root,
                                              int want_dir );
IMAGE_RESOURCE_DIRECTORY *find_entry_by_id( IMAGE_RESOURCE_DIRECTORY *dir,
                                            USHORT id, void *root, int want_dir );
IMAGE_RESOURCE_DIRECTORY *find_first_entry( IMAGE_RESOURCE_DIRECTORY *dir,
                                            void *root, int want_dir );

/**********************************************************************
 *  find_entry
 *
 * Find a resource entry
 */
NTSTATUS find_entry( PVOID BaseAddress, LDR_RESOURCE_INFO *info,
                     ULONG level, void **ret, int want_dir )
{
    ULONG size;
    void *root;
    IMAGE_RESOURCE_DIRECTORY *resdirptr;

    root = RtlImageDirectoryEntryToData( BaseAddress, TRUE, IMAGE_DIRECTORY_ENTRY_RESOURCE, &size );
    if (!root) return STATUS_RESOURCE_DATA_NOT_FOUND;
    resdirptr = root;

    if (!level--) goto done;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Type, root, want_dir || level )))
        return STATUS_RESOURCE_TYPE_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;

    resdirptr = *ret;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Name, root, want_dir || level )))
        return STATUS_RESOURCE_NAME_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;
    if (level) return STATUS_INVALID_PARAMETER;  /* level > 3 */

    resdirptr = *ret;

    if ((*ret = find_first_entry( resdirptr, root, want_dir ))) return STATUS_SUCCESS;

    return STATUS_RESOURCE_DATA_NOT_FOUND;

done:
    *ret = resdirptr;
    return STATUS_SUCCESS;
}
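
#if 0
/*
 * Illustrative sketch (not built): a full three-level lookup through
 * find_entry. The type/name/language values are made-up examples; real
 * callers such as LdrFindResource_U fill in LDR_RESOURCE_INFO the same way.
 * With level == 3 the walk descends type -> name -> language and *DataEntry
 * receives the IMAGE_RESOURCE_DATA_ENTRY of the first language entry found
 * (this implementation does not filter on Info.Language).
 */
static NTSTATUS
LdrpExampleFindResource(IN PVOID BaseAddress,
                        OUT PIMAGE_RESOURCE_DATA_ENTRY *DataEntry)
{
    LDR_RESOURCE_INFO Info;

    Info.Type = 16;        /* e.g. RT_VERSION */
    Info.Name = 1;         /* resource ID 1 */
    Info.Language = 0x409; /* en-US */

    return find_entry(BaseAddress, &Info, 3, (void **)DataEntry, FALSE);
}
#endif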


/* EOF */