/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/rtl/libsupp.c
 * PURPOSE:         RTL Support Routines
 * PROGRAMMERS:     Alex Ionescu (alex@relsoft.net)
 *                  Gunnar Dalsnes
 */

/* INCLUDES ******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define TAG_ATMT 'TotA' /* Atom table */

extern ULONG NtGlobalFlag;

typedef struct _RTL_RANGE_ENTRY
{
    LIST_ENTRY Entry;
    RTL_RANGE Range;
} RTL_RANGE_ENTRY, *PRTL_RANGE_ENTRY;

PAGED_LOOKASIDE_LIST RtlpRangeListEntryLookasideList;
SIZE_T RtlpAllocDeallocQueryBufferSize = 128;

/* FUNCTIONS *****************************************************************/

PVOID
NTAPI
RtlPcToFileHeader(
    IN PVOID PcValue,
    OUT PVOID *BaseOfImage)
{
    PLDR_DATA_TABLE_ENTRY LdrEntry;
    BOOLEAN InSystem;

    /* Get the base for this file */
    if ((ULONG_PTR)PcValue > (ULONG_PTR)MmHighestUserAddress)
    {
        /* We are in kernel */
        *BaseOfImage = KiPcToFileHeader(PcValue, &LdrEntry, FALSE, &InSystem);
    }
    else
    {
        /* We are in user land */
        *BaseOfImage = KiRosPcToUserFileHeader(PcValue, &LdrEntry);
    }

    return *BaseOfImage;
}
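/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * might resolve a return address to the image that contains it. The helper
 * name ExamplePrintCallerImage and the use of the _ReturnAddress() intrinsic
 * are assumptions made for the example.
 */
#if 0
static VOID ExamplePrintCallerImage(VOID)
{
    PVOID ImageBase;

    /* ImageBase receives NULL when no matching image is found */
    if (RtlPcToFileHeader(_ReturnAddress(), &ImageBase))
    {
        DPRINT1("Caller belongs to the image based at %p\n", ImageBase);
    }
}
#endif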

VOID
NTAPI
RtlInitializeRangeListPackage(VOID)
{
    /* Setup the lookaside list for allocations (not used yet) */
    ExInitializePagedLookasideList(&RtlpRangeListEntryLookasideList,
                                   NULL,
                                   NULL,
                                   POOL_COLD_ALLOCATION,
                                   sizeof(RTL_RANGE_ENTRY),
                                   'elRR',
                                   16);
}

BOOLEAN
NTAPI
RtlpCheckForActiveDebugger(VOID)
{
    /* This check is meaningless in kernel-mode */
    return FALSE;
}

BOOLEAN
NTAPI
RtlpSetInDbgPrint(VOID)
{
    /* Nothing to set in kernel mode */
    return FALSE;
}

VOID
NTAPI
RtlpClearInDbgPrint(VOID)
{
    /* Nothing to clear in kernel mode */
}

KPROCESSOR_MODE
NTAPI
RtlpGetMode(VOID)
{
    return KernelMode;
}

PVOID
NTAPI
RtlpAllocateMemory(ULONG Bytes,
                   ULONG Tag)
{
    return ExAllocatePoolWithTag(PagedPool,
                                 (SIZE_T)Bytes,
                                 Tag);
}


#define TAG_USTR 'RTSU'
#define TAG_ASTR 'RTSA'
#define TAG_OSTR 'RTSO'
VOID
NTAPI
RtlpFreeMemory(PVOID Mem,
               ULONG Tag)
{
    if (Tag == TAG_ASTR || Tag == TAG_OSTR || Tag == TAG_USTR)
        ExFreePool(Mem);
    else
        ExFreePoolWithTag(Mem, Tag);
}
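/*
 * Editorial note (assumption): the ANSI/OEM/Unicode string tags are freed
 * with plain ExFreePool rather than ExFreePoolWithTag, presumably because
 * buffers handed to RtlFreeAnsiString/RtlFreeUnicodeString may not have been
 * allocated with the tag the RTL expects, and an untagged free avoids a
 * pool-tag mismatch.
 */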

/*
 * @implemented
 */
VOID NTAPI
RtlAcquirePebLock(VOID)
{

}

/*
 * @implemented
 */
VOID NTAPI
RtlReleasePebLock(VOID)
{

}

NTSTATUS
NTAPI
LdrShutdownThread(VOID)
{
    return STATUS_SUCCESS;
}


PPEB
NTAPI
RtlGetCurrentPeb(VOID)
{
    return ((PEPROCESS)(KeGetCurrentThread()->ApcState.Process))->Peb;
}

NTSTATUS
NTAPI
RtlDeleteHeapLock(IN OUT PHEAP_LOCK Lock)
{
    ExDeleteResourceLite(&Lock->Resource);
    ExFreePool(Lock);

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
RtlEnterHeapLock(IN OUT PHEAP_LOCK Lock, IN BOOLEAN Exclusive)
{
    KeEnterCriticalRegion();

    if (Exclusive)
        ExAcquireResourceExclusiveLite(&Lock->Resource, TRUE);
    else
        ExAcquireResourceSharedLite(&Lock->Resource, TRUE);

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
RtlInitializeHeapLock(IN OUT PHEAP_LOCK *Lock)
{
    PHEAP_LOCK HeapLock = ExAllocatePool(NonPagedPool, sizeof(HEAP_LOCK));
    if (HeapLock == NULL)
        return STATUS_NO_MEMORY;

    ExInitializeResourceLite(&HeapLock->Resource);
    *Lock = HeapLock;

    return STATUS_SUCCESS;
}

NTSTATUS
NTAPI
RtlLeaveHeapLock(IN OUT PHEAP_LOCK Lock)
{
    ExReleaseResourceLite(&Lock->Resource);
    KeLeaveCriticalRegion();

    return STATUS_SUCCESS;
}
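/*
 * Illustrative sketch (not part of the original file): the expected pairing
 * of the heap lock routines above. ExampleHeapLockUsage is a hypothetical
 * caller written for this example.
 */
#if 0
static VOID ExampleHeapLockUsage(VOID)
{
    PHEAP_LOCK Lock;

    /* Allocate and initialize the lock */
    if (!NT_SUCCESS(RtlInitializeHeapLock(&Lock))) return;

    /* Acquire exclusively; this also enters a critical region */
    RtlEnterHeapLock(Lock, TRUE);

    /* ... heap structures would be touched here ... */

    /* Drop the resource and leave the critical region */
    RtlLeaveHeapLock(Lock);

    /* Tear the lock down again */
    RtlDeleteHeapLock(Lock);
}
#endif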

struct _HEAP;

VOID
NTAPI
RtlpAddHeapToProcessList(struct _HEAP *Heap)
{
    UNREFERENCED_PARAMETER(Heap);
}

VOID
NTAPI
RtlpRemoveHeapFromProcessList(struct _HEAP *Heap)
{
    UNREFERENCED_PARAMETER(Heap);
}

VOID
RtlInitializeHeapManager(VOID)
{
}

#if DBG
VOID FASTCALL
CHECK_PAGED_CODE_RTL(char *file, int line)
{
    if (KeGetCurrentIrql() > APC_LEVEL)
    {
        DbgPrint("%s:%i: Pageable code called at IRQL > APC_LEVEL (%d)\n", file, line, KeGetCurrentIrql());
        ASSERT(FALSE);
    }
}
#endif

VOID
NTAPI
RtlpCheckLogException(IN PEXCEPTION_RECORD ExceptionRecord,
                      IN PCONTEXT ContextRecord,
                      IN PVOID ContextData,
                      IN ULONG Size)
{
    /* Check the global flag */
    if (NtGlobalFlag & FLG_ENABLE_EXCEPTION_LOGGING)
    {
        /* FIXME: Log this exception */
    }
}

BOOLEAN
NTAPI
RtlpHandleDpcStackException(IN PEXCEPTION_REGISTRATION_RECORD RegistrationFrame,
                            IN ULONG_PTR RegistrationFrameEnd,
                            IN OUT PULONG_PTR StackLow,
                            IN OUT PULONG_PTR StackHigh)
{
    PKPRCB Prcb;
    ULONG_PTR DpcStack;

    /* Check if we are at DISPATCH or higher */
    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        /* Get the PRCB and DPC Stack */
        Prcb = KeGetCurrentPrcb();
        DpcStack = (ULONG_PTR)Prcb->DpcStack;

        /* Check if we are in a DPC and the stack matches */
        if ((Prcb->DpcRoutineActive) &&
            (RegistrationFrameEnd <= DpcStack) &&
            ((ULONG_PTR)RegistrationFrame >= DpcStack - KERNEL_STACK_SIZE))
        {
            /* Update the limits to the DPC Stack's */
            *StackHigh = DpcStack;
            *StackLow = DpcStack - KERNEL_STACK_SIZE;
            return TRUE;
        }
    }

    /* Not in DPC stack */
    return FALSE;
}
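/*
 * Editorial note (assumption based on the checks above): Prcb->DpcStack holds
 * the high end of the per-processor DPC stack, so a registration frame is
 * treated as living on it when it falls inside
 * [DpcStack - KERNEL_STACK_SIZE, DpcStack].
 */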

#if !defined(_ARM_) && !defined(_AMD64_)

BOOLEAN
NTAPI
RtlpCaptureStackLimits(IN ULONG_PTR Ebp,
                       IN ULONG_PTR *StackBegin,
                       IN ULONG_PTR *StackEnd)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Don't even try at ISR level or later */
    if (KeGetCurrentIrql() > DISPATCH_LEVEL) return FALSE;

    /* Start with defaults */
    *StackBegin = Thread->StackLimit;
    *StackEnd = (ULONG_PTR)Thread->StackBase;

    /* Check if EBP is inside the stack */
    if ((*StackBegin <= Ebp) && (Ebp <= *StackEnd))
    {
        /* Then make the stack start at EBP */
        *StackBegin = Ebp;
    }
    else
    {
        /* Now we're going to assume we're on the DPC stack */
        *StackEnd = (ULONG_PTR)(KeGetPcr()->Prcb->DpcStack);
        *StackBegin = *StackEnd - KERNEL_STACK_SIZE;

        /* Check if we seem to be on the DPC stack */
        if ((*StackEnd) && (*StackBegin < Ebp) && (Ebp <= *StackEnd))
        {
            /* We're on the DPC stack */
            *StackBegin = Ebp;
        }
        else
        {
            /* We're somewhere else entirely... use EBP for safety */
            *StackBegin = Ebp;
            *StackEnd = (ULONG_PTR)PAGE_ALIGN(*StackBegin);
        }
    }

    /* Return success */
    return TRUE;
}

/*
 * @implemented
 */
ULONG
NTAPI
RtlWalkFrameChain(OUT PVOID *Callers,
                  IN ULONG Count,
                  IN ULONG Flags)
{
    ULONG_PTR Stack, NewStack, StackBegin, StackEnd = 0;
    ULONG Eip;
    BOOLEAN Result, StopSearch = FALSE;
    ULONG i = 0;
    PETHREAD Thread = PsGetCurrentThread();
    PTEB Teb;
    PKTRAP_FRAME TrapFrame;

    /* Get current EBP */
#if defined(_M_IX86)
#if defined __GNUC__
    __asm__("mov %%ebp, %0" : "=r" (Stack) : );
#elif defined(_MSC_VER)
    __asm mov Stack, ebp
#endif
#elif defined(_M_MIPS)
    __asm__("move %0, $sp" : "=r" (Stack) : );
#elif defined(_M_PPC)
    __asm__("mr %0,1" : "=r" (Stack) : );
#elif defined(_M_ARM)
    __asm__("mov %0, sp" : "=r"(Stack) : );
#else
#error Unknown architecture
#endif

    /* Set it as the stack begin limit as well */
    StackBegin = (ULONG_PTR)Stack;

    /* Check if we're called for non-logging mode */
    if (!Flags)
    {
        /* Get the actual safe limits */
        Result = RtlpCaptureStackLimits((ULONG_PTR)Stack,
                                        &StackBegin,
                                        &StackEnd);
        if (!Result) return 0;
    }

    /* Use a SEH block for maximum protection */
    _SEH2_TRY
    {
        /* Check if we want the user-mode stack frame */
        if (Flags == 1)
        {
            /* Get the trap frame and TEB */
            TrapFrame = KeGetTrapFrame(&Thread->Tcb);
            Teb = Thread->Tcb.Teb;

            /* Make sure we can trust the TEB and trap frame */
            if (!(Teb) ||
                (KeIsAttachedProcess()) ||
                (KeGetCurrentIrql() >= DISPATCH_LEVEL))
            {
                /* Invalid or unsafe attempt to get the stack */
                _SEH2_YIELD(return 0;)
            }

            /* Get the stack limits */
            StackBegin = (ULONG_PTR)Teb->NtTib.StackLimit;
            StackEnd = (ULONG_PTR)Teb->NtTib.StackBase;
#ifdef _M_IX86
            Stack = TrapFrame->Ebp;
#elif defined(_M_PPC)
            Stack = TrapFrame->Gpr1;
#else
#error Unknown architecture
#endif

            /* Validate them */
            if (StackEnd <= StackBegin) _SEH2_YIELD(return 0);
            ProbeForRead((PVOID)StackBegin,
                         StackEnd - StackBegin,
                         sizeof(CHAR));
        }

        /* Loop the frames */
        for (i = 0; i < Count; i++)
        {
            /*
             * Leave if we're past the stack,
             * if we're before the stack,
             * or if there's no room left for a full frame.
             */
            if ((Stack >= StackEnd) ||
                (!i ? (Stack < StackBegin) : (Stack <= StackBegin)) ||
                ((StackEnd - Stack) < (2 * sizeof(ULONG_PTR))))
            {
                /* We're done or hit a bad address */
                break;
            }

            /* Get new stack and EIP */
            NewStack = *(PULONG_PTR)Stack;
            Eip = *(PULONG_PTR)(Stack + sizeof(ULONG_PTR));

            /* Check if the new pointer is above the old one and below the stack end */
            if (!((Stack < NewStack) && (NewStack < StackEnd)))
            {
                /* Stop searching after this entry */
                StopSearch = TRUE;
            }

            /* Also make sure that the EIP isn't a stack address */
            if ((StackBegin < Eip) && (Eip < StackEnd)) break;

            /* Check if we reached a user-mode address */
            if (!(Flags) && !(Eip & 0x80000000)) break; // FIXME: 3GB breakage

            /* Save this frame */
            Callers[i] = (PVOID)Eip;

            /* Check if we should continue */
            if (StopSearch)
            {
                /* Return the next index */
                i++;
                break;
            }

            /* Move to the next stack */
            Stack = NewStack;
        }
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* No index */
        i = 0;
    }
    _SEH2_END;

    /* Return frames parsed */
    return i;
}
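/*
 * Illustrative usage sketch (not part of the original file): capturing the
 * current kernel-mode call chain. The helper name ExampleDumpCallers and the
 * buffer size are assumptions made for the example; Flags = 0 requests the
 * kernel-mode walk shown above.
 */
#if 0
static VOID ExampleDumpCallers(VOID)
{
    PVOID Callers[8];
    ULONG Captured, i;

    /* Walk up to 8 frames of the current stack */
    Captured = RtlWalkFrameChain(Callers, 8, 0);

    /* Print whatever was captured */
    for (i = 0; i < Captured; i++)
    {
        DPRINT1("Frame %lu: %p\n", i, Callers[i]);
    }
}
#endif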

#endif

#ifdef _AMD64_
VOID
NTAPI
RtlpGetStackLimits(
    OUT PULONG_PTR LowLimit,
    OUT PULONG_PTR HighLimit)
{
    PKTHREAD CurrentThread = KeGetCurrentThread();
    *HighLimit = (ULONG_PTR)CurrentThread->InitialStack;
    *LowLimit = (ULONG_PTR)CurrentThread->StackLimit;
}
#endif

/* RTL Atom Tables ************************************************************/

NTSTATUS
RtlpInitAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
    ExInitializeFastMutex(&AtomTable->FastMutex);

    return STATUS_SUCCESS;
}


VOID
RtlpDestroyAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
}


BOOLEAN
RtlpLockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExAcquireFastMutex(&AtomTable->FastMutex);
    return TRUE;
}

VOID
RtlpUnlockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExReleaseFastMutex(&AtomTable->FastMutex);
}

BOOLEAN
RtlpCreateAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    AtomTable->ExHandleTable = ExCreateHandleTable(NULL);
    return (AtomTable->ExHandleTable != NULL);
}

VOID
RtlpDestroyAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    if (AtomTable->ExHandleTable)
    {
        ExSweepHandleTable(AtomTable->ExHandleTable,
                           NULL,
                           NULL);
        ExDestroyHandleTable(AtomTable->ExHandleTable, NULL);
        AtomTable->ExHandleTable = NULL;
    }
}

PRTL_ATOM_TABLE
RtlpAllocAtomTable(ULONG Size)
{
    PRTL_ATOM_TABLE Table = ExAllocatePool(NonPagedPool,
                                           Size);
    if (Table != NULL)
    {
        RtlZeroMemory(Table,
                      Size);
    }

    return Table;
}

VOID
RtlpFreeAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExFreePool(AtomTable);
}

PRTL_ATOM_TABLE_ENTRY
RtlpAllocAtomTableEntry(ULONG Size)
{
    PRTL_ATOM_TABLE_ENTRY Entry;

    Entry = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_ATMT);
    if (Entry != NULL)
    {
        RtlZeroMemory(Entry, Size);
    }

    return Entry;
}

VOID
RtlpFreeAtomTableEntry(PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExFreePoolWithTag(Entry, TAG_ATMT);
}

VOID
RtlpFreeAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExDestroyHandle(AtomTable->ExHandleTable,
                    (HANDLE)((ULONG_PTR)Entry->HandleIndex << 2),
                    NULL);
}

BOOLEAN
RtlpCreateAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
    HANDLE_TABLE_ENTRY ExEntry;
    HANDLE Handle;
    USHORT HandleIndex;

    /* Initialize ex handle table entry */
    ExEntry.Object = Entry;
    ExEntry.GrantedAccess = 0x1; /* FIXME - valid handle */

    /* Create ex handle */
    Handle = ExCreateHandle(AtomTable->ExHandleTable,
                            &ExEntry);
    if (!Handle) return FALSE;

    /* Calculate HandleIndex (by getting rid of the first two bits) */
    HandleIndex = (USHORT)((ULONG_PTR)Handle >> 2);

    /* Index must be less than 0xC000 */
    if (HandleIndex >= 0xC000)
    {
        /* Destroy ex handle */
        ExDestroyHandle(AtomTable->ExHandleTable,
                        Handle,
                        NULL);

        /* Return failure */
        return FALSE;
    }

    /* Initialize atom table entry */
    Entry->HandleIndex = HandleIndex;
    Entry->Atom = 0xC000 + HandleIndex;

    /* Return success */
    return TRUE;
}
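/*
 * Worked example (editorial): executive handles are multiples of 4, so the
 * low two bits are discarded to form the index. A handle of 0x10 gives
 * HandleIndex 0x4 and therefore Atom 0xC004; keeping the index below 0xC000
 * keeps the resulting atom inside the 0xC000-0xFFFF range used for string
 * atoms. RtlpFreeAtomHandle below reverses the mapping by shifting the index
 * back up by two bits.
 */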

PRTL_ATOM_TABLE_ENTRY
RtlpGetAtomEntry(PRTL_ATOM_TABLE AtomTable, ULONG Index)
{
    PHANDLE_TABLE_ENTRY ExEntry;
    PRTL_ATOM_TABLE_ENTRY Entry = NULL;

    /* NOTE: There's no need to explicitly enter a critical region because it's
       guaranteed that we're in a critical region right now (as we hold
       the atom table lock) */

    ExEntry = ExMapHandleToPointer(AtomTable->ExHandleTable,
                                   (HANDLE)((ULONG_PTR)Index << 2));
    if (ExEntry != NULL)
    {
        Entry = ExEntry->Object;

        ExUnlockHandleTableEntry(AtomTable->ExHandleTable,
                                 ExEntry);
    }

    return Entry;
}

/*
 * Ldr Resource support code
 */

IMAGE_RESOURCE_DIRECTORY *find_entry_by_name( IMAGE_RESOURCE_DIRECTORY *dir,
                                              LPCWSTR name, void *root,
                                              int want_dir );
IMAGE_RESOURCE_DIRECTORY *find_entry_by_id( IMAGE_RESOURCE_DIRECTORY *dir,
                                            USHORT id, void *root, int want_dir );
IMAGE_RESOURCE_DIRECTORY *find_first_entry( IMAGE_RESOURCE_DIRECTORY *dir,
                                            void *root, int want_dir );

/**********************************************************************
 *  find_entry
 *
 *  Find a resource entry
 */
NTSTATUS find_entry( PVOID BaseAddress, LDR_RESOURCE_INFO *info,
                     ULONG level, void **ret, int want_dir )
{
    ULONG size;
    void *root;
    IMAGE_RESOURCE_DIRECTORY *resdirptr;

    root = RtlImageDirectoryEntryToData( BaseAddress, TRUE, IMAGE_DIRECTORY_ENTRY_RESOURCE, &size );
    if (!root) return STATUS_RESOURCE_DATA_NOT_FOUND;
    resdirptr = root;

    if (!level--) goto done;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Type, root, want_dir || level )))
        return STATUS_RESOURCE_TYPE_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;

    resdirptr = *ret;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Name, root, want_dir || level )))
        return STATUS_RESOURCE_NAME_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;
    if (level) return STATUS_INVALID_PARAMETER; /* level > 3 */

    resdirptr = *ret;

    if ((*ret = find_first_entry( resdirptr, root, want_dir ))) return STATUS_SUCCESS;

    return STATUS_RESOURCE_DATA_NOT_FOUND;

done:
    *ret = resdirptr;
    return STATUS_SUCCESS;
}
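/*
 * Editorial note: the level parameter selects how deep the walk goes through
 * the three-level resource directory tree. level == 0 returns the root
 * resource directory, level == 1 resolves info->Type, level == 2 additionally
 * resolves info->Name, and level == 3 then takes the first entry of the
 * resulting (language) directory; info->Language itself is not consulted
 * here, the first available entry is used instead.
 */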

NTSTATUS
NTAPI
RtlpSafeCopyMemory(
    _Out_writes_bytes_all_(Length) VOID UNALIGNED *Destination,
    _In_reads_bytes_(Length) CONST VOID UNALIGNED *Source,
    _In_ SIZE_T Length)
{
    _SEH2_TRY
    {
        RtlCopyMemory(Destination, Source, Length);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        _SEH2_YIELD(return _SEH2_GetExceptionCode());
    }
    _SEH2_END;

    return STATUS_SUCCESS;
}
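/*
 * Illustrative usage sketch (not part of the original file): copying from a
 * possibly invalid source while turning any access fault into a status code.
 * The helper name ExampleSafeRead is an assumption made for the example.
 */
#if 0
static NTSTATUS ExampleSafeRead(PVOID Buffer, PVOID UntrustedSource, SIZE_T Size)
{
    /* On an access fault this returns the exception code instead of crashing */
    return RtlpSafeCopyMemory(Buffer, UntrustedSource, Size);
}
#endif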

BOOLEAN
NTAPI
RtlCallVectoredExceptionHandlers(
    _In_ PEXCEPTION_RECORD ExceptionRecord,
    _In_ PCONTEXT Context)
{
    /* In the kernel we don't have vectored exception handlers */
    return FALSE;
}

/* EOF */