/* Sync with trunk (48237) */
/* reactos.git / ntoskrnl / rtl / libsupp.c */
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/rtl/libsupp.c
5 * PURPOSE: RTL Support Routines
6 * PROGRAMMERS: Alex Ionescu (alex@relsoft.net)
7 * Gunnar Dalsnes
8 */
9
10 /* INCLUDES ******************************************************************/
11
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15
16 #define TAG_ATMT 'TotA' /* Atom table */
17
18 extern ULONG NtGlobalFlag;
19
/* Internal node used by the RTL range-list code: a list link plus the
   public RTL_RANGE payload it carries */
typedef struct _RTL_RANGE_ENTRY
{
    LIST_ENTRY Entry;       /* Links the entry into a range list */
    RTL_RANGE Range;        /* The actual range data exposed to callers */
} RTL_RANGE_ENTRY, *PRTL_RANGE_ENTRY;

/* Lookaside list backing RTL_RANGE_ENTRY allocations; initialized in
   RtlInitializeRangeListPackage (not used yet, per that routine) */
PAGED_LOOKASIDE_LIST RtlpRangeListEntryLookasideList;

/* Initial scratch-buffer size for the RTL alloc/dealloc/query helpers
   (NOTE(review): exact consumer not visible in this file — confirm) */
SIZE_T RtlpAllocDeallocQueryBufferSize = 128;
28
29 /* FUNCTIONS *****************************************************************/
30
31 PVOID
32 NTAPI
33 RtlPcToFileHeader(
34 IN PVOID PcValue,
35 OUT PVOID *BaseOfImage)
36 {
37 PLDR_DATA_TABLE_ENTRY LdrEntry;
38 BOOLEAN InSystem;
39
40 /* Get the base for this file */
41 if ((ULONG_PTR)PcValue > (ULONG_PTR)MmHighestUserAddress)
42 {
43 /* We are in kernel */
44 *BaseOfImage = KiPcToFileHeader(PcValue, &LdrEntry, FALSE, &InSystem);
45 }
46 else
47 {
48 /* We are in user land */
49 *BaseOfImage = KiRosPcToUserFileHeader(PcValue, &LdrEntry);
50 }
51
52 return *BaseOfImage;
53 }
54
/*
 * One-time initialization for the RTL range-list package: sets up the
 * paged lookaside list from which RTL_RANGE_ENTRY nodes are meant to be
 * allocated. The list itself is not used by any code in this file yet.
 */
VOID
NTAPI
RtlInitializeRangeListPackage(VOID)
{
    /* Setup the lookaside list for allocations (not used yet).
       POOL_COLD_ALLOCATION: this path is not expected to be hot. */
    ExInitializePagedLookasideList(&RtlpRangeListEntryLookasideList,
                                   NULL,
                                   NULL,
                                   POOL_COLD_ALLOCATION,
                                   sizeof(RTL_RANGE_ENTRY),
                                   'elRR',
                                   16);
}
68
/*
 * Kernel-mode stub of the RTL "is a user-mode debugger attached?" probe.
 * Always FALSE: the concept of an attached process debugger does not
 * apply to code running in the kernel itself.
 */
BOOLEAN
NTAPI
RtlpCheckForActiveDebugger(VOID)
{
    /* This check is meaningless in kernel-mode */
    return FALSE;
}
76
/*
 * Kernel-mode stub: user-mode RTL uses this to set a per-thread
 * "inside DbgPrint" flag (to avoid recursion); there is no such
 * per-thread TEB flag to set in kernel mode.
 */
BOOLEAN
NTAPI
RtlpSetInDbgPrint(VOID)
{
    /* Nothing to set in kernel mode */
    return FALSE;
}
84
/*
 * Kernel-mode stub: counterpart of RtlpSetInDbgPrint; no per-thread
 * flag exists in kernel mode, so there is nothing to clear.
 */
VOID
NTAPI
RtlpClearInDbgPrint(VOID)
{
    /* Nothing to clear in kernel mode */
}
91
92 KPROCESSOR_MODE
93 NTAPI
94 RtlpGetMode()
95 {
96 return KernelMode;
97 }
98
99 PVOID
100 NTAPI
101 RtlpAllocateMemory(ULONG Bytes,
102 ULONG Tag)
103 {
104 return ExAllocatePoolWithTag(PagedPool,
105 (SIZE_T)Bytes,
106 Tag);
107 }
108
109
110 #define TAG_USTR 'RTSU'
111 #define TAG_ASTR 'RTSA'
112 #define TAG_OSTR 'RTSO'
113 VOID
114 NTAPI
115 RtlpFreeMemory(PVOID Mem,
116 ULONG Tag)
117 {
118 if (Tag == TAG_ASTR || Tag == TAG_OSTR || Tag == TAG_USTR)
119 ExFreePool(Mem);
120 else
121 ExFreePoolWithTag(Mem, Tag);
122 }
123
124 /*
125 * @implemented
126 */
127 VOID NTAPI
128 RtlAcquirePebLock(VOID)
129 {
130
131 }
132
133 /*
134 * @implemented
135 */
136 VOID NTAPI
137 RtlReleasePebLock(VOID)
138 {
139
140 }
141
/*
 * Kernel-mode stub of the loader's per-thread shutdown notification
 * (DLL_THREAD_DETACH processing in user mode). Nothing to do here;
 * always succeeds.
 */
NTSTATUS
NTAPI
LdrShutdownThread(VOID)
{
    return STATUS_SUCCESS;
}
148
149
/*
 * Returns the PEB of the process the current thread is attached to,
 * by following the thread's ApcState.Process pointer (so this honors
 * KeStackAttachProcess attachment). May be NULL for system processes.
 */
PPEB
NTAPI
RtlGetCurrentPeb(VOID)
{
    return ((PEPROCESS)(KeGetCurrentThread()->ApcState.Process))->Peb;
}
156
/*
 * Heap-lock hook: the RTL heap is not used in kernel mode, so this
 * must never be reached (hence the ASSERT on checked builds).
 */
NTSTATUS
NTAPI
RtlDeleteHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}
165
/*
 * Heap-lock hook: the RTL heap is not used in kernel mode, so this
 * must never be reached (hence the ASSERT on checked builds).
 */
NTSTATUS
NTAPI
RtlEnterHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}
174
/*
 * Heap-lock hook: the RTL heap is not used in kernel mode, so this
 * must never be reached (hence the ASSERT on checked builds).
 */
NTSTATUS
NTAPI
RtlInitializeHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}
183
/*
 * Heap-lock hook: the RTL heap is not used in kernel mode, so this
 * must never be reached (hence the ASSERT on checked builds).
 */
NTSTATUS
NTAPI
RtlLeaveHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}
192
#if DBG
/*
 * Debug-build helper behind the RTL paged-code check: complains and
 * asserts if pageable RTL code is entered at an IRQL above APC_LEVEL,
 * where page faults cannot be serviced.
 *
 * file/line - call-site location, supplied by the invoking macro.
 */
VOID FASTCALL
CHECK_PAGED_CODE_RTL(char *file, int line)
{
    if(KeGetCurrentIrql() > APC_LEVEL)
    {
        DbgPrint("%s:%i: Pagable code called at IRQL > APC_LEVEL (%d)\n", file, line, KeGetCurrentIrql());
        ASSERT(FALSE);
    }
}
#endif
204
/*
 * Exception-logging hook called during exception dispatch. When
 * FLG_ENABLE_EXCEPTION_LOGGING is set in NtGlobalFlag the exception
 * should be recorded; the actual logging is not implemented yet.
 *
 * ExceptionRecord/ContextRecord - the exception being dispatched.
 * ContextData/Size              - extra dispatcher context to log.
 */
VOID
NTAPI
RtlpCheckLogException(IN PEXCEPTION_RECORD ExceptionRecord,
                      IN PCONTEXT ContextRecord,
                      IN PVOID ContextData,
                      IN ULONG Size)
{
    /* Check the global flag */
    if (NtGlobalFlag & FLG_ENABLE_EXCEPTION_LOGGING)
    {
        /* FIXME: Log this exception */
    }
}
218
/*
 * Helps exception dispatch validate SEH registration frames that live
 * on the per-processor DPC stack rather than the thread stack. If the
 * current processor is running a DPC and the registration frame falls
 * inside the DPC stack's range, the caller's stack limits are rewritten
 * to the DPC stack's bounds.
 *
 * RegistrationFrame/RegistrationFrameEnd - frame being validated.
 * StackLow/StackHigh - in/out stack limits; updated only on success.
 *
 * Returns TRUE if the limits were switched to the DPC stack.
 */
BOOLEAN
NTAPI
RtlpHandleDpcStackException(IN PEXCEPTION_REGISTRATION_RECORD RegistrationFrame,
                            IN ULONG_PTR RegistrationFrameEnd,
                            IN OUT PULONG_PTR StackLow,
                            IN OUT PULONG_PTR StackHigh)
{
    PKPRCB Prcb;
    ULONG_PTR DpcStack;

    /* Check if we are at DISPATCH or higher (only then can a DPC be running) */
    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        /* Get the PRCB and DPC Stack */
        Prcb = KeGetCurrentPrcb();
        DpcStack = (ULONG_PTR)Prcb->DpcStack;

        /* Check if we are in a DPC and the frame lies within the DPC
           stack's range (DpcStack is the high end; stacks grow down) */
        if ((Prcb->DpcRoutineActive) &&
            (RegistrationFrameEnd <= DpcStack) &&
            ((ULONG_PTR)RegistrationFrame >= DpcStack - KERNEL_STACK_SIZE))
        {
            /* Update the limits to the DPC Stack's */
            *StackHigh = DpcStack;
            *StackLow = DpcStack - KERNEL_STACK_SIZE;
            return TRUE;
        }
    }

    /* Not in DPC stack */
    return FALSE;
}
251
252 #if !defined(_ARM_) && !defined(_AMD64_)
253
/*
 * Computes best-effort [StackBegin, StackEnd) limits for a stack walk
 * starting at frame pointer Ebp. Tries, in order: the current thread's
 * stack, the processor's DPC stack, and finally a single-page window
 * around Ebp as a last resort.
 *
 * Ebp                  - frame pointer to anchor the walk.
 * StackBegin/StackEnd  - receive the low/high walk limits.
 *
 * Returns FALSE (limits untouched) above DISPATCH_LEVEL, TRUE otherwise.
 */
BOOLEAN
NTAPI
RtlpCaptureStackLimits(IN ULONG_PTR Ebp,
                       IN ULONG_PTR *StackBegin,
                       IN ULONG_PTR *StackEnd)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Don't even try at ISR level or later */
    if (KeGetCurrentIrql() > DISPATCH_LEVEL) return FALSE;

    /* Start with defaults: the current thread's stack bounds */
    *StackBegin = Thread->StackLimit;
    *StackEnd = (ULONG_PTR)Thread->StackBase;

    /* Check if EBP is inside the stack */
    if ((*StackBegin <= Ebp) && (Ebp <= *StackEnd))
    {
        /* Then make the stack start at EBP, skipping frames below it */
        *StackBegin = Ebp;
    }
    else
    {
        /* Now we're going to assume we're on the DPC stack
           (DpcStack is the high end; stacks grow downward) */
        *StackEnd = (ULONG_PTR)(KeGetPcr()->Prcb->DpcStack);
        *StackBegin = *StackEnd - KERNEL_STACK_SIZE;

        /* Check if we seem to be on the DPC stack
           (the NULL check guards against an unset DpcStack pointer) */
        if ((*StackEnd) && (*StackBegin < Ebp) && (Ebp <= *StackEnd))
        {
            /* We're on the DPC stack */
            *StackBegin = Ebp;
        }
        else
        {
            /* We're somewhere else entirely... use EBP for safety:
               walk only within the page containing EBP */
            *StackBegin = Ebp;
            *StackEnd = (ULONG_PTR)PAGE_ALIGN(*StackBegin);
        }
    }

    /* Return success */
    return TRUE;
}
298
299 /*
300 * @implemented
301 */
302 ULONG
303 NTAPI
304 RtlWalkFrameChain(OUT PVOID *Callers,
305 IN ULONG Count,
306 IN ULONG Flags)
307 {
308 ULONG_PTR Stack, NewStack, StackBegin, StackEnd = 0;
309 ULONG Eip;
310 BOOLEAN Result, StopSearch = FALSE;
311 ULONG i = 0;
312 PETHREAD Thread = PsGetCurrentThread();
313 PTEB Teb;
314 PKTRAP_FRAME TrapFrame;
315
316 /* Get current EBP */
317 #if defined(_M_IX86)
318 #if defined __GNUC__
319 __asm__("mov %%ebp, %0" : "=r" (Stack) : );
320 #elif defined(_MSC_VER)
321 __asm mov Stack, ebp
322 #endif
323 #elif defined(_M_MIPS)
324 __asm__("move $sp, %0" : "=r" (Stack) : );
325 #elif defined(_M_PPC)
326 __asm__("mr %0,1" : "=r" (Stack) : );
327 #elif defined(_M_ARM)
328 __asm__("mov sp, %0" : "=r"(Stack) : );
329 #else
330 #error Unknown architecture
331 #endif
332
333 /* Set it as the stack begin limit as well */
334 StackBegin = (ULONG_PTR)Stack;
335
336 /* Check if we're called for non-logging mode */
337 if (!Flags)
338 {
339 /* Get the actual safe limits */
340 Result = RtlpCaptureStackLimits((ULONG_PTR)Stack,
341 &StackBegin,
342 &StackEnd);
343 if (!Result) return 0;
344 }
345
346 /* Use a SEH block for maximum protection */
347 _SEH2_TRY
348 {
349 /* Check if we want the user-mode stack frame */
350 if (Flags == 1)
351 {
352 /* Get the trap frame and TEB */
353 TrapFrame = KeGetTrapFrame(&Thread->Tcb);
354 Teb = Thread->Tcb.Teb;
355
356 /* Make sure we can trust the TEB and trap frame */
357 if (!(Teb) ||
358 (KeIsAttachedProcess()) ||
359 (KeGetCurrentIrql() >= DISPATCH_LEVEL))
360 {
361 /* Invalid or unsafe attempt to get the stack */
362 _SEH2_YIELD(return 0;)
363 }
364
365 /* Get the stack limits */
366 StackBegin = (ULONG_PTR)Teb->NtTib.StackLimit;
367 StackEnd = (ULONG_PTR)Teb->NtTib.StackBase;
368 #ifdef _M_IX86
369 Stack = TrapFrame->Ebp;
370 #elif defined(_M_PPC)
371 Stack = TrapFrame->Gpr1;
372 #else
373 #error Unknown architecture
374 #endif
375
376 /* Validate them */
377 if (StackEnd <= StackBegin) return 0;
378 ProbeForRead((PVOID)StackBegin,
379 StackEnd - StackBegin,
380 sizeof(CHAR));
381 }
382
383 /* Loop the frames */
384 for (i = 0; i < Count; i++)
385 {
386 /*
387 * Leave if we're past the stack,
388 * if we're before the stack,
389 * or if we've reached ourselves.
390 */
391 if ((Stack >= StackEnd) ||
392 (!i ? (Stack < StackBegin) : (Stack <= StackBegin)) ||
393 ((StackEnd - Stack) < (2 * sizeof(ULONG_PTR))))
394 {
395 /* We're done or hit a bad address */
396 break;
397 }
398
399 /* Get new stack and EIP */
400 NewStack = *(PULONG_PTR)Stack;
401 Eip = *(PULONG_PTR)(Stack + sizeof(ULONG_PTR));
402
403 /* Check if the new pointer is above the oldone and past the end */
404 if (!((Stack < NewStack) && (NewStack < StackEnd)))
405 {
406 /* Stop searching after this entry */
407 StopSearch = TRUE;
408 }
409
410 /* Also make sure that the EIP isn't a stack address */
411 if ((StackBegin < Eip) && (Eip < StackEnd)) break;
412
413 /* Check if we reached a user-mode address */
414 if (!(Flags) && !(Eip & 0x80000000)) break; // FIXME: 3GB breakage
415
416 /* Save this frame */
417 Callers[i] = (PVOID)Eip;
418
419 /* Check if we should continue */
420 if (StopSearch)
421 {
422 /* Return the next index */
423 i++;
424 break;
425 }
426
427 /* Move to the next stack */
428 Stack = NewStack;
429 }
430 }
431 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
432 {
433 /* No index */
434 i = 0;
435 }
436 _SEH2_END;
437
438 /* Return frames parsed */
439 return i;
440 }
441
442 #endif
443
#ifdef _AMD64_
/*
 * AMD64 helper: reports the current thread's kernel stack bounds.
 *
 * LowLimit  - receives the stack limit (lowest valid address).
 * HighLimit - receives the initial stack (highest address).
 */
VOID
NTAPI
RtlpGetStackLimits(
    OUT PULONG_PTR LowLimit,
    OUT PULONG_PTR HighLimit)
{
    PKTHREAD CurrentThread = KeGetCurrentThread();
    *HighLimit = (ULONG_PTR)CurrentThread->InitialStack;
    *LowLimit = (ULONG_PTR)CurrentThread->StackLimit;
}
#endif
456
457 /* RTL Atom Tables ************************************************************/
458
/*
 * Atom-table OS hook: initializes the table's lock. The kernel build
 * uses a fast mutex embedded in the table. Always succeeds.
 */
NTSTATUS
RtlpInitAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
    ExInitializeFastMutex(&AtomTable->FastMutex);

    return STATUS_SUCCESS;
}
466
467
/*
 * Atom-table OS hook: tears down the table's lock. A fast mutex needs
 * no cleanup, so this is a no-op.
 */
VOID
RtlpDestroyAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
}
472
473
/*
 * Atom-table OS hook: acquires the table's fast mutex (also enters a
 * critical region, see RtlpGetAtomEntry's note). Always succeeds.
 */
BOOLEAN
RtlpLockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExAcquireFastMutex(&AtomTable->FastMutex);
    return TRUE;
}
480
/*
 * Atom-table OS hook: releases the table's fast mutex.
 */
VOID
RtlpUnlockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExReleaseFastMutex(&AtomTable->FastMutex);
}
486
/*
 * Atom-table OS hook: creates the executive handle table used to map
 * atom values to their entries. Returns FALSE on allocation failure.
 */
BOOLEAN
RtlpCreateAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    AtomTable->ExHandleTable = ExCreateHandleTable(NULL);
    return (AtomTable->ExHandleTable != NULL);
}
493
/*
 * Atom-table OS hook: destroys the table's executive handle table, if
 * one was created. Sweeps remaining handles first, then frees the
 * table and clears the pointer so a double call is harmless.
 */
VOID
RtlpDestroyAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    if (AtomTable->ExHandleTable)
    {
        /* Invalidate any handles still present before destroying */
        ExSweepHandleTable(AtomTable->ExHandleTable,
                           NULL,
                           NULL);
        ExDestroyHandleTable(AtomTable->ExHandleTable, NULL);
        AtomTable->ExHandleTable = NULL;
    }
}
506
507 PRTL_ATOM_TABLE
508 RtlpAllocAtomTable(ULONG Size)
509 {
510 PRTL_ATOM_TABLE Table = ExAllocatePool(NonPagedPool,
511 Size);
512 if (Table != NULL)
513 {
514 RtlZeroMemory(Table,
515 Size);
516 }
517
518 return Table;
519 }
520
/*
 * Atom-table OS hook: frees the table allocated by RtlpAllocAtomTable.
 * Uses the untagged free, which releases the allocation regardless of
 * the tag it was made with.
 */
VOID
RtlpFreeAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExFreePool(AtomTable);
}
526
527 PRTL_ATOM_TABLE_ENTRY
528 RtlpAllocAtomTableEntry(ULONG Size)
529 {
530 PRTL_ATOM_TABLE_ENTRY Entry;
531
532 Entry = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_ATMT);
533 if (Entry != NULL)
534 {
535 RtlZeroMemory(Entry, Size);
536 }
537
538 return Entry;
539 }
540
/*
 * Atom-table OS hook: frees an entry allocated by
 * RtlpAllocAtomTableEntry, verifying the pool tag.
 */
VOID
RtlpFreeAtomTableEntry(PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExFreePoolWithTag(Entry, TAG_ATMT);
}
546
/*
 * Atom-table OS hook: destroys the executive handle that maps to the
 * given entry. The stored HandleIndex dropped the low two bits of the
 * handle value (see RtlpCreateAtomHandle), so shift them back in.
 */
VOID
RtlpFreeAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExDestroyHandle(AtomTable->ExHandleTable,
                    (HANDLE)((ULONG_PTR)Entry->HandleIndex << 2),
                    NULL);
}
554
/*
 * Atom-table OS hook: creates an executive handle for a new atom entry
 * and derives the entry's atom value from it. Atom values are
 * 0xC000 + HandleIndex, so the index must stay below 0x4000; this
 * implementation rejects indexes >= 0xC000 (an even stricter bound).
 *
 * Returns TRUE on success; FALSE if handle creation fails or the
 * resulting index is out of range (the handle is destroyed again).
 */
BOOLEAN
RtlpCreateAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
    HANDLE_TABLE_ENTRY ExEntry;
    HANDLE Handle;
    USHORT HandleIndex;

    /* Initialize ex handle table entry */
    ExEntry.Object = Entry;
    ExEntry.GrantedAccess = 0x1; /* FIXME - valid handle */

    /* Create ex handle */
    Handle = ExCreateHandle(AtomTable->ExHandleTable,
                            &ExEntry);
    if (!Handle) return FALSE;

    /* Calculate HandleIndex (by getting rid of the first two bits,
       which executive handles reserve) */
    HandleIndex = (USHORT)((ULONG_PTR)Handle >> 2);

    /* Index must be less than 0xC000 so the atom value fits a USHORT */
    if (HandleIndex >= 0xC000)
    {
        /* Destroy ex handle */
        ExDestroyHandle(AtomTable->ExHandleTable,
                        Handle,
                        NULL);

        /* Return failure */
        return FALSE;
    }

    /* Initialize atom table entry: atoms live in the 0xC000+ range */
    Entry->HandleIndex = HandleIndex;
    Entry->Atom = 0xC000 + HandleIndex;

    /* Return success */
    return TRUE;
}
593
/*
 * Atom-table OS hook: resolves a handle index (as stored in an entry's
 * HandleIndex) back to the atom entry via the executive handle table.
 *
 * Returns the entry, or NULL if the index maps to no live handle.
 * Caller must hold the atom table lock.
 */
PRTL_ATOM_TABLE_ENTRY
RtlpGetAtomEntry(PRTL_ATOM_TABLE AtomTable, ULONG Index)
{
    PHANDLE_TABLE_ENTRY ExEntry;
    PRTL_ATOM_TABLE_ENTRY Entry = NULL;

    /* NOTE: There's no need to explicitly enter a critical region because it's
       guaranteed that we're in a critical region right now (as we hold
       the atom table lock) */

    /* Shift the reserved low two bits back in to form the handle value */
    ExEntry = ExMapHandleToPointer(AtomTable->ExHandleTable,
                                   (HANDLE)((ULONG_PTR)Index << 2));
    if (ExEntry != NULL)
    {
        Entry = ExEntry->Object;

        /* ExMapHandleToPointer returned the entry locked; unlock it */
        ExUnlockHandleTableEntry(AtomTable->ExHandleTable,
                                 ExEntry);
    }

    return Entry;
}
616
617 /*
618 * Ldr Resource support code
619 */
620
621 IMAGE_RESOURCE_DIRECTORY *find_entry_by_name( IMAGE_RESOURCE_DIRECTORY *dir,
622 LPCWSTR name, void *root,
623 int want_dir );
624 IMAGE_RESOURCE_DIRECTORY *find_entry_by_id( IMAGE_RESOURCE_DIRECTORY *dir,
625 USHORT id, void *root, int want_dir );
626 IMAGE_RESOURCE_DIRECTORY *find_first_entry( IMAGE_RESOURCE_DIRECTORY *dir,
627 void *root, int want_dir );
628
629 /**********************************************************************
630 * find_entry
631 *
632 * Find a resource entry
633 */
634 NTSTATUS find_entry( PVOID BaseAddress, LDR_RESOURCE_INFO *info,
635 ULONG level, void **ret, int want_dir )
636 {
637 ULONG size;
638 void *root;
639 IMAGE_RESOURCE_DIRECTORY *resdirptr;
640
641 root = RtlImageDirectoryEntryToData( BaseAddress, TRUE, IMAGE_DIRECTORY_ENTRY_RESOURCE, &size );
642 if (!root) return STATUS_RESOURCE_DATA_NOT_FOUND;
643 resdirptr = root;
644
645 if (!level--) goto done;
646 if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Type, root, want_dir || level )))
647 return STATUS_RESOURCE_TYPE_NOT_FOUND;
648 if (!level--) return STATUS_SUCCESS;
649
650 resdirptr = *ret;
651 if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Name, root, want_dir || level )))
652 return STATUS_RESOURCE_NAME_NOT_FOUND;
653 if (!level--) return STATUS_SUCCESS;
654 if (level) return STATUS_INVALID_PARAMETER; /* level > 3 */
655
656 resdirptr = *ret;
657
658 if ((*ret = find_first_entry( resdirptr, root, want_dir ))) return STATUS_SUCCESS;
659
660 return STATUS_RESOURCE_DATA_NOT_FOUND;
661
662 done:
663 *ret = resdirptr;
664 return STATUS_SUCCESS;
665 }
666
667
668 /* EOF */