Merge trunk head (r43756)
[reactos.git] / reactos / ntoskrnl / rtl / libsupp.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/rtl/libsupp.c
5 * PURPOSE: RTL Support Routines
6 * PROGRAMMERS: Alex Ionescu (alex@relsoft.net)
7 * Gunnar Dalsnes
8 */
9
10 /* INCLUDES ******************************************************************/
11
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15
16 #define TAG_ATMT 'TotA' /* Atom table */
17
18 extern ULONG NtGlobalFlag;
19
20 typedef struct _RTL_RANGE_ENTRY
21 {
22 LIST_ENTRY Entry;
23 RTL_RANGE Range;
24 } RTL_RANGE_ENTRY, *PRTL_RANGE_ENTRY;
25
26 PAGED_LOOKASIDE_LIST RtlpRangeListEntryLookasideList;
27 SIZE_T RtlpAllocDeallocQueryBufferSize = 128;
28
29 /* FUNCTIONS *****************************************************************/
30
31 PVOID
32 NTAPI
33 RtlpLookupModuleBase(
34 PVOID Address)
35 {
36 PLDR_DATA_TABLE_ENTRY LdrEntry;
37 BOOLEAN InSystem;
38 PVOID p;
39
40 /* Get the base for this file */
41 if ((ULONG_PTR)Address > (ULONG_PTR)MmHighestUserAddress)
42 {
43 /* We are in kernel */
44 p = KiPcToFileHeader(Address, &LdrEntry, FALSE, &InSystem);
45 }
46 else
47 {
48 /* We are in user land */
49 p = KiRosPcToUserFileHeader(Address, &LdrEntry);
50 }
51
52 return p;
53 }
54
/* One-time initialization of the RTL range-list package. */
VOID
NTAPI
RtlInitializeRangeListPackage(VOID)
{
    /* Setup the paged lookaside list for RTL_RANGE_ENTRY allocations:
       tag 'RRle', depth 16, cold-allocation hint. No code in this file
       draws from the list yet. */
    ExInitializePagedLookasideList(&RtlpRangeListEntryLookasideList,
                                   NULL,
                                   NULL,
                                   POOL_COLD_ALLOCATION,
                                   sizeof(RTL_RANGE_ENTRY),
                                   'elRR',
                                   16);
}
68
/* RTL hook asking whether a debugger is attached to the current process. */
BOOLEAN
NTAPI
RtlpCheckForActiveDebugger(VOID)
{
    /* This check is meaningless in kernel-mode, so always report FALSE */
    return FALSE;
}
76
/* RTL hook toggling the per-thread "inside DbgPrint" flag. */
BOOLEAN
NTAPI
RtlpSetInDbgPrint(IN BOOLEAN NewValue)
{
    /* This check is meaningless in kernel-mode; report "not set" */
    return FALSE;
}
84
85 KPROCESSOR_MODE
86 NTAPI
87 RtlpGetMode()
88 {
89 return KernelMode;
90 }
91
92 PVOID
93 NTAPI
94 RtlpAllocateMemory(ULONG Bytes,
95 ULONG Tag)
96 {
97 return ExAllocatePoolWithTag(PagedPool,
98 (SIZE_T)Bytes,
99 Tag);
100 }
101
102
103 #define TAG_USTR 'RTSU'
104 #define TAG_ASTR 'RTSA'
105 #define TAG_OSTR 'RTSO'
106 VOID
107 NTAPI
108 RtlpFreeMemory(PVOID Mem,
109 ULONG Tag)
110 {
111 if (Tag == TAG_ASTR || Tag == TAG_OSTR || Tag == TAG_USTR)
112 ExFreePool(Mem);
113 else
114 ExFreePoolWithTag(Mem, Tag);
115 }
116
/*
 * @implemented
 *
 * In kernel mode there is no PEB lock to take, so this is a no-op.
 */
VOID NTAPI
RtlAcquirePebLock(VOID)
{

}
125
/*
 * @implemented
 *
 * Counterpart of RtlAcquirePebLock; equally a no-op in kernel mode.
 */
VOID NTAPI
RtlReleasePebLock(VOID)
{

}
134
/* Loader per-thread shutdown callout: nothing to tear down in
   kernel mode, so always succeed. */
NTSTATUS
NTAPI
LdrShutdownThread(VOID)
{
    return STATUS_SUCCESS;
}
141
142
143 PPEB
144 NTAPI
145 RtlGetCurrentPeb(VOID)
146 {
147 return ((PEPROCESS)(KeGetCurrentThread()->ApcState.Process))->Peb;
148 }
149
/* Heap lock deletion stub: the kernel-mode RTL has no heap locks,
   so reaching this asserts in checked builds. */
NTSTATUS
NTAPI
RtlDeleteHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}
158
/* Heap lock acquisition stub: the kernel-mode RTL has no heap locks,
   so reaching this asserts in checked builds. */
NTSTATUS
NTAPI
RtlEnterHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}
167
/* Heap lock initialization stub: the kernel-mode RTL has no heap locks,
   so reaching this asserts in checked builds. */
NTSTATUS
NTAPI
RtlInitializeHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}
176
/* Heap lock release stub: the kernel-mode RTL has no heap locks,
   so reaching this asserts in checked builds. */
NTSTATUS
NTAPI
RtlLeaveHeapLock(
    PRTL_CRITICAL_SECTION CriticalSection)
{
    ASSERT(FALSE);
    return STATUS_SUCCESS;
}
185
#if DBG
/* Checked-build helper: complain (print + assert) if pageable RTL code
   is entered at an IRQL above APC_LEVEL, where page faults are fatal. */
VOID FASTCALL
CHECK_PAGED_CODE_RTL(char *file, int line)
{
    KIRQL CurrentIrql = KeGetCurrentIrql();

    if (CurrentIrql > APC_LEVEL)
    {
        DbgPrint("%s:%i: Pagable code called at IRQL > APC_LEVEL (%d)\n", file, line, KeGetCurrentIrql());
        ASSERT(FALSE);
    }
}
#endif
197
198 VOID
199 NTAPI
200 RtlpCheckLogException(IN PEXCEPTION_RECORD ExceptionRecord,
201 IN PCONTEXT ContextRecord,
202 IN PVOID ContextData,
203 IN ULONG Size)
204 {
205 /* Check the global flag */
206 if (NtGlobalFlag & FLG_ENABLE_EXCEPTION_LOGGING)
207 {
208 /* FIXME: Log this exception */
209 }
210 }
211
212 BOOLEAN
213 NTAPI
214 RtlpHandleDpcStackException(IN PEXCEPTION_REGISTRATION_RECORD RegistrationFrame,
215 IN ULONG_PTR RegistrationFrameEnd,
216 IN OUT PULONG_PTR StackLow,
217 IN OUT PULONG_PTR StackHigh)
218 {
219 PKPRCB Prcb;
220 ULONG_PTR DpcStack;
221
222 /* Check if we are at DISPATCH or higher */
223 if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
224 {
225 /* Get the PRCB and DPC Stack */
226 Prcb = KeGetCurrentPrcb();
227 DpcStack = (ULONG_PTR)Prcb->DpcStack;
228
229 /* Check if we are in a DPC and the stack matches */
230 if ((Prcb->DpcRoutineActive) &&
231 (RegistrationFrameEnd <= DpcStack) &&
232 ((ULONG_PTR)RegistrationFrame >= DpcStack - KERNEL_STACK_SIZE))
233 {
234 /* Update the limits to the DPC Stack's */
235 *StackHigh = DpcStack;
236 *StackLow = DpcStack - KERNEL_STACK_SIZE;
237 return TRUE;
238 }
239 }
240
241 /* Not in DPC stack */
242 return FALSE;
243 }
244
245 #if !defined(_ARM_) && !defined(_AMD64_)
246
247 BOOLEAN
248 NTAPI
249 RtlpCaptureStackLimits(IN ULONG_PTR Ebp,
250 IN ULONG_PTR *StackBegin,
251 IN ULONG_PTR *StackEnd)
252 {
253 PKTHREAD Thread = KeGetCurrentThread();
254
255 /* Don't even try at ISR level or later */
256 if (KeGetCurrentIrql() > DISPATCH_LEVEL) return FALSE;
257
258 /* Start with defaults */
259 *StackBegin = Thread->StackLimit;
260 *StackEnd = (ULONG_PTR)Thread->StackBase;
261
262 /* Check if EBP is inside the stack */
263 if ((*StackBegin <= Ebp) && (Ebp <= *StackEnd))
264 {
265 /* Then make the stack start at EBP */
266 *StackBegin = Ebp;
267 }
268 else
269 {
270 /* Now we're going to assume we're on the DPC stack */
271 *StackEnd = (ULONG_PTR)(KeGetPcr()->Prcb->DpcStack);
272 *StackBegin = *StackEnd - KERNEL_STACK_SIZE;
273
274 /* Check if we seem to be on the DPC stack */
275 if ((*StackEnd) && (*StackBegin < Ebp) && (Ebp <= *StackEnd))
276 {
277 /* We're on the DPC stack */
278 *StackBegin = Ebp;
279 }
280 else
281 {
282 /* We're somewhere else entirely... use EBP for safety */
283 *StackBegin = Ebp;
284 *StackEnd = (ULONG_PTR)PAGE_ALIGN(*StackBegin);
285 }
286 }
287
288 /* Return success */
289 return TRUE;
290 }
291
/*
 * @implemented
 *
 * Captures up to Count caller return addresses into Callers[] by walking
 * the saved-EBP frame chain. Flags == 0 walks the current kernel stack
 * within limits computed by RtlpCaptureStackLimits; Flags == 1 walks the
 * current thread's trapped user-mode stack instead. Returns the number
 * of frames captured, or 0 on failure.
 */
ULONG
NTAPI
RtlWalkFrameChain(OUT PVOID *Callers,
                  IN ULONG Count,
                  IN ULONG Flags)
{
    ULONG_PTR Stack, NewStack, StackBegin, StackEnd = 0;
    ULONG Eip;
    BOOLEAN Result, StopSearch = FALSE;
    ULONG i = 0;
    PKTHREAD Thread = KeGetCurrentThread();
    PTEB Teb;
    PKTRAP_FRAME TrapFrame;

    /* Get current EBP (frame/stack pointer, per architecture) */
#if defined(_M_IX86)
#if defined __GNUC__
    __asm__("mov %%ebp, %0" : "=r" (Stack) : );
#elif defined(_MSC_VER)
    __asm mov Stack, ebp
#endif
#elif defined(_M_MIPS)
    __asm__("move $sp, %0" : "=r" (Stack) : );
#elif defined(_M_PPC)
    __asm__("mr %0,1" : "=r" (Stack) : );
#elif defined(_M_ARM)
    __asm__("mov sp, %0" : "=r"(Stack) : );
#else
#error Unknown architecture
#endif

    /* Set it as the stack begin limit as well */
    StackBegin = (ULONG_PTR)Stack;

    /* Check if we're called for non-logging mode */
    if (!Flags)
    {
        /* Get the actual safe limits */
        Result = RtlpCaptureStackLimits((ULONG_PTR)Stack,
                                        &StackBegin,
                                        &StackEnd);
        if (!Result) return 0;
    }

    /* Use a SEH block for maximum protection */
    _SEH2_TRY
    {
        /* Check if we want the user-mode stack frame */
        if (Flags == 1)
        {
            /* Get the trap frame and TEB */
            TrapFrame = Thread->TrapFrame;
            Teb = Thread->Teb;

            /* Make sure we can trust the TEB and trap frame */
            if (!(Teb) ||
                !((PVOID)((ULONG_PTR)TrapFrame & 0x80000000)) ||
                ((PVOID)TrapFrame <= (PVOID)Thread->StackLimit) ||
                ((PVOID)TrapFrame >= (PVOID)Thread->StackBase) ||
                (KeIsAttachedProcess()) ||
                (KeGetCurrentIrql() >= DISPATCH_LEVEL))
            {
                /* Invalid or unsafe attempt to get the stack.
                   NOTE(review): plain `return` inside _SEH2_TRY -- PSEH
                   normally requires _SEH2_YIELD(return ...) to unwind
                   correctly; confirm against the PSEH version in use. */
                return 0;
            }

            /* Get the stack limits */
            StackBegin = (ULONG_PTR)Teb->Tib.StackLimit;
            StackEnd = (ULONG_PTR)Teb->Tib.StackBase;
#ifdef _M_IX86
            Stack = TrapFrame->Ebp;
#elif defined(_M_PPC)
            Stack = TrapFrame->Gpr1;
#else
#error Unknown architecture
#endif

            /* Validate them, then make the whole range readable */
            if (StackEnd <= StackBegin) return 0;
            ProbeForRead((PVOID)StackBegin,
                         StackEnd - StackBegin,
                         sizeof(CHAR));
        }

        /* Loop the frames */
        for (i = 0; i < Count; i++)
        {
            /*
             * Leave if we're past the stack,
             * if we're before the stack,
             * or if there is no room left for a [saved EBP][EIP] pair.
             * The first iteration may sit exactly at StackBegin.
             */
            if ((Stack >= StackEnd) ||
                (!i ? (Stack < StackBegin) : (Stack <= StackBegin)) ||
                ((StackEnd - Stack) < (2 * sizeof(ULONG_PTR))))
            {
                /* We're done or hit a bad address */
                break;
            }

            /* Get new stack and EIP: a frame is [saved EBP][return EIP] */
            NewStack = *(PULONG_PTR)Stack;
            Eip = *(PULONG_PTR)(Stack + sizeof(ULONG_PTR));

            /* Check if the new pointer is above the old one and past the end */
            if (!((Stack < NewStack) && (NewStack < StackEnd)))
            {
                /* Stop searching after this entry */
                StopSearch = TRUE;
            }

            /* Also make sure that the EIP isn't a stack address */
            if ((StackBegin < Eip) && (Eip < StackEnd)) break;

            /* Check if we reached a user-mode address (kernel walk only) */
            if (!(Flags) && !(Eip & 0x80000000)) break;

            /* Save this frame */
            Callers[i] = (PVOID)Eip;

            /* Check if we should continue */
            if (StopSearch)
            {
                /* Return the next index */
                i++;
                break;
            }

            /* Move to the next stack */
            Stack = NewStack;
        }
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* Faulted mid-walk: report no frames at all */
        i = 0;
    }
    _SEH2_END;

    /* Return frames parsed */
    return i;
}
437
438 #endif
439
#ifdef _AMD64_
/* Report the current thread's kernel stack bounds (AMD64 build only). */
VOID
NTAPI
RtlpGetStackLimits(
    OUT PULONG_PTR LowLimit,
    OUT PULONG_PTR HighLimit)
{
    PKTHREAD Thread = KeGetCurrentThread();

    *LowLimit = (ULONG_PTR)Thread->StackLimit;
    *HighLimit = (ULONG_PTR)Thread->InitialStack;
}
#endif
452
453 /* RTL Atom Tables ************************************************************/
454
/* Initialize the fast mutex guarding AtomTable; cannot fail. */
NTSTATUS
RtlpInitAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
    ExInitializeFastMutex(&AtomTable->FastMutex);

    return STATUS_SUCCESS;
}
462
463
/* Fast mutexes need no teardown, so destroying the table lock is a no-op. */
VOID
RtlpDestroyAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
}
468
469
/* Acquire the atom table's fast mutex. Always returns TRUE here;
   the BOOLEAN return presumably exists for interface parity with
   other RTL atom-table back-ends. */
BOOLEAN
RtlpLockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExAcquireFastMutex(&AtomTable->FastMutex);
    return TRUE;
}
476
/* Release the atom table's fast mutex taken by RtlpLockAtomTable. */
VOID
RtlpUnlockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExReleaseFastMutex(&AtomTable->FastMutex);
}
482
/* Create the executive handle table that backs handle-indexed atom
   entries. Returns FALSE if the table could not be created. */
BOOLEAN
RtlpCreateAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    AtomTable->ExHandleTable = ExCreateHandleTable(NULL);
    return (AtomTable->ExHandleTable != NULL);
}
489
490 VOID
491 RtlpDestroyAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
492 {
493 if (AtomTable->ExHandleTable)
494 {
495 ExSweepHandleTable(AtomTable->ExHandleTable,
496 NULL,
497 NULL);
498 ExDestroyHandleTable(AtomTable->ExHandleTable, NULL);
499 AtomTable->ExHandleTable = NULL;
500 }
501 }
502
503 PRTL_ATOM_TABLE
504 RtlpAllocAtomTable(ULONG Size)
505 {
506 PRTL_ATOM_TABLE Table = ExAllocatePool(NonPagedPool,
507 Size);
508 if (Table != NULL)
509 {
510 RtlZeroMemory(Table,
511 Size);
512 }
513
514 return Table;
515 }
516
/* Release an atom table allocated by RtlpAllocAtomTable. */
VOID
RtlpFreeAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExFreePool(AtomTable);
}
522
523 PRTL_ATOM_TABLE_ENTRY
524 RtlpAllocAtomTableEntry(ULONG Size)
525 {
526 PRTL_ATOM_TABLE_ENTRY Entry;
527
528 Entry = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_ATMT);
529 if (Entry != NULL)
530 {
531 RtlZeroMemory(Entry, Size);
532 }
533
534 return Entry;
535 }
536
/* Release an entry allocated by RtlpAllocAtomTableEntry (tag TAG_ATMT). */
VOID
RtlpFreeAtomTableEntry(PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExFreePoolWithTag(Entry, TAG_ATMT);
}
542
543 VOID
544 RtlpFreeAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
545 {
546 ExDestroyHandle(AtomTable->ExHandleTable,
547 (HANDLE)((ULONG_PTR)Entry->HandleIndex << 2),
548 NULL);
549 }
550
551 BOOLEAN
552 RtlpCreateAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
553 {
554 HANDLE_TABLE_ENTRY ExEntry;
555 HANDLE Handle;
556 USHORT HandleIndex;
557
558 /* Initialize ex handle table entry */
559 ExEntry.Object = Entry;
560 ExEntry.GrantedAccess = 0x1; /* FIXME - valid handle */
561
562 /* Create ex handle */
563 Handle = ExCreateHandle(AtomTable->ExHandleTable,
564 &ExEntry);
565 if (!Handle) return FALSE;
566
567 /* Calculate HandleIndex (by getting rid of the first two bits) */
568 HandleIndex = (USHORT)((ULONG_PTR)Handle >> 2);
569
570 /* Index must be less than 0xC000 */
571 if (HandleIndex >= 0xC000)
572 {
573 /* Destroy ex handle */
574 ExDestroyHandle(AtomTable->ExHandleTable,
575 Handle,
576 NULL);
577
578 /* Return failure */
579 return FALSE;
580 }
581
582 /* Initialize atom table entry */
583 Entry->HandleIndex = HandleIndex;
584 Entry->Atom = 0xC000 + HandleIndex;
585
586 /* Return success */
587 return TRUE;
588 }
589
590 PRTL_ATOM_TABLE_ENTRY
591 RtlpGetAtomEntry(PRTL_ATOM_TABLE AtomTable, ULONG Index)
592 {
593 PHANDLE_TABLE_ENTRY ExEntry;
594 PRTL_ATOM_TABLE_ENTRY Entry = NULL;
595
596 /* NOTE: There's no need to explicitly enter a critical region because it's
597 guaranteed that we're in a critical region right now (as we hold
598 the atom table lock) */
599
600 ExEntry = ExMapHandleToPointer(AtomTable->ExHandleTable,
601 (HANDLE)((ULONG_PTR)Index << 2));
602 if (ExEntry != NULL)
603 {
604 Entry = ExEntry->Object;
605
606 ExUnlockHandleTableEntry(AtomTable->ExHandleTable,
607 ExEntry);
608 }
609
610 return Entry;
611 }
612
613 /*
614 * Ldr Resource support code
615 */
616
617 IMAGE_RESOURCE_DIRECTORY *find_entry_by_name( IMAGE_RESOURCE_DIRECTORY *dir,
618 LPCWSTR name, void *root,
619 int want_dir );
620 IMAGE_RESOURCE_DIRECTORY *find_entry_by_id( IMAGE_RESOURCE_DIRECTORY *dir,
621 USHORT id, void *root, int want_dir );
622 IMAGE_RESOURCE_DIRECTORY *find_first_entry( IMAGE_RESOURCE_DIRECTORY *dir,
623 void *root, int want_dir );
624
/**********************************************************************
 *  find_entry
 *
 * Find a resource entry.
 *
 * Walks the PE resource directory of the image at BaseAddress down up
 * to three levels (type -> name -> language), as selected by 'level'.
 * 'want_dir' requests a directory rather than a data entry at the final
 * level. On success *ret points at the located directory/entry and
 * STATUS_SUCCESS is returned; otherwise a RESOURCE_*_NOT_FOUND status
 * (or STATUS_INVALID_PARAMETER for level > 3) is returned.
 */
NTSTATUS find_entry( PVOID BaseAddress, LDR_RESOURCE_INFO *info,
                     ULONG level, void **ret, int want_dir )
{
    ULONG size;
    void *root;
    IMAGE_RESOURCE_DIRECTORY *resdirptr;

    /* Locate the resource directory inside the mapped image */
    root = RtlImageDirectoryEntryToData( BaseAddress, TRUE, IMAGE_DIRECTORY_ENTRY_RESOURCE, &size );
    if (!root) return STATUS_RESOURCE_DATA_NOT_FOUND;
    resdirptr = root;

    /* level == 0: the caller wants the root directory itself */
    if (!level--) goto done;
    /* Level 1: match the resource type */
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Type, root, want_dir || level )))
        return STATUS_RESOURCE_TYPE_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;

    /* Level 2: match the resource name */
    resdirptr = *ret;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Name, root, want_dir || level )))
        return STATUS_RESOURCE_NAME_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;
    if (level) return STATUS_INVALID_PARAMETER; /* level > 3 */

    /* Level 3: take the first (language) entry found */
    resdirptr = *ret;

    if ((*ret = find_first_entry( resdirptr, root, want_dir ))) return STATUS_SUCCESS;

    return STATUS_RESOURCE_DATA_NOT_FOUND;

done:
    *ret = resdirptr;
    return STATUS_SUCCESS;
}
662
663
664 /* EOF */