[RTL]
[reactos.git] / reactos / ntoskrnl / rtl / libsupp.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/rtl/libsupp.c
5 * PURPOSE: RTL Support Routines
6 * PROGRAMMERS: Alex Ionescu (alex@relsoft.net)
7 * Gunnar Dalsnes
8 */
9
10 /* INCLUDES ******************************************************************/
11
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15
16 #define TAG_ATMT 'TotA' /* Atom table */
17
18 extern ULONG NtGlobalFlag;
19
/* Internal bookkeeping node: links one RTL_RANGE into a kernel range list */
typedef struct _RTL_RANGE_ENTRY
{
    LIST_ENTRY Entry;
    RTL_RANGE Range;
} RTL_RANGE_ENTRY, *PRTL_RANGE_ENTRY;

/* Lookaside list backing RTL_RANGE_ENTRY allocations
   (initialized in RtlInitializeRangeListPackage below) */
PAGED_LOOKASIDE_LIST RtlpRangeListEntryLookasideList;
/* Initial size, in bytes, of the RTL alloc/dealloc query buffer
   (presumably consumed by range-list code outside this file — verify there) */
SIZE_T RtlpAllocDeallocQueryBufferSize = 128;
28
29 /* FUNCTIONS *****************************************************************/
30
31 PVOID
32 NTAPI
33 RtlPcToFileHeader(
34 IN PVOID PcValue,
35 OUT PVOID *BaseOfImage)
36 {
37 PLDR_DATA_TABLE_ENTRY LdrEntry;
38 BOOLEAN InSystem;
39
40 /* Get the base for this file */
41 if ((ULONG_PTR)PcValue > (ULONG_PTR)MmHighestUserAddress)
42 {
43 /* We are in kernel */
44 *BaseOfImage = KiPcToFileHeader(PcValue, &LdrEntry, FALSE, &InSystem);
45 }
46 else
47 {
48 /* We are in user land */
49 *BaseOfImage = KiRosPcToUserFileHeader(PcValue, &LdrEntry);
50 }
51
52 return *BaseOfImage;
53 }
54
/*
 * One-time initialization of range-list support: sets up the paged
 * lookaside list used for RTL_RANGE_ENTRY allocations.
 */
VOID
NTAPI
RtlInitializeRangeListPackage(VOID)
{
    /* Setup the lookaside list for allocations (not used yet) */
    ExInitializePagedLookasideList(&RtlpRangeListEntryLookasideList,
                                   NULL,     /* default allocate routine */
                                   NULL,     /* default free routine */
                                   POOL_COLD_ALLOCATION,
                                   sizeof(RTL_RANGE_ENTRY),
                                   'elRR',   /* pool tag */
                                   16);      /* lookaside depth */
}
68
/* Kernel-mode stub: there is no per-process user-mode debugger to query here */
BOOLEAN
NTAPI
RtlpCheckForActiveDebugger(VOID)
{
    /* This check is meaningless in kernel-mode */
    return FALSE;
}
76
/* Kernel-mode stub: the user-mode "in DbgPrint" TEB flag does not apply here */
BOOLEAN
NTAPI
RtlpSetInDbgPrint(VOID)
{
    /* Nothing to set in kernel mode */
    return FALSE;
}
84
/* Kernel-mode stub counterpart of RtlpSetInDbgPrint */
VOID
NTAPI
RtlpClearInDbgPrint(VOID)
{
    /* Nothing to clear in kernel mode */
}
91
92 KPROCESSOR_MODE
93 NTAPI
94 RtlpGetMode()
95 {
96 return KernelMode;
97 }
98
/*
 * RTL allocation hook: in kernel mode, RTL allocations come from paged
 * pool, tagged with the caller-supplied tag. Returns NULL on failure.
 */
PVOID
NTAPI
RtlpAllocateMemory(ULONG Bytes,
                   ULONG Tag)
{
    return ExAllocatePoolWithTag(PagedPool,
                                 (SIZE_T)Bytes,
                                 Tag);
}
108
109
/* Pool tags used by the RTL string routines */
#define TAG_USTR 'RTSU'
#define TAG_ASTR 'RTSA'
#define TAG_OSTR 'RTSO'
/*
 * RTL free hook, pairing RtlpAllocateMemory. String buffers are freed
 * without tag verification — presumably because string buffers can be
 * handed to Rtl*FreeString* having been allocated under a different
 * tag by the caller; verify against the string routines before changing.
 */
VOID
NTAPI
RtlpFreeMemory(PVOID Mem,
               ULONG Tag)
{
    if (Tag == TAG_ASTR || Tag == TAG_OSTR || Tag == TAG_USTR)
        ExFreePool(Mem);
    else
        ExFreePoolWithTag(Mem, Tag);
}
123
/*
 * @implemented
 *
 * Kernel-mode stub: there is no PEB lock to take in the kernel RTL.
 */
VOID NTAPI
RtlAcquirePebLock(VOID)
{

}
132
/*
 * @implemented
 *
 * Kernel-mode stub counterpart of RtlAcquirePebLock.
 */
VOID NTAPI
RtlReleasePebLock(VOID)
{

}
141
/* Kernel-mode stub: no loader thread-shutdown work is needed here */
NTSTATUS
NTAPI
LdrShutdownThread(VOID)
{
    return STATUS_SUCCESS;
}
148
149
/*
 * Returns the PEB of the process the current thread is attached to
 * (ApcState.Process, so this follows KeStackAttachProcess attachments).
 */
PPEB
NTAPI
RtlGetCurrentPeb(VOID)
{
    return ((PEPROCESS)(KeGetCurrentThread()->ApcState.Process))->Peb;
}
156
/*
 * Tears down a heap lock created by RtlInitializeHeapLock: deletes the
 * embedded ERESOURCE and frees the lock structure itself.
 */
NTSTATUS
NTAPI
RtlDeleteHeapLock(IN OUT PHEAP_LOCK Lock)
{
    ExDeleteResourceLite(&Lock->Resource);
    ExFreePool(Lock);

    return STATUS_SUCCESS;
}
166
167 NTSTATUS
168 NTAPI
169 RtlEnterHeapLock(IN OUT PHEAP_LOCK Lock, IN BOOLEAN Exclusive)
170 {
171 KeEnterCriticalRegion();
172
173 if (Exclusive)
174 ExAcquireResourceExclusiveLite(&Lock->Resource, TRUE);
175 else
176 ExAcquireResourceSharedLite(&Lock->Resource, TRUE);
177
178 return STATUS_SUCCESS;
179 }
180
181 BOOLEAN
182 NTAPI
183 RtlTryEnterHeapLock(IN OUT PHEAP_LOCK Lock, IN BOOLEAN Exclusive)
184 {
185 BOOLEAN Success;
186 KeEnterCriticalRegion();
187
188 if (Exclusive)
189 Success = ExAcquireResourceExclusiveLite(&Lock->Resource, FALSE);
190 else
191 Success = ExAcquireResourceSharedLite(&Lock->Resource, FALSE);
192
193 if (!Success)
194 KeLeaveCriticalRegion();
195
196 return Success;
197 }
198
199 NTSTATUS
200 NTAPI
201 RtlInitializeHeapLock(IN OUT PHEAP_LOCK *Lock)
202 {
203 PHEAP_LOCK HeapLock = ExAllocatePool(NonPagedPool, sizeof(HEAP_LOCK));
204 if (HeapLock == NULL)
205 return STATUS_NO_MEMORY;
206
207 ExInitializeResourceLite(&HeapLock->Resource);
208 *Lock = HeapLock;
209
210 return STATUS_SUCCESS;
211 }
212
/*
 * Releases a heap lock taken by RtlEnterHeapLock/RtlTryEnterHeapLock and
 * leaves the critical region entered at acquire time.
 */
NTSTATUS
NTAPI
RtlLeaveHeapLock(IN OUT PHEAP_LOCK Lock)
{
    ExReleaseResourceLite(&Lock->Resource);
    KeLeaveCriticalRegion();

    return STATUS_SUCCESS;
}
222
223 struct _HEAP;
224
/* Kernel-mode stub: the kernel keeps no per-process heap list */
VOID
NTAPI
RtlpAddHeapToProcessList(struct _HEAP *Heap)
{
    UNREFERENCED_PARAMETER(Heap);
}
231
/* Kernel-mode stub counterpart of RtlpAddHeapToProcessList */
VOID
NTAPI
RtlpRemoveHeapFromProcessList(struct _HEAP *Heap)
{
    UNREFERENCED_PARAMETER(Heap);
}
238
/* Kernel-mode stub: no heap-manager global state to initialize here */
VOID
RtlInitializeHeapManager(VOID)
{
}
243
244 #if DBG
245 VOID FASTCALL
246 CHECK_PAGED_CODE_RTL(char *file, int line)
247 {
248 if(KeGetCurrentIrql() > APC_LEVEL)
249 {
250 DbgPrint("%s:%i: Pagable code called at IRQL > APC_LEVEL (%u)\n", file, line, KeGetCurrentIrql());
251 ASSERT(FALSE);
252 }
253 }
254 #endif
255
/*
 * Fills in any zero (unset) heap parameter with the system-wide defaults
 * maintained by the memory manager (Mm* globals).
 */
VOID
NTAPI
RtlpSetHeapParameters(IN PRTL_HEAP_PARAMETERS Parameters)
{
    /* Apply defaults for non-set parameters */
    if (!Parameters->SegmentCommit) Parameters->SegmentCommit = MmHeapSegmentCommit;
    if (!Parameters->SegmentReserve) Parameters->SegmentReserve = MmHeapSegmentReserve;
    if (!Parameters->DeCommitFreeBlockThreshold) Parameters->DeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold;
    if (!Parameters->DeCommitTotalFreeThreshold) Parameters->DeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold;
}
266
/*
 * Hook for exception logging: meant to record the exception when
 * FLG_ENABLE_EXCEPTION_LOGGING is set in NtGlobalFlag. The logging
 * itself is not implemented yet (see FIXME), so all parameters are
 * currently unused.
 */
VOID
NTAPI
RtlpCheckLogException(IN PEXCEPTION_RECORD ExceptionRecord,
                      IN PCONTEXT ContextRecord,
                      IN PVOID ContextData,
                      IN ULONG Size)
{
    /* Check the global flag */
    if (NtGlobalFlag & FLG_ENABLE_EXCEPTION_LOGGING)
    {
        /* FIXME: Log this exception */
    }
}
280
/*
 * During exception dispatch, detects whether the registration frame lives
 * on the per-processor DPC stack rather than the thread stack. If so,
 * rewrites *StackLow/*StackHigh to the DPC stack's bounds and returns
 * TRUE; otherwise returns FALSE and leaves the limits untouched.
 *
 * NOTE: Prcb->DpcStack is the stack TOP (highest address); the usable
 * range is [DpcStack - KERNEL_STACK_SIZE, DpcStack), as the bounds
 * written below show.
 */
BOOLEAN
NTAPI
RtlpHandleDpcStackException(IN PEXCEPTION_REGISTRATION_RECORD RegistrationFrame,
                            IN ULONG_PTR RegistrationFrameEnd,
                            IN OUT PULONG_PTR StackLow,
                            IN OUT PULONG_PTR StackHigh)
{
    PKPRCB Prcb;
    ULONG_PTR DpcStack;

    /* Check if we are at DISPATCH or higher */
    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        /* Get the PRCB and DPC Stack */
        Prcb = KeGetCurrentPrcb();
        DpcStack = (ULONG_PTR)Prcb->DpcStack;

        /* Check if we are in a DPC and the stack matches */
        if ((Prcb->DpcRoutineActive) &&
            (RegistrationFrameEnd <= DpcStack) &&
            ((ULONG_PTR)RegistrationFrame >= DpcStack - KERNEL_STACK_SIZE))
        {
            /* Update the limits to the DPC Stack's */
            *StackHigh = DpcStack;
            *StackLow = DpcStack - KERNEL_STACK_SIZE;
            return TRUE;
        }
    }

    /* Not in DPC stack */
    return FALSE;
}
313
314 #if !defined(_ARM_) && !defined(_AMD64_)
315
/*
 * Computes safe stack bounds [*StackBegin, *StackEnd) for a stack walk
 * starting at frame pointer Ebp. Tries, in order: the current thread's
 * stack, the processor's DPC stack, and finally a minimal one-page
 * window around Ebp itself. Returns FALSE only above DISPATCH_LEVEL,
 * where none of this can be trusted.
 *
 * NOTE: StackBegin is the low bound (StackLimit) and StackEnd the high
 * bound (StackBase) — kernel stacks grow downward.
 */
BOOLEAN
NTAPI
RtlpCaptureStackLimits(IN ULONG_PTR Ebp,
                       IN ULONG_PTR *StackBegin,
                       IN ULONG_PTR *StackEnd)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Don't even try at ISR level or later */
    if (KeGetCurrentIrql() > DISPATCH_LEVEL) return FALSE;

    /* Start with defaults */
    *StackBegin = Thread->StackLimit;
    *StackEnd = (ULONG_PTR)Thread->StackBase;

    /* Check if EBP is inside the stack */
    if ((*StackBegin <= Ebp) && (Ebp <= *StackEnd))
    {
        /* Then make the stack start at EBP */
        *StackBegin = Ebp;
    }
    else
    {
        /* Now we're going to assume we're on the DPC stack */
        *StackEnd = (ULONG_PTR)(KeGetPcr()->Prcb->DpcStack);
        *StackBegin = *StackEnd - KERNEL_STACK_SIZE;

        /* Check if we seem to be on the DPC stack */
        if ((*StackEnd) && (*StackBegin < Ebp) && (Ebp <= *StackEnd))
        {
            /* We're on the DPC stack */
            *StackBegin = Ebp;
        }
        else
        {
            /* We're somewhere else entirely... use EBP for safety */
            *StackBegin = Ebp;
            *StackEnd = (ULONG_PTR)PAGE_ALIGN(*StackBegin);
        }
    }

    /* Return success */
    return TRUE;
}
360
361 /*
362 * @implemented
363 */
/*
 * @implemented
 *
 * Walks the EBP frame chain starting at the current frame and stores up
 * to Count return addresses into Callers. Flags == 0 walks the current
 * (kernel) stack within limits from RtlpCaptureStackLimits; Flags == 1
 * walks the interrupted user-mode stack via the thread's trap frame and
 * TEB. Returns the number of frames captured (0 on any failure — the
 * whole walk is wrapped in SEH).
 *
 * Each candidate frame is validated before being followed: it must lie
 * inside the stack window, frames must move strictly upward, and the
 * saved return address must not itself point into the stack.
 */
ULONG
NTAPI
RtlWalkFrameChain(OUT PVOID *Callers,
                  IN ULONG Count,
                  IN ULONG Flags)
{
    ULONG_PTR Stack, NewStack, StackBegin, StackEnd = 0;
    ULONG Eip;
    BOOLEAN Result, StopSearch = FALSE;
    ULONG i = 0;
    PETHREAD Thread = PsGetCurrentThread();
    PTEB Teb;
    PKTRAP_FRAME TrapFrame;

    /* Get current EBP (frame/stack pointer depending on architecture) */
#if defined(_M_IX86)
#if defined __GNUC__
    __asm__("mov %%ebp, %0" : "=r" (Stack) : );
#elif defined(_MSC_VER)
    __asm mov Stack, ebp
#endif
#elif defined(_M_MIPS)
    __asm__("move $sp, %0" : "=r" (Stack) : );
#elif defined(_M_PPC)
    __asm__("mr %0,1" : "=r" (Stack) : );
#elif defined(_M_ARM)
    __asm__("mov sp, %0" : "=r"(Stack) : );
#else
#error Unknown architecture
#endif

    /* Set it as the stack begin limit as well */
    StackBegin = (ULONG_PTR)Stack;

    /* Check if we're called for non-logging mode */
    if (!Flags)
    {
        /* Get the actual safe limits */
        Result = RtlpCaptureStackLimits((ULONG_PTR)Stack,
                                        &StackBegin,
                                        &StackEnd);
        if (!Result) return 0;
    }

    /* Use a SEH block for maximum protection */
    _SEH2_TRY
    {
        /* Check if we want the user-mode stack frame */
        if (Flags == 1)
        {
            /* Get the trap frame and TEB */
            TrapFrame = KeGetTrapFrame(&Thread->Tcb);
            Teb = Thread->Tcb.Teb;

            /* Make sure we can trust the TEB and trap frame */
            if (!(Teb) ||
                (KeIsAttachedProcess()) ||
                (KeGetCurrentIrql() >= DISPATCH_LEVEL))
            {
                /* Invalid or unsafe attempt to get the stack */
                _SEH2_YIELD(return 0;)
            }

            /* Get the stack limits from the user-mode TIB */
            StackBegin = (ULONG_PTR)Teb->NtTib.StackLimit;
            StackEnd = (ULONG_PTR)Teb->NtTib.StackBase;
#ifdef _M_IX86
            Stack = TrapFrame->Ebp;
#elif defined(_M_PPC)
            Stack = TrapFrame->Gpr1;
#else
#error Unknown architecture
#endif

            /* Validate them */
            if (StackEnd <= StackBegin) _SEH2_YIELD(return 0);
            ProbeForRead((PVOID)StackBegin,
                         StackEnd - StackBegin,
                         sizeof(CHAR));
        }

        /* Loop the frames */
        for (i = 0; i < Count; i++)
        {
            /*
             * Leave if we're past the stack,
             * if we're before the stack,
             * or if we've reached ourselves.
             */
            if ((Stack >= StackEnd) ||
                (!i ? (Stack < StackBegin) : (Stack <= StackBegin)) ||
                ((StackEnd - Stack) < (2 * sizeof(ULONG_PTR))))
            {
                /* We're done or hit a bad address */
                break;
            }

            /* Get new stack (saved EBP) and EIP (saved return address) */
            NewStack = *(PULONG_PTR)Stack;
            Eip = *(PULONG_PTR)(Stack + sizeof(ULONG_PTR));

            /* Check if the new pointer is above the oldone and past the end */
            if (!((Stack < NewStack) && (NewStack < StackEnd)))
            {
                /* Stop searching after this entry */
                StopSearch = TRUE;
            }

            /* Also make sure that the EIP isn't a stack address */
            if ((StackBegin < Eip) && (Eip < StackEnd)) break;

            /* Check if we reached a user-mode address */
            if (!(Flags) && !(Eip & 0x80000000)) break; // FIXME: 3GB breakage

            /* Save this frame */
            Callers[i] = (PVOID)Eip;

            /* Check if we should continue */
            if (StopSearch)
            {
                /* Return the next index */
                i++;
                break;
            }

            /* Move to the next stack */
            Stack = NewStack;
        }
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* No index */
        i = 0;
    }
    _SEH2_END;

    /* Return frames parsed */
    return i;
}
503
504 #endif
505
506 #ifdef _AMD64_
/*
 * AMD64-only: returns the current thread's kernel stack bounds.
 * LowLimit is StackLimit (lowest usable address), HighLimit is
 * InitialStack (the top the stack grows down from).
 */
VOID
NTAPI
RtlpGetStackLimits(
    OUT PULONG_PTR LowLimit,
    OUT PULONG_PTR HighLimit)
{
    PKTHREAD CurrentThread = KeGetCurrentThread();
    *HighLimit = (ULONG_PTR)CurrentThread->InitialStack;
    *LowLimit = (ULONG_PTR)CurrentThread->StackLimit;
}
517 #endif
518
519 /* RTL Atom Tables ************************************************************/
520
/* Initializes the fast mutex guarding an atom table; cannot fail */
NTSTATUS
RtlpInitAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
    ExInitializeFastMutex(&AtomTable->FastMutex);

    return STATUS_SUCCESS;
}
528
529
/* Fast mutexes need no explicit teardown, so this is a no-op */
VOID
RtlpDestroyAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
}
534
535
/* Acquires the atom table's fast mutex; always succeeds (blocking acquire) */
BOOLEAN
RtlpLockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExAcquireFastMutex(&AtomTable->FastMutex);
    return TRUE;
}
542
/* Releases the atom table's fast mutex taken in RtlpLockAtomTable */
VOID
RtlpUnlockAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExReleaseFastMutex(&AtomTable->FastMutex);
}
548
/*
 * Backs the atom table's handle mechanism with an executive handle table.
 * Returns FALSE if the handle table could not be created.
 */
BOOLEAN
RtlpCreateAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    AtomTable->ExHandleTable = ExCreateHandleTable(NULL);
    return (AtomTable->ExHandleTable != NULL);
}
555
/*
 * Sweep callback used by RtlpDestroyAtomHandleTable: destroys each handle
 * entry encountered. HandleTable is the PVOID context passed to
 * ExSweepHandleTable (the handle table itself).
 */
BOOLEAN
NTAPI
RtlpCloseHandleCallback(
    IN PHANDLE_TABLE_ENTRY HandleTableEntry,
    IN HANDLE Handle,
    IN PVOID HandleTable)
{
    /* Destroy and unlock the handle entry */
    return ExDestroyHandle(HandleTable, Handle, HandleTableEntry);
}
566
/*
 * Destroys the atom table's executive handle table, if present: first
 * sweeps every remaining handle closed, then frees the table and clears
 * the pointer so a second call is harmless.
 */
VOID
RtlpDestroyAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
{
    if (AtomTable->ExHandleTable)
    {
        ExSweepHandleTable(AtomTable->ExHandleTable,
                           RtlpCloseHandleCallback,
                           AtomTable->ExHandleTable);
        ExDestroyHandleTable(AtomTable->ExHandleTable, NULL);
        AtomTable->ExHandleTable = NULL;
    }
}
579
580 PRTL_ATOM_TABLE
581 RtlpAllocAtomTable(ULONG Size)
582 {
583 PRTL_ATOM_TABLE Table = ExAllocatePool(NonPagedPool,
584 Size);
585 if (Table != NULL)
586 {
587 RtlZeroMemory(Table,
588 Size);
589 }
590
591 return Table;
592 }
593
/* Frees an atom table allocated by RtlpAllocAtomTable */
VOID
RtlpFreeAtomTable(PRTL_ATOM_TABLE AtomTable)
{
    ExFreePool(AtomTable);
}
599
600 PRTL_ATOM_TABLE_ENTRY
601 RtlpAllocAtomTableEntry(ULONG Size)
602 {
603 PRTL_ATOM_TABLE_ENTRY Entry;
604
605 Entry = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_ATMT);
606 if (Entry != NULL)
607 {
608 RtlZeroMemory(Entry, Size);
609 }
610
611 return Entry;
612 }
613
/* Frees an entry allocated by RtlpAllocAtomTableEntry (matching tag) */
VOID
RtlpFreeAtomTableEntry(PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExFreePoolWithTag(Entry, TAG_ATMT);
}
619
/*
 * Destroys the executive handle backing an atom entry. The stored
 * HandleIndex had the low two handle bits shifted off at creation time
 * (see RtlpCreateAtomHandle), so shift left by 2 to rebuild the handle.
 */
VOID
RtlpFreeAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
    ExDestroyHandle(AtomTable->ExHandleTable,
                    (HANDLE)((ULONG_PTR)Entry->HandleIndex << 2),
                    NULL);
}
627
/*
 * Creates an executive handle for an atom entry and derives the atom
 * value from it: atoms are 0xC000 + HandleIndex, where HandleIndex is
 * the handle with its two low (tag) bits shifted off. Indices >= 0xC000
 * would overflow the atom space, so such handles are rejected and torn
 * down again. Returns TRUE on success.
 */
BOOLEAN
RtlpCreateAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
{
    HANDLE_TABLE_ENTRY ExEntry;
    HANDLE Handle;
    USHORT HandleIndex;

    /* Initialize ex handle table entry */
    ExEntry.Object = Entry;
    ExEntry.GrantedAccess = 0x1; /* FIXME - valid handle */

    /* Create ex handle */
    Handle = ExCreateHandle(AtomTable->ExHandleTable,
                            &ExEntry);
    if (!Handle) return FALSE;

    /* Calculate HandleIndex (by getting rid of the first two bits) */
    HandleIndex = (USHORT)((ULONG_PTR)Handle >> 2);

    /* Index must be less than 0xC000 */
    if (HandleIndex >= 0xC000)
    {
        /* Destroy ex handle */
        ExDestroyHandle(AtomTable->ExHandleTable,
                        Handle,
                        NULL);

        /* Return failure */
        return FALSE;
    }

    /* Initialize atom table entry */
    Entry->HandleIndex = HandleIndex;
    Entry->Atom = 0xC000 + HandleIndex;

    /* Return success */
    return TRUE;
}
666
/*
 * Resolves a handle index back to its atom table entry via the executive
 * handle table. Returns NULL if the index maps to no entry. Must be
 * called with the atom table lock held (see note below).
 */
PRTL_ATOM_TABLE_ENTRY
RtlpGetAtomEntry(PRTL_ATOM_TABLE AtomTable, ULONG Index)
{
    PHANDLE_TABLE_ENTRY ExEntry;
    PRTL_ATOM_TABLE_ENTRY Entry = NULL;

    /* NOTE: There's no need to explicitly enter a critical region because it's
             guaranteed that we're in a critical region right now (as we hold
             the atom table lock) */

    /* Rebuild the handle: indices were created with the low 2 bits shifted off */
    ExEntry = ExMapHandleToPointer(AtomTable->ExHandleTable,
                                   (HANDLE)((ULONG_PTR)Index << 2));
    if (ExEntry != NULL)
    {
        Entry = ExEntry->Object;

        /* ExMapHandleToPointer returned the entry locked; release it */
        ExUnlockHandleTableEntry(AtomTable->ExHandleTable,
                                 ExEntry);
    }

    return Entry;
}
689
690 /*
691 * Ldr Resource support code
692 */
693
694 IMAGE_RESOURCE_DIRECTORY *find_entry_by_name( IMAGE_RESOURCE_DIRECTORY *dir,
695 LPCWSTR name, void *root,
696 int want_dir );
697 IMAGE_RESOURCE_DIRECTORY *find_entry_by_id( IMAGE_RESOURCE_DIRECTORY *dir,
698 USHORT id, void *root, int want_dir );
699 IMAGE_RESOURCE_DIRECTORY *find_first_entry( IMAGE_RESOURCE_DIRECTORY *dir,
700 void *root, int want_dir );
701
702 /**********************************************************************
703 * find_entry
704 *
705 * Find a resource entry
706 */
/**********************************************************************
 *  find_entry
 *
 * Find a resource entry
 *
 * Walks the PE resource directory of the image at BaseAddress down
 * through up to three levels (type / name / language) as selected by
 * `level`, writing the directory or data entry into *ret. want_dir
 * forces the lookup to stop at a directory rather than a data entry.
 * Derived from Wine's loader resource code (note the Wine-style
 * formatting and STATUS_RESOURCE_* returns).
 */
NTSTATUS find_entry( PVOID BaseAddress, LDR_RESOURCE_INFO *info,
                     ULONG level, void **ret, int want_dir )
{
    ULONG size;
    void *root;
    IMAGE_RESOURCE_DIRECTORY *resdirptr;

    /* Locate the resource directory; bail out if absent or too small */
    root = RtlImageDirectoryEntryToData( BaseAddress, TRUE, IMAGE_DIRECTORY_ENTRY_RESOURCE, &size );
    if (!root) return STATUS_RESOURCE_DATA_NOT_FOUND;
    if (size < sizeof(*resdirptr)) return STATUS_RESOURCE_DATA_NOT_FOUND;
    resdirptr = root;

    /* Level 0: caller wants the root directory itself */
    if (!level--) goto done;

    /* Level 1: look up the resource type */
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Type, root, want_dir || level )))
        return STATUS_RESOURCE_TYPE_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;

    /* Level 2: look up the resource name within the type directory */
    resdirptr = *ret;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Name, root, want_dir || level )))
        return STATUS_RESOURCE_NAME_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;
    if (level) return STATUS_INVALID_PARAMETER;  /* level > 3 */

    /* Level 3: take the first language entry available */
    resdirptr = *ret;

    if ((*ret = find_first_entry( resdirptr, root, want_dir ))) return STATUS_SUCCESS;

    return STATUS_RESOURCE_DATA_NOT_FOUND;

done:
    *ret = resdirptr;
    return STATUS_SUCCESS;
}
740
/*
 * Copies Length bytes from Source to Destination under SEH protection.
 * Returns STATUS_SUCCESS, or the exception code if either buffer faults
 * during the copy (in which case Destination may be partially written).
 */
NTSTATUS
NTAPI
RtlpSafeCopyMemory(
    _Out_writes_bytes_all_(Length) VOID UNALIGNED *Destination,
    _In_reads_bytes_(Length) CONST VOID UNALIGNED *Source,
    _In_ SIZE_T Length)
{
    _SEH2_TRY
    {
        RtlCopyMemory(Destination, Source, Length);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        _SEH2_YIELD(return _SEH2_GetExceptionCode());
    }
    _SEH2_END;

    return STATUS_SUCCESS;
}
760
/*
 * Kernel-mode stub: vectored exception handling is a user-mode facility,
 * so no handler ever claims the exception here.
 */
BOOLEAN
NTAPI
RtlCallVectoredExceptionHandlers(
    _In_ PEXCEPTION_RECORD ExceptionRecord,
    _In_ PCONTEXT Context)
{
    /* In the kernel we don't have vectored exception handlers */
    return FALSE;
}
770
771 /* EOF */