Sync up with trunk r61578.
[reactos.git] / ntoskrnl / rtl / libsupp.c
1 /*
2 * COPYRIGHT: See COPYING in the top level directory
3 * PROJECT: ReactOS kernel
4 * FILE: ntoskrnl/rtl/libsupp.c
5 * PURPOSE: RTL Support Routines
6 * PROGRAMMERS: Alex Ionescu (alex@relsoft.net)
7 * Gunnar Dalsnes
8 */
9
10 /* INCLUDES ******************************************************************/
11
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15
16 #define TAG_ATMT 'TotA' /* Atom table */
17
18 extern ULONG NtGlobalFlag;
19
20 typedef struct _RTL_RANGE_ENTRY
21 {
22 LIST_ENTRY Entry;
23 RTL_RANGE Range;
24 } RTL_RANGE_ENTRY, *PRTL_RANGE_ENTRY;
25
26 PAGED_LOOKASIDE_LIST RtlpRangeListEntryLookasideList;
27 SIZE_T RtlpAllocDeallocQueryBufferSize = 128;
28
29 /* FUNCTIONS *****************************************************************/
30
31 PVOID
32 NTAPI
33 RtlPcToFileHeader(
34 IN PVOID PcValue,
35 OUT PVOID *BaseOfImage)
36 {
37 PLDR_DATA_TABLE_ENTRY LdrEntry;
38 BOOLEAN InSystem;
39
40 /* Get the base for this file */
41 if ((ULONG_PTR)PcValue > (ULONG_PTR)MmHighestUserAddress)
42 {
43 /* We are in kernel */
44 *BaseOfImage = KiPcToFileHeader(PcValue, &LdrEntry, FALSE, &InSystem);
45 }
46 else
47 {
48 /* We are in user land */
49 *BaseOfImage = KiRosPcToUserFileHeader(PcValue, &LdrEntry);
50 }
51
52 return *BaseOfImage;
53 }
54
55 VOID
56 NTAPI
57 RtlInitializeRangeListPackage(VOID)
58 {
59 /* Setup the lookaside list for allocations (not used yet) */
60 ExInitializePagedLookasideList(&RtlpRangeListEntryLookasideList,
61 NULL,
62 NULL,
63 POOL_COLD_ALLOCATION,
64 sizeof(RTL_RANGE_ENTRY),
65 'elRR',
66 16);
67 }
68
69 BOOLEAN
70 NTAPI
71 RtlpCheckForActiveDebugger(VOID)
72 {
73 /* This check is meaningless in kernel-mode */
74 return FALSE;
75 }
76
77 BOOLEAN
78 NTAPI
79 RtlpSetInDbgPrint(VOID)
80 {
81 /* Nothing to set in kernel mode */
82 return FALSE;
83 }
84
85 VOID
86 NTAPI
87 RtlpClearInDbgPrint(VOID)
88 {
89 /* Nothing to clear in kernel mode */
90 }
91
92 KPROCESSOR_MODE
93 NTAPI
94 RtlpGetMode()
95 {
96 return KernelMode;
97 }
98
99 PVOID
100 NTAPI
101 RtlpAllocateMemory(ULONG Bytes,
102 ULONG Tag)
103 {
104 return ExAllocatePoolWithTag(PagedPool,
105 (SIZE_T)Bytes,
106 Tag);
107 }
108
109
110 #define TAG_USTR 'RTSU'
111 #define TAG_ASTR 'RTSA'
112 #define TAG_OSTR 'RTSO'
113 VOID
114 NTAPI
115 RtlpFreeMemory(PVOID Mem,
116 ULONG Tag)
117 {
118 if (Tag == TAG_ASTR || Tag == TAG_OSTR || Tag == TAG_USTR)
119 ExFreePool(Mem);
120 else
121 ExFreePoolWithTag(Mem, Tag);
122 }
123
124 /*
125 * @implemented
126 */
127 VOID NTAPI
128 RtlAcquirePebLock(VOID)
129 {
130
131 }
132
133 /*
134 * @implemented
135 */
136 VOID NTAPI
137 RtlReleasePebLock(VOID)
138 {
139
140 }
141
142 NTSTATUS
143 NTAPI
144 LdrShutdownThread(VOID)
145 {
146 return STATUS_SUCCESS;
147 }
148
149
150 PPEB
151 NTAPI
152 RtlGetCurrentPeb(VOID)
153 {
154 return ((PEPROCESS)(KeGetCurrentThread()->ApcState.Process))->Peb;
155 }
156
157 NTSTATUS
158 NTAPI
159 RtlDeleteHeapLock(IN OUT PHEAP_LOCK Lock)
160 {
161 ExDeleteResourceLite(&Lock->Resource);
162 ExFreePool(Lock);
163
164 return STATUS_SUCCESS;
165 }
166
167 NTSTATUS
168 NTAPI
169 RtlEnterHeapLock(IN OUT PHEAP_LOCK Lock, IN BOOLEAN Exclusive)
170 {
171 KeEnterCriticalRegion();
172
173 if (Exclusive)
174 ExAcquireResourceExclusiveLite(&Lock->Resource, TRUE);
175 else
176 ExAcquireResourceSharedLite(&Lock->Resource, TRUE);
177
178 return STATUS_SUCCESS;
179 }
180
181 NTSTATUS
182 NTAPI
183 RtlInitializeHeapLock(IN OUT PHEAP_LOCK *Lock)
184 {
185 PHEAP_LOCK HeapLock = ExAllocatePool(NonPagedPool, sizeof(HEAP_LOCK));
186 if (HeapLock == NULL)
187 return STATUS_NO_MEMORY;
188
189 ExInitializeResourceLite(&HeapLock->Resource);
190 *Lock = HeapLock;
191
192 return STATUS_SUCCESS;
193 }
194
195 NTSTATUS
196 NTAPI
197 RtlLeaveHeapLock(IN OUT PHEAP_LOCK Lock)
198 {
199 ExReleaseResourceLite(&Lock->Resource);
200 KeLeaveCriticalRegion();
201
202 return STATUS_SUCCESS;
203 }
204
205 struct _HEAP;
206
207 VOID
208 NTAPI
209 RtlpAddHeapToProcessList(struct _HEAP *Heap)
210 {
211 UNREFERENCED_PARAMETER(Heap);
212 }
213
214 VOID
215 NTAPI
216 RtlpRemoveHeapFromProcessList(struct _HEAP *Heap)
217 {
218 UNREFERENCED_PARAMETER(Heap);
219 }
220
/* No kernel-mode heap-manager global state needs initialization */
VOID
RtlInitializeHeapManager(VOID)
{
}
225
#if DBG
VOID FASTCALL
CHECK_PAGED_CODE_RTL(char *file, int line)
{
    KIRQL CurrentIrql = KeGetCurrentIrql();

    /* Pageable RTL code must not execute above APC_LEVEL; complain loudly */
    if (CurrentIrql > APC_LEVEL)
    {
        DbgPrint("%s:%i: Pagable code called at IRQL > APC_LEVEL (%u)\n", file, line, CurrentIrql);
        ASSERT(FALSE);
    }
}
#endif
237
238 VOID
239 NTAPI
240 RtlpSetHeapParameters(IN PRTL_HEAP_PARAMETERS Parameters)
241 {
242 /* Apply defaults for non-set parameters */
243 if (!Parameters->SegmentCommit) Parameters->SegmentCommit = MmHeapSegmentCommit;
244 if (!Parameters->SegmentReserve) Parameters->SegmentReserve = MmHeapSegmentReserve;
245 if (!Parameters->DeCommitFreeBlockThreshold) Parameters->DeCommitFreeBlockThreshold = MmHeapDeCommitFreeBlockThreshold;
246 if (!Parameters->DeCommitTotalFreeThreshold) Parameters->DeCommitTotalFreeThreshold = MmHeapDeCommitTotalFreeThreshold;
247 }
248
249 VOID
250 NTAPI
251 RtlpCheckLogException(IN PEXCEPTION_RECORD ExceptionRecord,
252 IN PCONTEXT ContextRecord,
253 IN PVOID ContextData,
254 IN ULONG Size)
255 {
256 /* Check the global flag */
257 if (NtGlobalFlag & FLG_ENABLE_EXCEPTION_LOGGING)
258 {
259 /* FIXME: Log this exception */
260 }
261 }
262
/*
 * Adjusts the stack limits reported to the exception dispatcher when the
 * faulting frame lives on the current processor's DPC stack rather than
 * the thread stack.
 *
 * RegistrationFrame/RegistrationFrameEnd - bounds of the SEH registration
 *                                          record being validated.
 * StackLow/StackHigh                     - in/out stack limits; rewritten
 *                                          to the DPC stack bounds on match.
 *
 * Returns TRUE if the frame was recognized as a DPC-stack frame and the
 * limits were updated, FALSE otherwise.
 */
BOOLEAN
NTAPI
RtlpHandleDpcStackException(IN PEXCEPTION_REGISTRATION_RECORD RegistrationFrame,
                            IN ULONG_PTR RegistrationFrameEnd,
                            IN OUT PULONG_PTR StackLow,
                            IN OUT PULONG_PTR StackHigh)
{
    PKPRCB Prcb;
    ULONG_PTR DpcStack;

    /* Check if we are at DISPATCH or higher */
    if (KeGetCurrentIrql() >= DISPATCH_LEVEL)
    {
        /* Get the PRCB and DPC Stack */
        Prcb = KeGetCurrentPrcb();
        DpcStack = (ULONG_PTR)Prcb->DpcStack;

        /* Check if we are in a DPC and the stack matches: the registration
           record must lie entirely within [DpcStack - KERNEL_STACK_SIZE,
           DpcStack] (the DPC stack grows down from DpcStack) */
        if ((Prcb->DpcRoutineActive) &&
            (RegistrationFrameEnd <= DpcStack) &&
            ((ULONG_PTR)RegistrationFrame >= DpcStack - KERNEL_STACK_SIZE))
        {
            /* Update the limits to the DPC Stack's */
            *StackHigh = DpcStack;
            *StackLow = DpcStack - KERNEL_STACK_SIZE;
            return TRUE;
        }
    }

    /* Not in DPC stack */
    return FALSE;
}
295
296 #if !defined(_ARM_) && !defined(_AMD64_)
297
/*
 * Determines the bounds of the stack that Ebp currently points into, for
 * use by the frame-chain walker: the thread stack if Ebp lies within it,
 * otherwise the processor's DPC stack, otherwise just the page containing
 * Ebp as a conservative fallback.
 *
 * Returns FALSE above DISPATCH_LEVEL, where the thread/PRCB state cannot
 * be inspected safely; TRUE otherwise.
 */
BOOLEAN
NTAPI
RtlpCaptureStackLimits(IN ULONG_PTR Ebp,
                       IN ULONG_PTR *StackBegin,
                       IN ULONG_PTR *StackEnd)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Don't even try at ISR level or later */
    if (KeGetCurrentIrql() > DISPATCH_LEVEL) return FALSE;

    /* Start with defaults: the current thread's kernel stack bounds */
    *StackBegin = Thread->StackLimit;
    *StackEnd = (ULONG_PTR)Thread->StackBase;

    /* Check if EBP is inside the stack */
    if ((*StackBegin <= Ebp) && (Ebp <= *StackEnd))
    {
        /* Then make the stack start at EBP, skipping frames below it */
        *StackBegin = Ebp;
    }
    else
    {
        /* Now we're going to assume we're on the DPC stack
           (grows down from Prcb->DpcStack) */
        *StackEnd = (ULONG_PTR)(KeGetPcr()->Prcb->DpcStack);
        *StackBegin = *StackEnd - KERNEL_STACK_SIZE;

        /* Check if we seem to be on the DPC stack (DpcStack may be NULL) */
        if ((*StackEnd) && (*StackBegin < Ebp) && (Ebp <= *StackEnd))
        {
            /* We're on the DPC stack */
            *StackBegin = Ebp;
        }
        else
        {
            /* We're somewhere else entirely... use EBP for safety:
               limit the walk to the single page containing EBP */
            *StackBegin = Ebp;
            *StackEnd = (ULONG_PTR)PAGE_ALIGN(*StackBegin);
        }
    }

    /* Return success */
    return TRUE;
}
342
/*
 * @implemented
 */
/*
 * Walks the EBP-linked stack frame chain and stores up to Count return
 * addresses into Callers.
 *
 * Flags == 0 : walk the current kernel-mode chain (limits from
 *              RtlpCaptureStackLimits); stops at user-mode addresses.
 * Flags == 1 : walk the interrupted user-mode chain, starting from the
 *              trap frame's saved frame pointer and bounded by the TEB
 *              stack limits.
 *
 * Returns the number of frames captured; 0 on validation failure or if
 * an access fault occurs during the walk (caught by SEH).
 */
ULONG
NTAPI
RtlWalkFrameChain(OUT PVOID *Callers,
                  IN ULONG Count,
                  IN ULONG Flags)
{
    ULONG_PTR Stack, NewStack, StackBegin, StackEnd = 0;
    ULONG Eip;
    BOOLEAN Result, StopSearch = FALSE;
    ULONG i = 0;
    PETHREAD Thread = PsGetCurrentThread();
    PTEB Teb;
    PKTRAP_FRAME TrapFrame;

    /* Get current EBP (architecture-specific frame/stack pointer) */
#if defined(_M_IX86)
#if defined __GNUC__
    __asm__("mov %%ebp, %0" : "=r" (Stack) : );
#elif defined(_MSC_VER)
    __asm mov Stack, ebp
#endif
#elif defined(_M_MIPS)
        __asm__("move $sp, %0" : "=r" (Stack) : );
#elif defined(_M_PPC)
    __asm__("mr %0,1" : "=r" (Stack) : );
#elif defined(_M_ARM)
        __asm__("mov sp, %0" : "=r"(Stack) : );
#else
#error Unknown architecture
#endif

    /* Set it as the stack begin limit as well */
    StackBegin = (ULONG_PTR)Stack;

    /* Check if we're called for non-logging mode */
    if (!Flags)
    {
        /* Get the actual safe limits */
        Result = RtlpCaptureStackLimits((ULONG_PTR)Stack,
                                        &StackBegin,
                                        &StackEnd);
        if (!Result) return 0;
    }

    /* Use a SEH block for maximum protection */
    _SEH2_TRY
    {
        /* Check if we want the user-mode stack frame */
        if (Flags == 1)
        {
            /* Get the trap frame and TEB */
            TrapFrame = KeGetTrapFrame(&Thread->Tcb);
            Teb = Thread->Tcb.Teb;

            /* Make sure we can trust the TEB and trap frame */
            if (!(Teb) ||
                (KeIsAttachedProcess()) ||
                (KeGetCurrentIrql() >= DISPATCH_LEVEL))
            {
                /* Invalid or unsafe attempt to get the stack */
                _SEH2_YIELD(return 0;)
            }

            /* Get the stack limits from the user-mode TIB */
            StackBegin = (ULONG_PTR)Teb->NtTib.StackLimit;
            StackEnd = (ULONG_PTR)Teb->NtTib.StackBase;
#ifdef _M_IX86
            Stack = TrapFrame->Ebp;
#elif defined(_M_PPC)
            Stack = TrapFrame->Gpr1;
#else
#error Unknown architecture
#endif

            /* Validate them, then probe the whole range at once */
            if (StackEnd <= StackBegin) _SEH2_YIELD(return 0);
            ProbeForRead((PVOID)StackBegin,
                         StackEnd - StackBegin,
                         sizeof(CHAR));
        }

        /* Loop the frames */
        for (i = 0; i < Count; i++)
        {
            /*
             * Leave if we're past the stack,
             * if we're before the stack,
             * or if we've reached ourselves.
             * (the frame must have room for saved EBP + return address)
             */
            if ((Stack >= StackEnd) ||
                (!i ? (Stack < StackBegin) : (Stack <= StackBegin)) ||
                ((StackEnd - Stack) < (2 * sizeof(ULONG_PTR))))
            {
                /* We're done or hit a bad address */
                break;
            }

            /* Get new stack and EIP: [EBP] = caller's EBP, [EBP+4] = return address */
            NewStack = *(PULONG_PTR)Stack;
            Eip = *(PULONG_PTR)(Stack + sizeof(ULONG_PTR));

            /* Check if the new pointer is above the oldone and past the end */
            if (!((Stack < NewStack) && (NewStack < StackEnd)))
            {
                /* Stop searching after this entry */
                StopSearch = TRUE;
            }

            /* Also make sure that the EIP isn't a stack address */
            if ((StackBegin < Eip) && (Eip < StackEnd)) break;

            /* Check if we reached a user-mode address */
            if (!(Flags) && !(Eip & 0x80000000)) break; // FIXME: 3GB breakage

            /* Save this frame */
            Callers[i] = (PVOID)Eip;

            /* Check if we should continue */
            if (StopSearch)
            {
                /* Return the next index */
                i++;
                break;
            }

            /* Move to the next stack */
            Stack = NewStack;
        }
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* No index */
        i = 0;
    }
    _SEH2_END;

    /* Return frames parsed */
    return i;
}
485
486 #endif
487
#ifdef _AMD64_
VOID
NTAPI
RtlpGetStackLimits(
    OUT PULONG_PTR LowLimit,
    OUT PULONG_PTR HighLimit)
{
    /* Report the bounds of the current thread's kernel stack */
    PKTHREAD Thread = KeGetCurrentThread();

    *LowLimit = (ULONG_PTR)Thread->StackLimit;
    *HighLimit = (ULONG_PTR)Thread->InitialStack;
}
#endif
500
501 /* RTL Atom Tables ************************************************************/
502
503 NTSTATUS
504 RtlpInitAtomTableLock(PRTL_ATOM_TABLE AtomTable)
505 {
506 ExInitializeFastMutex(&AtomTable->FastMutex);
507
508 return STATUS_SUCCESS;
509 }
510
511
/* Fast mutexes require no teardown, so this is a no-op */
VOID
RtlpDestroyAtomTableLock(PRTL_ATOM_TABLE AtomTable)
{
}
516
517
518 BOOLEAN
519 RtlpLockAtomTable(PRTL_ATOM_TABLE AtomTable)
520 {
521 ExAcquireFastMutex(&AtomTable->FastMutex);
522 return TRUE;
523 }
524
525 VOID
526 RtlpUnlockAtomTable(PRTL_ATOM_TABLE AtomTable)
527 {
528 ExReleaseFastMutex(&AtomTable->FastMutex);
529 }
530
531 BOOLEAN
532 RtlpCreateAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
533 {
534 AtomTable->ExHandleTable = ExCreateHandleTable(NULL);
535 return (AtomTable->ExHandleTable != NULL);
536 }
537
538 BOOLEAN
539 NTAPI
540 RtlpCloseHandleCallback(
541 IN PHANDLE_TABLE_ENTRY HandleTableEntry,
542 IN HANDLE Handle,
543 IN PVOID HandleTable)
544 {
545 /* Destroy and unlock the handle entry */
546 return ExDestroyHandle(HandleTable, Handle, HandleTableEntry);
547 }
548
549 VOID
550 RtlpDestroyAtomHandleTable(PRTL_ATOM_TABLE AtomTable)
551 {
552 if (AtomTable->ExHandleTable)
553 {
554 ExSweepHandleTable(AtomTable->ExHandleTable,
555 RtlpCloseHandleCallback,
556 AtomTable->ExHandleTable);
557 ExDestroyHandleTable(AtomTable->ExHandleTable, NULL);
558 AtomTable->ExHandleTable = NULL;
559 }
560 }
561
562 PRTL_ATOM_TABLE
563 RtlpAllocAtomTable(ULONG Size)
564 {
565 PRTL_ATOM_TABLE Table = ExAllocatePool(NonPagedPool,
566 Size);
567 if (Table != NULL)
568 {
569 RtlZeroMemory(Table,
570 Size);
571 }
572
573 return Table;
574 }
575
576 VOID
577 RtlpFreeAtomTable(PRTL_ATOM_TABLE AtomTable)
578 {
579 ExFreePool(AtomTable);
580 }
581
582 PRTL_ATOM_TABLE_ENTRY
583 RtlpAllocAtomTableEntry(ULONG Size)
584 {
585 PRTL_ATOM_TABLE_ENTRY Entry;
586
587 Entry = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_ATMT);
588 if (Entry != NULL)
589 {
590 RtlZeroMemory(Entry, Size);
591 }
592
593 return Entry;
594 }
595
596 VOID
597 RtlpFreeAtomTableEntry(PRTL_ATOM_TABLE_ENTRY Entry)
598 {
599 ExFreePoolWithTag(Entry, TAG_ATMT);
600 }
601
602 VOID
603 RtlpFreeAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
604 {
605 ExDestroyHandle(AtomTable->ExHandleTable,
606 (HANDLE)((ULONG_PTR)Entry->HandleIndex << 2),
607 NULL);
608 }
609
610 BOOLEAN
611 RtlpCreateAtomHandle(PRTL_ATOM_TABLE AtomTable, PRTL_ATOM_TABLE_ENTRY Entry)
612 {
613 HANDLE_TABLE_ENTRY ExEntry;
614 HANDLE Handle;
615 USHORT HandleIndex;
616
617 /* Initialize ex handle table entry */
618 ExEntry.Object = Entry;
619 ExEntry.GrantedAccess = 0x1; /* FIXME - valid handle */
620
621 /* Create ex handle */
622 Handle = ExCreateHandle(AtomTable->ExHandleTable,
623 &ExEntry);
624 if (!Handle) return FALSE;
625
626 /* Calculate HandleIndex (by getting rid of the first two bits) */
627 HandleIndex = (USHORT)((ULONG_PTR)Handle >> 2);
628
629 /* Index must be less than 0xC000 */
630 if (HandleIndex >= 0xC000)
631 {
632 /* Destroy ex handle */
633 ExDestroyHandle(AtomTable->ExHandleTable,
634 Handle,
635 NULL);
636
637 /* Return failure */
638 return FALSE;
639 }
640
641 /* Initialize atom table entry */
642 Entry->HandleIndex = HandleIndex;
643 Entry->Atom = 0xC000 + HandleIndex;
644
645 /* Return success */
646 return TRUE;
647 }
648
649 PRTL_ATOM_TABLE_ENTRY
650 RtlpGetAtomEntry(PRTL_ATOM_TABLE AtomTable, ULONG Index)
651 {
652 PHANDLE_TABLE_ENTRY ExEntry;
653 PRTL_ATOM_TABLE_ENTRY Entry = NULL;
654
655 /* NOTE: There's no need to explicitly enter a critical region because it's
656 guaranteed that we're in a critical region right now (as we hold
657 the atom table lock) */
658
659 ExEntry = ExMapHandleToPointer(AtomTable->ExHandleTable,
660 (HANDLE)((ULONG_PTR)Index << 2));
661 if (ExEntry != NULL)
662 {
663 Entry = ExEntry->Object;
664
665 ExUnlockHandleTableEntry(AtomTable->ExHandleTable,
666 ExEntry);
667 }
668
669 return Entry;
670 }
671
672 /*
673 * Ldr Resource support code
674 */
675
676 IMAGE_RESOURCE_DIRECTORY *find_entry_by_name( IMAGE_RESOURCE_DIRECTORY *dir,
677 LPCWSTR name, void *root,
678 int want_dir );
679 IMAGE_RESOURCE_DIRECTORY *find_entry_by_id( IMAGE_RESOURCE_DIRECTORY *dir,
680 USHORT id, void *root, int want_dir );
681 IMAGE_RESOURCE_DIRECTORY *find_first_entry( IMAGE_RESOURCE_DIRECTORY *dir,
682 void *root, int want_dir );
683
684 /**********************************************************************
685 * find_entry
686 *
687 * Find a resource entry
688 */
/*
 * Walks the PE resource directory of the image at BaseAddress down
 * 'level' levels (type -> name -> language), using info->Type and
 * info->Name for the first two lookups and taking the first available
 * entry at the language level. On success *ret points to the directory
 * (want_dir) or data entry reached. Returns the STATUS_RESOURCE_*_NOT_FOUND
 * code for whichever level failed, or STATUS_INVALID_PARAMETER for
 * level > 3.
 */
NTSTATUS find_entry( PVOID BaseAddress, LDR_RESOURCE_INFO *info,
                     ULONG level, void **ret, int want_dir )
{
    ULONG size;
    void *root;
    IMAGE_RESOURCE_DIRECTORY *resdirptr;

    root = RtlImageDirectoryEntryToData( BaseAddress, TRUE, IMAGE_DIRECTORY_ENTRY_RESOURCE, &size );
    if (!root) return STATUS_RESOURCE_DATA_NOT_FOUND;
    resdirptr = root;

    /* level 0: the caller wants the root directory itself */
    if (!level--) goto done;
    /* level >= 1: descend by resource type */
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Type, root, want_dir || level )))
        return STATUS_RESOURCE_TYPE_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;

    /* level >= 2: descend by resource name */
    resdirptr = *ret;
    if (!(*ret = find_entry_by_name( resdirptr, (LPCWSTR)info->Name, root, want_dir || level )))
        return STATUS_RESOURCE_NAME_NOT_FOUND;
    if (!level--) return STATUS_SUCCESS;
    if (level) return STATUS_INVALID_PARAMETER;  /* level > 3 */

    /* level == 3: take the first language entry available
       (the kernel does not filter by language id) */
    resdirptr = *ret;

    if ((*ret = find_first_entry( resdirptr, root, want_dir ))) return STATUS_SUCCESS;

    return STATUS_RESOURCE_DATA_NOT_FOUND;

done:
    *ret = resdirptr;
    return STATUS_SUCCESS;
}
721
/*
 * Copies Length bytes from Source to Destination under SEH protection:
 * a faulting access returns the exception code to the caller instead of
 * propagating the exception.
 */
NTSTATUS
NTAPI
RtlpSafeCopyMemory(
   _Out_writes_bytes_all_(Length) VOID UNALIGNED *Destination,
   _In_reads_bytes_(Length) CONST VOID UNALIGNED *Source,
   _In_ SIZE_T Length)
{
    _SEH2_TRY
    {
        RtlCopyMemory(Destination, Source, Length);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        /* Surface the fault as a status code rather than raising */
        _SEH2_YIELD(return _SEH2_GetExceptionCode());
    }
    _SEH2_END;

    return STATUS_SUCCESS;
}
741
742 BOOLEAN
743 NTAPI
744 RtlCallVectoredExceptionHandlers(
745 _In_ PEXCEPTION_RECORD ExceptionRecord,
746 _In_ PCONTEXT Context)
747 {
748 /* In the kernel we don't have vectored exception handlers */
749 return FALSE;
750 }
751
752 /* EOF */