#define NDEBUG
#include <debug.h>
+#define KiVdmGetInstructionSize(x) ((x) & 0xFF)
+#define KiVdmGetPrefixFlags(x) ((x) & 0xFFFFFF00)
+
/* GLOBALS ********************************************************************/
ULONG KeI386EFlagsAndMaskV86 = EFLAGS_USER_SANITIZE;
{
ULONG Esp, V86EFlags, TrapEFlags;
+ /* Check for VME support */
+ ASSERT(KeI386VirtualIntExtensions == FALSE);
+
/* Get current V8086 flags and mask out interrupt flag */
V86EFlags = *KiNtVdmState;
V86EFlags &= ~EFLAGS_INTERRUPT_MASK;
-
+
/* Get trap frame EFLags and leave only align, nested task and interrupt */
TrapEFlags = TrapFrame->EFlags;
- TrapEFlags &= (EFLAGS_ALIGN_CHECK | EFLAGS_NESTED_TASK | EFLAGS_INTERRUPT_MASK);
-
+ V86EFlags &= (EFLAGS_ALIGN_CHECK | EFLAGS_NESTED_TASK | EFLAGS_INTERRUPT_MASK);
+
/* Add in those flags if they exist, and add in the IOPL flag */
V86EFlags |= TrapEFlags;
V86EFlags |= EFLAGS_IOPL;
/* Build flat ESP */
Esp = (TrapFrame->HardwareSegSs << 4) + (USHORT)TrapFrame->HardwareEsp;
- Esp -= 2;
/* Check for OPER32 */
- if (Flags & PFX_FLAG_OPER32)
+ if (KiVdmGetPrefixFlags(Flags) & PFX_FLAG_OPER32)
{
/* Save EFlags */
- Esp -= 2;
- *(PULONG)(Esp - 2) = V86EFlags;
+ Esp -= 4;
+ *(PULONG)Esp = V86EFlags;
}
else
{
/* Save EFLags */
+ Esp -= 2;
*(PUSHORT)Esp = (USHORT)V86EFlags;
}
/* Set new ESP and EIP */
- TrapFrame->HardwareEsp = (USHORT)Esp;
- TrapFrame->Eip += (Flags & 0xFF);
+ TrapFrame->HardwareEsp = Esp - (TrapFrame->HardwareSegSs << 4);
+ TrapFrame->Eip += KiVdmGetInstructionSize(Flags);
/* We're done */
return TRUE;
/* Build flat ESP */
Esp = (TrapFrame->HardwareSegSs << 4) + (USHORT)TrapFrame->HardwareEsp;
- /* Read EFlags */
- EFlags = *(PULONG)Esp;
- Esp += 4;
-
/* Check for OPER32 */
- if (!(Flags & PFX_FLAG_OPER32))
+ if (KiVdmGetPrefixFlags(Flags) & PFX_FLAG_OPER32)
{
- /* Read correct flags and use correct stack address */
- Esp -= 2;
- EFlags &= 0xFFFF;
+ /* Read EFlags */
+ EFlags = *(PULONG)Esp;
+ Esp += 4;
+ }
+ else
+ {
+ /* Read EFlags */
+ EFlags = *(PUSHORT)Esp;
+ Esp += 2;
}
/* Set new ESP */
- TrapFrame->HardwareEsp = Esp;
+ TrapFrame->HardwareEsp = Esp - (TrapFrame->HardwareSegSs << 4);
/* Mask out IOPL from the flags */
EFlags &= ~EFLAGS_IOPL;
/* Now leave only alignment, nested task and interrupt flag */
EFlags &= (EFLAGS_ALIGN_CHECK | EFLAGS_NESTED_TASK | EFLAGS_INTERRUPT_MASK);
- /* FIXME: Check for VME support */
-
+ /* Get trap EFlags */
+ TrapEFlags = TrapFrame->EFlags;
+
+ /* Check for VME support */
+ ASSERT(KeI386VirtualIntExtensions == FALSE);
+
/* Add V86 and Interrupt flag */
V86EFlags |= EFLAGS_V86_MASK | EFLAGS_INTERRUPT_MASK;
-
+
/* Update EFlags in trap frame */
- TrapEFlags = TrapFrame->EFlags;
- TrapFrame->EFlags = (TrapFrame->EFlags & EFLAGS_VIP) | V86EFlags;
-
+ TrapFrame->EFlags = V86EFlags;
+
/* Check if ESP0 needs to be fixed up */
if (TrapEFlags & EFLAGS_V86_MASK) Ki386AdjustEsp0(TrapFrame);
/* FIXME: Check for VDM interrupts */
/* Update EIP */
- TrapFrame->Eip += (Flags & 0xFF);
+ TrapFrame->Eip += KiVdmGetInstructionSize(Flags);
/* We're done */
return TRUE;
/* Keep only alignment and interrupt flag from the V8086 state */
V86EFlags &= (EFLAGS_ALIGN_CHECK | EFLAGS_INTERRUPT_MASK);
- /* FIXME: Support VME */
+ /* Check for VME support */
+ ASSERT(KeI386VirtualIntExtensions == FALSE);
/* Mask in the relevant V86 EFlags into the trap flags */
V86EFlags |= (TrapEFlags & ~EFLAGS_INTERRUPT_MASK);
/* Push IP */
Esp -= 2;
- *(PUSHORT)(Esp) = (USHORT)TrapFrame->Eip + (Flags & 0xFF) + 1;
+ *(PUSHORT)(Esp) = (USHORT)TrapFrame->Eip + KiVdmGetInstructionSize(Flags) + 1;
/* Update ESP */
TrapFrame->HardwareEsp = (USHORT)Esp;
Eip = (TrapFrame->SegCs << 4) + TrapFrame->Eip;
/* Now get the *next* EIP address (current is original + the count - 1) */
- Eip += (Flags & 0xFF);
+ Eip += KiVdmGetInstructionSize(Flags);
/* Now read the interrupt number */
Interrupt = *(PUCHAR)Eip;
-
+
/* Read the EIP from its IVT entry */
Interrupt = *(PULONG)(Interrupt * 4);
TrapFrame->Eip = (USHORT)Interrupt;
IN ULONG Flags)
{
ULONG Esp, V86EFlags, EFlags, TrapEFlags, Eip;
-
+
/* Build flat ESP */
Esp = (TrapFrame->HardwareSegSs << 4) + TrapFrame->HardwareEsp;
/* Check for OPER32 */
- if (Flags & PFX_FLAG_OPER32)
+ if (KiVdmGetPrefixFlags(Flags) & PFX_FLAG_OPER32)
{
/* Build segmented EIP */
TrapFrame->Eip = *(PULONG)Esp;
EFlags &= ~(EFLAGS_IOPL + EFLAGS_VIF + EFLAGS_NESTED_TASK + EFLAGS_VIP);
V86EFlags = EFlags;
- /* FIXME: Check for VME support */
+ /* Check for VME support */
+ ASSERT(KeI386VirtualIntExtensions == FALSE);
/* Add V86 and Interrupt flag */
EFlags |= EFLAGS_V86_MASK | EFLAGS_INTERRUPT_MASK;
else
{
/* FIXME: Check for VDM interrupts */
+ DPRINT("FIXME: Check for VDM interrupts\n");
}
/* We're done */
KiVdmOpcodeCLI(IN PKTRAP_FRAME TrapFrame,
IN ULONG Flags)
{
- /* FIXME: Support VME */
+ /* Check for VME support */
+ ASSERT(KeI386VirtualIntExtensions == FALSE);
- /* disable interrupts */
+ /* Disable interrupts */
KiVdmClearVdmEFlags(EFLAGS_INTERRUPT_MASK);
/* Skip instruction */
- TrapFrame->Eip += (Flags & 0xFF);
+ TrapFrame->Eip += KiVdmGetInstructionSize(Flags);
/* Done */
return TRUE;
KiVdmOpcodeSTI(IN PKTRAP_FRAME TrapFrame,
IN ULONG Flags)
{
- /* FIXME: Support VME */
+ /* Check for VME support */
+ ASSERT(KeI386VirtualIntExtensions == FALSE);
/* Enable interrupts */
KiVdmSetVdmEFlags(EFLAGS_INTERRUPT_MASK);
/* Skip instruction */
- TrapFrame->Eip += (Flags & 0xFF);
+ TrapFrame->Eip += KiVdmGetInstructionSize(Flags);
/* Done */
return TRUE;
/* Get flat EIP of the *current* instruction (not the original EIP) */
Eip = (TrapFrame->SegCs << 4) + TrapFrame->Eip;
- Eip += (Flags & 0xFF) - 1;
+ Eip += KiVdmGetInstructionSize(Flags) - 1;
/* Read the opcode entry */
switch (*(PUCHAR)Eip)
/* Restore TEB addresses */
Thread->Teb = V86Frame->ThreadTeb;
- KeGetPcr()->Tib.Self = V86Frame->PcrTeb;
+ KeGetPcr()->NtTib.Self = V86Frame->PcrTeb;
/* Setup real TEB descriptor */
GdtEntry = &((PKIPCR)KeGetPcr())->GDT[KGDT_R3_TEB / sizeof(KGDTENTRY)];
GdtEntry->HighWord.Bytes.BaseMid = (UCHAR)((ULONG_PTR)Thread->Teb >> 16);
GdtEntry->HighWord.Bytes.BaseHi = (UCHAR)((ULONG_PTR)Thread->Teb >> 24);
- /* Enable interrupts and get back to protected mode */
+ /* Enable interrupts and return a pointer to the trap frame */
_enable();
- return TrapFrame->Edi;
+ return (ULONG)PmTrapFrame;
}
VOID
/* Save TEB addresses */
V86Frame->ThreadTeb = Thread->Teb;
- V86Frame->PcrTeb = KeGetPcr()->Tib.Self;
+ V86Frame->PcrTeb = KeGetPcr()->NtTib.Self;
/* Save return EIP */
TrapFrame->Eip = (ULONG_PTR)Ki386BiosCallReturnAddress;
RtlCopyMemory(NpxFrame, V86Frame->ThreadStack, sizeof(FX_SAVE_AREA));
/* Clear exception list */
- KeGetPcr()->Tib.ExceptionList = EXCEPTION_CHAIN_END;
+ KeGetPcr()->NtTib.ExceptionList = EXCEPTION_CHAIN_END;
/* Set new ESP0 */
KeGetPcr()->TSS->Esp0 = (ULONG_PTR)&TrapFrame->V86Es;
/* Set VDM TEB */
Thread->Teb = (PTEB)TRAMPOLINE_TEB;
- KeGetPcr()->Tib.Self = (PVOID)TRAMPOLINE_TEB;
+ KeGetPcr()->NtTib.Self = (PVOID)TRAMPOLINE_TEB;
/* Setup VDM TEB descriptor */
GdtEntry = &((PKIPCR)KeGetPcr())->GDT[KGDT_R3_TEB / sizeof(KGDTENTRY)];
Tss->IoMapBase = (USHORT)IOPM_OFFSET;
/* Switch stacks and work the magic */
- DPRINT1("Entering V86 mode\n");
Ki386SetupAndExitToV86Mode(VdmTeb);
- DPRINT1("Left V86 mode\n");
/* Restore IOPM */
RtlCopyMemory(&Tss->IoMaps[0].IoMap, Ki386IopmSaveArea, PAGE_SIZE * 2);
}
/*
 * @implemented
 */
BOOLEAN
NTAPI
Ke386IoSetAccessProcess(IN PKPROCESS Process,
                        IN ULONG MapNumber)
{
    USHORT MapOffset;
    PKPRCB Prcb;
    KAFFINITY TargetProcessors;

    /* Validate the map number. MapNumber == IO_ACCESS_MAP_NONE (0) is
       legal here: it selects the "no access" offset. */
    if (MapNumber > IOPM_COUNT)
    {
        return FALSE;
    }

    /* Convert the map number into a TSS I/O bitmap offset and store it in
       the process, so context switches load the correct map from now on */
    MapOffset = KiComputeIopmOffset(MapNumber);
    Process->IopmOffset = MapOffset;

    /* If the process is currently active on this processor, make the new
       offset live in the TSS immediately.
       FIXME(review): other processors in TargetProcessors keep a stale
       IoMapBase until their next context switch — an IPI should request
       them to reload it as well. */
    TargetProcessors = Process->ActiveProcessors;
    Prcb = KeGetCurrentPrcb();
    if (TargetProcessors & Prcb->SetMember)
    {
        KeGetPcr()->TSS->IoMapBase = MapOffset;
    }

    return TRUE;
}
/*
- * @unimplemented
+ * @implemented
*/
BOOLEAN
NTAPI
-Ke386SetIoAccessMap(IN ULONG Flag,
- IN PVOID IopmBuffer)
+Ke386SetIoAccessMap(IN ULONG MapNumber,
+ IN PKIO_ACCESS_MAP IopmBuffer)
{
- UNIMPLEMENTED;
- return FALSE;
+ PKPROCESS CurrentProcess;
+ PKPRCB Prcb;
+ PVOID pt;
+
+ if ((MapNumber > IOPM_COUNT) || (MapNumber == IO_ACCESS_MAP_NONE))
+ return FALSE;
+
+ Prcb = KeGetCurrentPrcb();
+
+ // Copy the IOP map and load the map for the current process.
+ pt = &(KeGetPcr()->TSS->IoMaps[MapNumber-1].IoMap);
+ RtlMoveMemory(pt, (PVOID)IopmBuffer, IOPM_SIZE);
+ CurrentProcess = Prcb->CurrentThread->ApcState.Process;
+ KeGetPcr()->TSS->IoMapBase = CurrentProcess->IopmOffset;
+
+ return TRUE;
}
/*
- * @unimplemented
+ * @implemented
*/
BOOLEAN
NTAPI
-Ke386QueryIoAccessMap(IN ULONG Flag,
- IN PVOID IopmBuffer)
+Ke386QueryIoAccessMap(IN ULONG MapNumber,
+ IN PKIO_ACCESS_MAP IopmBuffer)
{
- UNIMPLEMENTED;
- return FALSE;
+ ULONG i;
+ PVOID Map;
+ PUCHAR p;
+
+ if (MapNumber > IOPM_COUNT)
+ return FALSE;
+
+ if (MapNumber == IO_ACCESS_MAP_NONE)
+ {
+ // no access, simply return a map of all 1s
+ p = (PUCHAR)IopmBuffer;
+ for (i = 0; i < IOPM_SIZE; i++) {
+ p[i] = (UCHAR)-1;
+ }
+ }
+ else
+ {
+ // copy the bits
+ Map = (PVOID)&(KeGetPcr()->TSS->IoMaps[MapNumber-1].IoMap);
+ RtlMoveMemory((PVOID)IopmBuffer, Map, IOPM_SIZE);
+ }
+
+ return TRUE;
}