Merge asm related changes from cmake branch.
svn path=/trunk/; revision=49826
-.intel_syntax noprefix
-.text
-.code16
#define ASM
+
+#include <asm.inc>
+
#include <arch.h>
+.text
+.code16
+
//.org 0x8000
.global RealEntryPoint
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#include <asm.inc>
+
+
.text
.code16
#define ASM
#include <arch.h>
-
-EXTERN(ChainLoadBiosBootSectorCode)
+PUBLIC ChainLoadBiosBootSectorCode
+ChainLoadBiosBootSectorCode:
.code64
call x86_64_SwitchToReal
// ljmpl $0x0000,$0x7C00
jmp 0x7c00:0x0000
-EXTERN(SoftReboot)
+PUBLIC SoftReboot
+SoftReboot:
.code64
call x86_64_SwitchToReal
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#include <asm.inc>
+
+
.text
.code16
ret
+END
/* EOF */
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#include <asm.inc>
+
+
.intel_syntax noprefix
.text
.code16
mov eax, Int386_eax
ret
+
+END
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
+#include <asm.inc>
+
+
.text
.code16
.fill 4096, 1, 0
PageDirectoryEnd:
+
+END
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
- .text
- .code16
+.intel_syntax noprefix
+#define HEX(y) 0x##y
#define ASM
#include <arch.h>
#include <multiboot.h>
+ .code16
EXTERN(RealEntryPoint)
cli
/* Setup segment registers */
- xorw %ax,%ax
- movw %ax,%ds
- movw %ax,%es
- movw %ax,%fs
- movw %ax,%gs
- movw %ax,%ss
+ xor ax, ax
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax
+
/* Setup a stack */
- movw stack16,%sp
+ mov sp, word ptr ds:stack16
sti
.code32
/* Zero BootDrive and BootPartition */
- xorl %eax,%eax
- movl %eax,(_BootDrive)
- movl %eax,(_BootPartition)
+ xor eax, eax
+ mov dword ptr [_BootDrive], eax
+ mov dword ptr [_BootPartition], eax
/* Store the boot drive */
- movb %dl,(_BootDrive)
+ mov byte ptr [_BootDrive], dl
/* Store the boot partition */
- movb %dh,(_BootPartition)
+ mov byte ptr [_BootPartition], dh
/* GO! */
- pushl %eax
+ push eax
call _BootMain
call switch_to_real
.code16
- int $0x19
+ int HEX(19)
/* We should never get here */
stop:
*/
EXTERN(switch_to_prot)
- .code16
+.code16
cli /* None of these */
/* Of course CS has to already be valid. */
/* We are currently in real-mode so we */
/* need real-mode segment values. */
- xorw %ax,%ax
- movw %ax,%ds
- movw %ax,%es
- movw %ax,%fs
- movw %ax,%gs
- movw %ax,%ss
+ xor ax, ax
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax
/* Get the return address off the stack */
- popw (code32ret)
+ pop word ptr ds:[code32ret]
/* Save 16-bit stack pointer */
- movw %sp,stack16
+ mov word ptr ds:[stack16], sp
/* Load the GDT */
lgdt gdtptr
lidt i386idtptr
/* Enable Protected Mode */
- mov %cr0,%eax
- orl $CR0_PE_SET,%eax
- mov %eax,%cr0
+ mov eax, cr0
+ or eax, CR0_PE_SET
+ mov cr0, eax
/* Clear prefetch queue & correct CS */
- ljmp $PMODE_CS, $inpmode
-
+ //ljmp PMODE_CS, inpmode
+ jmp far ptr PMODE_CS:inpmode
- .code32
+.code32
inpmode:
/* Setup segment selectors */
- movw $PMODE_DS,%ax
- movw %ax,%ds
- movw %ax,%es
- movw %ax,%fs
- movw %ax,%gs
- movw %ax,%ss
- movl stack32,%esp
+ mov ax, PMODE_DS
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax
+ mov esp, dword ptr [stack32]
/* Put the return address back onto the stack */
- pushl (code32ret)
+ push dword ptr [code32ret]
/* Now return in p-mode! */
ret
*/
EXTERN(switch_to_real)
- .code32
+.code32
/* We don't know what values are currently */
/* in the segment registers. So we are */
/* Of course CS has to already be valid. */
/* We are currently in protected-mode so we */
/* need protected-mode segment values. */
- movw $PMODE_DS,%ax
- movw %ax,%ds
- movw %ax,%es
- movw %ax,%fs
- movw %ax,%gs
- movw %ax,%ss
+ mov ax, PMODE_DS
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax
/* Get the return address off the stack */
- popl (code16ret)
+ pop dword ptr [code16ret]
/* Save 32-bit stack pointer */
- movl %esp,stack32
+ mov dword ptr [stack32], esp
/* jmp to 16-bit segment to set the limit correctly */
- ljmp $RMODE_CS, $switch_to_real16
+ ljmp RMODE_CS, switch_to_real16
switch_to_real16:
- .code16
+.code16
/* Restore segment registers to correct limit */
- movw $RMODE_DS,%ax
- movw %ax,%ds
- movw %ax,%es
- movw %ax,%fs
- movw %ax,%gs
- movw %ax,%ss
+ mov ax, RMODE_DS
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax
/* Disable Protected Mode */
- mov %cr0,%eax
- andl $CR0_PE_CLR,%eax
- mov %eax,%cr0
+ mov eax, cr0
+ and eax, CR0_PE_CLR
+ mov cr0, eax
/* Clear prefetch queue & correct CS */
- ljmp $0, $inrmode
+ //ljmp $0, $inrmode
+ jmp far ptr 0:inrmode
inrmode:
- movw %cs,%ax
- movw %ax,%ds
- movw %ax,%es
- movw %ax,%fs
- movw %ax,%gs
- movw %ax,%ss
+ mov ax, cs
+ mov ds, ax
+ mov es, ax
+ mov fs, ax
+ mov gs, ax
+ mov ss, ax
/* Clear out the high 16-bits of ESP */
/* This is needed because I have one */
/* anything other than 0x0000 is in the high */
/* 16-bits of ESP. Even though real-mode */
/* code should only use SP and not ESP. */
- xorl %esp,%esp
+ xor esp, esp
- movw stack16,%sp
+ mov sp, word ptr ds:[stack16]
/* Put the return address back onto the stack */
- pushw (code16ret)
+ push word ptr ds:[code16ret]
/* Load IDTR with real mode value */
lidt rmode_idtptr
/*
* Needed for enabling the a20 address line
*/
- .code16
+.code16
empty_8042:
.word 0x00eb,0x00eb // jmp $+2, jmp $+2
- inb $0x64,%al
- cmp $0xff, %al // legacy-free machine without keyboard
- jz empty_8042_ret // controllers on Intel Macs read back 0xFF
- testb $0x02,%al
- jnz empty_8042
+ in al, HEX(64)
+ cmp al, HEX(ff) // legacy-free machine without keyboard
+ jz empty_8042_ret // controllers on Intel Macs read back 0xFF
+ test al, 2
+ jnz empty_8042
empty_8042_ret:
ret
* Enable the A20 address line (to allow access to over 1mb)
*/
EXTERN(_EnableA20)
- .code32
+.code32
- pushal
+ pusha
- call switch_to_real
- .code16
+ call switch_to_real
+.code16
- call empty_8042
- movb $0xD1,%al // command write
- outb %al,$0x64
- call empty_8042
- mov $0xDF,%al // A20 on
- out %al,$0x60
- call empty_8042
- call switch_to_prot
+ call empty_8042
+ mov al, HEX(D1) // command write
+ out HEX(64), al
+ call empty_8042
+ mov al, HEX(DF) // A20 on
+ out HEX(60), al
+ call empty_8042
+ call switch_to_prot
.code32
- popal
+ popa
ret
* Disable the A20 address line
*/
EXTERN(_DisableA20)
- .code32
+.code32
- pushal
+ pusha
- call switch_to_real
+ call switch_to_real
.code16
call empty_8042
- movb $0xD1,%al // command write
- outb %al,$0x64
- call empty_8042
- mov $0xDD,%al // A20 off
- out %al,$0x60
+ mov al, HEX(D1) // command write
+ out HEX(64), al
+ call empty_8042
+ mov al, HEX(DD) // A20 off
+ out HEX(60), al
call empty_8042
call switch_to_prot
.code32
- popal
+ popa
ret
* above 1MB. So we let Grub load us there and then relocate
* ourself to 0x8000
*/
-#define FREELDR_BASE 0x8000
-#define INITIAL_BASE 0x200000
+#define FREELDR_BASE HEX(8000)
+#define INITIAL_BASE HEX(200000)
/* Align 32 bits boundary */
- .align 4
+.align 4
/* Multiboot header */
MultibootHeader:
* our own */
lgdt gdtptrhigh + INITIAL_BASE - FREELDR_BASE
/* Reload segment selectors */
- ljmp $PMODE_CS, $(mb1 + INITIAL_BASE - FREELDR_BASE)
+ //ljmp $PMODE_CS, $(mb1 + INITIAL_BASE - FREELDR_BASE)
+ jmp far ptr PMODE_CS: (mb1 + INITIAL_BASE - FREELDR_BASE)
mb1:
- movw $PMODE_DS,%dx
- movw %dx,%ds
- movw %dx,%es
+ mov dx, PMODE_DS
+ mov ds, dx
+ mov es, dx
/* Check for valid multiboot signature */
- cmpl $MULTIBOOT_BOOTLOADER_MAGIC,%eax
- jne mbfail
+ cmp eax, MULTIBOOT_BOOTLOADER_MAGIC
+ jne mbfail
/* Store multiboot info in a safe place */
- movl %ebx,%esi
- movl $(mb_info + INITIAL_BASE - FREELDR_BASE),%edi
- movl $MB_INFO_SIZE,%ecx
+ mov esi, ebx
+ mov edi, offset mb_info + INITIAL_BASE - FREELDR_BASE
+ mov ecx, MB_INFO_SIZE
rep movsb
/* Save commandline */
- movl MB_INFO_FLAGS_OFFSET(%ebx),%edx
- testl $MB_INFO_FLAG_COMMAND_LINE,MB_INFO_FLAGS_OFFSET(%ebx)
- jz mb3
- movl MB_INFO_COMMAND_LINE_OFFSET(%ebx),%esi
- movl $(cmdline + INITIAL_BASE - FREELDR_BASE),%edi
- movl $CMDLINE_SIZE,%ecx
+ mov edx, [ebx + MB_INFO_FLAGS_OFFSET]
+ test dword ptr [ebx + MB_INFO_FLAGS_OFFSET], MB_INFO_FLAG_COMMAND_LINE
+ jz mb3
+ mov esi, [ebx + MB_INFO_COMMAND_LINE_OFFSET]
+ mov edi, offset cmdline + INITIAL_BASE - FREELDR_BASE
+ mov ecx, CMDLINE_SIZE
mb2: lodsb
stosb
- testb %al,%al
- jz mb3
- dec %ecx
- jnz mb2
+ test al, al
+ jz mb3
+ dec ecx
+ jnz mb2
mb3:
/* Copy to low mem */
- movl $INITIAL_BASE,%esi
- movl $FREELDR_BASE,%edi
- movl $(__bss_end__ - FREELDR_BASE),%ecx
- addl $3,%ecx
- shrl $2,%ecx
- rep movsl
+ mov esi, INITIAL_BASE
+ mov edi, FREELDR_BASE
+ mov ecx, (offset __bss_end__ - FREELDR_BASE)
+ add ecx, 3
+ shr ecx, 2
+ rep movsd
/* Load the GDT and IDT */
lgdt gdtptr
/* Clear prefetch queue & correct CS,
* jump to low mem */
- ljmp $PMODE_CS, $mb4
+ //ljmp $PMODE_CS, $mb4
+ jmp far ptr PMODE_CS:mb4
mb4:
/* Reload segment selectors */
- movw $PMODE_DS,%dx
- movw %dx,%ds
- movw %dx,%es
- movw %dx,%fs
- movw %dx,%gs
- movw %dx,%ss
- movl $STACK32ADDR,%esp
-
- movl $mb_info,%ebx
+ mov dx, PMODE_DS
+ mov ds, dx
+ mov es, dx
+ mov fs, dx
+ mov gs, dx
+ mov ss, dx
+ mov esp, STACK32ADDR
+
+ mov ebx, offset mb_info
/* See if the boot device was passed in */
- movl MB_INFO_FLAGS_OFFSET(%ebx),%edx
- testl $MB_INFO_FLAG_BOOT_DEVICE,%edx
- jz mb5
+ mov edx, [ebx + MB_INFO_FLAGS_OFFSET]
+ test edx, MB_INFO_FLAG_BOOT_DEVICE
+ jz mb5
/* Retrieve boot device info */
- movl MB_INFO_BOOT_DEVICE_OFFSET(%ebx),%eax
- shrl $16,%eax
- incb %al
- movb %al,_BootPartition
- movb %ah,_BootDrive
- jmp mb6
+ mov eax, [ebx + MB_INFO_BOOT_DEVICE_OFFSET]
+ shr eax, 16
+ inc al
+ mov byte ptr _BootPartition, al
+ mov byte ptr _BootDrive, ah
+ jmp mb6
mb5: /* No boot device known, assume first partition of first harddisk */
- movb $0x80,_BootDrive
- movb $1,_BootPartition
+ mov byte ptr _BootDrive, HEX(80)
+ mov byte ptr _BootPartition, 1
mb6:
/* Check for command line */
- mov $cmdline,%eax
- testl $MB_INFO_FLAG_COMMAND_LINE,MB_INFO_FLAGS_OFFSET(%ebx)
- jnz mb7
- xorl %eax,%eax
+ mov eax, offset cmdline
+ test dword ptr [ebx + MB_INFO_FLAGS_OFFSET], MB_INFO_FLAG_COMMAND_LINE
+ jnz mb7
+ xor eax, eax
mb7:
/* GO! */
- pushl %eax
+ push eax
call _BootMain
-mbfail: call switch_to_real
+mbfail:
+ call switch_to_real
.code16
- int $0x19
+	int HEX(19)
mbstop: jmp mbstop /* We should never get here */
.code32
.long 0
- .p2align 2 /* force 4-byte alignment */
+ .align 4 /* force 4-byte alignment */
gdt:
/* NULL Descriptor */
- .word 0x0000
- .word 0x0000
- .word 0x0000
- .word 0x0000
+ .word HEX(0000)
+ .word HEX(0000)
+ .word HEX(0000)
+ .word HEX(0000)
/* 32-bit flat CS */
- .word 0xFFFF
- .word 0x0000
- .word 0x9A00
- .word 0x00CF
+ .word HEX(FFFF)
+ .word HEX(0000)
+ .word HEX(9A00)
+ .word HEX(00CF)
/* 32-bit flat DS */
- .word 0xFFFF
- .word 0x0000
- .word 0x9200
- .word 0x00CF
+ .word HEX(FFFF)
+ .word HEX(0000)
+ .word HEX(9200)
+ .word HEX(00CF)
/* 16-bit real mode CS */
- .word 0xFFFF
- .word 0x0000
- .word 0x9E00
- .word 0x0000
+ .word HEX(FFFF)
+ .word HEX(0000)
+ .word HEX(9E00)
+ .word HEX(0000)
/* 16-bit real mode DS */
- .word 0xFFFF
- .word 0x0000
- .word 0x9200
- .word 0x0000
+ .word HEX(FFFF)
+ .word HEX(0000)
+ .word HEX(9200)
+ .word HEX(0000)
/* GDT table pointer */
gdtptr:
- .word 0x27 /* Limit */
- .long gdt /* Base Address */
+ .word HEX(27) /* Limit */
+ .long gdt /* Base Address */
/* Initial GDT table pointer for multiboot */
gdtptrhigh:
- .word 0x27 /* Limit */
- .long gdt + INITIAL_BASE - FREELDR_BASE /* Base Address */
+ .word HEX(27) /* Limit */
+ .long gdt + INITIAL_BASE - FREELDR_BASE /* Base Address */
/* Real-mode IDT pointer */
rmode_idtptr:
- .word 0x3ff /* Limit */
- .long 0 /* Base Address */
+ .word HEX(3ff) /* Limit */
+ .long 0 /* Base Address */
mb_info:
.fill MB_INFO_SIZE, 1, 0
/* INCLUDES ******************************************************************/
-#include <ndk/asm.h>
-.intel_syntax noprefix
+#include <asm.inc>
+#include <ks386.inc>
+
+EXTERN _LdrpInit@12:PROC
+EXTERN _NtTestAlert@0:PROC
+EXTERN _RtlDispatchException@8:PROC
+EXTERN _RtlRaiseException@4:PROC
+EXTERN _RtlRaiseStatus@4:PROC
+EXTERN _ZwCallbackReturn@12:PROC
+EXTERN _ZwContinue@8:PROC
+EXTERN _ZwRaiseException@12:PROC
/* FUNCTIONS ****************************************************************/
+.code
-.func LdrInitializeThunk@16
-.globl _LdrInitializeThunk@16
+PUBLIC _LdrInitializeThunk@16
_LdrInitializeThunk@16:
/* Get the APC Context */
/* Jump into the C initialization routine */
jmp _LdrpInit@12
-.endfunc
-.func KiUserApcExceptionHandler
+
_KiUserApcExceptionHandler:
/* Put the exception record in ECX and check the Flags */
/* We'll execute handler */
mov eax, EXCEPTION_EXECUTE_HANDLER
ret 16
-.endfunc
-.func KiUserApcDispatcher@16
-.globl _KiUserApcDispatcher@16
+
+PUBLIC _KiUserApcDispatcher@16
_KiUserApcDispatcher@16:
/* Setup SEH stack */
call _RtlRaiseStatus@4
jmp StatusRaiseApc
ret 16
-.endfunc
-.func KiUserCallbackExceptionHandler
+
_KiUserCallbackExceptionHandler:
/* Put the exception record in ECX and check the Flags */
/* We'll execute the handler */
mov eax, EXCEPTION_EXECUTE_HANDLER
ret 16
-.endfunc
-.func KiUserCallbackDispatcher@12
-.globl _KiUserCallbackDispatcher@12
+
+PUBLIC _KiUserCallbackDispatcher@12
_KiUserCallbackDispatcher@12:
/* Setup SEH stack */
mov eax, [eax+PEB_KERNEL_CALLBACK_TABLE]
/* Call the routine */
- call [eax+edx*4]
+ call dword ptr [eax+edx*4]
/* Return from callback */
push eax
call _RtlRaiseStatus@4
jmp StatusRaise
ret 12
-.endfunc
-.func KiRaiseUserExceptionDispatcher@0
-.globl _KiRaiseUserExceptionDispatcher@0
+
+PUBLIC _KiRaiseUserExceptionDispatcher@0
_KiRaiseUserExceptionDispatcher@0:
/* Setup stack for EXCEPTION_RECORD */
mov esp, ebp
pop ebp
ret
-.endfunc
-.func KiUserExceptionDispatcher@8
-.globl _KiUserExceptionDispatcher@8
+
+PUBLIC _KiUserExceptionDispatcher@8
_KiUserExceptionDispatcher@8:
/* Clear direction flag */
push esp
call _RtlRaiseException@4
ret 8
-.endfunc
-.func KiIntSystemCall@0
-.globl _KiIntSystemCall@0
+
+PUBLIC _KiIntSystemCall@0
_KiIntSystemCall@0:
/* Set stack in EDX and do the interrupt */
lea edx, [esp+8]
- int 0x2E
+ int HEX(2E)
/* Return to caller */
ret
-.endfunc
-.func KiFastSystemCall@0
-.globl _KiFastSystemCall@0
+
+PUBLIC _KiFastSystemCall@0
_KiFastSystemCall@0:
/* Put ESP in EDX and do the SYSENTER */
mov edx, esp
sysenter
-.endfunc
-.func KiFastSystemCallRet@0
-.globl _KiFastSystemCallRet@0
+
+PUBLIC _KiFastSystemCallRet@0
_KiFastSystemCallRet@0:
/* Just return to caller */
ret
-.endfunc
-.func RtlpGetStackLimits@8
-.globl _RtlpGetStackLimits@8
+
+PUBLIC _RtlpGetStackLimits@8
_RtlpGetStackLimits@8:
/* Get the stack limits */
/* return */
ret 8
-.endfunc
+
+END
* KJK::Hyperion <noog@libero.it>
*/
-#include <ndk/asm.h>
+#include <asm.inc>
-.globl SwitchToFiber
-.intel_syntax noprefix
+
+PUBLIC SwitchToFiber
SwitchToFiber:
/* FIXME: TODO */
ret 4
+
+END
* PROGRAMMER: Alex Ionescu (alex@relsoft.net)
*/
-.globl BaseThreadStartupThunk
-.globl BaseProcessStartThunk
-.intel_syntax noprefix
+#include <asm.inc>
+
+
+PUBLIC BaseThreadStartupThunk
+PUBLIC BaseProcessStartThunk
BaseThreadStartupThunk:
push 0 /* Return RIP */
jmp BaseProcessStartup
+END
/* EOF */
* KJK::Hyperion <noog@libero.it>
*/
-#include <ndk/asm.h>
+#include <asm.inc>
+#include <ks386.inc>
-.globl _SwitchToFiber@4
-.intel_syntax noprefix
+.code
+PUBLIC _SwitchToFiber@4
_SwitchToFiber@4:
/* Get the TEB */
mov edx, fs:[TEB_SELF]
mov [eax+FIBER_CONTEXT_EIP], ebx
/* Check if we're to save FPU State */
- cmp dword ptr [eax+FIBER_CONTEXT_FLAGS], CONTEXT_FULL | CONTEXT_FLOATING_POINT
+ cmp dword ptr [eax+FIBER_CONTEXT_FLAGS], CONTEXT_FULL OR CONTEXT_FLOATING_POINT
jnz NoFpuStateSave
/* Save the FPU State (Status and Control)*/
mov [edx+TEB_ACTIVATION_CONTEXT_STACK_POINTER], esi
/* Restore FPU State */
- cmp dword ptr [eax+FIBER_CONTEXT_FLAGS], CONTEXT_FULL | CONTEXT_FLOATING_POINT
+ cmp dword ptr [eax+FIBER_CONTEXT_FLAGS], CONTEXT_FULL OR CONTEXT_FLOATING_POINT
jnz NoFpuStateRestore
/* Check if the Status Word Changed */
StatusWordChanged:
/* Load the new one */
- mov word ptr [ecx+FIBER_CONTEXT_FLOAT_SAVE_TAG_WORD], 0xFFFF
+ mov word ptr [ecx+FIBER_CONTEXT_FLOAT_SAVE_TAG_WORD], HEX(0FFFF)
fldenv [ecx+FIBER_CONTEXT_FLOAT_SAVE_CONTROL_WORD]
ControlWordEqual:
mov [edx+TEB_FLS_DATA], eax
/* Jump to new fiber */
- jmp [ecx+FIBER_CONTEXT_EIP]
+ jmp dword ptr [ecx+FIBER_CONTEXT_EIP]
+END
/* EOF */
* PROGRAMMER: Alex Ionescu (alex@relsoft.net)
*/
-.globl _BaseThreadStartupThunk@0
-.globl _BaseProcessStartThunk@0
-.intel_syntax noprefix
+#include <asm.inc>
+
+.code
+
+EXTERN _BaseThreadStartup@8:PROC
+EXTERN _BaseProcessStartup@4:PROC
+
+PUBLIC _BaseThreadStartupThunk@0
+PUBLIC _BaseProcessStartThunk@0
_BaseThreadStartupThunk@0:
push 0 /* Return EIP */
jmp _BaseProcessStartup@4
+END
/* EOF */
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+
+#include <ksamd64.inc>
/* FUNCTIONS *****************************************************************/
AFTER
iret
-
+END
/* EOF */
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+
+#include <ksamd64.inc>
/* GLOBALS *******************************************************************/
UNIMPLEMENTED _HalpClockInterrupt
iret
+END
/* INCLUDES ******************************************************************/
-#include <asm.h>
-.intel_syntax noprefix
+#include <asm.inc>
+
+#include <ks386.inc>
+
+EXTERN _HalpAcquireSystemHardwareSpinLock@0:PROC
+EXTERN _HalpReleaseCmosSpinLock@0:PROC
+EXTERN _DbgBreakPoint@0:PROC
+EXTERN _HalpCurrentRollOver:DWORD
+EXTERN _HalpPerfCounterCutoff:DWORD
+
+#define PIC1_BASE HEX(20) /* IO base address for master PIC */
+#define PIC2_BASE HEX(A0) /* IO base address for slave PIC */
+#define PIC1_COMMAND PIC1_BASE
+#define PIC1_DATA (PIC1_BASE+1)
+#define PIC2_COMMAND PIC2_BASE
+#define PIC2_DATA (PIC2_BASE+1)
+#define PIC_EOI HEX(20)
+#define PIC_SPECIFIC_EOI2 HEX(62)
+
+#define CMOS_ADDR HEX(70)
+#define CMOS_DATA HEX(71)
+#define CMOS_REGISTER_A HEX(0A)
+#define CMOS_REGISTER_B HEX(0B)
+#define CMOS_REGISTER_C HEX(0C)
+#define CMOS_REGISTER_D HEX(0D)
+
+#define PIT_CH0 HEX(40)
+#define PIT_MODE HEX(43)
+#define SYSTEM_CTRL_PORT_A HEX(92)
/* GLOBALS *******************************************************************/
-.globl _HalpPerfCounter
+.data
+ASSUME CS:NOTHING, DS:NOTHING, ES:NOTHING, FS:NOTHING, GS:NOTHING
+
+PUBLIC _HalpPerfCounter
_HalpLastPerfCounterLow: .long 0
_HalpLastPerfCounterHigh: .long 0
_HalpPerfCounter:
/* FUNCTIONS *****************************************************************/
-.global _HalpCalibrateStallExecution@0
-.func HalpCalibrateStallExecution@0
+.code
+PUBLIC _HalpCalibrateStallExecution@0
_HalpCalibrateStallExecution@0:
/* Setup the stack frame */
/* Get the current interrupt mask on the PICs */
xor eax, eax
- in al, 0xA1
+ in al, PIC2_DATA
shl eax, 8
- in al, 0x21
+ in al, PIC1_DATA
/* Save it */
push eax
/* Now mask everything except the RTC and PIC 2 chain-interrupt */
- mov eax, ~((1 << 2) | (1 << 8))
+ mov eax, NOT (HEX(04) OR HEX(100))
/* Program the PICs */
- out 0x21, al
+ out PIC1_DATA, al
shr eax, 8
- out 0xA1, al
+ out PIC2_DATA, al
/* Now get the IDT */
sidt [ebp-8]
mov ecx, [ebp-6]
/* Get the IDT entry for the RTC */
- mov eax, 0x38
+ mov eax, HEX(38)
shl eax, 3
add ecx, eax
mov eax, offset OnlyOnePersonCanWriteHalCode
mov [ecx], ax
mov word ptr [ecx+2], KGDT_R0_CODE
- mov word ptr [ecx+4], 0x8E00
+ mov word ptr [ecx+4], HEX(08E00)
shr eax, 16
mov [ecx+6], ax
call _HalpAcquireSystemHardwareSpinLock@0
/* Now initialize register A on the CMOS */
- mov ax, (0x2D << 8) | 0xA
- out 0x70, al
+ mov ax, HEX(2D00) OR CMOS_REGISTER_A
+ out CMOS_ADDR, al
jmp $+2
mov al, ah
- out 0x71, al
+ out CMOS_DATA, al
jmp $+2
/* Read register B */
- mov ax, 0xB
- out 0x70, al
+ mov ax, CMOS_REGISTER_B
+ out CMOS_ADDR, al
jmp $+2
- in al, 0x71
+ in al, CMOS_DATA
jmp $+2
/* Don't touch the LastKnownGoodConfig hack */
mov ah, al
/* Enable the interrupt */
- or ah, 0x42
+ or ah, HEX(42)
/* Now write the register B */
- mov al, 0xB
- out 0x70, al
+ mov al, CMOS_REGISTER_B
+ out CMOS_ADDR, al
jmp $+2
mov al, ah
- out 0x71, al
+ out CMOS_DATA, al
jmp $+2
/* Read register C */
- mov al, 0xC
- out 0x70, al
+ mov al, CMOS_REGISTER_C
+ out CMOS_ADDR, al
jmp $+2
- in al, 0x71
+ in al, CMOS_DATA
jmp $+2
/* Read register D */
- mov al, 0xD
- out 0x70, al
+ mov al, CMOS_REGISTER_D
+ out CMOS_ADDR, al
jmp $+2
- in al, 0x71
+ in al, CMOS_DATA
jmp $+2
/* Release CMOS lock */
call _HalpAcquireSystemHardwareSpinLock@0
/* Now initialize register A on the CMOS */
- mov ax, (0x2D << 8) | 0xA
- out 0x70, al
+ mov ax, HEX(2D00) OR CMOS_REGISTER_A
+ out CMOS_ADDR, al
jmp $+2
mov al, ah
- out 0x71, al
+ out CMOS_DATA, al
jmp $+2
/* Read register B */
- mov ax, 0xB
- out 0x70, al
+ mov ax, CMOS_REGISTER_B
+ out CMOS_ADDR, al
jmp $+2
- in al, 0x71
+ in al, CMOS_DATA
jmp $+2
/* Don't touch the LastKnownGoodConfig hack */
mov ah, al
/* Enable the interrupt */
- or ah, 0x42
+ or ah, HEX(42)
/* Now write the register B */
- mov al, 0xB
- out 0x70, al
+ mov al, CMOS_REGISTER_B
+ out CMOS_ADDR, al
jmp $+2
mov al, ah
- out 0x71, al
+ out CMOS_DATA, al
jmp $+2
/* Read register C */
- mov al, 0xC
- out 0x70, al
+ mov al, CMOS_REGISTER_C
+ out CMOS_ADDR, al
jmp $+2
- in al, 0x71
+ in al, CMOS_DATA
jmp $+2
/* Read register D */
- mov al, 0xD
- out 0x70, al
+ mov al, CMOS_REGISTER_D
+ out CMOS_ADDR, al
jmp $+2
- in al, 0x71
+ in al, CMOS_DATA
jmp $+2
/* Release CMOS lock */
call _HalpReleaseCmosSpinLock@0
/* Dismiss the interrupt */
- mov al, 0x20
- out 0xA0, al
- mov al, 0x62
- out 0x20, al
+ mov al, PIC_EOI
+ out PIC2_COMMAND, al
+ mov al, PIC_SPECIFIC_EOI2
+ out PIC1_COMMAND, al
/* Reset the counter and return back to the looper */
xor eax, eax
/* Prepare for interrupt return */
pop eax
push offset AndItsNotYou
- mov eax, 0x13
+ mov eax, HEX(13)
/* Acquire CMOS lock */
call _HalpAcquireSystemHardwareSpinLock@0
/* Now initialize register A on the CMOS */
- mov ax, (0x2D << 8) | 0xA
- out 0x70, al
+ mov ax, HEX(2D00) OR CMOS_REGISTER_A
+ out CMOS_ADDR, al
jmp $+2
mov al, ah
- out 0x71, al
+ out CMOS_DATA, al
jmp $+2
/* Read register B */
- mov ax, 0xB
- out 0x70, al
+ mov ax, CMOS_REGISTER_B
+ out CMOS_ADDR, al
jmp $+2
- in al, 0x71
+ in al, CMOS_DATA
jmp $+2
/* Don't touch the LastKnownGoodConfig hack */
mov ah, al
/* Disable the interrupt */
- or ah, 0x2
+ or ah, 2
/* Now write the register B */
- mov al, 0xB
- out 0x70, al
+ mov al, CMOS_REGISTER_B
+ out CMOS_ADDR, al
jmp $+2
mov al, ah
- out 0x71, al
+ out CMOS_DATA, al
jmp $+2
/* Read register C */
- mov al, 0xC
- out 0x70, al
+ mov al, CMOS_REGISTER_C
+ out CMOS_ADDR, al
jmp $+2
- in al, 0x71
+ in al, CMOS_DATA
jmp $+2
/* Release CMOS lock */
call _HalpReleaseCmosSpinLock@0
/* Dismiss the interrupt */
- mov al, 0x20
- out 0xA0, al
- mov al, 0x62
- out 0x20, al
+ mov al, PIC_EOI
+ out PIC2_COMMAND, al
+ mov al, PIC_SPECIFIC_EOI2
+ out PIC1_COMMAND, al
/* Disable interrupts on return */
- and word ptr [esp+8], ~EFLAGS_INTERRUPT_MASK
+ and word ptr [esp+8], NOT EFLAGS_INTERRUPT_MASK
iretd
/************************* WE ARE BACK FROM RTC ***************************/
/* Restore the mask */
pop eax
- out 0x21, al
+ out PIC1_DATA, al
shr eax, 8
- out 0xA1, al
+ out PIC2_DATA, al
/* Restore EFLAGS */
popf
mov esp, ebp
pop ebp
ret
-.endfunc
+
#ifndef _MINIHAL_
-.globl _KeStallExecutionProcessor@4
-.func KeStallExecutionProcessor@4
+PUBLIC _KeStallExecutionProcessor@4
_KeStallExecutionProcessor@4:
/* Get the number of microseconds required */
Done:
/* Return */
ret 4
-.endfunc
#endif
-.global _KeQueryPerformanceCounter@4
-.func KeQueryPerformanceCounter@4
+PUBLIC _KeQueryPerformanceCounter@4
_KeQueryPerformanceCounter@4:
/* Check if we were called too early */
LoopPostInt:
/* Get the current value */
- mov ebx, _HalpPerfCounterLow
- mov esi, _HalpPerfCounterHigh
+ mov ebx, dword ptr _HalpPerfCounterLow
+ mov esi, dword ptr _HalpPerfCounterHigh
/* Read 8254 timer */
- mov al, 0
- out 0x43, al
- in al, 0x92
- or al, _HalpPerfCounterCutoff
- out 0x92, al
+ mov al, 0 /* Interrupt on terminal count */
+ out PIT_MODE, al
+ in al, SYSTEM_CTRL_PORT_A
+ or al, byte ptr _HalpPerfCounterCutoff
+ out SYSTEM_CTRL_PORT_A, al
jmp $+2
- in al, 0x40
+ in al, PIT_CH0
jmp $+2
movzx ecx, al
- in al, 0x40
+ in al, PIT_CH0
mov ch, al
/* Enable interrupts and do a short wait */
cli
/* Get the counter value again */
- mov eax, _HalpPerfCounterLow
- mov edx, _HalpPerfCounterHigh
+ mov eax, dword ptr _HalpPerfCounterLow
+ mov edx, dword ptr _HalpPerfCounterHigh
/* Check if someone updated the counter */
cmp eax, ebx
/* Check if the current 8254 value causes rollover */
neg ecx
- add ecx, _HalpCurrentRollOver
+ add ecx, dword ptr _HalpCurrentRollOver
jnb DoRollOver
SetSum:
adc edx, 0
/* Check if we're above or below the last high value */
- cmp edx, _HalpLastPerfCounterHigh
+ cmp edx, dword ptr _HalpLastPerfCounterHigh
jb short BelowHigh
jnz short BelowLow
/* Check if we're above or below the last low value */
- cmp eax, _HalpLastPerfCounterLow
+ cmp eax, dword ptr _HalpLastPerfCounterLow
jb BelowHigh
BelowLow:
/* Update the last value and bring back interrupts */
- mov _HalpLastPerfCounterLow, eax
- mov _HalpLastPerfCounterHigh, edx
+ mov dword ptr _HalpLastPerfCounterLow, eax
+ mov dword ptr _HalpLastPerfCounterHigh, edx
popf
/* Check if caller wants frequency */
/* We might have an incoming interrupt, save EFLAGS and reset rollover */
mov esi, [esp]
- mov ecx, _HalpCurrentRollOver
+ mov ecx, dword ptr _HalpCurrentRollOver
popf
/* Check if interrupts were enabled and try again */
BelowHigh:
/* Get the last counter values */
- mov ebx, _HalpLastPerfCounterLow
- mov esi, _HalpLastPerfCounterHigh
+ mov ebx, dword ptr _HalpLastPerfCounterLow
+ mov esi, dword ptr _HalpLastPerfCounterHigh
/* Check if the previous value was 0 and go back if yes */
mov ecx, ebx
sub ebx, eax
sbb esi, edx
jnz InvalidCount
- cmp ebx, _HalpCurrentRollOver
+ cmp ebx, dword ptr _HalpCurrentRollOver
jg InvalidCount
/* Fixup the count with the last known value */
InvalidCount:
popf
xor eax, eax
- mov _HalpLastPerfCounterLow, eax
- mov _HalpLastPerfCounterHigh, eax
+ mov dword ptr _HalpLastPerfCounterLow, eax
+ mov dword ptr _HalpLastPerfCounterHigh, eax
jmp LoopPreInt
-.endfunc
+
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/i386/asm.h>
+#include <asm.inc>
+
+#include <ks386.inc>
#include <internal/i386/asmmacro.S>
-.code32
-.text
+.code
TRAP_ENTRY HalpTrap0D, 0
TRAP_ENTRY HalpApcInterrupt, KI_SOFTWARE_TRAP
.space 2048
_HalpRealModeEnd:
PUBLIC _HalpRealModeEnd
+.endcode16
+END
--- /dev/null
+/*\r
+ * COPYRIGHT: See COPYING in the top level directory\r
+ * PROJECT: ReactOS Kernel\r
+ * FILE: ntoskrnl/include/amd64/asmmacro.S\r
+ * PURPOSE:     ASM macros for GAS and MASM/ML64\r
+ * PROGRAMMERS: Timo Kreuzer (timo.kreuzer@reactos.org)\r
+ */\r
+\r
+#ifdef _USE_ML\r
+\r
+/* Allow ".name" identifiers */\r
+OPTION DOTNAME\r
+\r
+.686P\r
+.XMM\r
+.MODEL FLAT\r
+ASSUME CS:NOTHING, DS:NOTHING, ES:NOTHING, FS:NOTHING, GS:NOTHING\r
+\r
+/* Hex numbers need to be in 01ABh format */\r
+#define HEX(x) 0##x##h\r
+\r
+/* Macro values need to be marked */\r
+#define VAL(x) x\r
+\r
+/* MASM/ML doesn't want explicit [rip] addressing */\r
+rip = 0\r
+\r
+/* Due to MASM's reverse syntax, we are forced to use a precompiler macro */\r
+#define MACRO(name, ...) name MACRO __VA_ARGS__\r
+\r
+/* To avoid reverse syntax we provide a new macro .PROC, replacing PROC... */\r
+.PROC MACRO name\r
+ name PROC FRAME\r
+ _name:\r
+ENDM\r
+\r
+/* ... and .ENDP, replacing ENDP */\r
+.ENDP MACRO name\r
+ name ENDP\r
+ENDM\r
+\r
+/* MASM doesn't have an ASCII macro */\r
+.ASCII MACRO text\r
+ DB text\r
+ENDM\r
+\r
+/* MASM doesn't have an ASCIZ macro */\r
+.ASCIZ MACRO text\r
+ DB text, 0\r
+ENDM\r
+\r
+#define lgdt lgdt fword ptr ds:\r
+\r
+#define lidt lidt fword ptr ds:\r
+\r
+ljmp MACRO segment, offset\r
+ DB 0\r
+ENDM\r
+\r
+.code64 MACRO\r
+ .code\r
+ENDM\r
+\r
+.code32 MACRO\r
+ .code\r
+ .586P\r
+ENDM\r
+\r
+.code16 MACRO\r
+ ASSUME nothing\r
+ .text SEGMENT use16\r
+ENDM\r
+\r
+.endcode16 MACRO\r
+ .text ENDS\r
+ENDM\r
+\r
+.bss MACRO\r
+ .DATA?\r
+ ASSUME nothing\r
+ENDM\r
+\r
+//.text MACRO\r
+//ENDM\r
+\r
+.align MACRO alignment\r
+ ALIGN alignment\r
+ENDM\r
+\r
+.byte MACRO args:VARARG\r
+ db args\r
+ENDM\r
+\r
+.short MACRO args:VARARG\r
+ dw args\r
+ENDM\r
+\r
+.word MACRO args:VARARG\r
+ dw args\r
+ENDM\r
+\r
+.long MACRO args:VARARG\r
+ dd args\r
+ENDM\r
+\r
+.double MACRO args:VARARG\r
+ dq args\r
+ENDM\r
+\r
+.org MACRO value\r
+ ORG value\r
+ENDM\r
+\r
+.fill MACRO repeat, size, value\r
+// FIXME\r
+ENDM\r
+\r
+ljmp MACRO segment, offset\r
+// FIXME\r
+ENDM\r
+\r
+UNIMPLEMENTED MACRO name\r
+ENDM\r
+\r
+/* We need this to distinguish repeat from macros */\r
+#define ENDR ENDM\r
+\r
+#else /***********************************************************************/\r
+\r
+/* Force intel syntax */\r
+.intel_syntax noprefix\r
+\r
+.altmacro\r
+\r
+/* Hex numbers need to be in 0x1AB format */\r
+#define HEX(y) 0x##y\r
+\r
+/* Macro values need to be marked */\r
+#define VAL(x) \x\r
+\r
+/* Due to MASM's reverse syntax, we are forced to use a precompiler macro */\r
+#define MACRO(...) .macro __VA_ARGS__\r
+#define ENDM .endm\r
+\r
+/* To avoid reverse syntax we provide a new macro .PROC, replacing PROC... */\r
+.macro .PROC name\r
+ .func \name\r
+ \name:\r
+ .cfi_startproc\r
+ .equ cfa_current_offset, -8\r
+.endm\r
+\r
+/* ... and .ENDP, replacing ENDP */\r
+.macro .ENDP name\r
+ .cfi_endproc\r
+ .endfunc\r
+.endm\r
+\r
+/* MASM compatible PUBLIC */\r
+.macro PUBLIC symbol\r
+ .global \symbol\r
+.endm\r
+\r
+/* Dummy ASSUME */\r
+.macro ASSUME p1 p2 p3 p4 p5 p6 p7 p8\r
+.endm\r
+\r
+/* MASM needs an end tag for segments */\r
+.macro .endcode16\r
+.endm\r
+\r
+/* MASM compatible ALIGN */\r
+#define ALIGN .align\r
+\r
+/* MASM compatible REPEAT, additional ENDR */\r
+#define REPEAT .rept\r
+#define ENDR .endr\r
+\r
+.macro ljmp segment, offset\r
+ jmp far ptr \segment:\offset\r
+.endm\r
+\r
+/* MASM compatible EXTERN */\r
+.macro EXTERN name\r
+.endm\r
+\r
+/* MASM needs an END tag */\r
+#define END\r
+\r
+.macro .MODEL model\r
+.endm\r
+\r
+.macro .code\r
+ .text\r
+.endm\r
+\r
+/* Macros for x64 stack unwind OPs */\r
+\r
+.macro .allocstack size\r
+ .cfi_adjust_cfa_offset \size\r
+ .set cfa_current_offset, cfa_current_offset - \size\r
+.endm\r
+\r
+code = 1\r
+.macro .pushframe param=0\r
+ .if (\param)\r
+ .cfi_adjust_cfa_offset 0x30\r
+ .set cfa_current_offset, cfa_current_offset - 0x30\r
+ .else\r
+ .cfi_adjust_cfa_offset 0x28\r
+ .set cfa_current_offset, cfa_current_offset - 0x28\r
+ .endif\r
+.endm\r
+\r
+.macro .pushreg reg\r
+ .cfi_adjust_cfa_offset 8\r
+ .equ cfa_current_offset, cfa_current_offset - 8\r
+ .cfi_offset \reg, cfa_current_offset\r
+.endm\r
+\r
+.macro .savereg reg, offset\r
+ // checkme!!!\r
+ .cfi_offset \reg, \offset\r
+.endm\r
+\r
+.macro .savexmm128 reg, offset\r
+ // checkme!!!\r
+ .cfi_offset \reg, \offset\r
+.endm\r
+\r
+.macro .setframe reg, offset\r
+ .cfi_def_cfa reg, \offset\r
+ .equ cfa_current_offset, \offset\r
+.endm\r
+\r
+.macro .endprolog\r
+.endm\r
+\r
+.macro UNIMPLEMENTED2 file, line, func\r
+\r
+ jmp 3f\r
+1: .asciz "\func"\r
+2: .asciz \file\r
+3:\r
+ sub rsp, 0x20\r
+ lea rcx, MsgUnimplemented[rip]\r
+ lea rdx, 1b[rip]\r
+ lea r8, 2b[rip]\r
+ mov r9, \line\r
+ call DbgPrint\r
+ add rsp, 0x20\r
+.endm\r
+#define UNIMPLEMENTED UNIMPLEMENTED2 __FILE__, __LINE__,\r
+\r
+/* MASM/ML uses ".if" for runtime conditionals, and "if" for compile time\r
+ conditionals. We therefore use "if", too. .if shouldn't be used at all */\r
+#define if .if\r
+#define endif .endif\r
+#define else .else\r
+#define elseif .elseif\r
+\r
+#endif\r
--- /dev/null
+\r
+/* Pointer size */\r
+SizeofPointer = 0x4\r
+\r
+/* Breakpoints */\r
+BREAKPOINT_BREAK = 0x0\r
+BREAKPOINT_PRINT = 0x1\r
+BREAKPOINT_PROMPT = 0x2\r
+BREAKPOINT_LOAD_SYMBOLS = 0x3\r
+BREAKPOINT_UNLOAD_SYMBOLS = 0x4\r
+BREAKPOINT_COMMAND_STRING = 0x5\r
+\r
+/* Context Frame Flags */\r
+CONTEXT_FULL = 0x10007\r
+CONTEXT_CONTROL = 0x10001\r
+CONTEXT_INTEGER = 0x10002\r
+CONTEXT_SEGMENTS = 0x10004\r
+CONTEXT_FLOATING_POINT = 0x10008\r
+CONTEXT_DEBUG_REGISTERS = 0x10010\r
+\r
+/* Exception flags */\r
+EXCEPTION_NONCONTINUABLE = 0x1\r
+EXCEPTION_UNWINDING = 0x2\r
+EXCEPTION_EXIT_UNWIND = 0x4\r
+EXCEPTION_STACK_INVALID = 0x8\r
+EXCEPTION_NESTED_CALL = 0x10\r
+EXCEPTION_TARGET_UNWIND = 0x20\r
+EXCEPTION_COLLIDED_UNWIND = 0x20\r
+EXCEPTION_UNWIND = 0x6\r
+EXCEPTION_EXECUTE_HANDLER = 0x1\r
+EXCEPTION_CONTINUE_SEARCH = 0x0\r
+EXCEPTION_CONTINUE_EXECUTION = 0xffffffff\r
+EXCEPTION_CHAIN_END = 0xffffffff\r
+\r
+/* Exception types */\r
+ExceptionContinueExecution = 0x0\r
+ExceptionContinueSearch = 0x1\r
+ExceptionNestedException = 0x2\r
+ExceptionCollidedUnwind = 0x3\r
+\r
+/* Lock Queue */\r
+LOCK_QUEUE_WAIT = 0x1\r
+LOCK_QUEUE_OWNER = 0x2\r
+LockQueueDispatcherLock = 0x0\r
+\r
+/* Process states */\r
+ProcessInMemory = 0x0\r
+ProcessOutOfMemory = 0x1\r
+ProcessInTransition = 0x2\r
+\r
+/* Processor mode */\r
+KernelMode = 0x0\r
+UserMode = 0x1\r
+\r
+/* Status codes */\r
+STATUS_ACCESS_VIOLATION = 0xc0000005\r
+STATUS_ASSERTION_FAILURE = 0xc0000420\r
+STATUS_ARRAY_BOUNDS_EXCEEDED = 0xc000008c\r
+STATUS_BAD_COMPRESSION_BUFFER = 0xc0000242\r
+STATUS_BREAKPOINT = 0x80000003\r
+STATUS_CALLBACK_POP_STACK = 0xc0000423\r
+STATUS_DATATYPE_MISALIGNMENT = 0x80000002\r
+STATUS_FLOAT_DENORMAL_OPERAND = 0xc000008d\r
+STATUS_FLOAT_DIVIDE_BY_ZERO = 0xc000008e\r
+STATUS_FLOAT_INEXACT_RESULT = 0xc000008f\r
+STATUS_FLOAT_INVALID_OPERATION = 0xc0000090\r
+STATUS_FLOAT_OVERFLOW = 0xc0000091\r
+STATUS_FLOAT_STACK_CHECK = 0xc0000092\r
+STATUS_FLOAT_UNDERFLOW = 0xc0000093\r
+STATUS_FLOAT_MULTIPLE_FAULTS = 0xc00002b4\r
+STATUS_FLOAT_MULTIPLE_TRAPS = 0xc00002b5\r
+STATUS_GUARD_PAGE_VIOLATION = 0x80000001\r
+STATUS_ILLEGAL_FLOAT_CONTEXT = 0xc000014a\r
+STATUS_ILLEGAL_INSTRUCTION = 0xc000001d\r
+STATUS_INSTRUCTION_MISALIGNMENT = 0xc00000aa\r
+STATUS_INVALID_HANDLE = 0xc0000008\r
+STATUS_INVALID_LOCK_SEQUENCE = 0xc000001e\r
+STATUS_INVALID_OWNER = 0xc000005a\r
+STATUS_INVALID_PARAMETER = 0xc000000d\r
+STATUS_INVALID_PARAMETER_1 = 0xc00000ef\r
+STATUS_INVALID_SYSTEM_SERVICE = 0xc000001c\r
+STATUS_INTEGER_DIVIDE_BY_ZERO = 0xc0000094\r
+STATUS_INTEGER_OVERFLOW = 0xc0000095\r
+STATUS_IN_PAGE_ERROR = 0xc0000006\r
+STATUS_KERNEL_APC = 0x100\r
+STATUS_LONGJUMP = 0x80000026\r
+STATUS_NO_CALLBACK_ACTIVE = 0xc0000258\r
+STATUS_NO_EVENT_PAIR = 0xc000014e\r
+STATUS_PRIVILEGED_INSTRUCTION = 0xc0000096\r
+STATUS_SINGLE_STEP = 0x80000004\r
+STATUS_STACK_BUFFER_OVERRUN = 0xc0000409\r
+STATUS_STACK_OVERFLOW = 0xc00000fd\r
+STATUS_SUCCESS = 0x0\r
+STATUS_THREAD_IS_TERMINATING = 0xc000004b\r
+STATUS_TIMEOUT = 0x102\r
+STATUS_UNWIND = 0xc0000027\r
+STATUS_UNWIND_CONSOLIDATE = 0x80000029\r
+STATUS_USER_APC = 0xc0\r
+STATUS_WAKE_SYSTEM_DEBUGGER = 0x80000007\r
+\r
+/* TLS defines */\r
+TLS_MINIMUM_AVAILABLE = 0x40\r
+TLS_EXPANSION_SLOTS = 0x400\r
+\r
+/* Thread states */\r
+Initialized = 0x0\r
+Ready = 0x1\r
+Running = 0x2\r
+Standby = 0x3\r
+Terminated = 0x4\r
+Waiting = 0x5\r
+\r
+/* Wait type / reason */\r
+WrExecutive = 0x7\r
+WrMutex = 0x1d\r
+WrDispatchInt = 0x1f\r
+WrQuantumEnd = 0x1e\r
+WrEventPair = 0xe\r
+WaitAny = 0x1\r
+WaitAll = 0x0\r
+\r
+/* Interrupt object types */\r
+InLevelSensitive = 0x0\r
+InLatched = 0x1\r
+\r
+/* Bug Check Codes */\r
+APC_INDEX_MISMATCH = 0x1\r
+INVALID_AFFINITY_SET = 0x3\r
+INVALID_DATA_ACCESS_TRAP = 0x4\r
+IRQL_NOT_GREATER_OR_EQUAL = 0x9\r
+IRQL_NOT_LESS_OR_EQUAL = 0xa\r
+NO_USER_MODE_CONTEXT = 0xe\r
+SPIN_LOCK_ALREADY_OWNED = 0xf\r
+SPIN_LOCK_NOT_OWNED = 0x10\r
+THREAD_NOT_MUTEX_OWNER = 0x11\r
+TRAP_CAUSE_UNKNOWN = 0x12\r
+KMODE_EXCEPTION_NOT_HANDLED = 0x1e\r
+KERNEL_APC_PENDING_DURING_EXIT = 0x20\r
+PANIC_STACK_SWITCH = 0x2b\r
+DATA_BUS_ERROR = 0x2e\r
+INSTRUCTION_BUS_ERROR = 0x2f\r
+SYSTEM_EXIT_OWNED_MUTEX = 0x39\r
+PAGE_FAULT_WITH_INTERRUPTS_OFF = 0x49\r
+IRQL_GT_ZERO_AT_SYSTEM_SERVICE = 0x4a\r
+DATA_COHERENCY_EXCEPTION = 0x55\r
+INSTRUCTION_COHERENCY_EXCEPTION = 0x56\r
+HAL1_INITIALIZATION_FAILED = 0x61\r
+UNEXPECTED_KERNEL_MODE_TRAP = 0x7f\r
+NMI_HARDWARE_FAILURE = 0x80\r
+SPIN_LOCK_INIT_FAILURE = 0x81\r
+ATTEMPTED_SWITCH_FROM_DPC = 0xb8\r
+\r
+/* IRQL */\r
+PASSIVE_LEVEL = 0x0\r
+APC_LEVEL = 0x1\r
+DISPATCH_LEVEL = 0x2\r
+CLOCK1_LEVEL = 0x1c\r
+CLOCK2_LEVEL = 0x1c\r
+IPI_LEVEL = 0x1d\r
+POWER_LEVEL = 0x1e\r
+PROFILE_LEVEL = 0x1b\r
+HIGH_LEVEL = 0x1f\r
+#ifdef NT_UP\r
+SYNCH_LEVEL = 0x2\r
+#else\r
+SYNCH_LEVEL = 0x1b\r
+#endif\r
+\r
+/* Stack sizes */\r
+KERNEL_STACK_SIZE = 0x3000\r
+KERNEL_LARGE_STACK_SIZE = 0xf000\r
+KERNEL_LARGE_STACK_COMMIT = 0x3000\r
+\r
+/* Miscellaneous Definitions */\r
+LOW_REALTIME_PRIORITY = 0x10\r
+CLOCK_QUANTUM_DECREMENT = 0x3\r
+WAIT_QUANTUM_DECREMENT = 0x1\r
+MAXIMUM_PROCESSORS = 0x20\r
+INITIAL_STALL_COUNT = 0x64\r
+KI_EXCEPTION_ACCESS_VIOLATION = 0x10000004\r
+Executive = 0x0\r
+FALSE = 0x0\r
+TRUE = 0x1\r
+DBG_STATUS_CONTROL_C = 0x1\r
+USER_SHARED_DATA = 0x7ffe0000\r
+PAGE_SIZE = 0x1000\r
+MAXIMUM_IDTVECTOR = 0xff\r
+PRIMARY_VECTOR_BASE = 0x30\r
+RPL_MASK = 0x3\r
+MODE_MASK = 0x1\r
+NUMBER_SERVICE_TABLES = 0x2\r
+SERVICE_NUMBER_MASK = 0xfff\r
+SERVICE_TABLE_SHIFT = 0x8\r
+SERVICE_TABLE_MASK = 0x10\r
+SERVICE_TABLE_TEST = 0x10\r
+\r
+/* KAPC */\r
+ApType = 0x0\r
+ApSize = 0x2\r
+ApThread = 0x8\r
+ApApcListEntry = 0xc\r
+ApKernelRoutine = 0x14\r
+ApRundownRoutine = 0x18\r
+ApNormalRoutine = 0x1c\r
+ApNormalContext = 0x20\r
+ApSystemArgument1 = 0x24\r
+ApSystemArgument2 = 0x28\r
+ApApcStateIndex = 0x2c\r
+ApApcMode = 0x2d\r
+ApInserted = 0x2e\r
+ApcObjectLength = 0x30\r
+\r
+/* KAPC_STATE */\r
+AsApcListHead = 0x0\r
+AsProcess = 0x10\r
+AsKernelApcInProgress = 0x14\r
+AsKernelApcPending = 0x15\r
+AsUserApcPending = 0x16\r
+\r
+/* CLIENT_ID */\r
+CidUniqueProcess = 0x0\r
+CidUniqueThread = 0x4\r
+\r
+/* RTL_CRITICAL_SECTION */\r
+CsDebugInfo = 0x0\r
+CsLockCount = 0x4\r
+CsRecursionCount = 0x8\r
+CsOwningThread = 0xc\r
+CsLockSemaphore = 0x10\r
+CsSpinCount = 0x14\r
+\r
+/* RTL_CRITICAL_SECTION_DEBUG */\r
+CsType = 0x0\r
+CsCreatorBackTraceIndex = 0x2\r
+CsCriticalSection = 0x4\r
+CsProcessLocksList = 0x8\r
+CsEntryCount = 0x10\r
+CsContentionCount = 0x14\r
+\r
+/* KDEVICE_QUEUE_ENTRY */\r
+DeDeviceListEntry = 0x0\r
+DeSortKey = 0x8\r
+DeInserted = 0xc\r
+DeviceQueueEntryLength = 0x10\r
+\r
+/* KDPC */\r
+DpType = 0x0\r
+DpImportance = 0x1\r
+DpNumber = 0x2\r
+DpDpcListEntry = 0x4\r
+DpDeferredRoutine = 0xc\r
+DpDeferredContext = 0x10\r
+DpSystemArgument1 = 0x14\r
+DpSystemArgument2 = 0x18\r
+DpDpcData = 0x1c\r
+DpcObjectLength = 0x20\r
+\r
+/* KDEVICE_QUEUE */\r
+DvType = 0x0\r
+DvSize = 0x2\r
+DvDeviceListHead = 0x4\r
+DvSpinLock = 0xc\r
+DvBusy = 0x10\r
+DeviceQueueObjectLength = 0x14\r
+\r
+/* EXCEPTION_RECORD */\r
+ErExceptionCode = 0x0\r
+ErExceptionFlags = 0x4\r
+ErExceptionRecord = 0x8\r
+ErExceptionAddress = 0xc\r
+ErNumberParameters = 0x10\r
+ErExceptionInformation = 0x14\r
+ExceptionRecordLength = 0x50\r
+EXCEPTION_RECORD_LENGTH = 0x50\r
+\r
+/* EPROCESS */\r
+EpDebugPort = 0xcc\r
+EpVdmObjects = 0x144\r
+ExecutiveProcessObjectLength = 0x278\r
+\r
+/* KEVENT */\r
+EvType = 0x0\r
+EvSize = 0x2\r
+EvSignalState = 0x4\r
+EvWaitListHead = 0x8\r
+EventObjectLength = 0x10\r
+\r
+/* FAST_MUTEX */\r
+FmCount = 0x0\r
+FmOwner = 0x4\r
+FmContention = 0x8\r
+FmOldIrql = 0x1c\r
+\r
+/* KINTERRUPT */\r
+InType = 0x0\r
+InSize = 0x2\r
+InInterruptListEntry = 0x4\r
+InServiceRoutine = 0xc\r
+InServiceContext = 0x10\r
+InSpinLock = 0x14\r
+InTickCount = 0x18\r
+InActualLock = 0x1c\r
+InDispatchAddress = 0x20\r
+InVector = 0x24\r
+InIrql = 0x28\r
+InSynchronizeIrql = 0x29\r
+InFloatingSave = 0x2a\r
+InConnected = 0x2b\r
+InNumber = 0x2c\r
+InShareVector = 0x2d\r
+InMode = 0x30\r
+InServiceCount = 0x34\r
+InDispatchCount = 0x38\r
+InDispatchCode = 0x3c\r
+InterruptObjectLength = 0x1e4\r
+\r
+/* IO_STATUS_BLOCK */\r
+IoStatus = 0x0\r
+IoPointer = 0x0\r
+IoInformation = 0x4\r
+\r
+/* KNODE */\r
+KnPfnDereferenceSListHead = 0x8\r
+KnProcessorMask = 0x10\r
+KnColor = 0x14\r
+KnSeed = 0x18\r
+KnNodeNumber = 0x19\r
+KnFlags = 0x1a\r
+knMmShiftedColor = 0x1e\r
+KnFreeCount = 0x22\r
+KnPfnDeferredList = 0x2a\r
+KNODE_SIZE = 0x2e\r
+\r
+/* KSPIN_LOCK_QUEUE */\r
+LqNext = 0x0\r
+LqLock = 0x4\r
+\r
+/* KLOCK_QUEUE_HANDLE */\r
+LqhNext = 0x0\r
+LqhLock = 0x4\r
+LqhOldIrql = 0x8\r
+LOCK_QUEUE_HEADER_SIZE = 0xc\r
+\r
+/* LARGE_INTEGER */\r
+LiLowPart = 0x0\r
+LiHighPart = 0x4\r
+\r
+/* LIST_ENTRY */\r
+LsFlink = 0x0\r
+LsBlink = 0x4\r
+\r
+/* PEB */\r
+PeKernelCallbackTable = 0x2c\r
+ProcessEnvironmentBlockLength = 0x230\r
+\r
+/* KPROFILE */\r
+PfType = 0x0\r
+PfSize = 0x2\r
+PfProfileListEntry = 0x4\r
+PfProcess = 0xc\r
+PfRangeBase = 0x10\r
+PfRangeLimit = 0x14\r
+PfBucketShift = 0x18\r
+PfBuffer = 0x1c\r
+PfSegment = 0x20\r
+PfAffinity = 0x24\r
+PfSource = 0x28\r
+PfStarted = 0x2c\r
+ProfileObjectLength = 0x30\r
+\r
+/* PORT_MESSAGE */\r
+PmLength = 0x0\r
+PmZeroInit = 0x4\r
+PmClientId = 0x8\r
+PmProcess = 0x8\r
+PmThread = 0xc\r
+PmMessageId = 0x10\r
+PmClientViewSize = 0x14\r
+PortMessageLength = 0x18\r
+\r
+/* KPROCESS */\r
+PrType = 0x0\r
+PrSize = 0x2\r
+PrSignalState = 0x4\r
+PrProfileListHead = 0x10\r
+PrDirectoryTableBase = 0x18\r
+PrLdtDescriptor = 0x20\r
+PrIopmOffset = 0x30\r
+PrInt21Descriptor = 0x28\r
+PrVdmTrapcHandler = 0x4c\r
+PrFlags = 0x6b\r
+PrActiveProcessors = 0x34\r
+PrKernelTime = 0x38\r
+PrUserTime = 0x3c\r
+PrReadyListHead = 0x40\r
+PrSwapListEntry = 0x48\r
+PrThreadListHead = 0x50\r
+PrProcessLock = 0x58\r
+PrAffinity = 0x5c\r
+PrProcessFlags = 0x60\r
+PrBasePriority = 0x64\r
+PrQuantumReset = 0x65\r
+PrState = 0x66\r
+PrStackCount = 0x6c\r
+KernelProcessObjectLength = 0x78\r
+\r
+/* KQUEUE */\r
+QuType = 0x0\r
+QuSize = 0x2\r
+QuSignalState = 0x4\r
+QuEntryListHead = 0x10\r
+QuCurrentCount = 0x18\r
+QuMaximumCount = 0x1c\r
+QuThreadListHead = 0x20\r
+QueueObjectLength = 0x28\r
+\r
+/* STRING */\r
+StrLength = 0x0\r
+StrMaximumLength = 0x2\r
+StrBuffer = 0x4\r
+\r
+/* TEB */\r
+TeCmTeb = 0x0\r
+TeExceptionList = 0x0\r
+TeStackBase = 0x4\r
+TeStackLimit = 0x8\r
+TeFiberData = 0x10\r
+TeSelf = 0x18\r
+TeEnvironmentPointer = 0x1c\r
+TeClientId = 0x20\r
+TeActiveRpcHandle = 0x28\r
+TeThreadLocalStoragePointer = 0x2c\r
+TeCountOfOwnedCriticalSections = 0x38\r
+TePeb = 0x30\r
+TeCsrClientThread = 0x3c\r
+TeWOW32Reserved = 0xc0\r
+TeExceptionCode = 0x1a4\r
+TeActivationContextStackPointer = 0x1a8\r
+TeGdiClientPID = 0x6c0\r
+TeGdiClientTID = 0x6c4\r
+TeGdiThreadLocalInfo = 0x6c8\r
+TeglDispatchTable = 0x7c4\r
+TeglReserved1 = 0xb68\r
+TeglReserved2 = 0xbdc\r
+TeglSectionInfo = 0xbe0\r
+TeglSection = 0xbe4\r
+TeglTable = 0xbe8\r
+TeglCurrentRC = 0xbec\r
+TeglContext = 0xbf0\r
+TeDeallocationStack = 0xe0c\r
+TeTlsSlots = 0xe10\r
+TeTlsExpansionSlots = 0xf94\r
+TeLastErrorValue = 0x34\r
+TeVdm = 0xf18\r
+TeInstrumentation = 0xf2c\r
+TeGdiBatchCount = 0xf70\r
+TeGuaranteedStackBytes = 0xf78\r
+TeFlsData = 0xfb4\r
+ThreadEnvironmentBlockLength = 0xfbc\r
+\r
+/* TIME_FIELDS */\r
+TfSecond = 0xa\r
+TfMinute = 0x8\r
+TfHour = 0x6\r
+TfWeekday = 0xe\r
+TfDay = 0x4\r
+TfMonth = 0x2\r
+TfYear = 0x0\r
+TfMilliseconds = 0xc\r
+\r
+/* KTHREAD */\r
+ThType = 0x0\r
+ThSize = 0x2\r
+ThLock = 0x0\r
+ThDebugActive = 0x3\r
+ThSignalState = 0x4\r
+ThInitialStack = 0x18\r
+ThStackLimit = 0x1c\r
+ThKernelStack = 0x20\r
+ThThreadLock = 0x24\r
+ThAlerted = 0x5e\r
+ThApcState = 0x28\r
+ThPriority = 0x5b\r
+ThSwapBusy = 0x5d\r
+ThNextProcessor = 0x40\r
+ThDeferredProcessor = 0x41\r
+ThApcQueueLock = 0x44\r
+ThContextSwitches = 0x48\r
+ThState = 0x4c\r
+ThNpxState = 0x4d\r
+ThWaitIrql = 0x4e\r
+ThWaitMode = 0x4f\r
+ThWaitStatus = 0x50\r
+ThWaitBlockList = 0x54\r
+ThGateObject = 0x54\r
+ThWaitListEntry = 0x60\r
+ThSwapListEntry = 0x60\r
+ThQueue = 0x68\r
+ThWaitTime = 0x6c\r
+ThCombinedApcDisable = 0x70\r
+ThKernelApcDisable = 0x70\r
+ThSpecialApcDisable = 0x72\r
+ThTeb = 0x74\r
+ThTimer = 0x78\r
+ThThreadFlags = 0xa0\r
+ThServiceTable = 0x118\r
+ThWaitBlock = 0xa8\r
+ThResourceIndex = 0xef\r
+ThQueueListEntry = 0x108\r
+ThTrapFrame = 0x110\r
+ThCallbackStack = 0x114\r
+ThApcStateIndex = 0x11c\r
+ThIdealProcessor = 0x11d\r
+ThBasePriority = 0x121\r
+ThPriorityDecrement = 0x122\r
+ThAdjustReason = 0x42\r
+ThAdjustIncrement = 0x43\r
+ThPreviousMode = 0xd7\r
+ThSaturation = 0x123\r
+ThFreezeCount = 0x14f\r
+ThUserAffinity = 0x124\r
+ThProcess = 0x128\r
+ThAffinity = 0x12c\r
+ThUserIdealProcessor = 0x151\r
+ThApcStatePointer = 0x130\r
+ThSavedApcState = 0x138\r
+ThWaitReason = 0x5a\r
+ThSuspendCount = 0x150\r
+ThWin32Thread = 0x154\r
+ThStackBase = 0x158\r
+ThSuspendApc = 0x15c\r
+ThPowerState = 0x18b\r
+ThKernelTime = 0x160\r
+ThLegoData = 0x184\r
+ThLargeStack = 0x107\r
+ThUserTime = 0x18c\r
+ThSuspendSemaphore = 0x190\r
+ThSListFaultCount = 0x1a4\r
+ThThreadListEntry = 0x1a8\r
+ThMutantListHead = 0x10\r
+ThSListFaultAddress = 0x1b0\r
+KernelThreadObjectLength = 0x1b8\r
+ExecutiveThreadObjectLength = 0x250\r
+\r
+/* KTIMER */\r
+TiType = 0x0\r
+TiSize = 0x2\r
+TiInserted = 0x3\r
+TiSignalState = 0x4\r
+TiDueTime = 0x10\r
+TiTimerListEntry = 0x18\r
+TiDpc = 0x20\r
+TiPeriod = 0x24\r
+TimerObjectLength = 0x28\r
+\r
+/* TIME */\r
+\r
+/* KUSER_SHARED_DATA */\r
+UsTickCountMultiplier = 0x4\r
+UsInterruptTime = 0x8\r
+UsSystemTime = 0x14\r
+UsTimeZoneBias = 0x20\r
+UsImageNumberLow = 0x2c\r
+UsImageNumberHigh = 0x2e\r
+UsNtSystemRoot = 0x30\r
+UsMaxStackTraceDepth = 0x238\r
+UsCryptoExponent = 0x23c\r
+UsTimeZoneId = 0x240\r
+UsLargePageMinimum = 0x244\r
+UsReserved2 = 0x248\r
+UsNtProductType = 0x264\r
+UsProductTypeIsValid = 0x268\r
+UsNtMajorVersion = 0x26c\r
+UsNtMinorVersion = 0x270\r
+UsProcessorFeatures = 0x274\r
+UsReserved1 = 0x2b4\r
+UsReserved3 = 0x2b8\r
+UsTimeSlip = 0x2bc\r
+UsAlternativeArchitecture = 0x2c0\r
+UsSystemExpirationDate = 0x2c8\r
+UsSuiteMask = 0x2d0\r
+UsKdDebuggerEnabled = 0x2d4\r
+UsActiveConsoleId = 0x2d8\r
+UsDismountCount = 0x2dc\r
+UsComPlusPackage = 0x2e0\r
+UsLastSystemRITEventTickCount = 0x2e4\r
+UsNumberOfPhysicalPages = 0x2e8\r
+UsSafeBootMode = 0x2ec\r
+UsTestRetInstruction = 0x2f8\r
+UsSystemCall = 0x300\r
+UsSystemCallReturn = 0x304\r
+UsSystemCallPad = 0x308\r
+UsTickCount = 0x320\r
+UsTickCountQuad = 0x320\r
+UsWow64SharedInformation = 0x340\r
+\r
+/* KWAIT_BLOCK */\r
+WbWaitListEntry = 0x0\r
+WbThread = 0x8\r
+WbObject = 0xc\r
+WbNextWaitBlock = 0x10\r
+WbWaitKey = 0x14\r
+WbWaitType = 0x16\r
+\r
+/* CR0 flags */\r
+CR0_PE = 0x1\r
+CR0_MP = 0x2\r
+CR0_EM = 0x4\r
+CR0_TS = 0x8\r
+CR0_ET = 0x10\r
+CR0_NE = 0x20\r
+CR0_WP = 0x10000\r
+CR0_AM = 0x40000\r
+CR0_NW = 0x20000000\r
+CR0_CD = 0x40000000\r
+CR0_PG = 0x80000000\r
+\r
+/* CR4 flags */\r
+CR4_VME = 0x1\r
+CR4_PVI = 0x2\r
+CR4_TSD = 0x4\r
+CR4_DE = 0x8\r
+CR4_PSE = 0x10\r
+CR4_PAE = 0x20\r
+CR4_MCE = 0x40\r
+CR4_PGE = 0x80\r
+CR4_FXSR = 0x200\r
+CR4_XMMEXCPT = 0x400\r
+\r
+/* KeFeatureBits flags */\r
+KF_RDTSC = 0x2\r
+KF_CR4 = 0x4\r
+KF_GLOBAL_PAGE = 0x10\r
+KF_LARGE_PAGE = 0x20\r
+KF_CMPXCHG8B = 0x80\r
+KF_FAST_SYSCALL = 0x1000\r
+KF_V86_VIS = 0x1\r
+\r
+/* Machine type definitions */\r
+MACHINE_TYPE_ISA = 0x0\r
+MACHINE_TYPE_EISA = 0x1\r
+MACHINE_TYPE_MCA = 0x2\r
+\r
+/* EFLAGS */\r
+EFLAGS_TF = 0x100\r
+EFLAGS_INTERRUPT_MASK = 0x200\r
+EFLAGS_V86_MASK = 0x20000\r
+EFLAGS_ALIGN_CHECK = 0x40000\r
+EFLAGS_VIF = 0x80000\r
+EFLAGS_VIP = 0x100000\r
+EFLAGS_USER_SANITIZE = 0x3f4dd7\r
+\r
+/* KGDT selectors */\r
+KGDT_R3_DATA = 0x20\r
+KGDT_R3_CODE = 0x18\r
+KGDT_R0_CODE = 0x8\r
+KGDT_R0_DATA = 0x10\r
+KGDT_R0_PCR = 0x30\r
+KGDT_TSS = 0x28\r
+KGDT_R3_TEB = 0x38\r
+KGDT_DF_TSS = 0x50\r
+KGDT_NMI_TSS = 0x58\r
+KGDT_LDT = 0x48\r
+NPX_STATE_NOT_LOADED = 0xa\r
+NPX_STATE_LOADED = 0x0\r
+PF_XMMI_INSTRUCTIONS_AVAILABLE = 0x6\r
+EFLAG_SELECT = 0xc000\r
+\r
+/* CONTEXT */\r
+CsContextFlags = 0x0\r
+CsDr0 = 0x4\r
+CsDr1 = 0x8\r
+CsDr2 = 0xc\r
+CsDr3 = 0x10\r
+CsDr6 = 0x14\r
+CsDr7 = 0x18\r
+CsFloatSave = 0x1c\r
+CsSegGs = 0x8c\r
+CsSegFs = 0x90\r
+CsSegEs = 0x94\r
+CsSegDs = 0x98\r
+CsEdi = 0x9c\r
+CsEsi = 0xa0\r
+CsEbx = 0xa4\r
+CsEdx = 0xa8\r
+CsEcx = 0xac\r
+CsEax = 0xb0\r
+CsEbp = 0xb4\r
+CsEip = 0xb8\r
+CsSegCs = 0xbc\r
+CsEflags = 0xc0\r
+CsEsp = 0xc4\r
+CsSegSs = 0xc8\r
+CsExtendedRegisters = 0xcc\r
+ContextFrameLength = 0x2cc\r
+CONTEXT_LENGTH = 0x2cc\r
+\r
+/* KGDTENTRY */\r
+KgdtBaseLow = 0x2\r
+KgdtBaseMid = 0x4\r
+KgdtBaseHi = 0x7\r
+KgdtLimitHi = 0x6\r
+KgdtLimitLow = 0x0\r
+\r
+/* KTRAP_FRAME */\r
+TsExceptionList = 0x4c\r
+TsPreviousPreviousMode = 0x48\r
+TsSegGs = 0x30\r
+TsSegFs = 0x50\r
+TsSegEs = 0x34\r
+TsSegDs = 0x38\r
+TsEdi = 0x54\r
+TsEsi = 0x58\r
+TsEbp = 0x60\r
+TsEbx = 0x5c\r
+TsEdx = 0x3c\r
+TsEcx = 0x40\r
+TsEax = 0x44\r
+TsErrCode = 0x64\r
+TsEip = 0x68\r
+TsSegCs = 0x6c\r
+TsEflags = 0x70\r
+TsHardwareEsp = 0x74\r
+TsHardwareSegSs = 0x78\r
+TsTempSegCs = 0x10\r
+TsTempEsp = 0x14\r
+TsDbgEbp = 0x0\r
+TsDbgEip = 0x4\r
+TsDbgArgMark = 0x8\r
+TsDbgArgPointer = 0xc\r
+TsDr0 = 0x18\r
+TsDr1 = 0x1c\r
+TsDr2 = 0x20\r
+TsDr3 = 0x24\r
+TsDr6 = 0x28\r
+TsDr7 = 0x2c\r
+TsV86Es = 0x7c\r
+TsV86Ds = 0x80\r
+TsV86Fs = 0x84\r
+TsV86Gs = 0x88\r
+KTRAP_FRAME_LENGTH = 0x8c\r
+KTRAP_FRAME_ALIGN = 0x4\r
+FRAME_EDITED = 0xfff8\r
+\r
+/* KTSS */\r
+TssEsp0 = 0x4\r
+TssCR3 = 0x1c\r
+TssEip = 0x20\r
+TssEFlags = 0x24\r
+TssEax = 0x28\r
+TssEbx = 0x34\r
+TssEcx = 0x2c\r
+TssEdx = 0x30\r
+TssEsp = 0x38\r
+TssEbp = 0x3c\r
+TssEsi = 0x40\r
+TssEdi = 0x44\r
+TssEs = 0x48\r
+TssCs = 0x4c\r
+TssSs = 0x50\r
+TssDs = 0x54\r
+TssFs = 0x58\r
+TssGs = 0x5c\r
+TssLDT = 0x60\r
+TssIoMapBase = 0x66\r
+TssIoMaps = 0x68\r
+TssLength = 0x20ac\r
+\r
+/* KPCR */\r
+KPCR_EXCEPTION_LIST = 0x0\r
+KPCR_PERF_GLOBAL_GROUP_MASK = 0x8\r
+KPCR_CONTEXT_SWITCHES = 0x10\r
+KPCR_TEB = 0x18\r
+KPCR_SELF = 0x1c\r
+KPCR_PRCB = 0x20\r
+KPCR_IDT = 0x38\r
+KPCR_GDT = 0x3c\r
+KPCR_TSS = 0x40\r
+KPCR_STALL_SCALE_FACTOR = 0x4c\r
+KPCR_PRCB_DATA = 0x120\r
+KPCR_CURRENT_THREAD = 0x124\r
+KPCR_PRCB_NEXT_THREAD = 0x128\r
+KPCR_PRCB_DPC_QUEUE_DEPTH = 0xa4c\r
+KPCR_PRCB_DPC_STACK = 0xa68\r
+KPCR_PRCB_MAXIMUM_DPC_QUEUE_DEPTH = 0xa6c\r
+KPCR_PRCB_DPC_ROUTINE_ACTIVE = 0xa7a\r
+KPCR_PRCB_TIMER_REQUEST = 0xa88\r
+KPCR_PRCB_QUANTUM_END = 0xaa1\r
+KPCR_PRCB_DEFERRED_READY_LIST_HEAD = 0xc10\r
+KPCR_PRCB_POWER_STATE_IDLE_FUNCTION = 0xec0\r
+\r
+/* KTRAP_FRAME */\r
+KTRAP_FRAME_DEBUGEBP = 0x0\r
+KTRAP_FRAME_DEBUGEIP = 0x4\r
+KTRAP_FRAME_TEMPESP = 0x14\r
+KTRAP_FRAME_DR0 = 0x18\r
+KTRAP_FRAME_DR1 = 0x1c\r
+KTRAP_FRAME_DR2 = 0x20\r
+KTRAP_FRAME_DR3 = 0x24\r
+KTRAP_FRAME_DR6 = 0x28\r
+KTRAP_FRAME_DR7 = 0x2c\r
+KTRAP_FRAME_GS = 0x30\r
+KTRAP_FRAME_ES = 0x34\r
+KTRAP_FRAME_DS = 0x38\r
+KTRAP_FRAME_EDX = 0x3c\r
+KTRAP_FRAME_ECX = 0x40\r
+KTRAP_FRAME_EAX = 0x44\r
+KTRAP_FRAME_PREVIOUS_MODE = 0x48\r
+KTRAP_FRAME_EXCEPTION_LIST = 0x4c\r
+KTRAP_FRAME_FS = 0x50\r
+KTRAP_FRAME_EDI = 0x54\r
+KTRAP_FRAME_ESI = 0x58\r
+KTRAP_FRAME_EBX = 0x5c\r
+KTRAP_FRAME_EBP = 0x60\r
+KTRAP_FRAME_ERROR_CODE = 0x64\r
+KTRAP_FRAME_EIP = 0x68\r
+KTRAP_FRAME_EFLAGS = 0x70\r
+KTRAP_FRAME_ESP = 0x74\r
+KTRAP_FRAME_SS = 0x78\r
+KTRAP_FRAME_V86_ES = 0x7c\r
+KTRAP_FRAME_V86_DS = 0x80\r
+KTRAP_FRAME_V86_FS = 0x84\r
+KTRAP_FRAME_V86_GS = 0x88\r
+KTRAP_FRAME_SIZE = 0x8c\r
+FRAME_EDITED = 0xfff8\r
+\r
+/* CONTEXT */\r
+CONTEXT_FLAGS = 0x0\r
+CONTEXT_SEGGS = 0x8c\r
+CONTEXT_SEGFS = 0x90\r
+CONTEXT_SEGES = 0x94\r
+CONTEXT_SEGDS = 0x98\r
+CONTEXT_EDI = 0x9c\r
+CONTEXT_ESI = 0xa0\r
+CONTEXT_EBX = 0xa4\r
+CONTEXT_EDX = 0xa8\r
+CONTEXT_ECX = 0xac\r
+CONTEXT_EAX = 0xb0\r
+CONTEXT_EBP = 0xb4\r
+CONTEXT_EIP = 0xb8\r
+CONTEXT_SEGCS = 0xbc\r
+CONTEXT_EFLAGS = 0xc0\r
+CONTEXT_ESP = 0xc4\r
+CONTEXT_SEGSS = 0xc8\r
+CONTEXT_FRAME_LENGTH = 0x2cc\r
+\r
+/* FIBER */\r
+FIBER_PARAMETER = 0x0\r
+FIBER_EXCEPTION_LIST = 0x4\r
+FIBER_STACK_BASE = 0x8\r
+FIBER_STACK_LIMIT = 0xc\r
+FIBER_DEALLOCATION_STACK = 0x10\r
+FIBER_CONTEXT = 0x14\r
+FIBER_CONTEXT_FLAGS = 0x14\r
+FIBER_CONTEXT_EAX = 0xc4\r
+FIBER_CONTEXT_EBX = 0xb8\r
+FIBER_CONTEXT_ECX = 0xc0\r
+FIBER_CONTEXT_EDX = 0xbc\r
+FIBER_CONTEXT_ESI = 0xb4\r
+FIBER_CONTEXT_EDI = 0xb0\r
+FIBER_CONTEXT_EBP = 0xc8\r
+FIBER_CONTEXT_EIP = 0xcc\r
+FIBER_CONTEXT_ESP = 0xd8\r
+FIBER_CONTEXT_DR6 = 0x28\r
+FIBER_CONTEXT_FLOAT_SAVE_CONTROL_WORD = 0x30\r
+FIBER_CONTEXT_FLOAT_SAVE_STATUS_WORD = 0x34\r
+FIBER_CONTEXT_FLOAT_SAVE_TAG_WORD = 0x38\r
+FIBER_GUARANTEED_STACK_BYTES = 0x2e0\r
+FIBER_FLS_DATA = 0x2e4\r
+FIBER_ACTIVATION_CONTEXT_STACK = 0x2e8\r
+\r
+/* KTSS */\r
+KTSS_IOMAPBASE = 0x66\r
+KTSS_ESP0 = 0x4\r
+\r
+/* EXCEPTION_RECORD */\r
+EXCEPTION_RECORD_EXCEPTION_CODE = 0x0\r
+EXCEPTION_RECORD_EXCEPTION_FLAGS = 0x4\r
+EXCEPTION_RECORD_EXCEPTION_RECORD = 0x8\r
+EXCEPTION_RECORD_EXCEPTION_ADDRESS = 0xc\r
+EXCEPTION_RECORD_NUMBER_PARAMETERS = 0x10\r
+EXCEPTION_RECORD_EXCEPTION_ADDRESS = 0xc\r
+SIZEOF_EXCEPTION_RECORD = 0x50\r
+EXCEPTION_RECORD_LENGTH = 0x50\r
+\r
+/* KTHREAD */\r
+KTHREAD_DEBUG_ACTIVE = 0x3\r
+KTHREAD_INITIAL_STACK = 0x18\r
+KTHREAD_STACK_LIMIT = 0x1c\r
+KTHREAD_TEB = 0x74\r
+KTHREAD_KERNEL_STACK = 0x20\r
+KTHREAD_APCSTATE_PROCESS = 0x38\r
+KTHREAD_PENDING_KERNEL_APC = 0x3d\r
+KTHREAD_CONTEXT_SWITCHES = 0x48\r
+KTHREAD_STATE_ = 0x4c\r
+KTHREAD_NPX_STATE = 0x4d\r
+KTHREAD_WAIT_IRQL = 0x4e\r
+KTHREAD_WAIT_REASON = 0x5a\r
+KTHREAD_COMBINED_APC_DISABLE = 0x70\r
+KTHREAD_SPECIAL_APC_DISABLE = 0x72\r
+KTHREAD_LARGE_STACK = 0x107\r
+KTHREAD_TRAP_FRAME = 0x110\r
+KTHREAD_CALLBACK_STACK = 0x114\r
+KTHREAD_APC_STATE_INDEX = 0x11c\r
+KTHREAD_STACK_BASE = 0x158\r
+\r
+/* KPROCESS */\r
+KPROCESS_DIRECTORY_TABLE_BASE = 0x18\r
+KPROCESS_LDT_DESCRIPTOR0 = 0x20\r
+KPROCESS_LDT_DESCRIPTOR1 = 0x24\r
+KPROCESS_INT21_DESCRIPTOR0 = 0x28\r
+KPROCESS_INT21_DESCRIPTOR1 = 0x2c\r
+KPROCESS_IOPM_OFFSET = 0x30\r
+\r
+/* Teb */\r
+TEB_EXCEPTION_LIST = 0x0\r
+TEB_STACK_LIMIT = 0x8\r
+TEB_STACK_BASE = 0x4\r
+TEB_SELF = 0x18\r
+TEB_FIBER_DATA = 0x10\r
+TEB_PEB = 0x30\r
+TEB_EXCEPTION_CODE = 0x1a4\r
+PEB_KERNEL_CALLBACK_TABLE = 0x2c\r
+TEB_FLS_DATA = 0xfb4\r
+TEB_ACTIVATION_CONTEXT_STACK_POINTER = 0x1a8\r
+TEB_GUARANTEED_STACK_BYTES = 0xf78\r
+TEB_DEALLOCATION_STACK = 0xe0c\r
+\r
+/* Misc */\r
+NPX_FRAME_LENGTH = 0x210\r
+FN_CR0_NPX_STATE = 0x20c\r
+DR7_RESERVED_MASK = 0xdc00\r
+FP_CONTROL_WORD = 0x0\r
+FP_STATUS_WORD = 0x4\r
+FP_TAG_WORD = 0x8\r
+FP_DATA_SELECTOR = 0x18\r
+CBSTACK_RESULT = 0x20\r
+CBSTACK_RESULT_LENGTH = 0x24\r
+CBSTACK_TRAP_FRAME = 0x4\r
+CBSTACK_CALLBACK_STACK = 0x8\r
+SIZEOF_FX_SAVE_AREA = 0x210\r
+KUSER_SHARED_SYSCALL = 0x7ffe0300\r
+EXCEPTION_EXECUTE_HANDLER = 0x1\r
+STATUS_CALLBACK_POP_STACK = 0xc0000423\r
+CONTEXT_ALIGNED_SIZE = 0x2cc\r
+PROCESSOR_FEATURE_FXSR = 0x7ffe0278\r
* PROGRAMER: Timo Kreuzer (timo.kreuzer@reactos.org)
*/
-#include <reactos/asm.h>
+#include <asm.inc>
/* GLOBALS ****************************************************************/
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* FUNCTIONS *****************************************************************/
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
/* FUNCTIONS *****************************************************************/
* PROGRAMMERS: Timo Kreuzer
*/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
#define SLIST8A_DEPTH_MASK HEX(000000000000FFFF)
#define SLIST8A_DEPTH_INC HEX(0000000000000001)
* PROGRAMER: Alex Ionescu (alex@relsoft.net)
*/
-.intel_syntax noprefix
+#include <asm.inc>
/* GLOBALS ****************************************************************/
-.globl _DbgBreakPoint@0
-.globl _DbgBreakPointWithStatus@4
-.globl _DbgUserBreakPoint@0
-.globl _DebugService@20
-.globl _DebugService2@12
-.globl _DbgBreakPointNoBugCheck@0
-.globl _RtlpBreakWithStatusInstruction@0
+PUBLIC _DbgBreakPoint@0
+PUBLIC _DbgBreakPointWithStatus@4
+PUBLIC _DbgUserBreakPoint@0
+PUBLIC _DebugService@20
+PUBLIC _DebugService2@12
+PUBLIC _DbgBreakPointNoBugCheck@0
+PUBLIC _RtlpBreakWithStatusInstruction@0
/* FUNCTIONS ***************************************************************/
-.func DbgBreakPointNoBugCheck@0
+.code
+
_DbgBreakPointNoBugCheck@0:
int 3
ret
-.endfunc
-.func DbgBreakPoint@0
_DbgBreakPoint@0:
_DbgUserBreakPoint@0:
int 3
ret
-.endfunc
-.func DbgBreakPointWithStatus@4
_DbgBreakPointWithStatus@4:
mov eax, [esp+4]
_RtlpBreakWithStatusInstruction@0:
int 3
ret 4
-.endfunc
-.func DebugService2@12
+
_DebugService2@12:
/* Setup the stack */
mov eax, [ebp+16]
mov ecx, [ebp+8]
mov edx, [ebp+12]
- int 0x2D
+ int HEX(2D)
int 3
/* Restore stack */
pop ebp
ret 12
-.endfunc
-.func DebugService@20
+
_DebugService@20:
/* Setup the stack */
mov edx, [ebp+16]
mov ebx, [ebp+20]
mov edi, [ebp+24]
- int 0x2D
+ int HEX(2D)
int 3
/* Restore registers */
/* Return */
pop ebp
ret 20
-.endfunc
+
+END
/* INCLUDES ******************************************************************/
-#include <ndk/asm.h>
-.intel_syntax noprefix
+#include <asm.inc>
+#include <ks386.inc>
+
+EXTERN _RtlpCheckForActiveDebugger@0:PROC
+EXTERN _RtlDispatchException@8:PROC
+EXTERN _ZwContinue@8:PROC
+EXTERN _ZwRaiseException@12:PROC
#define ExceptionContinueSearch 1
#define ExceptionNestedException 2
/* FUNCTIONS *****************************************************************/
-.func RtlpGetExceptionList@0
-.globl _RtlpGetExceptionList@0
+.code
+
+PUBLIC _RtlpGetExceptionList@0
_RtlpGetExceptionList@0:
/* Return the exception list */
mov eax, fs:[TEB_EXCEPTION_LIST]
ret
-.endfunc
-.func RtlpSetExceptionList@4
-.globl _RtlpSetExceptionList@4
+
+PUBLIC _RtlpSetExceptionList@4
_RtlpSetExceptionList@4:
/* Get the new list */
/* Return */
ret 4
-.endfunc
-.func RtlCaptureContext@4
-.globl _RtlCaptureContext@4
+
+PUBLIC _RtlCaptureContext@4
_RtlCaptureContext@4:
/* Preserve EBX and put the context in it */
/* Capture the other regs */
jmp CaptureRest
-.endfunc
-.func RtlpCaptureContext@4
-.globl _RtlpCaptureContext@4
+
+PUBLIC _RtlpCaptureContext@4
_RtlpCaptureContext@4:
/* Preserve EBX and put the context in it */
/* Return to the caller */
pop ebx
ret 4
-.endfunc
-.func RtlpExecuteHandlerForException@20
-.globl _RtlpExecuteHandlerForException@20
+
+PUBLIC _RtlpExecuteHandlerForException@20
_RtlpExecuteHandlerForException@20:
/* Copy the routine in EDX */
/* Jump to common routine */
jmp _RtlpExecuteHandler@20
-.endfunc
-.func RtlpExecuteHandlerForUnwind@20
-.globl _RtlpExecuteHandlerForUnwind@20
+
+PUBLIC _RtlpExecuteHandlerForUnwind@20
_RtlpExecuteHandlerForUnwind@20:
/* Copy the routine in EDX */
mov edx, offset _RtlpUnwindProtector
-.endfunc
-.func RtlpExecuteHandler@20
+
_RtlpExecuteHandler@20:
/* Save non-volatile */
xor edi, edi
/* Call the 2nd-stage executer */
- push [esp+0x20]
- push [esp+0x20]
- push [esp+0x20]
- push [esp+0x20]
- push [esp+0x20]
+ push [esp+32]
+ push [esp+32]
+ push [esp+32]
+ push [esp+32]
+ push [esp+32]
call _RtlpExecuteHandler2@20
/* Restore non-volatile */
pop edi
pop esi
pop ebx
- ret 0x14
-.endfunc
+ ret 20
+
-.func RtlpExecuteHandler2@20
-.globl _RtlpExecuteHandler2@20
+PUBLIC _RtlpExecuteHandler2@20
_RtlpExecuteHandler2@20:
/* Set up stack frame */
mov ebp, esp
/* Save the Frame */
- push [ebp+0xC]
+ push [ebp+12]
/* Push handler address */
push edx
mov [fs:TEB_EXCEPTION_LIST], esp
/* Call the handler */
- push [ebp+0x14]
- push [ebp+0x10]
- push [ebp+0xC]
+ push [ebp+20]
+ push [ebp+16]
+ push [ebp+12]
push [ebp+8]
- mov ecx, [ebp+0x18]
+ mov ecx, [ebp+24]
call ecx
/* Unlink us */
/* Undo stack frame and return */
mov esp, ebp
pop ebp
- ret 0x14
-.endfunc
+ ret 20
+
-.func RtlpExceptionProtector
_RtlpExceptionProtector:
/* Assume we'll continue */
return:
ret 16
-.endfunc
-.func RtlpUnwindProtector
+
_RtlpUnwindProtector:
/* Assume we'll continue */
.return:
ret 16
-.endfunc
-.func RtlRaiseException@4
-.globl _RtlRaiseException@4
+
+PUBLIC _RtlRaiseException@4
_RtlRaiseException@4:
/* Set up stack frame */
/* If we returned, raise a status */
push eax
call _RtlRaiseStatus@4
-.endfunc
-.func RtlRaiseStatus@4
-.globl _RtlRaiseStatus@4
+
+PUBLIC _RtlRaiseStatus@4
_RtlRaiseStatus@4:
/* Set up stack frame */
/* If we returned, raise a status */
push eax
call _RtlRaiseStatus@4
-.endfunc
+
+END
* PROGRAMMERS: Timo Kreuzer
*/
-.intel_syntax noprefix
+#include <asm.inc>
/* FUNCTIONS ****************************************************************/
-
+.code
/* PSLIST_ENTRY
* NTAPI
* RtlInterlockedPopEntrySList(
* IN PSLIST_HEADER ListHead);
*/
-.global _ExpInterlockedPopEntrySListResume@0
-.global _ExpInterlockedPopEntrySListEnd@0
-.global _ExpInterlockedPopEntrySListFault@0
-.global _RtlInterlockedPopEntrySList@4
+PUBLIC _ExpInterlockedPopEntrySListResume@0
+PUBLIC _ExpInterlockedPopEntrySListEnd@0
+PUBLIC _ExpInterlockedPopEntrySListFault@0
+PUBLIC _RtlInterlockedPopEntrySList@4
_RtlInterlockedPopEntrySList@4:
/* Save registers */
/* Load ListHead->Depth and ListHead->Sequence into edx */
mov edx, [ebp + 4]
-1:
/* Check if ListHead->Next is NULL */
or eax, eax
- jz 2f
+ jz _ExpInterlockedPopEntrySList2
/* Copy Depth and Sequence number and adjust Depth */
lea ecx, [edx - 1]
jnz _ExpInterlockedPopEntrySListResume@0
/* Restore registers and return */
-2:
+_ExpInterlockedPopEntrySList2:
pop ebp
pop ebx
ret 4
* IN PSLIST_HEADER ListHead,
* IN PSLIST_ENTRY ListEntry);
*/
-.global _RtlInterlockedPushEntrySList@8
+PUBLIC _RtlInterlockedPushEntrySList@8
_RtlInterlockedPushEntrySList@8:
/* Save registers */
/* Load ListHead->Depth and ListHead->Sequence into edx */
mov edx, [ebp + 4]
-1:
+_RtlpInterlockedPushEntrySListResume:
/* Set ListEntry->Next to ListHead->Next */
mov [ebx], eax
/* Copy ListHead->Depth and ListHead->Sequence and adjust them */
- lea ecx, [edx + 0x10001]
+ lea ecx, [edx + HEX(10001)]
/* If [ebp] equals edx:eax, exchange it with ecx:ebx */
LOCK cmpxchg8b qword ptr [ebp]
/* If not equal, retry with edx:eax, being the content of [ebp] now */
- jnz 1b
+ jnz _RtlpInterlockedPushEntrySListResume
/* Restore registers and return */
pop ebp
* RtlInterlockedFlushSList(
* IN PSINGLE_LIST_ENTRY ListHead);
*/
-.global _RtlInterlockedFlushSList@4
+PUBLIC _RtlInterlockedFlushSList@4
_RtlInterlockedFlushSList@4:
/* Save registers */
/* Load ListHead->Depth and ListHead->Sequence into edx */
mov edx, [ebp + 4]
-1:
+_RtlpInterlockedFlushSListResume:
/* Check if ListHead->Next is NULL */
or eax, eax
- jz 2f
+ jz _RtlpInterlockedFlushSListEnd
/* Copy Depth and Sequence number to ecx */
mov ecx, edx
LOCK cmpxchg8b qword ptr [ebp]
/* If not equal, retry with edx:eax, being the content of [ebp] now */
- jnz 1b
+ jnz _RtlpInterlockedFlushSListResume
/* Restore registers and return */
-2:
+_RtlpInterlockedFlushSListEnd:
pop ebp
pop ebx
ret 4
+
+END
-#include <ndk/asm.h>
-.intel_syntax noprefix
+/*
+ * COPYRIGHT: GNU GPL - See COPYING in the top level directory
+ * PROJECT: ReactOS Run-Time Library
+ * PURPOSE:
+ * FILE: lib/rtl/i386/res_asm.S
+ * PROGRAMER:
+ */
+
+#include <asm.inc>
+#include <ks386.inc>
+
+EXTERN _LdrpAccessResource@16:PROC
/*
* On x86, Shrinker, an executable compressor, depends on the
* "call access_resource" instruction being there.
*/
-.globl _LdrAccessResource@16
+.code
+PUBLIC _LdrAccessResource@16
_LdrAccessResource@16:
push ebp
mov ebp, esp
call _LdrpAccessResource@16
leave
ret 16
+
+END
/*
- * COPYRIGHT: See COPYING in the top level directory
+ * COPYRIGHT: GNU GPL - See COPYING in the top level directory
* PROJECT: ReactOS Run-Time Library
* PURPOSE: Memory functions
* FILE: lib/rtl/i386/rtlswap.S
* PROGRAMER: Alex Ionescu (alex.ionescu@reactos.org)
*/
-.intel_syntax noprefix
+#include <asm.inc>
/* GLOBALS *******************************************************************/
-.globl _RtlCompareMemory@12
-.globl _RtlCompareMemoryUlong@12
-.globl _RtlFillMemory@12
-.globl _RtlFillMemoryUlong@12
-.globl _RtlMoveMemory@12
-.globl _RtlZeroMemory@8
-.globl @RtlPrefetchMemoryNonTemporal@8
+PUBLIC _RtlCompareMemory@12
+PUBLIC _RtlCompareMemoryUlong@12
+PUBLIC _RtlFillMemory@12
+PUBLIC _RtlFillMemoryUlong@12
+PUBLIC _RtlMoveMemory@12
+PUBLIC _RtlZeroMemory@8
+PUBLIC @RtlPrefetchMemoryNonTemporal@8
/* FUNCTIONS *****************************************************************/
+.code
-.func RtlCompareMemory@12
_RtlCompareMemory@12:
/* Save volatiles */
pop edi
pop esi
ret 12
-.endfunc
-.func RtlCompareMemoryUlong@12
+
_RtlCompareMemoryUlong@12:
/* Get pointers and size in ULONGs */
mov eax, edi
pop edi
ret 12
-.endfunc
-.func RtlFillMemory@12
+
_RtlFillMemory@12:
/* Get pointers and size */
rep stosb
pop edi
ret 12
-.endfunc
-.func RtlFillMemoryUlong@12
+
_RtlFillMemoryUlong@12:
/* Get pointer, size and pattern */
rep stosd
pop edi
ret 12
-.endfunc
-.func RtlFillMemoryUlonglong@16
+
_RtlFillMemoryUlonglong@16:
/* Save volatiles */
pop esi
pop edi
ret 16
-.endfunc
-.func RtlZeroMemory@8
+
_RtlZeroMemory@8:
/* Get pointers and size */
rep stosb
pop edi
ret 8
-.endfunc
-.func RtlMoveMemory@12
+
_RtlMoveMemory@12:
/* Save volatiles */
rep movsb
cld
jmp DoneMove
-.endfunc
-.func @RtlPrefetchMemoryNonTemporal@8, @RtlPrefetchMemoryNonTemporal@8
+
@RtlPrefetchMemoryNonTemporal@8:
/*
/* Keep looping for the next line, or return if done */
ja FetchLine
ret
-.endfunc
+
/* FIXME: HACK */
_Ke386CacheAlignment:
- .long 0x40
+ .long 64
+
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
+#include <asm.inc>
-.intel_syntax noprefix
-.global MsgUnimplemented
+PUBLIC MsgUnimplemented
MsgUnimplemented:
.asciz "WARNING: %s at %s:%d is UNIMPLEMENTED!\n"
ret
.endp
+END
/* EOF */
/* INCLUDES ******************************************************************/
-#include <ndk/asm.h>
-.intel_syntax noprefix
+#include <asm.inc>
+#include <ksamd64.inc>
#define DISPOSITION_DISMISS 0
#define DISPOSITION_CONTINUE_SEARCH 1
_except_handler3:
ret
.endfunc
+
+END
* PROGRAMER: KJK::Hyperion <noog@libero.it>
*/
-.globl __chkstk
-.globl __alloca_probe
+#include <asm.inc>
+#include <ks386.inc>
+
+#define PAGE_SIZE 4096
+
+PUBLIC __chkstk
+PUBLIC __alloca_probe
+PUBLIC __alloca_probe_16
+.code
+
+ /* 16 byte aligned alloca probe
+ * EAX = size to be allocated */
+__alloca_probe_16:
+ /* save the ECX register */
+ push ecx
+
+ /* ecx = top of the previous stack frame */
+ lea ecx, [esp + 8]
+
+ /* Calculate end of allocation */
+ sub ecx, eax
+
+ /* Get the misalignment */
+ and ecx, 15
+
+ /* Add the misalignment to the original alloc size */
+ add eax, ecx
+
+ /* Check for overflow */
+ jnc l1
+
+ /* Set maximum value */
+ mov eax, HEX(0ffffffff)
+l1:
+ /* Restore ecx */
+ pop ecx
+ /* Fall through to __chkstk */
/*
_chkstk() is called by all stack allocations of more than 4 KB. It grows the
__chkstk:
__alloca_probe:
-/* EAX = size to be allocated */
-/* save the ECX register */
- pushl %ecx
+ /* EAX = size to be allocated */
+ /* save the ECX register */
+ push ecx
-/* ECX = top of the previous stack frame */
- leal 8(%esp), %ecx
+ /* ECX = top of the previous stack frame */
+ lea ecx, [esp + 8]
-/* probe the desired memory, page by page */
- cmpl $0x1000, %eax
- jge .l_MoreThanAPage
- jmp .l_LessThanAPage
+ /* probe the desired memory, page by page */
+ cmp eax, PAGE_SIZE
+ jl .l_LessThanAPage
.l_MoreThanAPage:
-/* raise the top of the stack by a page and probe */
- subl $0x1000, %ecx
- testl %eax, 0(%ecx)
+ /* raise the top of the stack by a page and probe */
+ sub ecx, PAGE_SIZE
+ test [ecx], eax
-/* loop if still more than a page must be probed */
- subl $0x1000, %eax
- cmpl $0x1000, %eax
- jge .l_MoreThanAPage
+ /* loop if still more than a page must be probed */
+ sub eax, PAGE_SIZE
+ cmp eax, PAGE_SIZE
+ jge .l_MoreThanAPage
.l_LessThanAPage:
-/* raise the top of the stack by EAX bytes (size % 4096) and probe */
- subl %eax, %ecx
- testl %eax, 0(%ecx)
+ /* raise the top of the stack by EAX bytes (size % 4096) and probe */
+ sub ecx, eax
+ test [ecx], eax
-/* EAX = top of the stack */
- movl %esp, %eax
+ /* EAX = top of the stack */
+ mov eax, esp
-/* allocate the memory */
- movl %ecx, %esp
+ /* allocate the memory */
+ mov esp, ecx
-/* restore ECX */
- movl 0(%eax), %ecx
+ /* restore ECX */
+ mov ecx, [eax]
-/* restore the return address */
- movl 4(%eax), %eax
- pushl %eax
+ /* restore the return address */
+ mov eax, [eax + 4]
+ push eax
-/* return */
- ret
+ /* return */
+ ret
/* EOF */
+END
/* INCLUDES ******************************************************************/
-#include <ndk/asm.h>
+#include <asm.inc>
+#include <ks386.inc>
-/* GLOBALS *******************************************************************/
-
-.globl __EH_prolog
+/* FUNCTIONS *****************************************************************/
+.code
+PUBLIC __EH_prolog
// Copied from Wine.
__EH_prolog:
- pushl $-1
- pushl %eax
- pushl %fs:0
- movl %esp, %fs:0
- movl 12(%esp), %eax
- movl %ebp, 12(%esp)
- leal 12(%esp), %ebp
- pushl %eax
+ push -1
+ push eax
+ push fs:0
+ mov fs:0, esp
+ mov eax, [esp + 12]
+ mov [esp + 12], ebp
+ lea ebp, [esp + 12]
+ push eax
ret
+
+END
/* INCLUDES ******************************************************************/
-#include <ndk/asm.h>
-.intel_syntax noprefix
+#include <asm.inc>
#define DISPOSITION_DISMISS 0
#define DISPOSITION_CONTINUE_SEARCH 1
#define DISPOSITION_COLLIDED_UNWIND 3
+#define EXCEPTION_EXIT_UNWIND 4
+#define EXCEPTION_UNWINDING 2
+
+
+EXTERN _RtlUnwind@16:PROC
+
/* GLOBALS *******************************************************************/
-.globl __global_unwind2
-.globl __local_unwind2
-.globl __abnormal_termination
-.globl __except_handler2
-.globl __except_handler3
+PUBLIC __global_unwind2
+PUBLIC __local_unwind2
+PUBLIC __abnormal_termination
+PUBLIC __except_handler2
+PUBLIC __except_handler3
/* FUNCTIONS *****************************************************************/
-.func unwind_handler
+.code
_unwind_handler:
/* Check if we were unwinding and continue search if not */
unwind_handler_return:
ret
-.endfunc
-.func _global_unwind2
+
__global_unwind2:
/* Create stack and save all registers */
mov esp, ebp
pop ebp
ret
-.endfunc
-.func _abnormal_termination
+
__abnormal_termination:
/* Assume false */
/* Return */
ab_return:
ret
-.endfunc
-.func _local_unwind2
+
__local_unwind2:
/* Save volatiles */
pop esi
pop ebx
ret
-.endfunc
-.func _except_handler2
+
__except_handler2:
/* Setup stack and save volatiles */
mov [ebx+12], eax
/* Call except handler */
- call [edi+ecx*4+8]
+ call dword ptr [edi+ecx*4+8]
except_continue2:
/* Reload try level and except again */
mov esp, ebp
pop ebp
ret
-.endfunc
-.func _except_handler3
+
__except_handler3:
/* Setup stack and save volatiles */
mov esp, ebp
pop ebp
ret
-.endfunc
+
+END
("fxtract\n\t"
: "=t" (__junk), "=u" (__val) : "0" (__x));
#else
-#error REVIEW ME
+#pragma message ("REVIEW ME")
__asm fld [__x];
__asm fxtract;
__asm fstp st(0);
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
/* DATA *********************************************************************/
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* FUNCTIONS ****************************************************************/
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* FUNCTIONS ****************************************************************/
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* FUNCTIONS ****************************************************************/
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* FUNCTIONS ****************************************************************/
PUBLIC ceilf
ceilf:
+ sub rsp, 16
+
/* Put parameter on the stack */
- movss [rsp - 0x10], xmm0
- fld dword ptr [rsp]
+ movss [rsp], xmm0
+ fld dword ptr [rsp]
/* Change fpu control word to round up */
- fstcw [rsp - 0x10]
- mov eax, [rsp - 0x10]
- or eax, 0x00800
- and eax, 0x0fbff
- mov [rsp - 0x08], eax
- fldcw [rsp - 0x08]
+ fstcw [rsp + 8]
+ mov eax, [rsp + 8]
+ or eax, HEX(00800)
+ and eax, HEX(0fbff)
+ mov [rsp + 12], eax
+ fldcw [rsp + 12]
/* Round to integer */
frndint
/* Restore fpu control word */
- fldcw [rsp - 0x10]
+ fldcw [rsp + 8]
- fstp dword ptr [rsp - 0x10]
- movss xmm0, [rsp - 0x10]
+ fstp dword ptr [rsp]
+ movss xmm0, [rsp]
+
+ add rsp, 16
ret
+
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* FUNCTIONS ****************************************************************/
UNIMPLEMENTED exp
ret
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
/* FUNCTIONS ****************************************************************/
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* FUNCTIONS ****************************************************************/
floor:
UNIMPLEMENTED floor
ret
+
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* FUNCTIONS ****************************************************************/
PUBLIC floorf
floorf:
+ sub rsp, 16
+
/* Put parameter on the stack */
- movss [rsp - 0x10], xmm0
+ movss [rsp], xmm0
fld dword ptr [rsp]
/* Change fpu control word to round down */
- fstcw [rsp - 0x10]
- mov eax, [rsp - 0x10]
+ fstcw [rsp]
+ mov eax, [rsp]
or eax, 0x00400
and eax, 0x0f7ff
- mov [rsp - 0x08], eax
- fldcw [rsp - 0x08]
+ mov [rsp + 8], eax
+ fldcw [rsp + 8]
/* Round to integer */
frndint
/* Restore fpu control word */
- fldcw [rsp - 0x10]
+ fldcw [rsp]
- fstp dword ptr [rsp - 0x10]
- movss xmm0, [rsp - 0x10]
+ fstp dword ptr [rsp]
+ movss xmm0, [rsp]
+ add rsp, 16
ret
+
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
/* DATA *********************************************************************/
fmod:
UNIMPLEMENTED fmod
ret
+
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
/* DATA *********************************************************************/
fmodf:
UNIMPLEMENTED fmodf
ret
+
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
/* DATA *********************************************************************/
ldexp:
UNIMPLEMENTED ldexp
ret
+
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
/* DATA *********************************************************************/
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
/* DATA *********************************************************************/
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* DATA *********************************************************************/
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* DATA *********************************************************************/
sqrt:
UNIMPLEMENTED sqrt
ret
+
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* DATA *********************************************************************/
sqrtf:
sqrtss xmm0, xmm0
ret
+
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
/* DATA *********************************************************************/
tan:
UNIMPLEMENTED tan
ret
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
- .globl __alldiv
- .globl __fltused
-
+
+#include <asm.inc>
+
+PUBLIC __alldiv
+PUBLIC __fltused
+
/* DATA ********************************************************************/
+.data
+ASSUME CS:NOTHING, DS:NOTHING, ES:NOTHING, FS:NOTHING, GS:NOTHING
__fltused:
- .long 0x9875
+ .long HEX(9875)
-.intel_syntax noprefix
/* FUNCTIONS ***************************************************************/
+.code
//
// lldiv - signed long divide
pop edi
ret 16
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl __alldvrm
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC __alldvrm
+
/* FUNCTIONS ***************************************************************/
+.code
__alldvrm:
push edi
mov eax,DVNDHI // hi word of a
or eax,eax // test to see if signed
- jge short ....L1 // skip rest if a is already positive
+ jge short .L1 // skip rest if a is already positive
inc edi // complement result sign flag
inc ebp // complement result sign flag
mov edx,DVNDLO // lo word of a
sbb eax,0
mov DVNDHI,eax // save positive value
mov DVNDLO,edx
-....L1:
+.L1:
mov eax,DVSRHI // hi word of b
or eax,eax // test to see if signed
- jge short ....L2 // skip rest if b is already positive
+ jge short .L2 // skip rest if b is already positive
inc edi // complement the result sign flag
mov edx,DVSRLO // lo word of a
neg eax // make b positive
sbb eax,0
mov DVSRHI,eax // save positive value
mov DVSRLO,edx
-....L2:
+.L2:
//
// Now do the divide. First look to see if the divisor is less than 4194304K.
//
or eax,eax // check to see if divisor < 4194304K
- jnz short ....L3 // nope, gotta do this the hard way
+ jnz short .L3 // nope, gotta do this the hard way
mov ecx,DVSRLO // load divisor
mov eax,DVNDHI // load high word of dividend
xor edx,edx
mov eax,esi // set up low word of quotient
mul dword ptr DVSRLO // LOWORD(QUOT) * DVSR
add edx,ecx // EDX:EAX = QUOT * DVSR
- jmp short ....L4 // complete remainder calculation
+ jmp short .L4 // complete remainder calculation
//
// Here we do it the hard way. Remember, eax contains the high word of DVSR
//
-....L3:
+.L3:
mov ebx,eax // ebx:ecx <- divisor
mov ecx,DVSRLO
mov edx,DVNDHI // edx:eax <- dividend
mov eax,DVNDLO
-....L5:
+.L5:
shr ebx,1 // shift divisor right one bit
rcr ecx,1
shr edx,1 // shift dividend right one bit
rcr eax,1
or ebx,ebx
- jnz short ....L5 // loop until divisor < 4194304K
+ jnz short .L5 // loop until divisor < 4194304K
div ecx // now divide, ignore remainder
mov esi,eax // save quotient
mov eax,DVSRLO
mul esi // QUOT * DVSRLO
add edx,ecx // EDX:EAX = QUOT * DVSR
- jc short ....L6 // carry means Quotient is off by 1
+ jc short .L6 // carry means Quotient is off by 1
//
// do long compare here between original dividend and the result of the
//
cmp edx,DVNDHI // compare hi words of result and original
- ja short ....L6 // if result > original, do subtract
- jb short ....L7 // if result < original, we are ok
+ ja short .L6 // if result > original, do subtract
+ jb short .L7 // if result < original, we are ok
cmp eax,DVNDLO // hi words are equal, compare lo words
- jbe short ....L7 // if less or equal we are ok, else subtract
-....L6:
+ jbe short .L7 // if less or equal we are ok, else subtract
+.L6:
dec esi // subtract 1 from quotient
sub eax,DVSRLO // subtract divisor from result
sbb edx,DVSRHI
-....L7:
+.L7:
xor ebx,ebx // ebx:esi <- quotient
-....L4:
+.L4:
//
// Calculate remainder by subtracting the result from the original dividend.
// Since the result is already in a register, we will do the subtract in the
//
dec ebp // check result sign flag
- jns short ....L9 // result is ok, set up the quotient
+ jns short .L9 // result is ok, set up the quotient
neg edx // otherwise, negate the result
neg eax
sbb edx,0
//
// Now we need to get the quotient into edx:eax and the remainder into ebx:ecx.
//
-....L9:
+.L9:
mov ecx,edx
mov edx,ebx
mov ebx,ecx
//
dec edi // check to see if result is negative
- jnz short ....L8 // if EDI == 0, result should be negative
+ jnz short .L8 // if EDI == 0, result should be negative
neg edx // otherwise, negate the result
neg eax
sbb edx,0
// Restore the saved registers and return.
//
-....L8:
+.L8:
pop ebp
pop esi
pop edi
ret 16
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl __allmul
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC __allmul
/* FUNCTIONS ***************************************************************/
+.code
//
// llmul - long multiply routine
ret 16 // callee restores the stack
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl __allrem
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC __allrem
+
/* FUNCTIONS ***************************************************************/
+.code
//
// llrem - signed long remainder
pop ebx
ret 16
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl __allshl
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC __allshl
/* FUNCTIONS ***************************************************************/
+.code
//
// llshl - long shift left
xor eax,eax
xor edx,edx
ret
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl __allshr
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC __allshr
+
/* FUNCTIONS ***************************************************************/
+.code
//
// llshr - long shift right
sar edx,31
mov eax,edx
ret
+
+END
--- /dev/null
+
+#include <asm.inc>
+
+PUBLIC _atan2
+
+.code
+_atan2:
+ push ebp
+ mov ebp, esp
+
+ fld qword ptr [ebp + 8]
+ fld qword ptr [ebp + 16]
+ fpatan
+
+ pop ebp
+ ret
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl _atan
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC _atan
+
/* FUNCTIONS ***************************************************************/
+.code
_atan:
push ebp
fpatan // Take the arctangent
pop ebp
ret
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
- .globl __aulldiv
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC __aulldiv
/* FUNCTIONS ***************************************************************/
+.code
//
// ulldiv - unsigned long divide
mov eax,DVSRHI // check to see if divisor < 4194304K
or eax,eax
- jnz short ..L1 // nope, gotta do this the hard way
+ jnz short .L1 // nope, gotta do this the hard way
mov ecx,DVSRLO // load divisor
mov eax,DVNDHI // load high word of dividend
xor edx,edx
mov eax,DVNDLO // edx:eax <- remainder:lo word of dividend
div ecx // get low order bits of quotient
mov edx,ebx // edx:eax <- quotient hi:quotient lo
- jmp short ..L2 // restore stack and return
+ jmp short .L2 // restore stack and return
//
// Here we do it the hard way. Remember, eax contains DVSRHI
//
-..L1:
+.L1:
mov ecx,eax // ecx:ebx <- divisor
mov ebx,DVSRLO
mov edx,DVNDHI // edx:eax <- dividend
mov eax,DVNDLO
-..L3:
+.L3:
shr ecx,1 // shift divisor right one bit// hi bit <- 0
rcr ebx,1
shr edx,1 // shift dividend right one bit// hi bit <- 0
rcr eax,1
or ecx,ecx
- jnz short ..L3 // loop until divisor < 4194304K
+ jnz short .L3 // loop until divisor < 4194304K
div ebx // now divide, ignore remainder
mov esi,eax // save quotient
mov eax,DVSRLO
mul esi // QUOT * DVSRLO
add edx,ecx // EDX:EAX = QUOT * DVSR
- jc short ..L4 // carry means Quotient is off by 1
+ jc short .L4 // carry means Quotient is off by 1
//
// do long compare here between original dividend and the result of the
//
cmp edx,DVNDHI // compare hi words of result and original
- ja short ..L4 // if result > original, do subtract
- jb short ..L5 // if result < original, we are ok
+ ja short .L4 // if result > original, do subtract
+ jb short .L5 // if result < original, we are ok
cmp eax,DVNDLO // hi words are equal, compare lo words
- jbe short ..L5 // if less or equal we are ok, else subtract
-..L4:
+ jbe short .L5 // if less or equal we are ok, else subtract
+.L4:
dec esi // subtract 1 from quotient
-..L5:
+.L5:
xor edx,edx // edx:eax <- quotient
mov eax,esi
// Restore the saved registers and return.
//
-..L2:
+.L2:
pop esi
pop ebx
ret 16
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl __aulldvrm
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC __aulldvrm
+
/* FUNCTIONS ***************************************************************/
+.code
__aulldvrm:
mov eax,DVSRHI // check to see if divisor < 4194304K
or eax,eax
- jnz short .....L1 // nope, gotta do this the hard way
+ jnz short .L1 // nope, gotta do this the hard way
mov ecx,DVSRLO // load divisor
mov eax,DVNDHI // load high word of dividend
xor edx,edx
mov eax,esi // set up low word of quotient
mul dword ptr DVSRLO // LOWORD(QUOT) * DVSR
add edx,ecx // EDX:EAX = QUOT * DVSR
- jmp short .....L2 // complete remainder calculation
+ jmp short .L2 // complete remainder calculation
//
// Here we do it the hard way. Remember, eax contains DVSRHI
//
-.....L1:
+.L1:
mov ecx,eax // ecx:ebx <- divisor
mov ebx,DVSRLO
mov edx,DVNDHI // edx:eax <- dividend
mov eax,DVNDLO
-.....L3:
+.L3:
shr ecx,1 // shift divisor right one bit// hi bit <- 0
rcr ebx,1
shr edx,1 // shift dividend right one bit// hi bit <- 0
rcr eax,1
or ecx,ecx
- jnz short .....L3 // loop until divisor < 4194304K
+ jnz short .L3 // loop until divisor < 4194304K
div ebx // now divide, ignore remainder
mov esi,eax // save quotient
mov eax,DVSRLO
mul esi // QUOT * DVSRLO
add edx,ecx // EDX:EAX = QUOT * DVSR
- jc short .....L4 // carry means Quotient is off by 1
+ jc short .L4 // carry means Quotient is off by 1
//
// do long compare here between original dividend and the result of the
//
cmp edx,DVNDHI // compare hi words of result and original
- ja short .....L4 // if result > original, do subtract
- jb short .....L5 // if result < original, we are ok
+ ja short .L4 // if result > original, do subtract
+ jb short .L5 // if result < original, we are ok
cmp eax,DVNDLO // hi words are equal, compare lo words
- jbe short .....L5 // if less or equal we are ok, else subtract
-.....L4:
+ jbe short .L5 // if less or equal we are ok, else subtract
+.L4:
dec esi // subtract 1 from quotient
sub eax,DVSRLO // subtract divisor from result
sbb edx,DVSRHI
-.....L5:
+.L5:
xor ebx,ebx // ebx:esi <- quotient
-.....L2:
+.L2:
//
// Calculate remainder by subtracting the result from the original dividend.
// Since the result is already in a register, we will do the subtract in the
pop esi
ret 16
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl __aullrem
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC __aullrem
+
/* FUNCTIONS ***************************************************************/
+.code
//
// ullrem - unsigned long remainder
mov eax,DVSRHI // check to see if divisor < 4194304K
or eax,eax
- jnz short ...L1 // nope, gotta do this the hard way
+ jnz short .L1 // nope, gotta do this the hard way
mov ecx,DVSRLO // load divisor
mov eax,DVNDHI // load high word of dividend
xor edx,edx
div ecx // edx <- final remainder
mov eax,edx // edx:eax <- remainder
xor edx,edx
- jmp short ...L2 // restore stack and return
+ jmp short .L2 // restore stack and return
//
// Here we do it the hard way. Remember, eax contains DVSRHI
//
-...L1:
+.L1:
mov ecx,eax // ecx:ebx <- divisor
mov ebx,DVSRLO
mov edx,DVNDHI // edx:eax <- dividend
mov eax,DVNDLO
-...L3:
+.L3:
shr ecx,1 // shift divisor right one bit// hi bit <- 0
rcr ebx,1
shr edx,1 // shift dividend right one bit// hi bit <- 0
rcr eax,1
or ecx,ecx
- jnz short ...L3 // loop until divisor < 4194304K
+ jnz short .L3 // loop until divisor < 4194304K
div ebx // now divide, ignore remainder
//
xchg ecx,eax // put partial product in ECX, get quotient in EAX
mul dword ptr DVSRLO
add edx,ecx // EDX:EAX = QUOT * DVSR
- jc short ...L4 // carry means Quotient is off by 1
+ jc short .L4 // carry means Quotient is off by 1
//
// do long compare here between original dividend and the result of the
//
cmp edx,DVNDHI // compare hi words of result and original
- ja short ...L4 // if result > original, do subtract
- jb short ...L5 // if result < original, we're ok
+ ja short .L4 // if result > original, do subtract
+ jb short .L5 // if result < original, we're ok
cmp eax,DVNDLO // hi words are equal, compare lo words
- jbe short ...L5 // if less or equal we're ok, else subtract
-...L4:
+ jbe short .L5 // if less or equal we're ok, else subtract
+.L4:
sub eax,DVSRLO // subtract divisor from result
sbb edx,DVSRHI
-...L5:
+.L5:
//
// Calculate remainder by subtracting the result from the original dividend.
// Restore the saved registers and return.
//
-...L2:
+.L2:
pop ebx
ret 16
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl __aullshr
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC __aullshr
+
/* FUNCTIONS ***************************************************************/
+.code
//
// ullshr - long shift right
// depends only on the high order bit of edx).
//
cmp cl,64
- jae short ..RETZERO
+ jae short .RETZERO
//
// Handle shifts of between 0 and 31 bits
//
cmp cl, 32
- jae short ..MORE32
+ jae short .MORE32
shrd eax,edx,cl
shr edx,cl
ret
//
// Handle shifts of between 32 and 63 bits
//
-..MORE32:
+.MORE32:
mov eax,edx
xor edx,edx
and cl,31
//
// return 0 in edx:eax
//
-..RETZERO:
+.RETZERO:
xor eax,eax
xor edx,edx
ret
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl _ceil
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC _ceil
/* FUNCTIONS ***************************************************************/
+.code
_ceil:
push ebp
fld qword ptr [ebp+8] // Load real from stack
fstcw [ebp-2] // Save control word
fclex // Clear exceptions
- mov word ptr [ebp-4],0xb63 // Rounding control word
+ mov word ptr [ebp-4], HEX(0b63) // Rounding control word
fldcw [ebp-4] // Set new rounding control
frndint // Round to integer
fclex // Clear exceptions
mov esp,ebp // Deallocate temporary space
pop ebp
ret
+
+END
* This file is part of the w64 mingw-runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
-#include <_mingw_mac.h>
-
- .file "ceilf.S"
- .text
- .align 4
-.globl __MINGW_USYMBOL(ceilf)
- .def __MINGW_USYMBOL(ceilf); .scl 2; .type 32; .endef
-__MINGW_USYMBOL(ceilf):
-#ifdef _WIN64
- subq $24,%rsp
- movss %xmm0,8(%rsp)
- flds 8(%rsp)
-
- fstcw 4(%rsp) /* store fpu control word */
-
- movl $0x0800,%edx /* round towards +oo */
- orl 4(%rsp),%edx
- andl $0xfbff,%edx
- movl %edx,(%rsp)
- fldcw (%rsp) /* load modified control word */
- frndint /* round */
+#include <asm.inc>
- fldcw 4(%rsp) /* restore original control word */
- fstps 8(%rsp)
- movss 8(%rsp),%xmm0
- addq $24,%rsp
- ret
-#else
- flds 4(%esp)
- subl $8,%esp
+.code
+.align 4
+
+PUBLIC _ceilf
+_ceilf:
- fstcw 4(%esp) /* store fpu control word */
+ fld dword ptr [esp + 4]
+ sub esp, 8
+
+ fstcw [esp + 4] /* store fpu control word */
/* We use here %edx although only the low 1 bits are defined.
But none of the operations should care and they are faster
than the 16 bit operations. */
- movl $0x0800,%edx /* round towards +oo */
- orl 4(%esp),%edx
- andl $0xfbff,%edx
- movl %edx,(%esp)
- fldcw (%esp) /* load modified control word */
+ mov edx, [esp + 4]
+ or edx, HEX(0800) /* round towards +oo */
+ and edx, HEX(fbff)
+ mov [esp], edx
+ fldcw [esp] /* load modified control word */
frndint /* round */
- fldcw 4(%esp) /* restore original control word */
+ fldcw [esp + 4] /* restore original control word */
- addl $8,%esp
+ add esp, 8
ret
-#endif
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl _cos
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC _cos
/* FUNCTIONS ***************************************************************/
+.code
_cos:
push ebp
fcos // Take the cosine
pop ebp
ret
+
+END
--- /dev/null
+
+#include <asm.inc>
+
+PUBLIC _exp
+
+/* FUNCTIONS ***************************************************************/
+.code
+
+_exp:
+ push ebp
+ mov ebp, esp
+
+ fld qword ptr [ebp + 8]
+ fldl2e
+ fmul st, st(1)
+ fst st(1)
+ frndint
+ fxch st(1)
+ fsub st, st(1)
+ f2xm1
+ fld1
+ faddp st(1), st
+ fscale
+ fstp st(1)
+
+ pop ebp
+ ret
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl _fabs
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC _fabs
+
/* FUNCTIONS ***************************************************************/
+.code
_fabs:
push ebp
fabs // Take the absolute value
pop ebp
ret
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl _floor
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC _floor
+
/* FUNCTIONS ***************************************************************/
+.code
_floor:
push ebp
fld qword ptr [ebp+8] // Load real from stack
fstcw [ebp-2] // Save control word
fclex // Clear exceptions
- mov word ptr [ebp-4],0x763 // Rounding control word
+ mov word ptr [ebp-4], HEX(0763) // Rounding control word
fldcw [ebp-4] // Set new rounding control
frndint // Round to integer
fclex // Clear exceptions
mov esp,ebp
pop ebp
ret
+
+END
* Removed header file dependency for use in libmingwex.a by
* Danny Smith <dannysmith@users.sourceforge.net>
*/
-#include <_mingw_mac.h>
-
- .file "floorf.S"
- .text
-#ifdef _WIN64
- .align 8
-#else
- .align 4
-#endif
-.globl __MINGW_USYMBOL(floorf)
- .def __MINGW_USYMBOL(floorf); .scl 2; .type 32; .endef
-__MINGW_USYMBOL(floorf):
-#ifdef _WIN64
- subq $24,%rsp
- movss %xmm0,8(%rsp)
- flds 8(%rsp)
-
- fstcw 4(%rsp) /* store fpu control word */
- movl $0x400,%edx /* round towards -oo */
- orl 4(%rsp),%edx
- andl $0xf7ff,%edx
- movl %edx,(%rsp)
- fldcw (%rsp) /* load modified control word */
- frndint /* round */
+#include <asm.inc>
- fldcw 4(%rsp) /* restore original control word */
+.code
+.align 4
- fstps 8(%rsp)
- movss 8(%rsp),%xmm0
- addq $24,%rsp
- ret
-#else
- flds 4(%esp)
- subl $8,%esp
+PUBLIC _floorf
+_floorf:
- fstcw 4(%esp) /* store fpu control word */
+ fld dword ptr [esp + 4]
+ sub esp, 8
+
+ fstcw [esp + 4] /* store fpu control word */
/* We use here %edx although only the low 1 bits are defined.
But none of the operations should care and they are faster
than the 16 bit operations. */
- movl $0x400,%edx /* round towards -oo */
- orl 4(%esp),%edx
- andl $0xf7ff,%edx
- movl %edx,(%esp)
- fldcw (%esp) /* load modified control word */
+ mov edx, [esp + 4]
+ or edx, HEX(0400) /* round towards -oo */
+ and edx, HEX(0f7ff)
+ mov [esp], edx
+ fldcw [esp] /* load modified control word */
frndint /* round */
- fldcw 4(%esp) /* restore original control word */
+ fldcw [esp + 4] /* restore original control word */
- addl $8,%esp
+ add esp, 8
ret
-#endif
+
+END
--- /dev/null
+
+#include <asm.inc>
+
+PUBLIC _fmod
+
+/* FUNCTIONS ***************************************************************/
+.code
+
+_fmod:
+ push ebp
+ mov ebp, esp
+
+ fld qword ptr [ebp + 8]
+ fld qword ptr [ebp + 16]
+ fxch st(1)
+l1:
+ fprem
+ fstsw ax
+ sahf
+ jp l1
+ fstp st(1)
+
+ pop ebp
+ ret
+
+END
--- /dev/null
+
+#include <asm.inc>
+
+PUBLIC _fmodf
+
+/* FUNCTIONS ***************************************************************/
+.code
+
+_fmodf:
+    push ebp
+    mov ebp, esp
+
+    fld dword ptr [ebp + 8] /* x; after push ebp, args start at ebp+8 (esp+4 is the return address) */
+    fld dword ptr [ebp + 12] /* y; floats are 4 bytes, so second arg is at ebp+12 */
+    fxch st(1)
+l1:
+    fprem
+    fstsw ax
+    sahf
+    jp l1
+    fstp st(1)
+
+    pop ebp
+    ret
+
+END
--- /dev/null
+/*
+ * COPYRIGHT: See COPYING in the top level directory
+ * PROJECT: ReactOS kernel
+ * PURPOSE: Run-Time Library
+ * FILE: lib/rtl/i386/ftol2.S
+ * PROGRAMER:
+ *
+ */
+
+#include <asm.inc>
+
+EXTERN __ftol:PROC
+PUBLIC __ftol2
+PUBLIC __ftol2_sse
+
+/* FUNCTIONS ***************************************************************/
+.code
+
+/*
+ * This routine is called by MSVC-generated code to convert from floating point
+ * to integer representation. The floating point number to be converted is
+ * on the top of the floating point stack.
+ */
+__ftol2:
+__ftol2_sse:
+ jmp __ftol
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl __ftol
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC __ftol
+
/* FUNCTIONS ***************************************************************/
+.code
/*
* This routine is called by MSVC-generated code to convert from floating point
fstcw [ebp-2]
wait
mov ax, [ebp-2]
- or ah, 0xC
+ or ah, 12
mov [ebp-4], ax
fldcw [ebp-4]
/* Remove stack frame and return*/
leave
ret
+
+END
* PROGRAMER: Magnus Olsen (magnus@greatlord.com)
*
*/
-
-.globl _log10
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC _log10
/* FUNCTIONS ***************************************************************/
+.code
_log10:
pop ebp
ret
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl _log
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC _log
+
/* FUNCTIONS ***************************************************************/
+.code
_log:
push ebp
fyl2x // Compute the natural log(x)
pop ebp
ret
+
+END
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
-/* Reacros modifications */
+/* Reactos modifications */
+#include <asm.inc>
+
#define ALIGNARG(log2) log2
#define ASM_TYPE_DIRECTIVE(name,typearg)
#define ASM_SIZE_DIRECTIVE(name)
#define cfi_adjust_cfa_offset(x)
-#define ENTRY(x)
-#define END(x)
-.global _pow
- .text
+PUBLIC _pow
+
+.data
+ASSUME nothing
.align ALIGNARG(4)
ASM_TYPE_DIRECTIVE(infinity,@object)
+
inf_zero:
infinity:
- .byte 0, 0, 0, 0, 0, 0, 0xf0, 0x7f
+ .byte 0, 0, 0, 0, 0, 0, HEX(f0), HEX(7f)
ASM_SIZE_DIRECTIVE(infinity)
ASM_TYPE_DIRECTIVE(zero,@object)
-zero: .double 0.0
+zero:
+ .double 0.0
ASM_SIZE_DIRECTIVE(zero)
ASM_TYPE_DIRECTIVE(minf_mzero,@object)
+
minf_mzero:
minfinity:
- .byte 0, 0, 0, 0, 0, 0, 0xf0, 0xff
+ .byte 0, 0, 0, 0, 0, 0, HEX(f0), HEX(ff)
+
mzero:
- .byte 0, 0, 0, 0, 0, 0, 0, 0x80
+ .byte 0, 0, 0, 0, 0, 0, 0, HEX(80)
ASM_SIZE_DIRECTIVE(minf_mzero)
ASM_TYPE_DIRECTIVE(one,@object)
-one: .double 1.0
+
+one:
+ .double 1.0
ASM_SIZE_DIRECTIVE(one)
ASM_TYPE_DIRECTIVE(limit,@object)
-limit: .double 0.29
+
+limit:
+ .double 0.29
ASM_SIZE_DIRECTIVE(limit)
ASM_TYPE_DIRECTIVE(p63,@object)
-p63: .byte 0, 0, 0, 0, 0, 0, 0xe0, 0x43
+
+p63:
+ .byte 0, 0, 0, 0, 0, 0, HEX(e0), HEX(43)
ASM_SIZE_DIRECTIVE(p63)
#ifdef PIC
#define MOX(op,x,f) op##@GOTOFF(%ecx,x,f)
#else
#define MO(op) op
-#define MOX(op,x,f) op(,x,f)
+#define MOX(op,x,f) op[x*f]
#endif
- .text
+.code
_pow:
-ENTRY(__ieee754_pow)
- fldl 12(%esp) // y
+ fld qword ptr [esp + 12] // y
fxam
#ifdef PIC
LOAD_PIC_REG (cx)
#endif
- fnstsw
- movb %ah, %dl
- andb $0x45, %ah
- cmpb $0x40, %ah // is y == 0 ?
- je 11f
+ fnstsw ax
+ mov dl, ah
+ and ah, HEX(045)
+ cmp ah, HEX(040) // is y == 0 ?
+ je L11
- cmpb $0x05, %ah // is y == ±inf ?
- je 12f
+ cmp ah, 5 // is y == ±inf ?
+ je L12
- cmpb $0x01, %ah // is y == NaN ?
- je 30f
+ cmp ah, 1 // is y == NaN ?
+ je L30
- fldl 4(%esp) // x : y
+ fld qword ptr [esp + 4] // x : y
- subl $8,%esp
+ sub esp, 8
cfi_adjust_cfa_offset (8)
fxam
- fnstsw
- movb %ah, %dh
- andb $0x45, %ah
- cmpb $0x40, %ah
- je 20f // x is ±0
+ fnstsw ax
+ mov dh, ah
+ and ah, HEX(45)
+ cmp ah, HEX(040)
+ je L20 // x is ±0
- cmpb $0x05, %ah
- je 15f // x is ±inf
+ cmp ah, 5
+ je L15 // x is ±inf
- fxch // y : x
+ fxch st(1) // y : x
/* fistpll raises invalid exception for |y| >= 1L<<63. */
- fld %st // y : y : x
+ fld st // y : y : x
fabs // |y| : y : x
- fcompl MO(p63) // y : x
- fnstsw
+ fcomp qword ptr MO(p63) // y : x
+ fnstsw ax
sahf
- jnc 2f
+ jnc L2
/* First see whether `y' is a natural number. In this case we
can use a more precise algorithm. */
- fld %st // y : y : x
- fistpll (%esp) // y : x
- fildll (%esp) // int(y) : y : x
- fucomp %st(1) // y : x
- fnstsw
+ fld st // y : y : x
+ fistp qword ptr [esp] // y : x
+ fild qword ptr [esp] // int(y) : y : x
+ fucomp st(1) // y : x
+ fnstsw ax
sahf
- jne 2f
+ jne L2
/* OK, we have an integer value for y. */
- popl %eax
+ pop eax
cfi_adjust_cfa_offset (-4)
- popl %edx
+ pop edx
cfi_adjust_cfa_offset (-4)
- orl $0, %edx
- fstp %st(0) // x
- jns 4f // y >= 0, jump
- fdivrl MO(one) // 1/x (now referred to as x)
- negl %eax
- adcl $0, %edx
- negl %edx
-4: fldl MO(one) // 1 : x
- fxch
-
-6: shrdl $1, %edx, %eax
- jnc 5f
- fxch
- fmul %st(1) // x : ST*x
- fxch
-5: fmul %st(0), %st // x*x : ST*x
- shrl $1, %edx
- movl %eax, %ecx
- orl %edx, %ecx
- jnz 6b
- fstp %st(0) // ST*x
+ or edx, 0
+ fstp st // x
+ jns L4 // y >= 0, jump
+ fdivr qword ptr MO(one) // 1/x (now referred to as x)
+ neg eax
+ adc edx, 0
+ neg edx
+L4: fld qword ptr MO(one) // 1 : x
+ fxch st(1)
+
+L6: shrd eax, edx, 1
+ jnc L5
+ fxch st(1)
+ fmul st, st(1) // x : ST*x
+ fxch st(1)
+L5: fmul st, st // x*x : ST*x
+ shr edx, 1
+ mov ecx, eax
+ or ecx, edx
+ jnz L6
+ fstp st // ST*x
ret
/* y is ±NAN */
-30: fldl 4(%esp) // x : y
- fldl MO(one) // 1.0 : x : y
- fucomp %st(1) // x : y
- fnstsw
+L30:
+ fld qword ptr [esp + 4] // x : y
+ fld qword ptr MO(one) // 1.0 : x : y
+ fucomp st(1) // x : y
+ fnstsw ax
sahf
- je 31f
- fxch // y : x
-31: fstp %st(1)
+ je L31
+ fxch st(1) // y : x
+L31:fstp st(1)
ret
cfi_adjust_cfa_offset (8)
.align ALIGNARG(4)
-2: /* y is a real number. */
- fxch // x : y
- fldl MO(one) // 1.0 : x : y
- fldl MO(limit) // 0.29 : 1.0 : x : y
- fld %st(2) // x : 0.29 : 1.0 : x : y
- fsub %st(2) // x-1 : 0.29 : 1.0 : x : y
+L2: /* y is a real number. */
+ fxch st(1) // x : y
+ fld qword ptr MO(one) // 1.0 : x : y
+ fld qword ptr MO(limit) // 0.29 : 1.0 : x : y
+ fld st(2) // x : 0.29 : 1.0 : x : y
+ fsub st, st(2) // x-1 : 0.29 : 1.0 : x : y
fabs // |x-1| : 0.29 : 1.0 : x : y
fucompp // 1.0 : x : y
- fnstsw
- fxch // x : 1.0 : y
+ fnstsw ax
+ fxch st(1) // x : 1.0 : y
sahf
- ja 7f
- fsub %st(1) // x-1 : 1.0 : y
+ ja L7
+ fsub st, st(1) // x-1 : 1.0 : y
fyl2xp1 // log2(x) : y
- jmp 8f
+ jmp L8
-7: fyl2x // log2(x) : y
-8: fmul %st(1) // y*log2(x) : y
- fst %st(1) // y*log2(x) : y*log2(x)
+L7: fyl2x // log2(x) : y
+L8: fmul st, st(1) // y*log2(x) : y
+ fst st(1) // y*log2(x) : y*log2(x)
frndint // int(y*log2(x)) : y*log2(x)
- fsubr %st, %st(1) // int(y*log2(x)) : fract(y*log2(x))
+ fsubr st(1), st // int(y*log2(x)) : fract(y*log2(x))
fxch // fract(y*log2(x)) : int(y*log2(x))
f2xm1 // 2^fract(y*log2(x))-1 : int(y*log2(x))
- faddl MO(one) // 2^fract(y*log2(x)) : int(y*log2(x))
+ fadd qword ptr MO(one) // 2^fract(y*log2(x)) : int(y*log2(x))
fscale // 2^fract(y*log2(x))*2^int(y*log2(x)) : int(y*log2(x))
- addl $8, %esp
+ add esp, 8
cfi_adjust_cfa_offset (-8)
- fstp %st(1) // 2^fract(y*log2(x))*2^int(y*log2(x))
+ fstp st(1) // 2^fract(y*log2(x))*2^int(y*log2(x))
ret
// pow(x,±0) = 1
.align ALIGNARG(4)
-11: fstp %st(0) // pop y
- fldl MO(one)
+L11:fstp st(0) // pop y
+ fld qword ptr MO(one)
ret
// y == ±inf
.align ALIGNARG(4)
-12: fstp %st(0) // pop y
- fldl MO(one) // 1
- fldl 4(%esp) // x : 1
+L12: fstp st(0) // pop y
+ fld qword ptr MO(one) // 1
+ fld qword ptr [esp + 4] // x : 1
fabs // abs(x) : 1
fucompp // < 1, == 1, or > 1
- fnstsw
- andb $0x45, %ah
- cmpb $0x45, %ah
- je 13f // jump if x is NaN
-
- cmpb $0x40, %ah
- je 14f // jump if |x| == 1
-
- shlb $1, %ah
- xorb %ah, %dl
- andl $2, %edx
- fldl MOX(inf_zero, %edx, 4)
+ fnstsw ax
+ and ah, HEX(45)
+ cmp ah, HEX(45)
+ je L13 // jump if x is NaN
+
+ cmp ah, HEX(40)
+ je L14 // jump if |x| == 1
+
+ shl ah, 1
+ xor dl, ah
+ and edx, 2
+ fld qword ptr MOX(inf_zero, edx, 4)
ret
.align ALIGNARG(4)
-14: fldl MO(one)
+L14:fld qword ptr MO(one)
ret
.align ALIGNARG(4)
-13: fldl 4(%esp) // load x == NaN
+L13:fld qword ptr [esp + 4] // load x == NaN
ret
cfi_adjust_cfa_offset (8)
.align ALIGNARG(4)
// x is ±inf
-15: fstp %st(0) // y
- testb $2, %dh
- jz 16f // jump if x == +inf
+L15: fstp st(0) // y
+ test dh, 2
+ jz L16 // jump if x == +inf
// We must find out whether y is an odd integer.
- fld %st // y : y
- fistpll (%esp) // y
- fildll (%esp) // int(y) : y
+ fld st // y : y
+ fistp qword ptr [esp] // y
+ fild qword ptr [esp] // int(y) : y
fucompp // <empty>
- fnstsw
+ fnstsw ax
sahf
- jne 17f
+ jne L17
// OK, the value is an integer, but is the number of bits small
// enough so that all are coming from the mantissa?
- popl %eax
+ pop eax
cfi_adjust_cfa_offset (-4)
- popl %edx
+ pop edx
cfi_adjust_cfa_offset (-4)
- andb $1, %al
- jz 18f // jump if not odd
- movl %edx, %eax
- orl %edx, %edx
- jns 155f
- negl %eax
-155: cmpl $0x00200000, %eax
- ja 18f // does not fit in mantissa bits
+ and al, 1
+ jz L18 // jump if not odd
+ mov eax, edx
+ or edx, edx
+ jns L155
+ neg eax
+L155:
+ cmp eax, HEX(000200000)
+ ja L18 // does not fit in mantissa bits
// It's an odd integer.
- shrl $31, %edx
- fldl MOX(minf_mzero, %edx, 8)
+ shr edx, 31
+ fld qword ptr MOX(minf_mzero, edx, 8)
ret
cfi_adjust_cfa_offset (8)
.align ALIGNARG(4)
-16: fcompl MO(zero)
- addl $8, %esp
+L16:fcomp qword ptr MO(zero)
+ add esp, 8
cfi_adjust_cfa_offset (-8)
- fnstsw
- shrl $5, %eax
- andl $8, %eax
- fldl MOX(inf_zero, %eax, 1)
+ fnstsw ax
+ shr eax, 5
+ and eax, 8
+ fld qword ptr MOX(inf_zero, eax, 1)
ret
cfi_adjust_cfa_offset (8)
.align ALIGNARG(4)
-17: shll $30, %edx // sign bit for y in right position
-	addl $8, %esp
+L17: shl edx, 30 // sign bit for y in right position (must be edx: L18 does shr edx, 31)
+	add esp, 8
cfi_adjust_cfa_offset (-8)
-18: shrl $31, %edx
- fldl MOX(inf_zero, %edx, 8)
+L18: shr edx, 31
+ fld qword ptr MOX(inf_zero, edx, 8)
ret
cfi_adjust_cfa_offset (8)
.align ALIGNARG(4)
// x is ±0
-20: fstp %st(0) // y
- testb $2, %dl
- jz 21f // y > 0
+L20: fstp st(0) // y
+ test dl, 2
+ jz L21 // y > 0
// x is ±0 and y is < 0. We must find out whether y is an odd integer.
- testb $2, %dh
- jz 25f
+ test dh, 2
+ jz L25
- fld %st // y : y
- fistpll (%esp) // y
- fildll (%esp) // int(y) : y
+ fld st // y : y
+ fistp qword ptr [esp] // y
+ fild qword ptr [esp] // int(y) : y
fucompp // <empty>
- fnstsw
+ fnstsw ax
sahf
- jne 26f
+ jne L26
// OK, the value is an integer, but is the number of bits small
// enough so that all are coming from the mantissa?
- popl %eax
+ pop eax
cfi_adjust_cfa_offset (-4)
- popl %edx
+ pop edx
cfi_adjust_cfa_offset (-4)
- andb $1, %al
- jz 27f // jump if not odd
- cmpl $0xffe00000, %edx
- jbe 27f // does not fit in mantissa bits
+ and al, 1
+ jz L27 // jump if not odd
+ cmp edx, HEX(0ffe00000)
+ jbe L27 // does not fit in mantissa bits
// It's an odd integer.
// Raise divide-by-zero exception and get minus infinity value.
- fldl MO(one)
- fdivl MO(zero)
+ fld qword ptr MO(one)
+ fdiv qword ptr MO(zero)
fchs
ret
cfi_adjust_cfa_offset (8)
-25: fstp %st(0)
-26: addl $8, %esp
+L25: fstp st(0)
+L26: add esp, 8
cfi_adjust_cfa_offset (-8)
-27: // Raise divide-by-zero exception and get infinity value.
- fldl MO(one)
- fdivl MO(zero)
+L27: // Raise divide-by-zero exception and get infinity value.
+ fld qword ptr MO(one)
+ fdiv qword ptr MO(zero)
ret
cfi_adjust_cfa_offset (8)
.align ALIGNARG(4)
// x is ±0 and y is > 0. We must find out whether y is an odd integer.
-21: testb $2, %dh
- jz 22f
+L21:test dh, 2
+ jz L22
- fld %st // y : y
- fistpll (%esp) // y
- fildll (%esp) // int(y) : y
+ fld st // y : y
+ fistp qword ptr [esp] // y
+ fild qword ptr [esp] // int(y) : y
fucompp // <empty>
- fnstsw
+ fnstsw ax
sahf
- jne 23f
+ jne L23
// OK, the value is an integer, but is the number of bits small
// enough so that all are coming from the mantissa?
- popl %eax
+ pop eax
cfi_adjust_cfa_offset (-4)
- popl %edx
+ pop edx
cfi_adjust_cfa_offset (-4)
- andb $1, %al
- jz 24f // jump if not odd
- cmpl $0xffe00000, %edx
- jae 24f // does not fit in mantissa bits
+ and al, 1
+ jz L24 // jump if not odd
+ cmp edx, HEX(0ffe00000)
+ jae L24 // does not fit in mantissa bits
// It's an odd integer.
- fldl MO(mzero)
+ fld qword ptr MO(mzero)
ret
cfi_adjust_cfa_offset (8)
-22: fstp %st(0)
-23: addl $8, %esp // Don't use 2 x pop
+L22: fstp st(0)
+L23: add esp, 8 // Don't use 2 x pop
cfi_adjust_cfa_offset (-8)
-24: fldl MO(zero)
+L24: fld qword ptr MO(zero)
ret
-END(__ieee754_pow)
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl _sin
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC _sin
/* FUNCTIONS ***************************************************************/
+.code
_sin:
push ebp // Save register bp
fsin // Take the sine
pop ebp // Restore register bp
ret
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl _sqrt
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC _sqrt
+
/* FUNCTIONS ***************************************************************/
+.code
_sqrt:
push ebp
fsqrt // Take the square root
pop ebp
ret
+
+END
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
-
-.globl _tan
-
-.intel_syntax noprefix
+#include <asm.inc>
+
+PUBLIC _tan
+
/* FUNCTIONS ***************************************************************/
+.code
_tan:
push ebp
mov esp,ebp // Deallocate temporary space
pop ebp
ret
+
+END
* FILE: lib/sdk/crt/mem/i386/memchr.s
*/
+#include <asm.inc>
+#include <ks386.inc>
+
/*
* void* memchr(const void* s, int c, size_t n)
*/
-.globl _memchr
+PUBLIC _memchr
+.code
_memchr:
- push %ebp
- mov %esp,%ebp
- push %edi
- mov 0x8(%ebp),%edi
- mov 0xc(%ebp),%eax
- mov 0x10(%ebp),%ecx
+ push ebp
+ mov ebp, esp
+ push edi
+ mov edi, [ebp + 8]
+ mov eax, [ebp + 12]
+ mov ecx, [ebp + 16]
cld
- jecxz .Lnotfound
- repne scasb
- je .Lfound
+ jecxz .Lnotfound
+ repne scasb
+ je .Lfound
.Lnotfound:
- mov $1,%edi
+ mov edi, 1
.Lfound:
- mov %edi,%eax
- dec %eax
- pop %edi
+ mov eax, edi
+ dec eax
+ pop edi
leave
ret
+
+END
-/*
- * void *memcpy (void *to, const void *from, size_t count)
- *
- * NOTE: This code is a duplicate of memmove function from memmove_asm.s
- */
-
-.globl _memcpy
-
-_memcpy:
- push %ebp
- mov %esp,%ebp
-
- push %esi
- push %edi
-
- mov 8(%ebp),%edi
- mov 12(%ebp),%esi
- mov 16(%ebp),%ecx
-
- cmp %esi,%edi
- jbe .CopyUp
- mov %ecx,%eax
- add %esi,%eax
- cmp %eax,%edi
- jb .CopyDown
-
-.CopyUp:
- cld
-
- cmp $16,%ecx
- jb .L1
- mov %ecx,%edx
- test $3,%edi
- je .L2
-/*
- * Make the destination dword aligned
- */
- mov %edi,%ecx
- and $3,%ecx
- sub $5,%ecx
- not %ecx
- sub %ecx,%edx
- rep movsb
- mov %edx,%ecx
-.L2:
- shr $2,%ecx
- rep movsl
- mov %edx,%ecx
- and $3,%ecx
-.L1:
- test %ecx,%ecx
- je .L3
- rep movsb
-.L3:
- mov 8(%ebp),%eax
- pop %edi
- pop %esi
- leave
- ret
-
-.CopyDown:
- std
-
- add %ecx,%edi
- add %ecx,%esi
-
- cmp $16,%ecx
- jb .L4
- mov %ecx,%edx
- test $3,%edi
- je .L5
-
-/*
- * Make the destination dword aligned
- */
- mov %edi,%ecx
- and $3,%ecx
- sub %ecx,%edx
- dec %esi
- dec %edi
- rep movsb
- mov %edx,%ecx
-
- sub $3,%esi
- sub $3,%edi
-.L6:
- shr $2,%ecx
- rep movsl
- mov %edx,%ecx
- and $3,%ecx
- je .L7
- add $3,%esi
- add $3,%edi
-.L8:
- rep movsb
-.L7:
- cld
- mov 8(%ebp),%eax
- pop %edi
- pop %esi
- leave
- ret
-.L5:
- sub $4,%edi
- sub $4,%esi
- jmp .L6
-
-.L4:
- test %ecx,%ecx
- je .L7
- dec %esi
- dec %edi
- jmp .L8
-
/*
- * void *memmove (void *to, const void *from, size_t count)
+ * void *memcpy (void *to, const void *from, size_t count)
*
- * NOTE: This code is duplicated in memcpy_asm.s
*/
-.globl _memmove
+#include <asm.inc>
+#include <ks386.inc>
+PUBLIC _memcpy
+PUBLIC _memmove
+.code
+
+_memcpy:
_memmove:
- push %ebp
- mov %esp,%ebp
+ push ebp
+ mov ebp, esp
- push %esi
- push %edi
+ push esi
+ push edi
- mov 8(%ebp),%edi
- mov 12(%ebp),%esi
- mov 16(%ebp),%ecx
+ mov edi, [ebp + 8]
+ mov esi, [ebp + 12]
+ mov ecx, [ebp + 16]
- cmp %esi,%edi
+ cmp edi, esi
jbe .CopyUp
- mov %ecx,%eax
- add %esi,%eax
- cmp %eax,%edi
- jb .CopyDown
-
+ mov eax, ecx
+ add eax, esi
+ cmp edi, eax
+ jb .CopyDown
+
.CopyUp:
cld
- cmp $16,%ecx
- jb .L1
- mov %ecx,%edx
- test $3,%edi
- je .L2
+ cmp ecx, 16
+ jb .L1
+ mov edx, ecx
+ test edi, 3
+ je .L2
/*
* Make the destination dword aligned
*/
- mov %edi,%ecx
- and $3,%ecx
- sub $5,%ecx
- not %ecx
- sub %ecx,%edx
- rep movsb
- mov %edx,%ecx
+ mov ecx, edi
+ and ecx, 3
+ sub ecx, 5
+ not ecx
+ sub edx, ecx
+ rep movsb
+ mov ecx, edx
.L2:
- shr $2,%ecx
- rep movsl
- mov %edx,%ecx
- and $3,%ecx
+ shr ecx, 2
+ rep movsd
+ mov ecx, edx
+ and ecx, 3
.L1:
- test %ecx,%ecx
- je .L3
- rep movsb
+ test ecx, ecx
+ je .L3
+ rep movsb
.L3:
- mov 8(%ebp),%eax
- pop %edi
- pop %esi
+ mov eax, [ebp + 8]
+ pop edi
+ pop esi
leave
ret
.CopyDown:
- std
+ std
- add %ecx,%edi
- add %ecx,%esi
+ add edi, ecx
+ add esi, ecx
- cmp $16,%ecx
- jb .L4
- mov %ecx,%edx
- test $3,%edi
- je .L5
+ cmp ecx, 16
+ jb .L4
+ mov edx, ecx
+ test edi, 3
+ je .L5
/*
* Make the destination dword aligned
*/
- mov %edi,%ecx
- and $3,%ecx
- sub %ecx,%edx
- dec %esi
- dec %edi
- rep movsb
- mov %edx,%ecx
+ mov ecx, edi
+ and ecx, 3
+ sub edx, ecx
+ dec esi
+ dec edi
+ rep movsb
+ mov ecx, edx
- sub $3,%esi
- sub $3,%edi
+ sub esi, 3
+ sub edi, 3
.L6:
- shr $2,%ecx
- rep movsl
- mov %edx,%ecx
- and $3,%ecx
- je .L7
- add $3,%esi
- add $3,%edi
+ shr ecx, 2
+ rep movsd
+ mov ecx, edx
+ and ecx, 3
+ je .L7
+ add esi, 3
+ add edi, 3
.L8:
- rep movsb
+ rep movsb
.L7:
cld
- mov 8(%ebp),%eax
- pop %edi
- pop %esi
+ mov eax, [ebp + 8]
+ pop edi
+ pop esi
leave
ret
.L5:
- sub $4,%edi
- sub $4,%esi
- jmp .L6
-
+ sub edi, 4
+ sub esi, 4
+ jmp .L6
+
.L4:
- test %ecx,%ecx
- je .L7
- dec %esi
- dec %edi
- jmp .L8
+ test ecx, ecx
+ je .L7
+ dec esi
+ dec edi
+ jmp .L8
+END
* $Id$
*/
+#include <asm.inc>
+#include <ks386.inc>
+
/*
* void *memset (void *src, int val, size_t count)
*/
-.globl _memset
+PUBLIC _memset
+.code
_memset:
- push %ebp
- mov %esp,%ebp
- push %edi
- mov 0x8(%ebp),%edi
- movzb 0xc(%ebp),%eax
- mov 0x10(%ebp),%ecx
+ push ebp
+ mov ebp, esp
+ push edi
+ mov edi, [ebp + 8]
+ movzx eax, byte ptr [ebp + 12]
+ mov ecx, [ebp + 16]
cld
- cmp $16,%ecx
- jb .L1
- mov $0x01010101,%edx
- mul %edx
- mov %ecx,%edx
- test $3,%edi
- je .L2
- mov %edi,%ecx
- and $3,%ecx
- sub $5,%ecx
- not %ecx
- sub %ecx,%edx
- rep stosb
- mov %edx,%ecx
+ cmp ecx, 16
+ jb .L1
+ mov edx, HEX(01010101)
+ mul edx
+ mov edx, ecx
+ test edi, 3
+ je .L2
+ mov ecx, edi
+ and ecx, 3
+ sub ecx, 5
+ not ecx
+ sub edx, ecx
+ rep stosb
+ mov ecx, edx
.L2:
- shr $2,%ecx
- rep stosl
- mov %edx,%ecx
- and $3,%ecx
+ shr ecx, 2
+ rep stosd
+ mov ecx, edx
+ and ecx, 3
.L1:
- test %ecx,%ecx
- je .L3
- rep stosb
+ test ecx, ecx
+ je .L3
+ rep stosb
.L3:
- pop %edi
- mov 0x8(%ebp),%eax
+ pop edi
+ mov eax, [ebp + 8]
leave
ret
+END
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/amd64/asm.h>
+#include <asm.inc>
+#include <ksamd64.inc>
#define JUMP_BUFFER_Frame 0x00
#define JUMP_BUFFER_Rbx 0x08
inc rax
2: jmp r8
.endp longjmp
+
+END
* complete implementation
*/
+#include <asm.inc>
+
#define JB_BP 0
#define JB_BX 1
#define JB_DI 2
#define JMPBUF 4
+.code
/*
* int
* _setjmp(jmp_buf env);
* Notes:
* Sets up the jmp_buf
*/
-.globl __setjmp
+PUBLIC __setjmp
__setjmp:
- xorl %eax, %eax
- movl JMPBUF(%esp), %edx
+ xor eax, eax
+ mov edx, JMPBUF[esp]
/* Save registers. */
- movl %ebp, (JB_BP*4)(%edx) /* Save caller's frame pointer. */
- movl %ebx, (JB_BX*4)(%edx)
- movl %edi, (JB_DI*4)(%edx)
- movl %esi, (JB_SI*4)(%edx)
- leal JMPBUF(%esp), %ecx /* Save SP as it will be after we return. */
- movl %ecx, (JB_SP*4)(%edx)
- movl PCOFF(%esp), %ecx /* Save PC we are returning to now. */
- movl %ecx, (JB_IP*4)(%edx)
+ mov [edx + JB_BP*4], ebp /* Save caller's frame pointer. */
+ mov [edx + JB_BX*4], ebx
+ mov [edx + JB_DI*4], edi
+ mov [edx + JB_SI*4], esi
+ lea ecx, JMPBUF[esp] /* Save SP as it will be after we return. */
+ mov [edx + JB_SP*4], ecx
+ mov ecx, PCOFF[esp] /* Save PC we are returning to now. */
+ mov [edx + JB_IP*4], ecx
ret
/*
* Notes:
* Sets up the jmp_buf
*/
-.globl __setjmp3
+PUBLIC __setjmp3
__setjmp3:
- xorl %eax, %eax
- movl JMPBUF(%esp), %edx
+ xor eax, eax
+ mov edx, JMPBUF[esp]
/* Save registers. */
- movl %ebp, (JB_BP*4)(%edx) /* Save caller's frame pointer. */
- movl %ebx, (JB_BX*4)(%edx)
- movl %edi, (JB_DI*4)(%edx)
- movl %esi, (JB_SI*4)(%edx)
- leal JMPBUF(%esp), %ecx /* Save SP as it will be after we return. */
- movl %ecx, (JB_SP*4)(%edx)
- movl PCOFF(%esp), %ecx /* Save PC we are returning to now. */
- movl %ecx, (JB_IP*4)(%edx)
+ mov [edx + JB_BP*4], ebp /* Save caller's frame pointer. */
+ mov [edx + JB_BX*4], ebx
+ mov [edx + JB_DI*4], edi
+ mov [edx + JB_SI*4], esi
+ lea ecx, JMPBUF[esp] /* Save SP as it will be after we return. */
+ mov [edx + JB_SP*4], ecx
+ mov ecx, PCOFF[esp] /* Save PC we are returning to now. */
+ mov [edx + JB_IP*4], ecx
ret
-#define VAL 8
-
/*
* void
* longjmp(jmp_buf env, int value);
* Notes:
* Non-local goto
*/
-.globl _longjmp
+PUBLIC _longjmp
_longjmp:
-	movl JMPBUF(%esp), %ecx	/* User's jmp_buf in %ecx. */
+	mov ecx, JMPBUF[esp] /* User's jmp_buf in %ecx. */
-	movl VAL(%esp), %eax	/* Second argument is return value. */
+	mov eax, [esp + 8] /* Second argument is return value. */
	/* Save the return address now. */
-	movl (JB_IP*4)(%ecx), %edx
+	mov edx, [ecx + JB_IP*4] /* base is ecx (the jmp_buf), edx is uninitialized here */
	/* Restore registers. */
-	movl (JB_BP*4)(%ecx), %ebp
-	movl (JB_BX*4)(%ecx), %ebx
-	movl (JB_DI*4)(%ecx), %edi
-	movl (JB_SI*4)(%ecx), %esi
-	movl (JB_SP*4)(%ecx), %esp
+	mov ebp, [ecx + JB_BP*4]
+	mov ebx, [ecx + JB_BX*4]
+	mov edi, [ecx + JB_DI*4]
+	mov esi, [ecx + JB_SI*4]
+	mov esp, [ecx + JB_SP*4]
	/* Jump to saved PC. */
-	jmp *%edx
+	jmp edx /* jmp *%edx jumps to the value in edx, not through memory at [edx] */
+
+END
/* $Id$
*/
-#include "tcscat.h"
+#include "tcscat.inc"
/* EOF */
/* $Id$
*/
-#include "tcschr.h"
+#include "tcschr.inc"
/* EOF */
/* $Id$
*/
-#include "tcscmp.h"
+#include "tcscmp.inc"
/* EOF */
/* $Id$
*/
-#include "tcscpy.h"
+#include "tcscpy.inc"
/* EOF */
/* $Id$
*/
-#include "tcslen.h"
+#include "tcslen.inc"
/* EOF */
/* $Id$
*/
-#include "tcsncat.h"
+#include "tcsncat.inc"
/* EOF */
/* $Id$
*/
-#include "tcsncmp.h"
+#include "tcsncmp.inc"
/* EOF */
/* $Id$
*/
-#include "tcsncpy.h"
+#include "tcsncpy.inc"
/* EOF */
/* $Id$
*/
-#include "tcsnlen.h"
+#include "tcsnlen.inc"
/* EOF */
/* $Id$
*/
-#include "tcsrchr.h"
+#include "tcsrchr.inc"
/* EOF */
#define _tlods lodsw
#define _tstos stosw
-#define _tsize $2
+#define _tsize 2
#define _treg(_O_) _O_ ## x
-#define _tdec(_O_) sub $2, _O_
-#define _tinc(_O_) add $2, _O_
+#define _tdec(_O_) sub _O_, 2
+#define _tinc(_O_) add _O_, 2
#else
#define _tlods lodsb
#define _tstos stosb
-#define _tsize $1
+#define _tsize 1
#define _treg(_O_) _O_ ## l
+++ /dev/null
-/* $Id$
- */
-
-#include "tchar.h"
-
-.globl _tcscat
-
-_tcscat:
- push %esi
- push %edi
- mov 0x0C(%esp), %edi
- mov 0x10(%esp), %esi
-
- xor %eax, %eax
- mov $-1, %ecx
- cld
-
- repne _tscas
- _tdec(%edi)
-
-.L1:
- _tlods
- _tstos
- test %_treg(a), %_treg(a)
- jnz .L1
-
- mov 0x0C(%esp), %eax
- pop %edi
- pop %esi
- ret
-
-/* EOF */
--- /dev/null
+/* $Id: tcscat.inc 49591 2010-11-15 01:29:12Z tkreuzer $\r
+ */\r
+\r
+#include "tchar.h"\r
+#include <asm.inc>\r
+\r
+PUBLIC _tcscat\r
+.code\r
+\r
+_tcscat:\r
+ push esi\r
+ push edi\r
+ mov edi, [esp + 12]\r
+ mov esi, [esp + 16]\r
+\r
+ xor eax, eax\r
+ mov ecx, -1\r
+ cld\r
+\r
+ repne _tscas\r
+ _tdec(edi)\r
+\r
+.L1:\r
+ _tlods\r
+ _tstos\r
+ test _treg(a), _treg(a)\r
+ jnz .L1\r
+\r
+ mov eax, [esp + 12]\r
+ pop edi\r
+ pop esi\r
+ ret\r
+\r
+END\r
+/* EOF */\r
+++ /dev/null
-/* $Id$
- */
-
-#include "tchar.h"
-
-.globl _tcschr
-
-_tcschr:
- push %esi
- mov 0x8(%esp), %esi
- mov 0xC(%esp), %edx
-
- cld
-
-.L1:
- _tlods
- cmp %_treg(a), %_treg(d)
- je .L2
- test %_treg(a), %_treg(a)
- jnz .L1
- mov _tsize, %esi
-
-.L2:
- mov %esi, %eax
- _tdec(%eax)
-
- pop %esi
- ret
-
-/* EOF */
--- /dev/null
+/* $Id: tcschr.inc 49591 2010-11-15 01:29:12Z tkreuzer $\r
+ */\r
+\r
+#include "tchar.h"\r
+#include <asm.inc>\r
+\r
+PUBLIC _tcschr\r
+.code\r
+\r
+_tcschr:\r
+ push esi\r
+ mov esi, [esp + 8]\r
+ mov edx, [esp + 12]\r
+ cld\r
+\r
+.L1:\r
+ _tlods\r
+ cmp _treg(d), _treg(a)\r
+ je .L2\r
+ test _treg(a), _treg(a)\r
+ jnz .L1\r
+ mov esi, _tsize\r
+\r
+.L2:\r
+ mov eax, esi\r
+ _tdec(eax)\r
+\r
+ pop esi\r
+ ret\r
+\r
+END\r
+/* EOF */\r
+++ /dev/null
-/* $Id$
- */
-
-#include "tchar.h"
-
-.globl _tcscmp
-
-_tcscmp:
- push %esi
- push %edi
- mov 0x0C(%esp), %esi
- mov 0x10(%esp), %edi
- xor %eax, %eax
- cld
-
-.L1:
- _tlods
- _tscas
- jne .L2
- test %eax, %eax
- jne .L1
- xor %eax, %eax
- jmp .L3
-
-.L2:
- sbb %eax, %eax
- or $1, %al
-
-.L3:
- pop %edi
- pop %esi
- ret
-
-/* EOF */
--- /dev/null
+/* $Id: tcscmp.inc 49591 2010-11-15 01:29:12Z tkreuzer $\r
+ */\r
+\r
+#include "tchar.h"\r
+#include <asm.inc>\r
+\r
+PUBLIC _tcscmp\r
+.code\r
+\r
+_tcscmp:\r
+ push esi\r
+ push edi\r
+ mov esi, [esp + 12]\r
+ mov edi, [esp + 16]\r
+ xor eax, eax\r
+ cld\r
+\r
+.L1:\r
+ _tlods\r
+ _tscas\r
+ jne .L2\r
+ test eax, eax\r
+ jne .L1\r
+ xor eax, eax\r
+ jmp .L3\r
+\r
+.L2:\r
+ sbb eax, eax\r
+ or al, 1\r
+\r
+.L3:\r
+ pop edi\r
+ pop esi\r
+ ret\r
+\r
+END\r
+/* EOF */\r
+++ /dev/null
-/* $Id$
- */
-
-#include "tchar.h"
-
-.globl _tcscpy
-
-_tcscpy:
- push %esi
- push %edi
- mov 0x0C(%esp), %edi
- mov 0x10(%esp), %esi
- cld
-
-.L1:
- _tlods
- _tstos
- test %_treg(a), %_treg(a)
- jnz .L1
-
- mov 0x0C(%esp), %eax
-
- pop %edi
- pop %esi
- ret
-
-/* EOF */
--- /dev/null
+/* $Id: tcscpy.inc 49591 2010-11-15 01:29:12Z tkreuzer $\r
+ */\r
+\r
+#include "tchar.h"\r
+#include <asm.inc>\r
+\r
+PUBLIC _tcscpy\r
+.code\r
+\r
+_tcscpy:\r
+ push esi\r
+ push edi\r
+ mov edi, [esp + 12]\r
+ mov esi, [esp + 16]\r
+ cld\r
+\r
+.L1:\r
+ _tlods\r
+ _tstos\r
+ test _treg(a), _treg(a)\r
+ jnz .L1\r
+\r
+ mov eax, [esp + 12]\r
+\r
+ pop edi\r
+ pop esi\r
+ ret\r
+\r
+END\r
+/* EOF */\r
+++ /dev/null
-/* $Id$
-*/
-
-#include "tchar.h"
-
-.globl _tcslen
-
-_tcslen:
- push %edi
- mov 0x8(%esp), %edi
- xor %eax, %eax
- test %edi,%edi
- jz _tcslen_end
-
- mov $-1, %ecx
- cld
-
- repne _tscas
-
- not %ecx
- dec %ecx
-
- mov %ecx, %eax
-
-_tcslen_end:
- pop %edi
- ret
-
-/* EOF */
--- /dev/null
+/* $Id: tcslen.inc 49591 2010-11-15 01:29:12Z tkreuzer $\r
+*/\r
+\r
+#include "tchar.h"\r
+#include <asm.inc>\r
+\r
+PUBLIC _tcslen\r
+.code\r
+\r
+_tcslen:\r
+ push edi\r
+ mov edi, [esp + 8]\r
+ xor eax, eax\r
+ test edi, edi\r
+ jz _tcslen_end\r
+\r
+ mov ecx, -1\r
+ cld\r
+\r
+ repne _tscas\r
+\r
+ not ecx\r
+ dec ecx\r
+\r
+ mov eax, ecx\r
+\r
+_tcslen_end:\r
+ pop edi\r
+ ret\r
+\r
+END\r
+/* EOF */\r
+++ /dev/null
-/* $Id$
- */
-
-#include "tchar.h"
-
-.globl _tcsncat
-
-_tcsncat:
- push %esi
- push %edi
- mov 0x0C(%esp), %edi
- mov 0x10(%esp), %esi
- cld
-
- xor %eax, %eax
- mov $-1, %ecx
- repne _tscas
- _tdec(%edi)
-
- mov 0x14(%esp),%ecx
-
-.L1:
- dec %ecx
- js .L2
- _tlods
- _tstos
- test %_treg(a), %_treg(a)
- jne .L1
- jmp .L3
-
-.L2:
- xor %eax, %eax
- _tstos
-
-.L3:
- mov 0x0C(%esp), %eax
- pop %edi
- pop %esi
-
- ret
-
-/* EOF */
--- /dev/null
+/* $Id: tcsncat.inc 49591 2010-11-15 01:29:12Z tkreuzer $\r
+ */\r
+\r
+#include "tchar.h"\r
+#include <asm.inc>\r
+\r
+PUBLIC _tcsncat\r
+.code\r
+\r
+_tcsncat:\r
+ push esi\r
+ push edi\r
+ mov edi, [esp + 12]\r
+ mov esi, [esp + 16]\r
+ cld\r
+\r
+ xor eax, eax\r
+ mov ecx, -1\r
+ repne _tscas\r
+ _tdec(edi)\r
+\r
+ mov ecx, [esp + 20]\r
+\r
+.L1:\r
+ dec ecx\r
+ js .L2\r
+ _tlods\r
+ _tstos\r
+ test _treg(a), _treg(a)\r
+ jne .L1\r
+ jmp .L3\r
+\r
+.L2:\r
+ xor eax, eax\r
+ _tstos\r
+\r
+.L3:\r
+ mov eax, [esp + 12]\r
+ pop edi\r
+ pop esi\r
+\r
+ ret\r
+\r
+END\r
+/* EOF */\r
+++ /dev/null
-/* $Id$
- */
-
-#include "tchar.h"
-
-.globl _tcsncmp
-
-_tcsncmp:
- push %esi
- push %edi
- mov 0x0C(%esp), %esi /* s1 */
- mov 0x10(%esp), %edi /* s2 */
- mov 0x14(%esp), %ecx /* n */
-
- xor %eax,%eax
- cld
-
-.L1:
- dec %ecx
- js .L2
- _tlods
- _tscas
- jne .L3
- test %eax, %eax
- jne .L1
-
-.L2:
- xor %eax, %eax
- jmp .L4
-
-.L3:
- sbb %eax, %eax
- or $1, %al
-
-.L4:
- pop %edi
- pop %esi
- ret
-
-/* EOF */
--- /dev/null
+/* $Id: tcsncmp.inc 49591 2010-11-15 01:29:12Z tkreuzer $\r
+ */\r
+\r
+#include "tchar.h"\r
+#include <asm.inc>\r
+\r
+PUBLIC _tcsncmp\r
+.code\r
+\r
+_tcsncmp:\r
+ push esi\r
+ push edi\r
+ mov esi, [esp + 12] /* s1 */\r
+ mov edi, [esp + 16] /* s2 */\r
+ mov ecx, [esp + 20] /* n */\r
+\r
+ xor eax, eax\r
+ cld\r
+\r
+.L1:\r
+ dec ecx\r
+ js .L2\r
+ _tlods\r
+ _tscas\r
+ jne .L3\r
+ test eax, eax\r
+ jne .L1\r
+\r
+.L2:\r
+ xor eax, eax\r
+ jmp .L4\r
+\r
+.L3:\r
+ sbb eax, eax\r
+ or al, 1\r
+\r
+.L4:\r
+ pop edi\r
+ pop esi\r
+ ret\r
+\r
+END\r
+/* EOF */\r
+++ /dev/null
-/* $Id$
- */
-
-#include "tchar.h"
-
-.globl _tcsncpy
-
-_tcsncpy:
- push %esi
- push %edi
- mov 0x0C(%esp), %edi /* s1 */
- mov 0x10(%esp), %esi /* s2 */
- mov 0x14(%esp), %ecx /* n */
-
- xor %eax, %eax
- cld
-
-.L1:
- dec %ecx
- js .L2
- _tlods
- _tstos
- test %_treg(a), %_treg(a)
- jnz .L1
- rep _tstos
-
-.L2:
- mov 0x0C(%esp), %eax
-
- pop %edi
- pop %esi
- ret
-
-/* EOF */
--- /dev/null
+/* $Id: tcsncpy.inc 49591 2010-11-15 01:29:12Z tkreuzer $\r
+ */\r
+\r
+#include "tchar.h"\r
+#include <asm.inc>\r
+\r
+PUBLIC _tcsncpy\r
+.code\r
+\r
+_tcsncpy:\r
+ push esi\r
+ push edi\r
+ mov edi, [esp + 12] /* s1 */\r
+ mov esi, [esp + 16] /* s2 */\r
+ mov ecx, [esp + 20] /* n */\r
+\r
+ xor eax, eax\r
+ cld\r
+\r
+.L1:\r
+ dec ecx\r
+ js .L2\r
+ _tlods\r
+ _tstos\r
+ test _treg(a), _treg(a)\r
+ jnz .L1\r
+ rep _tstos\r
+\r
+.L2:\r
+ mov eax, [esp + 12]\r
+\r
+ pop edi\r
+ pop esi\r
+ ret\r
+\r
+END\r
+/* EOF */\r
+++ /dev/null
-/* $Id$
-*/
-
-#include "tchar.h"
-
-.globl _tcsnlen
-
-_tcsnlen:
- push %edi
- mov 0x8(%esp), %edi
- mov 0xC(%esp), %ecx
- xor %eax, %eax
- test %ecx, %ecx
- jz .L1
- mov %ecx, %edx
-
- cld
-
- repne _tscas
-
- sete %al
- sub %ecx, %edx
- sub %eax, %edx
- mov %edx, %eax
-
-.L1:
- pop %edi
- ret
-
-/* EOF */
--- /dev/null
+/* $Id: tcsnlen.inc 49591 2010-11-15 01:29:12Z tkreuzer $\r
+*/\r
+\r
+#include "tchar.h"\r
+#include <asm.inc>\r
+\r
+PUBLIC _tcsnlen\r
+.code\r
+\r
+_tcsnlen:\r
+ push edi\r
+ mov edi, [esp + 8]\r
+ mov ecx, [esp + 12]\r
+ xor eax, eax\r
+ test ecx, ecx\r
+ jz .L1\r
+ mov edx, ecx\r
+\r
+ cld\r
+\r
+ repne _tscas\r
+\r
+ sete al\r
+ sub edx, ecx\r
+ sub edx, eax\r
+ mov eax, edx\r
+\r
+.L1:\r
+ pop edi\r
+ ret\r
+\r
+END\r
+/* EOF */\r
+++ /dev/null
-/* $Id$
- */
-
-#include "tchar.h"
-
-.globl _tcsrchr
-
-_tcsrchr:
- push %esi
- mov 0x8(%esp), %esi
- mov 0xC(%esp), %edx
-
- cld
- mov _tsize, %ecx
-
-.L1:
- _tlods
- cmp %_treg(a), %_treg(d)
- jne .L2
- mov %esi, %ecx
-
-.L2:
- test %_treg(a), %_treg(a)
- jnz .L1
-
- mov %ecx, %eax
- _tdec(%eax)
- pop %esi
- ret
-
-/* EOF */
--- /dev/null
+/* $Id: tcsrchr.inc 49591 2010-11-15 01:29:12Z tkreuzer $\r
+ */\r
+\r
+#include "tchar.h"\r
+#include <asm.inc>\r
+\r
+PUBLIC _tcsrchr\r
+.code\r
+\r
+_tcsrchr:\r
+ push esi\r
+ mov esi, [esp + 8]\r
+ mov edx, [esp + 12]\r
+\r
+ cld\r
+ mov ecx, _tsize\r
+\r
+.L1:\r
+ _tlods\r
+ cmp _treg(d), _treg(a)\r
+ jne .L2\r
+ mov ecx, esi\r
+\r
+.L2:\r
+ test _treg(a), _treg(a)\r
+ jnz .L1\r
+\r
+ mov eax, ecx\r
+ _tdec(eax)\r
+ pop esi\r
+ ret\r
+\r
+END\r
+/* EOF */\r
*/
#define _UNICODE
-#include "tcscat.h"
+#include "tcscat.inc"
/* EOF */
*/
#define _UNICODE
-#include "tcschr.h"
+#include "tcschr.inc"
/* EOF */
*/
#define _UNICODE
-#include "tcscmp.h"
+#include "tcscmp.inc"
/* EOF */
*/
#define _UNICODE
-#include "tcscpy.h"
+#include "tcscpy.inc"
/* EOF */
*/
#define _UNICODE
-#include "tcslen.h"
+#include "tcslen.inc"
/* EOF */
*/
#define _UNICODE
-#include "tcsncat.h"
+#include "tcsncat.inc"
/* EOF */
*/
#define _UNICODE
-#include "tcsncmp.h"
+#include "tcsncmp.inc"
/* EOF */
*/
#define _UNICODE
-#include "tcsncpy.h"
+#include "tcsncpy.inc"
/* EOF */
*/
#define _UNICODE
-#include "tcsnlen.h"
+#include "tcsnlen.inc"
/* EOF */
*/
#define _UNICODE
-#include "tcsrchr.h"
+#include "tcsrchr.inc"
/* EOF */
/* INCLUDES ******************************************************************/
-#include <reactos/asm.h>
-#include <ndk/asm.h>
+#include <asm.inc>
+#include <ks386.inc>
#include <internal/i386/asmmacro.S>
/* FUNCTIONS ****************************************************************/
.code32
-.text
/*
* NOTE: These functions must obey the following rules:
*ExInterlockedAddLargeStatistic(IN PLARGE_INTEGER Addend,
* IN ULONG Increment)
*/
-.global @ExInterlockedAddLargeStatistic@8
+PUBLIC @ExInterlockedAddLargeStatistic@8
@ExInterlockedAddLargeStatistic@8:
#ifdef CONFIG_SMP
lock add [ecx], edx
/* Check for carry bit and return */
- jb 1f
+ jb .l1
ret
-1:
+.l1:
/* Add carry */
lock adc dword ptr [ecx+4], 0
#else
* IN ULONG Increment,
* IN PKSPIN_LOCK Lock)
*/
-.global @ExfInterlockedAddUlong@12
+PUBLIC @ExfInterlockedAddUlong@12
@ExfInterlockedAddUlong@12:
/* Save flags */
* IN PLIST_ENTRY ListEntry,
* IN PKSPIN_LOCK Lock)
*/
-.global @ExfInterlockedInsertHeadList@12
+PUBLIC @ExfInterlockedInsertHeadList@12
@ExfInterlockedInsertHeadList@12:
#ifdef CONFIG_SMP
/* Check if list was empty */
xor eax, ecx
- jz 2f
+ jz .l2
/* Return list pointer */
xor eax, ecx
-2:
+.l2:
ret 4
#ifdef CONFIG_SMP
* IN PLIST_ENTRY ListEntry,
* IN PKSPIN_LOCK Lock)
*/
-.global @ExfInterlockedInsertTailList@12
+PUBLIC @ExfInterlockedInsertTailList@12
@ExfInterlockedInsertTailList@12:
#ifdef CONFIG_SMP
/* Check if list was empty */
xor eax, ecx
- jz 2f
+ jz .l3
/* Return list pointer */
xor eax, ecx
-2:
+.l3:
ret 4
#ifdef CONFIG_SMP
*ExfInterlockedRemoveHeadList(IN PLIST_ENTRY ListHead,
* IN PKSPIN_LOCK Lock)
*/
-.global @ExfInterlockedRemoveHeadList@8
+PUBLIC @ExfInterlockedRemoveHeadList@8
@ExfInterlockedRemoveHeadList@8:
/* Save flags and disable interrupts */
/* Check if it's empty */
cmp eax, ecx
- je 2f
+ je .l4
/* Get the next entry and do the deletion */
#ifdef CONFIG_SMP
/* Return */
ret
-2:
+.l4:
/* Release lock */
RELEASE_SPINLOCK(edx)
*ExfInterlockedPopEntryList(IN PSINGLE_LIST_ENTRY ListHead,
* IN PKSPIN_LOCK Lock)
*/
-.global @ExfInterlockedPopEntryList@8
+PUBLIC @ExfInterlockedPopEntryList@8
@ExfInterlockedPopEntryList@8:
/* Save flags and disable interrupts */
/* Check if it's empty */
or eax, eax
- je 3f
+ je .l6
/* Get next entry and do deletion */
#ifdef CONFIG_SMP
pop edx
#endif
-2:
+.l5:
/* Release lock */
RELEASE_SPINLOCK(edx)
/* Return */
ret
-3:
+.l6:
/* Return empty list */
xor eax, eax
- jmp 2b
+ jmp .l5
#ifdef CONFIG_SMP
.spin5:
* IN PSINGLE_LIST_ENTRY ListEntry,
* IN PKSPIN_LOCK Lock)
*/
-.global @ExfInterlockedPushEntryList@12
+PUBLIC @ExfInterlockedPushEntryList@12
@ExfInterlockedPushEntryList@12:
/* Save flags */
*ExInterlockedPopEntrySList(IN PSINGLE_LIST_ENTRY ListHead,
* IN PKSPIN_LOCK Lock)
*/
-.global @ExInterlockedPopEntrySList@8
-.global @InterlockedPopEntrySList@4
-.global _ExpInterlockedPopEntrySListResume@0
-.global _ExpInterlockedPopEntrySListFault@0
-.global _ExpInterlockedPopEntrySListEnd@0
+PUBLIC @ExInterlockedPopEntrySList@8
+PUBLIC @InterlockedPopEntrySList@4
+PUBLIC _ExpInterlockedPopEntrySListResume@0
+PUBLIC _ExpInterlockedPopEntrySListFault@0
+PUBLIC _ExpInterlockedPopEntrySListEnd@0
@ExInterlockedPopEntrySList@8:
@InterlockedPopEntrySList@4:
/* Check if the list is empty */
or eax, eax
- jz 2f
+ jz .l7
/* Copy sequence number and adjust it */
lea ecx, [edx-1]
jnz _ExpInterlockedPopEntrySListResume@0
/* Restore registers and return */
-2:
+.l7:
pop ebp
pop ebx
ret
* IN PSINGLE_LIST_ENTRY ListEntry,
* &