2 #include <sys/callout.h>
3 #include <oskitfreebsd.h>
4 #include <oskitdebug.h>
8 struct callout
*callout
;
/*
 * Scheduler initialization hook for the FreeBSD glue layer.
 * NOTE(review): the function body is elided in the source seen here;
 * the next visible definition (tsleep) implies this closes immediately,
 * so it is reconstructed as empty -- confirm against the full file.
 */
void init_freebsd_sched() {
}
13 int tsleep( void *token
, int priority
, char *wmesg
, int tmio
) {
14 if( !OtcpEvent
.Sleep
) panic("no sleep");
16 OtcpEvent
.Sleep( OtcpEvent
.ClientData
, token
, priority
, wmesg
, tmio
);
19 void wakeup( struct socket
*so
, void *token
) {
23 (OSK_MID_TRACE
,("XXX Bytes to receive: %d state %x\n",
24 so
->so_rcv
.sb_cc
, so
->so_state
));
26 if( so
->so_state
& SS_ISCONNECTED
) {
27 OS_DbgPrint(OSK_MID_TRACE
,("Socket connected!\n"));
31 OS_DbgPrint(OSK_MID_TRACE
,("Socket accepting q\n"));
34 if( so
->so_rcv
.sb_cc
> 0 ) {
35 OS_DbgPrint(OSK_MID_TRACE
,("Socket readable\n"));
38 if( 0 < sbspace(&so
->so_snd
) ) {
39 OS_DbgPrint(OSK_MID_TRACE
,("Socket writeable\n"));
42 if( so
->so_state
& SS_CANTRCVMORE
) {
43 OS_DbgPrint(OSK_MID_TRACE
,("Socket can't be read any longer\n"));
47 OS_DbgPrint(OSK_MID_TRACE
,("Wakeup %x (socket %x, state %x)!\n",
51 if( OtcpEvent
.SocketState
)
52 OtcpEvent
.SocketState( OtcpEvent
.ClientData
,
54 so
? so
->so_connection
: 0,
57 if( OtcpEvent
.Wakeup
)
58 OtcpEvent
.Wakeup( OtcpEvent
.ClientData
, token
);
60 OS_DbgPrint(OSK_MID_TRACE
,("Wakeup done %x\n", token
));
63 /* ---------------------------------------------------------------------- */
71 callout
= (struct callout
*)
72 malloc(sizeof(struct callout
) * ncallout
, M_FREE
, M_WAITOK
);
74 panic("can't allocate callout queue!\n");
80 for (i
= 1; i
< ncallout
; i
++)
81 callout
[i
-1].c_next
= &callout
[i
];
/* get clock up and running */
/* inittodr(0); // what does this do? */
/* boottime = kern_time; */
/* Start a clock we can use for timeouts */
/* Interrupt-mask state shared with the spl*() priority-level emulation;
 * defined elsewhere in the project. */
extern unsigned bio_imask;          /* group of interrupts masked with splbio() */
extern unsigned cpl;                /* current priority level mask */
extern volatile unsigned idelayed;  /* interrupts to become pending */
extern volatile unsigned ipending;  /* active interrupts masked by cpl */
extern unsigned net_imask;          /* group of interrupts masked with splimp() */
extern unsigned stat_imask;         /* interrupts masked with splstatclock() */
extern unsigned tty_imask;          /* group of interrupts masked with spltty() */
/*
 * ipending has to be volatile so that it is read every time it is accessed
 * in splx() and spl0(), but we don't want it to be read nonatomically when
 * it is changed.  Pretending that ipending is a plain int happens to give
 * suitable atomic code for "ipending |= constant;".
 */
#define setdelayed()    (*(unsigned *)&ipending |= loadandclear(&idelayed))
#define setsoftast()    (*(unsigned *)&ipending |= SWI_AST_PENDING)
#define setsoftclock()  (*(unsigned *)&ipending |= SWI_CLOCK_PENDING)
#define setsoftnet()    (*(unsigned *)&ipending |= SWI_NET_PENDING)
#define setsofttty()    (*(unsigned *)&ipending |= SWI_TTY_PENDING)

/* Defer a TTY soft interrupt: latch it in idelayed rather than ipending. */
#define schedsofttty()  (*(unsigned *)&idelayed |= SWI_TTY_PENDING)
117 #define GENSPL(name, set_cpl) \
118 static __inline int name(void) \
122 __asm __volatile("" : : : "memory"); \
129 OS_DbgPrint(OSK_MID_TRACE
,("Called SPLZ\n"));
/* Functions to save and restore the current cpl. */
135 void save_cpl(unsigned *x
)
140 void restore_cpl(unsigned x
)