26 static fibre_t *atomic_runq_buf[8];
42 sizeof(atomic_runq_buf),
sizeof(atomic_runq_buf[0])),
46 static void handle_atomic_runq(
void)
56 static void handle_timerq(
void)
62 while (NULL != node &&
71 static fibre_t *get_next_task(
void)
80 static void update_current_state(
void)
82 kernel.current->state = kernel.state;
84 switch (kernel.state) {
102 static uint32_t get_next_wakeup(
void)
104 if (!messageq_empty(&kernel.atomic_runq) || !list_empty(&kernel.runq))
107 if (list_empty(&kernel.timerq))
114 static void add_taint(
char id)
117 assert(
id < 8*
sizeof(kernel.taint_flags));
118 atomic_fetch_or(&kernel.taint_flags, 1 <<
id);
131 return kernel.current;
145 !list_empty(&kernel.runq) ||
146 !list_empty(&kernel.timerq) ||
147 !messageq_empty(&kernel.atomic_runq)) {
148 handle_atomic_runq();
150 update_current_state();
152 kernel.current = get_next_task();
155 if (kernel.current) {
156 kernel.state = kernel.current->fn(kernel.current);
161 return get_next_wakeup();
166 memset(f, 0,
sizeof(*f));
175 handle_atomic_runq();
200 handle_atomic_runq();
213 kernel.current->duetime = duetime;
214 if (!
list_contains(&kernel.runq, &kernel.current->link, NULL))
220 void *basep,
size_t base_len,
size_t msg_len)
242 return messageq_empty(&evtq->
eventq);
#define containerof(ptr, type, member)
void messageq_init(messageq_t *mq, void *basep, size_t base_len, size_t msg_len)
list_node_t * list_iterate(list_t *list, list_iterator_t *iter)
void fibre_init(fibre_t *f, fibre_entrypoint_t *fn)
Dynamic initializer for a fibre descriptor.
void * fibre_eventq_claim(fibre_eventq_t *evtq)
Request memory resources to send an event to a fibre.
bool list_contains(list_t *list, list_node_t *node, list_iterator_t *iter)
void messageq_release(messageq_t *mq, void *msg)
uint32_t fibre_scheduler_next(uint32_t time)
Schedule the next fibre.
void * messageq_claim(messageq_t *mq)
bool fibre_eventq_empty(fibre_eventq_t *evtq)
Return true if the fibre's event queue is empty.
void fibre_eventq_init(fibre_eventq_t *evtq, fibre_entrypoint_t *fn, void *basep, size_t base_len, size_t msg_len)
Dynamic initializer for a fibre and eventq descriptor.
bool fibre_kill(fibre_t *f)
bool fibre_eventq_send(fibre_eventq_t *evtq, void *evtp)
Send an event to a fibre.
int fibre_entrypoint_t(struct fibre *)
void fibre_eventq_release(fibre_eventq_t *evtq, void *evtp)
Release a message previously received by a fibre.
void * fibre_eventq_receive(fibre_eventq_t *evtq)
Receive a message previously sent to the fibre.
void * messageq_receive(messageq_t *mq)
list_node_t * list_iterator_remove(list_iterator_t *iter)
int32_t cyclecmp32(uint32_t a, uint32_t b)
Compares values that may be subject to overflow.
void list_insert(list_t *list, list_node_t *node)
bool fibre_run_atomic(fibre_t *f)
#define FIBRE_UNBOUNDED_SLEEP
An approximation of infinitely far in the future.
#define MESSAGEQ_VAR_INIT(basep, base_len, msg_len)
Fibre and eventq descriptor.
fibre_t * fibre_self()
Returns the currently active fibre descriptor.
void list_insert_sorted(list_t *list, list_node_t *node, list_node_compare_t *nodecmp)
list_node_t * list_extract(list_t *list)
void messageq_send(messageq_t *mq, void *msg)
void fibre_run(fibre_t *f)
bool fibre_timeout(uint32_t duetime)
bool list_remove(list_t *list, list_node_t *node)