diff --git a/Makefile b/Makefile
index 23ef4d06..7644e410 100644
--- a/Makefile
+++ b/Makefile
@@ -29,7 +29,7 @@ deps += $(LIB_OBJS:%.o=%.o.d)
 APPS := coop echo hello mqueues semaphore mutex cond \
         pipes pipes_small pipes_struct prodcons progress \
         rtsched suspend test64 timer timer_kill \
-        cpubench test_libc
+        cpubench test_utils
 
 # Output files for __link target
 IMAGE_BASE := $(BUILD_DIR)/image
diff --git a/app/rtsched.c b/app/rtsched.c
index 56402814..ed142b03 100644
--- a/app/rtsched.c
+++ b/app/rtsched.c
@@ -404,12 +404,7 @@ static int32_t edf_sched(void)
     /* Scan all tasks to find the one with earliest deadline */
     list_node_t *node = list_next(kcb->tasks->head);
     while (node && node != kcb->tasks->tail) {
-        if (!node->data) {
-            node = list_next(node);
-            continue;
-        }
-
-        tcb_t *task = (tcb_t *) node->data;
+        tcb_t *task = tcb_from_global_node(node);
 
         /* Consider both READY and RUNNING RT tasks for preemptive scheduling */
         if ((task->state == TASK_READY || task->state == TASK_RUNNING) &&
diff --git a/app/test_libc.c b/app/test_utils.c
similarity index 87%
rename from app/test_libc.c
rename to app/test_utils.c
index bf8e34ca..3a798085 100644
--- a/app/test_libc.c
+++ b/app/test_utils.c
@@ -298,6 +298,49 @@ void test_mixed_formats(void)
     ASSERT_TEST(buf[test_strlen(buf)] == '\0', "Mixed format null termination");
 }
 
+/* Test 11: List helpers behavior */
+typedef struct {
+    int val;
+    list_node_t node;
+} list_node_item_t;
+
+void test_list_pushback_and_remove(void)
+{
+    list_t *list = list_create();
+
+    list_node_item_t first = {.node.next = NULL, .val = 1};
+    list_node_item_t second = {.node.next = NULL, .val = 2};
+    list_node_item_t third = {.node.next = NULL, .val = 3};
+
+    /* Push back unlinked nodes and verify insertion order is preserved */
+    list_pushback(list, &first.node);
+    ASSERT_TEST(list->length == 1, "Push back first node");
+
+    list_pushback(list, &second.node);
+    list_node_item_t *item =
+        container_of(list->head->next, list_node_item_t, node);
+    ASSERT_TEST(list->length == 2 && item->val == 1,
+                "Push back second node and order preserved");
+
+    list_pushback(list, &third.node);
+    item = container_of(list->head->next->next->next, list_node_item_t, node);
+    ASSERT_TEST(list->length == 3 && item->val == 3, "Push back third node");
+
+    /* Remove second node */
+    list_remove(list, &second.node);
+    item = container_of(list->head->next, list_node_item_t, node);
+    ASSERT_TEST(list->length == 2 && item->val == 1, "Remove second node");
+
+    /* Removing an already-removed node is a no-op, then pop the head */
+    list_remove(list, &second.node);
+    item = container_of(list_pop(list), list_node_item_t, node);
+    ASSERT_TEST(list->length == 1 && item->val == 1, "Pop node");
+
+    list_clear(list);
+    ASSERT_TEST(list_is_empty(list), "List is cleared");
+}
+
 void test_runner(void)
 {
     printf("\n=== LibC Test Suite ===\n");
@@ -313,6 +356,7 @@ void test_runner(void)
     test_buffer_boundaries();
     test_isr_safety();
     test_mixed_formats();
+    test_list_pushback_and_remove();
 
     printf("\n=== Test Summary ===\n");
     printf("Tests run: %d\n", tests_run);
diff --git a/app/timer.c b/app/timer.c
index 19f53466..4a1ce757 100644
--- a/app/timer.c
+++ b/app/timer.c
@@ -54,13 +54,22 @@ int32_t app_main(void)
     mo_timer_create(timer_callback, 1000, (void *) 1);
     mo_timer_create(timer_callback, 3000, (void *) 2);
     mo_timer_create(timer_callback, 500, (void *) 3);
+    mo_timer_create(timer_callback, 100, (void *) 4);
 
     /* Start all created timers in auto-reload mode.
      * Note: In this simple case, the IDs will be 0x6000, 0x6001, and 0x6002.
      */
     mo_timer_start(0x6000, TIMER_AUTORELOAD);
-    mo_timer_start(0x6001, TIMER_AUTORELOAD);
+    mo_timer_start(0x6001, TIMER_ONESHOT);
     mo_timer_start(0x6002, TIMER_AUTORELOAD);
+    mo_timer_start(0x6003, TIMER_AUTORELOAD);
+
+    /* Exercise timer destroy/cancel: from here on only timer 3 keeps running
+     * periodically and timer 2 fires a single shot */
+    mo_timer_destroy(0x6000);
+    mo_timer_cancel(0x6003);
+    /* A destroyed timer cannot be started again */
+    mo_timer_start(0x6000, TIMER_AUTORELOAD);
 
     /* Spawn a single idle task to keep the kernel running. */
     mo_task_spawn(idle_task, DEFAULT_STACK_SIZE);
diff --git a/arch/riscv/hal.c b/arch/riscv/hal.c
index 04ecc7d0..8ba2a4d6 100644
--- a/arch/riscv/hal.c
+++ b/arch/riscv/hal.c
@@ -653,7 +653,7 @@ void hal_switch_stack(void **old_sp, void *new_sp)
  */
 void hal_interrupt_tick(void)
 {
-    tcb_t *task = kcb->task_current->data;
+    tcb_t *task = tcb_from_global_node(kcb->task_current);
     if (unlikely(!task))
         hal_panic();
 
diff --git a/include/lib/list.h b/include/lib/list.h
index 298e6c83..bbeb1433 100644
--- a/include/lib/list.h
+++ b/include/lib/list.h
@@ -15,7 +15,6 @@
 /* List node */
 typedef struct list_node {
     struct list_node *next;
-    void *data;
 } list_node_t;
 
 /* Public list descriptor */
@@ -45,10 +44,8 @@ static inline list_t *list_create(void)
     }
 
     head->next = tail;
-    head->data = NULL;
     tail->next = NULL;
-    tail->data = NULL;
 
     list->head = head;
     list->tail = tail;
 
@@ -78,16 +75,11 @@ static inline list_node_t *list_cnext(const list_t *list,
 
 /* Push and pop */
 
-static inline list_node_t *list_pushback(list_t *list, void *data)
+static inline list_node_t *list_pushback(list_t *list, list_node_t *node)
 {
-    if (unlikely(!list))
-        return NULL;
-
-    list_node_t *node = malloc(sizeof(*node));
-    if (unlikely(!node))
+    if (unlikely(!list || !node || node->next))
         return NULL;
 
-    node->data = data;
     node->next = list->tail;
 
     /* Insert before tail sentinel */
@@ -100,22 +92,21 @@
     return node;
 }
 
-static inline void *list_pop(list_t *list)
+static inline list_node_t *list_pop(list_t *list)
 {
     if (unlikely(list_is_empty(list)))
         return NULL;
 
     list_node_t *first = list->head->next;
     list->head->next = first->next;
+    first->next = NULL;
 
-    void *data = first->data;
-    free(first);
     list->length--;
-    return data;
+    return first;
 }
 
-/* Remove a specific node; returns its data */
-static inline void *list_remove(list_t *list, list_node_t *target)
+/* Remove a specific node from the list */
+static inline list_node_t *list_remove(list_t *list, list_node_t *target)
 {
     if (unlikely(!list || !target || list_is_empty(list)))
         return NULL;
@@ -128,10 +119,9 @@
         return NULL; /* node not found */
 
     prev->next = target->next;
-    void *data = target->data;
-    free(target);
+    target->next = NULL;
     list->length--;
-    return data;
+    return target;
 }
 
 /* Iteration */
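
Since list_pushback() no longer allocates and list_pop()/list_remove() now return the node rather than a data pointer, callers embed a list_node_t in their own structure and recover it with container_of. A minimal usage sketch, assuming the reworked list.h/private/utils.h API above (my_item_t and the include paths are illustrative, not part of the patch):

/* Sketch only - not part of the patch. */
#include "lib/list.h"       /* reworked list API (assumed include path) */
#include "private/utils.h"  /* container_of (assumed include path) */

typedef struct {
    int value;
    list_node_t node; /* embedded node: storage lives inside the item */
} my_item_t;

static void intrusive_list_demo(void)
{
    list_t *q = list_create();
    my_item_t a = {.value = 42, .node.next = NULL};

    /* Pushback only links the caller-owned node; it returns NULL if the
     * node already looks linked (node->next != NULL). */
    list_pushback(q, &a.node);

    /* Pop returns the embedded node; container_of recovers the item. */
    list_node_t *n = list_pop(q);
    if (n) {
        my_item_t *item = container_of(n, my_item_t, node);
        (void) item->value; /* 42 */
    }
}
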
diff --git a/include/private/utils.h b/include/private/utils.h
index 9d9edd2f..29162c97 100644
--- a/include/private/utils.h
+++ b/include/private/utils.h
@@ -8,6 +8,28 @@
  */
 
 #include
+#include <stddef.h>
+
+/*
+ * container_of - get the pointer to the parent structure from a member pointer
+ *
+ * @ptr: pointer to the struct member
+ * @type: type of the parent structure
+ * @member: name of the member within the parent structure
+ *
+ * This macro computes the address of the parent structure by subtracting
+ * the member's offset within the structure.
+ */
+#define container_of(ptr, type, member) \
+    ((type *) ((char *) (ptr) - offsetof(type, member)))
+
+/* tcb list node helpers */
+#define tcb_from_global_node(p) container_of(p, tcb_t, global_node)
+#define tcb_from_mutex_node(p) container_of(p, tcb_t, mutex_node)
+
+/* timer list node helpers */
+#define timer_from_node(p) container_of(p, timer_t, t_node)
+#define timer_from_running_node(p) container_of(p, timer_t, t_running_node)
 
 /* Compiler Optimization Hints
  *
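
To make the pointer arithmetic concrete, here is a standalone sketch of how the helpers above recover the parent structure (item_t and the include paths are illustrative assumptions):

/* Sketch only - not part of the patch. */
#include <stddef.h>         /* offsetof */
#include "lib/list.h"       /* list_node_t (assumed include path) */
#include "private/utils.h"  /* container_of (assumed include path) */

typedef struct {
    int val;
    list_node_t node;
} item_t;

static item_t *item_from_node(list_node_t *p)
{
    /* Expands to (item_t *) ((char *) p - offsetof(item_t, node)):
     * step back from the member's address by the member's offset. */
    return container_of(p, item_t, node);
}
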
diff --git a/include/sys/task.h b/include/sys/task.h
index 0d3aaa4d..66319122 100644
--- a/include/sys/task.h
+++ b/include/sys/task.h
@@ -86,6 +86,10 @@ typedef struct tcb {
 
     /* Stack Protection */
     uint32_t canary; /* Random stack canary for overflow detection */
+
+    /* Embedded nodes */
+    list_node_t global_node; /* Global task list */
+    list_node_t mutex_node;  /* Mutex waiting list */
 } tcb_t;
 
 /* Kernel Control Block (KCB)
diff --git a/include/sys/timer.h b/include/sys/timer.h
index 397eb9dd..c37efc37 100644
--- a/include/sys/timer.h
+++ b/include/sys/timer.h
@@ -39,6 +39,10 @@ typedef struct {
     /* Callback Configuration */
     void *(*callback)(void *arg); /* Function to execute upon timer expiry */
     void *arg; /* User-defined argument passed to callback */
+
+    /* Embedded list nodes */
+    list_node_t t_node;         /* All-timers list node */
+    list_node_t t_running_node; /* Running-timers list node */
 } timer_t;
 
 /* Timer Management Functions */
diff --git a/kernel/main.c b/kernel/main.c
index ce0dc08a..45b96c63 100644
--- a/kernel/main.c
+++ b/kernel/main.c
@@ -63,7 +63,7 @@ int32_t main(void)
      * 'kcb->task_current' was set by the first call to mo_task_spawn.
      * This function transfers control and does not return.
      */
-    tcb_t *first_task = kcb->task_current->data;
+    tcb_t *first_task = tcb_from_global_node(kcb->task_current);
     if (!first_task)
         panic(ERR_NO_TASKS);
 
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 5ff9c8aa..a0be47db 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -45,20 +45,19 @@ static inline void cond_invalidate(cond_t *c)
  */
 static bool remove_self_from_waiters(list_t *waiters)
 {
-    if (unlikely(!waiters || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
+    if (unlikely(!waiters || !kcb || !kcb->task_current))
        return false;
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = tcb_from_global_node(kcb->task_current);
 
     /* Search for and remove self from waiters list */
-    list_node_t *curr = waiters->head->next;
-    while (curr && curr != waiters->tail) {
-        if (curr->data == self) {
-            list_remove(waiters, curr);
+    list_node_t *curr_mutex_node = waiters->head->next;
+    while (curr_mutex_node && curr_mutex_node != waiters->tail) {
+        if (tcb_from_mutex_node(curr_mutex_node) == self) {
+            list_remove(waiters, curr_mutex_node);
             return true;
         }
-        curr = curr->next;
+        curr_mutex_node = curr_mutex_node->next;
     }
     return false;
 }
@@ -66,14 +65,13 @@ static bool remove_self_from_waiters(list_t *waiters)
 /* Atomic block operation with enhanced error checking */
 static void mutex_block_atomic(list_t *waiters)
 {
-    if (unlikely(!waiters || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
+    if (unlikely(!waiters || !kcb || !kcb->task_current))
         panic(ERR_SEM_OPERATION);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = tcb_from_global_node(kcb->task_current);
 
     /* Add to waiters list */
-    if (unlikely(!list_pushback(waiters, self)))
+    if (unlikely(!list_pushback(waiters, &self->mutex_node)))
         panic(ERR_SEM_OPERATION);
 
     /* Block and yield atomically */
@@ -218,8 +216,8 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
     }
 
     /* Slow path: must block with timeout using delay mechanism */
-    tcb_t *self = kcb->task_current->data;
-    if (unlikely(!list_pushback(m->waiters, self))) {
+    tcb_t *self = tcb_from_global_node(kcb->task_current);
+    if (unlikely(!list_pushback(m->waiters, &self->mutex_node))) {
         NOSCHED_LEAVE();
         panic(ERR_SEM_OPERATION);
     }
@@ -277,7 +275,8 @@ int32_t mo_mutex_unlock(mutex_t *m)
         m->owner_tid = 0;
     } else {
         /* Transfer ownership to next waiter (FIFO) */
-        tcb_t *next_owner = (tcb_t *) list_pop(m->waiters);
+        list_node_t *node = list_pop(m->waiters);
+        tcb_t *next_owner = node ? tcb_from_mutex_node(node) : NULL;
         if (likely(next_owner)) {
             /* Validate task state before waking */
             if (likely(next_owner->state == TASK_BLOCKED)) {
@@ -378,11 +377,11 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m)
     if (unlikely(!mo_mutex_owned_by_current(m)))
         return ERR_NOT_OWNER;
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = tcb_from_global_node(kcb->task_current);
 
     /* Atomically add to wait list */
     NOSCHED_ENTER();
-    if (unlikely(!list_pushback(c->waiters, self))) {
+    if (unlikely(!list_pushback(c->waiters, &self->mutex_node))) {
         NOSCHED_LEAVE();
         panic(ERR_SEM_OPERATION);
     }
@@ -420,11 +419,11 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
         return ERR_TIMEOUT;
     }
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = tcb_from_global_node(kcb->task_current);
 
     /* Atomically add to wait list with timeout */
     NOSCHED_ENTER();
-    if (unlikely(!list_pushback(c->waiters, self))) {
+    if (unlikely(!list_pushback(c->waiters, &self->mutex_node))) {
         NOSCHED_LEAVE();
         panic(ERR_SEM_OPERATION);
     }
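
One pitfall that comes with the conversion: container_of() applied to NULL does not yield NULL, so a popped node must be checked before it is converted (as done for the FIFO hand-off above). A hypothetical guard helper, shown only to illustrate the pattern:

/* Hypothetical helper, not part of the patch. */
static inline tcb_t *tcb_from_mutex_node_or_null(list_node_t *n)
{
    /* container_of(NULL, ...) would produce a bogus near-NULL pointer,
     * so preserve NULL explicitly. */
    return n ? tcb_from_mutex_node(n) : NULL;
}

/* usage: tcb_t *next = tcb_from_mutex_node_or_null(list_pop(m->waiters)); */
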
diff --git a/kernel/task.c b/kernel/task.c
index 1304a1ee..070cb149 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -145,10 +145,10 @@ static void task_stack_check(void)
     if (!should_check)
         return;
 
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !kcb->task_current))
         panic(ERR_STACK_CHECK);
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = tcb_from_global_node(kcb->task_current);
     if (unlikely(!is_valid_task(self)))
         panic(ERR_STACK_CHECK);
 
@@ -171,10 +171,10 @@ static void task_stack_check(void)
 static list_node_t *delay_update_batch(list_node_t *node, void *arg)
 {
     uint32_t *ready_count = (uint32_t *) arg;
-    if (unlikely(!node || !node->data))
+    if (unlikely(!node))
         return NULL;
 
-    tcb_t *t = node->data;
+    tcb_t *t = tcb_from_global_node(node);
 
     /* Skip non-blocked tasks (common case) */
     if (likely(t->state != TASK_BLOCKED))
@@ -249,10 +249,10 @@ static inline void process_deferred_timer_work(void)
 static list_node_t *delay_update(list_node_t *node, void *arg)
 {
     (void) arg;
-    if (unlikely(!node || !node->data))
+    if (unlikely(!node))
         return NULL;
 
-    tcb_t *t = node->data;
+    tcb_t *t = tcb_from_global_node(node);
 
     /* Skip non-blocked tasks (common case) */
     if (likely(t->state != TASK_BLOCKED))
@@ -270,16 +270,14 @@ static list_node_t *delay_update(list_node_t *node, void *arg)
 /* Task search callbacks for finding tasks in the master list. */
 static list_node_t *idcmp(list_node_t *node, void *arg)
 {
-    return (node && node->data &&
-            ((tcb_t *) node->data)->id == (uint16_t) (size_t) arg)
+    return (node && tcb_from_global_node(node)->id == (uint16_t) (size_t) arg)
                ? node
                : NULL;
 }
 
 static list_node_t *refcmp(list_node_t *node, void *arg)
 {
-    return (node && node->data && ((tcb_t *) node->data)->entry == arg) ? node
-                                                                        : NULL;
+    return (node && tcb_from_global_node(node)->entry == arg) ? node : NULL;
 }
 
 /* Task lookup with caching */
@@ -295,7 +293,7 @@ static list_node_t *find_task_node_by_id(uint16_t id)
          */
         list_node_t *node = kcb->tasks->head->next;
         while (node != kcb->tasks->tail) {
-            if (node->data == cached)
+            if (tcb_from_global_node(node) == cached)
                 return node;
             node = node->next;
         }
@@ -303,8 +301,8 @@ static list_node_t *find_task_node_by_id(uint16_t id)
 
     /* Fall back to full search and update cache */
     list_node_t *node = list_foreach(kcb->tasks, idcmp, (void *) (size_t) id);
-    if (node && node->data)
-        cache_task(id, (tcb_t *) node->data);
+    if (node)
+        cache_task(id, tcb_from_global_node(node));
 
     return node;
 }
@@ -382,10 +380,10 @@ void sched_dequeue_task(tcb_t *task)
 /* Handle time slice expiration for current task */
 void sched_tick_current_task(void)
 {
-    if (unlikely(!kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb->task_current))
         return;
 
-    tcb_t *current_task = kcb->task_current->data;
+    tcb_t *current_task = tcb_from_global_node(kcb->task_current);
 
     /* Decrement time slice */
     if (current_task->time_slice > 0)
@@ -434,10 +432,10 @@ void sched_wakeup_task(tcb_t *task)
  */
 uint16_t sched_select_next_task(void)
 {
-    if (unlikely(!kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb->task_current))
         panic(ERR_NO_TASKS);
 
-    tcb_t *current_task = kcb->task_current->data;
+    tcb_t *current_task = tcb_from_global_node(kcb->task_current);
 
     /* Mark current task as ready if it was running */
     if (current_task->state == TASK_RUNNING)
@@ -451,10 +449,10 @@ uint16_t sched_select_next_task(void)
     do {
         /* Move to next task (circular) */
         node = list_cnext(kcb->tasks, node);
-        if (!node || !node->data)
+        if (!node)
             continue;
 
-        tcb_t *task = node->data;
+        tcb_t *task = tcb_from_global_node(node);
 
         /* Skip non-ready tasks */
         if (task->state != TASK_READY)
@@ -480,9 +478,9 @@ uint16_t sched_select_next_task(void)
      * it if blocked) */
     list_node_t *any_node = list_next(kcb->tasks->head);
     while (any_node && any_node != kcb->tasks->tail) {
-        if (any_node->data) {
+        if (any_node) {
             kcb->task_current = any_node;
-            tcb_t *any_task = any_node->data;
+            tcb_t *any_task = tcb_from_global_node(any_node);
             return any_task->id;
         }
         any_node = list_next(any_node);
@@ -524,7 +522,7 @@ void dispatcher(int from_timer)
 /* Top-level context-switch for preemptive scheduling. */
 void dispatch(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !kcb->task_current))
         panic(ERR_NO_TASKS);
 
     /* Save current context - only needed for cooperative mode.
@@ -533,7 +531,8 @@ void dispatch(void)
      */
     if (!kcb->preemptive) {
         /* Cooperative mode: use setjmp/longjmp mechanism */
-        if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0)
+        if (hal_context_save(
+                tcb_from_global_node(kcb->task_current)->context) != 0)
             return;
     }
 
@@ -555,7 +554,7 @@ void dispatch(void)
     }
 
     /* Hook for real-time scheduler - if it selects a task, use it */
-    tcb_t *prev_task = kcb->task_current->data;
+    tcb_t *prev_task = tcb_from_global_node(kcb->task_current);
     int32_t rt_task_id = kcb->rt_sched();
 
     if (rt_task_id < 0) {
@@ -563,12 +562,12 @@ void dispatch(void)
     } else {
         /* RT scheduler selected a task - update current task pointer */
         list_node_t *rt_node = find_task_node_by_id((uint16_t) rt_task_id);
-        if (rt_node && rt_node->data) {
-            tcb_t *rt_task = rt_node->data;
+        if (rt_node) {
+            tcb_t *rt_task = tcb_from_global_node(rt_node);
             /* Different task - perform context switch */
             if (rt_node != kcb->task_current) {
-                if (kcb->task_current && kcb->task_current->data) {
-                    tcb_t *prev = kcb->task_current->data;
+                if (kcb->task_current) {
+                    tcb_t *prev = tcb_from_global_node(kcb->task_current);
                     if (prev->state == TASK_RUNNING)
                         prev->state = TASK_READY;
                 }
@@ -587,7 +586,7 @@ void dispatch(void)
     }
 
     /* Check if we're still on the same task (no actual switch needed) */
-    tcb_t *next_task = kcb->task_current->data;
+    tcb_t *next_task = tcb_from_global_node(kcb->task_current);
 
     /* In preemptive mode, if selected task has pending delay, keep trying to
      * find ready task. We check delay > 0 instead of state == BLOCKED because
@@ -598,9 +597,9 @@ void dispatch(void)
     while (next_task->delay > 0 && attempts < 10) {
         /* Try next task in round-robin */
         kcb->task_current = list_cnext(kcb->tasks, kcb->task_current);
-        if (!kcb->task_current || !kcb->task_current->data)
+        if (!kcb->task_current)
             kcb->task_current = list_next(kcb->tasks->head);
-        next_task = kcb->task_current->data;
+        next_task = tcb_from_global_node(kcb->task_current);
         attempts++;
     }
 
@@ -641,7 +640,7 @@ void dispatch(void)
 /* Cooperative context switch */
 void yield(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !kcb->task_current))
         return;
 
     /* Process deferred timer work during yield */
@@ -665,7 +664,7 @@ void yield(void)
     }
 
     /* Cooperative mode: use setjmp/longjmp mechanism */
-    if (hal_context_save(((tcb_t *) kcb->task_current->data)->context) != 0)
+    if (hal_context_save(tcb_from_global_node(kcb->task_current)->context) != 0)
         return;
 
 #if CONFIG_STACK_PROTECTION
@@ -676,7 +675,7 @@ void yield(void)
     list_foreach(kcb->tasks, delay_update, NULL);
     sched_select_next_task(); /* Use O(1) priority scheduler */
 
-    hal_context_restore(((tcb_t *) kcb->task_current->data)->context, 1);
+    hal_context_restore(tcb_from_global_node(kcb->task_current)->context, 1);
 }
 
 /* Stack initialization with minimal overhead */
@@ -734,6 +733,10 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     tcb->state = TASK_STOPPED;
     tcb->flags = 0;
 
+    /* Initialize embedded list nodes */
+    tcb->global_node.next = NULL;
+    tcb->mutex_node.next = NULL;
+
     /* Set default priority with proper scheduler fields */
     tcb->prio = TASK_PRIO_NORMAL;
     tcb->prio_level = extract_priority_level(TASK_PRIO_NORMAL);
@@ -758,20 +761,15 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
         }
     }
 
-    list_node_t *node = list_pushback(kcb->tasks, tcb);
-    if (!node) {
-        CRITICAL_LEAVE();
-        free(tcb->stack);
-        free(tcb);
-        panic(ERR_TCB_ALLOC);
-    }
+    list_pushback(kcb->tasks, &tcb->global_node);
+
     /* Assign unique ID and update counts */
     tcb->id = kcb->next_tid++;
     kcb->task_count++; /* Cached count of active tasks for quick access */
 
     if (!kcb->task_current)
-        kcb->task_current = node;
+        kcb->task_current = &tcb->global_node;
 
     CRITICAL_LEAVE();
 
@@ -808,7 +806,7 @@ int32_t mo_task_cancel(uint16_t id)
         return ERR_TASK_NOT_FOUND;
     }
 
-    tcb_t *tcb = node->data;
+    tcb_t *tcb = tcb_from_global_node(node);
     if (!tcb || tcb->state == TASK_RUNNING) {
         CRITICAL_LEAVE();
         return ERR_TASK_CANT_REMOVE;
     }
@@ -848,12 +846,12 @@ void mo_task_delay(uint16_t ticks)
         return;
 
     NOSCHED_ENTER();
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
+    if (unlikely(!kcb || !kcb->task_current)) {
         NOSCHED_LEAVE();
         return;
     }
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = tcb_from_global_node(kcb->task_current);
 
     /* Set delay and blocked state - scheduler will skip blocked tasks */
     self->delay = ticks;
@@ -875,7 +873,7 @@ int32_t mo_task_suspend(uint16_t id)
         return ERR_TASK_NOT_FOUND;
     }
 
-    tcb_t *task = node->data;
+    tcb_t *task = tcb_from_global_node(node);
     if (!task || (task->state != TASK_READY && task->state != TASK_RUNNING &&
                   task->state != TASK_BLOCKED)) {
         CRITICAL_LEAVE();
@@ -883,7 +881,7 @@ int32_t mo_task_suspend(uint16_t id)
     }
 
     task->state = TASK_SUSPENDED;
-    bool is_current = (kcb->task_current->data == task);
+    bool is_current = (kcb->task_current == node);
 
     CRITICAL_LEAVE();
 
@@ -905,7 +903,7 @@ int32_t mo_task_resume(uint16_t id)
         return ERR_TASK_NOT_FOUND;
     }
 
-    tcb_t *task = node->data;
+    tcb_t *task = tcb_from_global_node(node);
     if (!task || task->state != TASK_SUSPENDED) {
         CRITICAL_LEAVE();
         return ERR_TASK_CANT_RESUME;
     }
@@ -930,7 +928,7 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
         return ERR_TASK_NOT_FOUND;
     }
 
-    tcb_t *task = node->data;
+    tcb_t *task = tcb_from_global_node(node);
     if (!task) {
         CRITICAL_LEAVE();
         return ERR_TASK_NOT_FOUND;
     }
@@ -957,7 +955,7 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority)
         return ERR_TASK_NOT_FOUND;
     }
 
-    tcb_t *task = node->data;
+    tcb_t *task = tcb_from_global_node(node);
     if (!task) {
         CRITICAL_LEAVE();
         return ERR_TASK_NOT_FOUND;
     }
@@ -971,9 +969,9 @@
 uint16_t mo_task_id(void)
 {
-    if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data))
+    if (unlikely(!kcb || !kcb->task_current))
         return 0;
-    return ((tcb_t *) kcb->task_current->data)->id;
+    return tcb_from_global_node(kcb->task_current)->id;
 }
 
 int32_t mo_task_idref(void *task_entry)
@@ -985,7 +983,7 @@ int32_t mo_task_idref(void *task_entry)
     list_node_t *node = list_foreach(kcb->tasks, refcmp, task_entry);
     CRITICAL_LEAVE();
 
-    return node ? ((tcb_t *) node->data)->id : ERR_TASK_NOT_FOUND;
+    return node ? tcb_from_global_node(node)->id : ERR_TASK_NOT_FOUND;
 }
 
 void mo_task_wfi(void)
@@ -1025,14 +1023,13 @@ uint64_t mo_uptime(void)
 
 void _sched_block(queue_t *wait_q)
 {
-    if (unlikely(!wait_q || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
+    if (unlikely(!wait_q || !kcb || !kcb->task_current))
         panic(ERR_SEM_OPERATION);
 
     /* Process deferred timer work before blocking */
     process_deferred_timer_work();
 
-    tcb_t *self = kcb->task_current->data;
+    tcb_t *self = tcb_from_global_node(kcb->task_current);
 
     if (queue_enqueue(wait_q, self) != 0)
         panic(ERR_SEM_OPERATION);
diff --git a/kernel/timer.c b/kernel/timer.c
index f3cc08aa..310d6cf6 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -16,10 +16,10 @@
 #include "private/error.h"
 #include "private/utils.h"
 
-/* Pre-allocated node pool for reduced malloc/free overhead */
+/* Pre-allocated timer pool for reduced malloc/free overhead */
 #define TIMER_NODE_POOL_SIZE 16
-static list_node_t timer_node_pool[TIMER_NODE_POOL_SIZE];
-static uint16_t pool_free_mask = 0xFFFF; /* Bitmask for free nodes */
+static timer_t timer_pool[TIMER_NODE_POOL_SIZE];
+static uint16_t pool_free_mask = 0xFFFF; /* Bitmask for free timers */
 
 /* Master list of all created timers, kept sorted by ID for faster lookup */
 static list_t *all_timers_list = NULL;
@@ -32,30 +32,27 @@ static struct {
 } timer_cache[4];
 static uint8_t timer_cache_index = 0;
 
-/* Get a node from the pool, fall back to malloc if pool is empty */
-static list_node_t *get_timer_node(void)
+/* Get a timer from the pool */
+static timer_t *get_timer(void)
 {
     /* Find first free node in pool */
     for (int i = 0; i < TIMER_NODE_POOL_SIZE; i++) {
         if (pool_free_mask & (1 << i)) {
             pool_free_mask &= ~(1 << i);
-            return &timer_node_pool[i];
+            return &timer_pool[i];
         }
     }
-    /* Pool exhausted, fall back to malloc */
-    return malloc(sizeof(list_node_t));
+    /* Pool exhausted */
+    return NULL;
 }
 
-/* Return a node to the pool, or free if it's not from pool */
-static void return_timer_node(list_node_t *node)
+/* Return a timer to the pool by marking its slot free */
+static void return_timer(timer_t *timer)
 {
     /* Check if node is from our pool */
-    if (node >= timer_node_pool &&
-        node < timer_node_pool + TIMER_NODE_POOL_SIZE) {
-        int index = node - timer_node_pool;
+    if (timer >= timer_pool && timer < timer_pool + TIMER_NODE_POOL_SIZE) {
+        int index = (timer - &timer_pool[0]);
         pool_free_mask |= (1 << index);
-    } else {
-        free(node);
     }
 }
 
@@ -105,10 +102,10 @@ static int32_t timer_subsystem_init(void)
         return ERR_FAIL;
     }
 
-    /* Initialize node pool */
+    /* Initialize timer pool */
+
     for (int i = 0; i < TIMER_NODE_POOL_SIZE; i++) {
-        timer_node_pool[i].data = NULL;
-        timer_node_pool[i].next = NULL;
+        memset(&timer_pool[i], 0, sizeof(timer_t));
     }
 
     timer_initialized = true;
@@ -116,55 +113,40 @@
     return ERR_OK;
 }
 
-/* Fast removal of timer from active list by data pointer */
-static void timer_remove_item_by_data(list_t *list, void *data)
+/* Fast removal of timer from active list */
+static void timer_remove_from_running_list(list_t *list, timer_t *t)
 {
     if (unlikely(!list || list_is_empty(list)))
         return;
 
-    list_node_t *prev = list->head;
-    list_node_t *curr = prev->next;
-
-    while (curr != list->tail) {
-        if (curr->data == data) {
-            prev->next = curr->next;
-            return_timer_node(curr);
-            list->length--;
-            return;
-        }
-        prev = curr;
-        curr = curr->next;
-    }
+    list_remove(list, &t->t_running_node);
 }
 
-/* Sorted insert with early termination for common cases */
-static int32_t timer_sorted_insert(timer_t *timer)
+/* Insert timer into the running timer list */
+static int32_t timer_sorted_insert_running_list(timer_t *timer)
 {
-    list_node_t *new_node = get_timer_node();
-    if (unlikely(!new_node))
+    if (unlikely(!timer || timer->t_running_node.next))
         return ERR_FAIL;
 
-    new_node->data = timer;
-
     /* Fast path: if list is empty or timer should go at end */
     list_node_t *prev = kcb->timer_list->head;
     if (prev->next == kcb->timer_list->tail) {
         /* Empty list */
-        new_node->next = kcb->timer_list->tail;
-        prev->next = new_node;
+        timer->t_running_node.next = kcb->timer_list->tail;
+        prev->next = &timer->t_running_node;
         kcb->timer_list->length++;
         return ERR_OK;
     }
 
     /* Find insertion point */
     while (prev->next != kcb->timer_list->tail) {
-        timer_t *current_timer = (timer_t *) prev->next->data;
+        timer_t *current_timer = timer_from_running_node(prev->next);
         if (timer->deadline_ticks < current_timer->deadline_ticks)
             break;
         prev = prev->next;
     }
 
-    new_node->next = prev->next;
-    prev->next = new_node;
+    timer->t_running_node.next = prev->next;
+    prev->next = &timer->t_running_node;
     kcb->timer_list->length++;
     return ERR_OK;
 }
@@ -183,7 +165,7 @@ static timer_t *timer_find_by_id_fast(uint16_t id)
     /* Linear search for now - could be optimized to binary search if needed */
     list_node_t *node = all_timers_list->head->next;
     while (node != all_timers_list->tail) {
-        timer_t *timer = (timer_t *) node->data;
+        timer_t *timer = timer_from_node(node);
         if (timer->id == id) {
             cache_timer(id, timer);
             return timer;
         }
@@ -204,7 +186,7 @@ static list_node_t *timer_find_node_by_id(uint16_t id)
 
     list_node_t *node = all_timers_list->head->next;
     while (node != all_timers_list->tail) {
-        if (((timer_t *) node->data)->id == id)
+        if (timer_from_node(node)->id == id)
             return node;
         node = node->next;
     }
@@ -219,22 +201,21 @@ void _timer_tick_handler(void)
     if (unlikely(!timer_initialized || !kcb->timer_list ||
                  list_is_empty(kcb->timer_list)))
         return;
 
     uint32_t now = mo_ticks();
-    timer_t *expired_timers[TIMER_BATCH_SIZE]; /* Smaller batch size */
+    list_node_t *
+        expired_timers_running_nodes[TIMER_BATCH_SIZE]; /* Smaller batch size */
     int expired_count = 0;
 
     /* Collect expired timers in one pass, limited to batch size */
     while (!list_is_empty(kcb->timer_list) &&
            expired_count < TIMER_BATCH_SIZE) {
         list_node_t *node = kcb->timer_list->head->next;
-        timer_t *t = (timer_t *) node->data;
+        timer_t *t = timer_from_running_node(node);
 
         if (now >= t->deadline_ticks) {
-            expired_timers[expired_count++] = t;
-            kcb->timer_list->head->next = node->next;
-            kcb->timer_list->length--;
-            return_timer_node(node);
+            expired_timers_running_nodes[expired_count++] =
+                list_pop(kcb->timer_list);
         } else {
             /* First timer not expired, so none further down are */
             break;
@@ -243,7 +224,8 @@ void _timer_tick_handler(void)
 
     /* Process all expired timers */
     for (int i = 0; i < expired_count; i++) {
-        timer_t *t = expired_timers[i];
+        list_node_t *expired_running_node = expired_timers_running_nodes[i];
+        timer_t *t = timer_from_running_node(expired_running_node);
 
         /* Execute callback */
         if (likely(t->callback))
@@ -254,7 +236,8 @@ void _timer_tick_handler(void)
             /* Calculate next expected fire tick to prevent cumulative error */
             t->last_expected_fire_tick += MS_TO_TICKS(t->period_ms);
             t->deadline_ticks = t->last_expected_fire_tick;
-            timer_sorted_insert(t); /* Re-insert for next expiration */
+            /* Re-insert for next expiration */
+            timer_sorted_insert_running_list(t);
         } else {
             t->mode = TIMER_DISABLED; /* One-shot timers are done */
         }
@@ -262,26 +245,24 @@ void _timer_tick_handler(void)
 }
 
 /* Insert timer into sorted position in all_timers_list */
-static int32_t timer_insert_sorted_by_id(timer_t *timer)
+static void timer_insert_sorted_timer_list(timer_t *timer)
 {
-    list_node_t *new_node = get_timer_node();
-    if (unlikely(!new_node))
-        return ERR_FAIL;
-    new_node->data = timer;
+    if (unlikely(!timer || timer->t_node.next))
+        return;
 
     /* Find insertion point to maintain ID sort order */
     list_node_t *prev = all_timers_list->head;
     while (prev->next != all_timers_list->tail) {
-        timer_t *current = (timer_t *) prev->next->data;
+        timer_t *current = timer_from_node(prev->next);
         if (timer->id < current->id)
             break;
         prev = prev->next;
     }
 
-    new_node->next = prev->next;
-    prev->next = new_node;
+    timer->t_node.next = prev->next;
+    prev->next = &timer->t_node;
     all_timers_list->length++;
-    return ERR_OK;
+    return;
 }
 
 int32_t mo_timer_create(void *(*callback)(void *arg),
@@ -295,8 +276,9 @@ int32_t mo_timer_create(void *(*callback)(void *arg),
     if (unlikely(timer_subsystem_init() != ERR_OK))
         return ERR_FAIL;
 
-    timer_t *t = malloc(sizeof(timer_t));
-    if (unlikely(!t))
+    /* Try to get a static timer from the pool */
+    timer_t *t = get_timer();
+    if (!t)
         return ERR_FAIL;
 
     NOSCHED_ENTER();
@@ -310,13 +292,11 @@ int32_t mo_timer_create(void *(*callback)(void *arg),
     t->last_expected_fire_tick = 0;
     t->mode = TIMER_DISABLED;
     t->_reserved = 0;
+    t->t_node.next = NULL;
+    t->t_running_node.next = NULL;
 
     /* Insert into sorted all_timers_list */
-    if (unlikely(timer_insert_sorted_by_id(t) != ERR_OK)) {
-        NOSCHED_LEAVE();
-        free(t);
-        return ERR_FAIL;
-    }
+    timer_insert_sorted_timer_list(t);
 
     /* Add to cache */
     cache_timer(t->id, t);
@@ -338,11 +318,11 @@ int32_t mo_timer_destroy(uint16_t id)
         return ERR_FAIL;
     }
 
-    timer_t *t = (timer_t *) node->data;
+    timer_t *t = timer_from_node(node);
 
     /* Remove from active list if running */
     if (t->mode != TIMER_DISABLED)
-        timer_remove_item_by_data(kcb->timer_list, t);
+        timer_remove_from_running_list(kcb->timer_list, t);
 
     /* Remove from cache */
     for (int i = 0; i < 4; i++) {
@@ -362,9 +342,7 @@ int32_t mo_timer_destroy(uint16_t id)
         all_timers_list->length--;
     }
 
-    free(t);
-    return_timer_node(node);
-
+    return_timer(t);
     NOSCHED_LEAVE();
     return ERR_OK;
 }
@@ -386,18 +364,14 @@ int32_t mo_timer_start(uint16_t id, uint8_t mode)
 
     /* Remove from active list if already running */
     if (t->mode != TIMER_DISABLED)
-        timer_remove_item_by_data(kcb->timer_list, t);
+        timer_remove_from_running_list(kcb->timer_list, t);
 
     /* Configure and start timer */
     t->mode = mode;
     t->last_expected_fire_tick = mo_ticks() + MS_TO_TICKS(t->period_ms);
     t->deadline_ticks = t->last_expected_fire_tick;
 
-    if (unlikely(timer_sorted_insert(t) != ERR_OK)) {
-        t->mode = TIMER_DISABLED;
-        NOSCHED_LEAVE();
-        return ERR_FAIL;
-    }
+    timer_sorted_insert_running_list(t);
 
     NOSCHED_LEAVE();
     return ERR_OK;
@@ -416,7 +390,7 @@ int32_t mo_timer_cancel(uint16_t id)
         return ERR_FAIL;
     }
 
-    timer_remove_item_by_data(kcb->timer_list, t);
+    timer_remove_from_running_list(kcb->timer_list, t);
     t->mode = TIMER_DISABLED;
 
     NOSCHED_LEAVE();
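
The timer rework above replaces per-node malloc/free with a fixed array of timer_t slots tracked by a free bitmask. A condensed, standalone sketch of that allocation scheme, with illustrative names (not the kernel's):

/* Sketch only - not part of the patch. */
#include <stdint.h>
#include <stddef.h>

#define POOL_SIZE 16

typedef struct {
    uint32_t period_ms; /* stand-in for the real timer fields */
} obj_t;

static obj_t pool[POOL_SIZE];
static uint16_t free_mask = 0xFFFF; /* bit i set => pool[i] is free */

static obj_t *pool_get(void)
{
    for (int i = 0; i < POOL_SIZE; i++) {
        if (free_mask & (1u << i)) {
            free_mask &= ~(1u << i); /* claim slot i */
            return &pool[i];
        }
    }
    return NULL; /* pool exhausted: caller must handle the failure */
}

static void pool_put(obj_t *o)
{
    if (o >= pool && o < pool + POOL_SIZE)
        free_mask |= (1u << (o - pool)); /* release slot */
}
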