
Commit 60825f3

Use ready queue helpers in mutex and semaphore path
Invoke the _sched_block_dequeue() and _sched_block_enqueue() helpers for every transition into or out of the TASK_BLOCKED state. This keeps the scheduler's ready queue consistent with mutex/semaphore blocking semantics.
1 parent a7ace5e commit 60825f3
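
The two helpers themselves are not part of this diff. A minimal, self-contained sketch of the semantics the change assumes for them follows; everything except _sched_block_enqueue(), _sched_block_dequeue(), TASK_BLOCKED, and TASK_READY is an illustrative stand-in, not the kernel's actual types.

/* Sketch only: illustrative stand-ins for the scheduler's ready-queue
 * helpers.  _sched_block_enqueue()/_sched_block_dequeue(), TASK_BLOCKED
 * and TASK_READY are the names used by the commit; the tcb_t layout and
 * the intrusive list are hypothetical. */
#include <stddef.h>

typedef enum { TASK_READY, TASK_RUNNING, TASK_BLOCKED } task_state_t;

typedef struct tcb {
    task_state_t state;
    struct tcb *next, *prev; /* ready-queue links (illustrative) */
} tcb_t;

/* Circular sentinel node standing in for the scheduler's ready queue. */
static tcb_t ready_queue = { TASK_READY, &ready_queue, &ready_queue };

/* Unlink a task that is entering TASK_BLOCKED from the ready queue, so the
 * scheduler never selects a blocked task.  Caller holds NOSCHED. */
static void _sched_block_dequeue(tcb_t *t)
{
    t->prev->next = t->next;
    t->next->prev = t->prev;
    t->next = t->prev = NULL;
}

/* Wake a blocked task: mark it TASK_READY and relink it at the queue tail.
 * This replaces the bare 'state = TASK_READY' assignments in the hunks below. */
static void _sched_block_enqueue(tcb_t *t)
{
    t->state = TASK_READY;
    t->next = &ready_queue;
    t->prev = ready_queue.prev;
    ready_queue.prev->next = t;
    ready_queue.prev = t;
}

Centralizing the state flip in _sched_block_enqueue() means a task can never be marked TASK_READY without also being visible on the ready queue, which is the consistency issue the commit message refers to.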

2 files changed: +11 -6 lines changed

kernel/mutex.c

Lines changed: 10 additions & 5 deletions
@@ -78,6 +78,9 @@ static void mutex_block_atomic(list_t *waiters)
 
     /* Block and yield atomically */
     self->state = TASK_BLOCKED;
+
+    /* Explicitly remove the list node from the ready queue */
+    _sched_block_dequeue(self);
     _yield(); /* This releases NOSCHED when we context switch */
 }
 
@@ -227,6 +230,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
     /* Set up timeout using task delay mechanism */
     self->delay = ticks;
     self->state = TASK_BLOCKED;
+    _sched_block_dequeue(self);
 
     NOSCHED_LEAVE();
 
@@ -282,7 +286,7 @@ int32_t mo_mutex_unlock(mutex_t *m)
     /* Validate task state before waking */
     if (likely(next_owner->state == TASK_BLOCKED)) {
         m->owner_tid = next_owner->id;
-        next_owner->state = TASK_READY;
+        _sched_block_enqueue(next_owner);
         /* Clear any pending timeout since we're granting ownership */
         next_owner->delay = 0;
     } else {
@@ -395,7 +399,7 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m)
         /* Failed to unlock - remove from wait list and restore state */
         NOSCHED_ENTER();
         remove_self_from_waiters(c->waiters);
-        self->state = TASK_READY;
+        _sched_block_enqueue(self);
         NOSCHED_LEAVE();
         return unlock_result;
     }
@@ -430,6 +434,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
     }
     self->delay = ticks;
     self->state = TASK_BLOCKED;
+    _sched_block_dequeue(self);
     NOSCHED_LEAVE();
 
     /* Release mutex */
@@ -438,7 +443,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
         /* Failed to unlock - cleanup and restore */
         NOSCHED_ENTER();
         remove_self_from_waiters(c->waiters);
-        self->state = TASK_READY;
+        _sched_block_enqueue(self);
         self->delay = 0;
         NOSCHED_LEAVE();
         return unlock_result;
@@ -483,7 +488,7 @@ int32_t mo_cond_signal(cond_t *c)
     if (likely(waiter)) {
         /* Validate task state before waking */
         if (likely(waiter->state == TASK_BLOCKED)) {
-            waiter->state = TASK_READY;
+            _sched_block_enqueue(waiter);
             /* Clear any pending timeout since we're signaling */
            waiter->delay = 0;
         } else {
@@ -510,7 +515,7 @@ int32_t mo_cond_broadcast(cond_t *c)
     if (likely(waiter)) {
         /* Validate task state before waking */
         if (likely(waiter->state == TASK_BLOCKED)) {
-            waiter->state = TASK_READY;
+            _sched_block_enqueue(waiter);
             /* Clear any pending timeout since we're broadcasting */
             waiter->delay = 0;
         } else {

kernel/semaphore.c

Lines changed: 1 addition & 1 deletion
@@ -176,7 +176,7 @@ void mo_sem_signal(sem_t *s)
     if (likely(awakened_task)) {
         /* Validate awakened task state consistency */
         if (likely(awakened_task->state == TASK_BLOCKED)) {
-            awakened_task->state = TASK_READY;
+            _sched_block_enqueue(awakened_task);
             should_yield = true;
         } else {
             /* Task state inconsistency - this should not happen */
