
Commit 261b4e4

Replace mutex_block_atomic with _sched_block_mutex
This commit replaces mutex_block_atomic() with _sched_block_mutex() to align mutex blocking behavior with the new scheduler design. Blocked tasks are now properly dequeued from the ready queue, and no deferred timer processing is performed.
1 parent (af6e300) · commit 261b4e4

File tree

1 file changed: +1 −19 lines

kernel/mutex.c

Lines changed: 1 addition & 19 deletions
@@ -64,24 +64,6 @@ static bool remove_self_from_waiters(list_t *waiters)
     return false;
 }
 
-/* Atomic block operation with enhanced error checking */
-static void mutex_block_atomic(list_t *waiters)
-{
-    if (unlikely(!waiters || !kcb || !kcb->task_current ||
-                 !kcb->task_current->data))
-        panic(ERR_SEM_OPERATION);
-
-    tcb_t *self = kcb->task_current->data;
-
-    /* Add to waiters list */
-    if (unlikely(!list_pushback(waiters, self)))
-        panic(ERR_SEM_OPERATION);
-
-    /* Block and yield atomically */
-    self->state = TASK_BLOCKED;
-    _yield(); /* This releases NOSCHED when we context switch */
-}
-
 int32_t mo_mutex_init(mutex_t *m)
 {
     if (unlikely(!m))
@@ -162,7 +144,7 @@ int32_t mo_mutex_lock(mutex_t *m)
     }
 
     /* Slow path: mutex is owned, must block atomically */
-    mutex_block_atomic(m->waiters);
+    _sched_block_mutex(m->waiters);
 
     /* When we return here, we've been woken by mo_mutex_unlock()
      * and ownership has been transferred to us. */
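Since _sched_block_mutex() lives in the scheduler and is not part of this diff, the following is only a minimal sketch of the behavior the commit message describes: the blocking task is dequeued from the ready queue, parked on the mutex's waiters list, marked blocked, and then yields, with no deferred timer processing. The helper name sched_dequeue_ready() and the error handling shown are assumptions for illustration, not the actual implementation.

/* Hypothetical sketch of the scheduler-side blocking primitive adopted by
 * this commit. Only _sched_block_mutex, kcb, tcb_t, list_pushback, _yield,
 * panic and TASK_BLOCKED appear in the diff; sched_dequeue_ready is an
 * assumed helper. */
void _sched_block_mutex(list_t *waiters)
{
    tcb_t *self = kcb->task_current->data;

    /* Unlike the removed mutex_block_atomic(), take the task off the ready
     * queue so the scheduler no longer treats it as runnable. */
    sched_dequeue_ready(self);

    /* Park the task on the mutex's waiters list. */
    if (unlikely(!list_pushback(waiters, self)))
        panic(ERR_SEM_OPERATION);

    /* Mark it blocked and switch away; no deferred timer processing is
     * performed here, per the commit message. */
    self->state = TASK_BLOCKED;
    _yield();
}

Per the comment retained in mo_mutex_lock(), mo_mutex_unlock() is then expected to move the head of the waiters list back to the ready queue and transfer ownership before the blocked task resumes.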
