  * that are independent of the semaphore module.
  */
 
+#include <spinlock.h>
 #include <lib/libc.h>
 #include <sys/mutex.h>
 #include <sys/task.h>
 
 #include "private/error.h"
 
+static spinlock_t mutex_lock = SPINLOCK_INITIALIZER;
+static uint32_t mutex_flags = 0;
+
 /* Validate mutex pointer and structure integrity */
 static inline bool mutex_is_valid(const mutex_t *m)
 {
@@ -87,17 +91,17 @@ int32_t mo_mutex_destroy(mutex_t *m)
     if (!mutex_is_valid(m))
         return ERR_FAIL;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Check if any tasks are waiting */
     if (!list_is_empty(m->waiters)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }
 
     /* Check if mutex is still owned */
     if (m->owner_tid != 0) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }
 
@@ -107,7 +111,7 @@ int32_t mo_mutex_destroy(mutex_t *m)
     m->waiters = NULL;
     m->owner_tid = 0;
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     list_destroy(waiters);
     return ERR_OK;
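
The new file-scope mutex_lock/mutex_flags pair replaces the scheduler-wide NOSCHED_ENTER()/NOSCHED_LEAVE() critical sections with an IRQ-saving spinlock, as seen in mo_mutex_destroy() above. The sketch below only illustrates the semantics these calls appear to rely on; spinlock_t, SPINLOCK_INITIALIZER and the two functions come from the project's <spinlock.h>, while irq_save()/irq_restore() and the busy-wait body are hypothetical stand-ins, not the real implementation.

/* Hypothetical sketch of the assumed <spinlock.h> semantics (illustration only). */
typedef struct {
    volatile uint32_t locked;
} spinlock_t;

#define SPINLOCK_INITIALIZER {0}

static inline void spin_lock_irqsave(spinlock_t *lock, uint32_t *flags)
{
    *flags = irq_save();                    /* hypothetical: mask interrupts, return prior state */
    while (__sync_lock_test_and_set(&lock->locked, 1))
        ;                                   /* spin until the current holder releases the lock */
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, uint32_t flags)
{
    __sync_lock_release(&lock->locked);     /* release the lock */
    irq_restore(flags);                     /* hypothetical: restore the saved interrupt state */
}

Because the saved interrupt state travels through the flags argument, each unlock must receive exactly what its matching lock stored; keeping a single shared mutex_flags therefore assumes these critical sections never nest and never run concurrently on another core.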
@@ -120,18 +124,18 @@ int32_t mo_mutex_lock(mutex_t *m)
 
     uint16_t self_tid = mo_task_id();
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Non-recursive: reject if caller already owns it */
     if (m->owner_tid == self_tid) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }
 
     /* Fast path: mutex is free, acquire immediately */
     if (m->owner_tid == 0) {
         m->owner_tid = self_tid;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_OK;
     }
 
@@ -151,7 +155,7 @@ int32_t mo_mutex_trylock(mutex_t *m)
     uint16_t self_tid = mo_task_id();
     int32_t result = ERR_TASK_BUSY;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     if (m->owner_tid == self_tid) {
         /* Already owned by caller (non-recursive) */
@@ -163,7 +167,7 @@ int32_t mo_mutex_trylock(mutex_t *m)
     }
     /* else: owned by someone else, return ERR_TASK_BUSY */
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return result;
 }
 
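
For reference, a usage sketch of mo_mutex_trylock(); log_lock, its setup, and flush_log_buffer() are illustrative names only and are not part of this change.

/* Hypothetical caller: do the work only if the lock is immediately available. */
static mutex_t log_lock;               /* assumed to be initialized elsewhere */

void logger_poll(void)
{
    if (mo_mutex_trylock(&log_lock) == ERR_OK) {
        flush_log_buffer();            /* hypothetical work done while holding the mutex */
        mo_mutex_unlock(&log_lock);
    } else {
        mo_task_yield();               /* lock busy: give up the CPU and retry later */
    }
}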
@@ -178,37 +182,37 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
     uint16_t self_tid = mo_task_id();
     uint32_t deadline = mo_ticks() + ticks;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Non-recursive check */
     if (m->owner_tid == self_tid) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }
 
     /* Fast path: mutex is free */
     if (m->owner_tid == 0) {
         m->owner_tid = self_tid;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_OK;
     }
 
     /* Must block with timeout */
     tcb_t *self = kcb->task_current->data;
     if (!list_pushback(m->waiters, self)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
     }
     self->state = TASK_BLOCKED;
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     /* Wait loop with timeout check */
     while (self->state == TASK_BLOCKED && mo_ticks() < deadline)
         mo_task_yield();
 
     int32_t status;
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     if (self->state == TASK_BLOCKED) {
         /* Timeout occurred - remove ourselves from wait list */
@@ -224,7 +228,7 @@ int32_t mo_mutex_timedlock(mutex_t *m, uint32_t ticks)
         status = ERR_OK;
     }
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return status;
 }
 
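
A usage sketch for mo_mutex_timedlock(); cache_lock and refresh_cache() are hypothetical, and the tick count is arbitrary.

/* Hypothetical caller: bound the wait instead of blocking indefinitely. */
int32_t update_cache(mutex_t *cache_lock)
{
    int32_t err = mo_mutex_timedlock(cache_lock, 100); /* give up after 100 ticks */
    if (err != ERR_OK)
        return err;                    /* timed out or rejected: skip this cycle */

    refresh_cache();                   /* hypothetical critical section */
    return mo_mutex_unlock(cache_lock);
}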
@@ -235,11 +239,11 @@ int32_t mo_mutex_unlock(mutex_t *m)
 
     uint16_t self_tid = mo_task_id();
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Verify caller owns the mutex */
     if (m->owner_tid != self_tid) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_NOT_OWNER;
     }
 
@@ -265,7 +269,7 @@ int32_t mo_mutex_unlock(mutex_t *m)
         }
     }
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return ERR_OK;
 }
 
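
The common blocking pattern pairs mo_mutex_lock() with mo_mutex_unlock(), which releases the mutex so a blocked waiter can proceed. A minimal sketch; count_lock and shared_count are illustrative only.

static mutex_t count_lock;             /* assumed to be initialized elsewhere */
static uint32_t shared_count;

void increment_shared(void)
{
    if (mo_mutex_lock(&count_lock) != ERR_OK)
        return;                        /* e.g. the caller already owns the mutex */
    shared_count++;                    /* update protected by the mutex */
    mo_mutex_unlock(&count_lock);      /* release; a blocked waiter (if any) can now acquire it */
}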
@@ -283,9 +287,9 @@ int32_t mo_mutex_waiting_count(mutex_t *m)
         return -1;
 
     int32_t count;
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     count = m->waiters ? (int32_t) m->waiters->length : 0;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     return count;
 }
@@ -317,11 +321,11 @@ int32_t mo_cond_destroy(cond_t *c)
     if (!cond_is_valid(c))
         return ERR_FAIL;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     /* Check if any tasks are waiting */
     if (!list_is_empty(c->waiters)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return ERR_TASK_BUSY;
     }
 
@@ -330,7 +334,7 @@ int32_t mo_cond_destroy(cond_t *c)
     list_t *waiters = c->waiters;
     c->waiters = NULL;
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     list_destroy(waiters);
     return ERR_OK;
@@ -348,27 +352,27 @@ int32_t mo_cond_wait(cond_t *c, mutex_t *m)
         return ERR_NOT_OWNER;
 
     /* Atomically add to wait list and block */
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     tcb_t *self = kcb->task_current->data;
     if (!list_pushback(c->waiters, self)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
     }
     self->state = TASK_BLOCKED;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     /* Release mutex and yield */
     int32_t unlock_result = mo_mutex_unlock(m);
     if (unlock_result != ERR_OK) {
         /* Failed to unlock - remove from wait list */
-        NOSCHED_ENTER();
+        spin_lock_irqsave(&mutex_lock, &mutex_flags);
         list_node_t *self_node = find_node_by_data(c->waiters, self);
         if (self_node) {
             list_remove(c->waiters, self_node);
             free(self_node);
         }
         self->state = TASK_READY;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return unlock_result;
     }
 
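
mo_cond_wait() follows the usual condition-variable contract: it must be called with the mutex held (otherwise it returns ERR_NOT_OWNER), and the woken task should re-check its predicate in a loop. A consumer-side sketch; queue_lock, queue_cond, the queue helpers and item_t are hypothetical.

static mutex_t queue_lock;             /* assumed to be initialized elsewhere */
static cond_t queue_cond;              /* assumed to be initialized elsewhere */

void consumer_task(void)
{
    mo_mutex_lock(&queue_lock);
    while (queue_is_empty())           /* re-check: a wakeup does not guarantee data */
        mo_cond_wait(&queue_cond, &queue_lock);
    item_t item = queue_pop();         /* hypothetical: consume under the mutex */
    mo_mutex_unlock(&queue_lock);
    process(item);                     /* hypothetical: work outside the critical section */
}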
@@ -394,27 +398,27 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
     uint32_t deadline = mo_ticks() + ticks;
 
     /* Atomically add to wait list */
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     tcb_t *self = kcb->task_current->data;
     if (!list_pushback(c->waiters, self)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         panic(ERR_SEM_OPERATION);
     }
     self->state = TASK_BLOCKED;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     /* Release mutex */
     int32_t unlock_result = mo_mutex_unlock(m);
     if (unlock_result != ERR_OK) {
         /* Failed to unlock - cleanup */
-        NOSCHED_ENTER();
+        spin_lock_irqsave(&mutex_lock, &mutex_flags);
         list_node_t *self_node = find_node_by_data(c->waiters, self);
         if (self_node) {
             list_remove(c->waiters, self_node);
             free(self_node);
         }
         self->state = TASK_READY;
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&mutex_lock, mutex_flags);
         return unlock_result;
     }
 
@@ -424,7 +428,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
     }
 
     int32_t wait_status;
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     if (self->state == TASK_BLOCKED) {
         /* Timeout - remove from wait list */
@@ -440,7 +444,7 @@ int32_t mo_cond_timedwait(cond_t *c, mutex_t *m, uint32_t ticks)
         wait_status = ERR_OK;
     }
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
     /* Re-acquire mutex regardless of timeout status */
     int32_t lock_result = mo_mutex_lock(m);
@@ -454,7 +458,7 @@ int32_t mo_cond_signal(cond_t *c)
     if (!cond_is_valid(c))
         return ERR_FAIL;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     if (!list_is_empty(c->waiters)) {
         tcb_t *waiter = (tcb_t *) list_pop(c->waiters);
@@ -469,7 +473,7 @@ int32_t mo_cond_signal(cond_t *c)
         }
     }
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return ERR_OK;
 }
 
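
The matching producer side: make the predicate true, then follow the usual convention of signaling while still holding the mutex so the update and the wakeup stay ordered with respect to waiters. Names reuse the hypothetical queue from the consumer sketch above.

void producer_task(item_t item)
{
    mo_mutex_lock(&queue_lock);
    queue_push(item);                  /* hypothetical: make the predicate true */
    mo_cond_signal(&queue_cond);       /* wake one waiting consumer, if any */
    mo_mutex_unlock(&queue_lock);
}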
@@ -478,7 +482,7 @@ int32_t mo_cond_broadcast(cond_t *c)
     if (!cond_is_valid(c))
         return ERR_FAIL;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
 
     while (!list_is_empty(c->waiters)) {
         tcb_t *waiter = (tcb_t *) list_pop(c->waiters);
@@ -493,7 +497,7 @@ int32_t mo_cond_broadcast(cond_t *c)
         }
     }
 
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
     return ERR_OK;
 }
 
@@ -503,9 +507,9 @@ int32_t mo_cond_waiting_count(cond_t *c)
         return -1;
 
     int32_t count;
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&mutex_lock, &mutex_flags);
     count = c->waiters ? (int32_t) c->waiters->length : 0;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&mutex_lock, mutex_flags);
 
    return count;
 }
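
Finally, a bounded variant of the consumer using mo_cond_timedwait(). As the diff notes, the function re-acquires the mutex before returning regardless of whether the wait timed out, so the caller still holds the lock on every path. The queue names remain hypothetical and the tick count is arbitrary.

int32_t consume_with_deadline(item_t *out)
{
    int32_t err = ERR_OK;

    mo_mutex_lock(&queue_lock);
    while (queue_is_empty() && err == ERR_OK)
        err = mo_cond_timedwait(&queue_cond, &queue_lock, 50); /* wait up to 50 ticks per round */

    if (!queue_is_empty()) {
        *out = queue_pop();            /* hypothetical: consume under the mutex */
        err = ERR_OK;
    }
    mo_mutex_unlock(&queue_lock);
    return err;                        /* ERR_OK with data, otherwise the timed-wait status */
}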