Skip to content

Commit 9e4bdb7

Browse files
committed
Replace interrupt masking with spinlock in semaphore for SMP support
The original semaphore implementation used NOSCHED_ENTER() and NOSCHED_LEAVE() to protect critical sections by disabling interrupts, which was sufficient on single-core systems. To support SMP, these macros are replaced with a spinlock built on RV32A atomic instructions, ensuring safe access to shared semaphore state — the count and the wait queue — when multiple harts operate concurrently. This prevents race conditions in mo_sem_wait(), mo_sem_signal(), and the other semaphore operations under multi-hart scheduling.
1 parent 3f9d1bb commit 9e4bdb7

File tree

1 file changed

+16
-12
lines changed

1 file changed

+16
-12
lines changed

kernel/semaphore.c

Lines changed: 16 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
*/
99

1010
#include <hal.h>
11+
#include <spinlock.h>
1112
#include <sys/semaphore.h>
1213
#include <sys/task.h>
1314

@@ -24,6 +25,9 @@ struct sem_t {
2425
/* Magic number for semaphore validation */
2526
#define SEM_MAGIC 0x53454D00 /* "SEM\0" */
2627

28+
static spinlock_t semaphore_lock = SPINLOCK_INITIALIZER;
29+
static uint32_t semaphore_flags = 0;
30+
2731
static inline bool sem_is_valid(const sem_t *s)
2832
{
2933
return (s && s->magic == SEM_MAGIC && s->wait_q);
@@ -68,11 +72,11 @@ int32_t mo_sem_destroy(sem_t *s)
6872
if (!sem_is_valid(s))
6973
return ERR_FAIL;
7074

71-
NOSCHED_ENTER();
75+
spin_lock_irqsave(&semaphore_lock, &semaphore_flags);
7276

7377
/* Check if any tasks are waiting - unsafe to destroy if so */
7478
if (queue_count(s->wait_q) > 0) {
75-
NOSCHED_LEAVE();
79+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
7680
return ERR_TASK_BUSY;
7781
}
7882

@@ -81,7 +85,7 @@ int32_t mo_sem_destroy(sem_t *s)
8185
queue_t *wait_q = s->wait_q;
8286
s->wait_q = NULL;
8387

84-
NOSCHED_LEAVE();
88+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
8589

8690
/* Clean up resources outside critical section */
8791
queue_destroy(wait_q);
@@ -96,19 +100,19 @@ void mo_sem_wait(sem_t *s)
96100
panic(ERR_SEM_OPERATION);
97101
}
98102

99-
NOSCHED_ENTER();
103+
spin_lock_irqsave(&semaphore_lock, &semaphore_flags);
100104

101105
/* Fast path: resource available and no waiters (preserves FIFO) */
102106
if (s->count > 0 && queue_count(s->wait_q) == 0) {
103107
s->count--;
104-
NOSCHED_LEAVE();
108+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
105109
return;
106110
}
107111

108112
/* Slow path: must wait for resource */
109113
/* Verify wait queue has capacity (should never fail for valid semaphore) */
110114
if (queue_count(s->wait_q) >= s->max_waiters) {
111-
NOSCHED_LEAVE();
115+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
112116
panic(ERR_SEM_OPERATION); /* Queue overflow - system error */
113117
}
114118

@@ -133,15 +137,15 @@ int32_t mo_sem_trywait(sem_t *s)
133137

134138
int32_t result = ERR_FAIL;
135139

136-
NOSCHED_ENTER();
140+
spin_lock_irqsave(&semaphore_lock, &semaphore_flags);
137141

138142
/* Only succeed if resource is available AND no waiters (preserves FIFO) */
139143
if (s->count > 0 && queue_count(s->wait_q) == 0) {
140144
s->count--;
141145
result = ERR_OK;
142146
}
143147

144-
NOSCHED_LEAVE();
148+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
145149
return result;
146150
}
147151

@@ -155,7 +159,7 @@ void mo_sem_signal(sem_t *s)
155159
bool should_yield = false;
156160
tcb_t *awakened_task = NULL;
157161

158-
NOSCHED_ENTER();
162+
spin_lock_irqsave(&semaphore_lock, &semaphore_flags);
159163

160164
/* Check if any tasks are waiting */
161165
if (queue_count(s->wait_q) > 0) {
@@ -181,7 +185,7 @@ void mo_sem_signal(sem_t *s)
181185
/* Silently ignore overflow - semaphore remains at max count */
182186
}
183187

184-
NOSCHED_LEAVE();
188+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
185189

186190
/* Yield outside critical section to allow awakened task to run.
187191
* This improves responsiveness if the awakened task has higher priority.
@@ -209,9 +213,9 @@ int32_t mo_sem_waiting_count(sem_t *s)
209213

210214
int32_t count;
211215

212-
NOSCHED_ENTER();
216+
spin_lock_irqsave(&semaphore_lock, &semaphore_flags);
213217
count = queue_count(s->wait_q);
214-
NOSCHED_LEAVE();
218+
spin_unlock_irqrestore(&semaphore_lock, semaphore_flags);
215219

216220
return count;
217221
}

0 commit comments

Comments
 (0)