Commit c746d0b

Replace interrupt masking with spinlock in task management for SMP support
The original task management code used CRITICAL_ENTER() / CRITICAL_LEAVE() and NOSCHED_ENTER() / NOSCHED_LEAVE() to protect critical sections by disabling interrupts, which was sufficient for single-core systems. To support SMP, these macros are replaced with a spinlock based on RV32A atomic instructions. This ensures that multiple harts can safely access and modify shared task data such as ready queues, priority values, and task control blocks. This change is essential for enabling multi-hart task scheduling without introducing race conditions in the kernel task subsystem.
Parent: 075a90c
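The spinlock primitives themselves come from <spinlock.h>, which is not part of this diff. As a rough sketch only, assuming the usual RV32A test-and-set built on amoswap.w with acquire/release ordering plus per-hart interrupt masking, the interface used by this change could look like the following. The helpers hal_intr_save() and hal_intr_restore() are placeholder names for whatever the HAL actually provides, not functions confirmed by this commit:

/* Rough sketch of the locking primitives assumed by this change. The real
 * definitions live in <spinlock.h> (not shown in this diff); the struct
 * layout, the interrupt-masking helper names, and the exact memory-ordering
 * choices are illustrative assumptions, not the project's implementation. */

typedef struct {
    volatile uint32_t lock; /* 0 = free, 1 = held */
} spinlock_t;

#define SPINLOCK_INITIALIZER { 0 }

static inline void spin_lock_irqsave(spinlock_t *l, uint32_t *flags)
{
    /* Mask interrupts on the local hart first so the lock holder cannot be
     * preempted while holding the lock; hal_intr_save() stands in for the
     * HAL routine that disables interrupts and returns the previous state. */
    *flags = hal_intr_save();

    /* RV32A test-and-set: atomically swap 1 into the lock word with
     * acquire ordering and spin until the previous value was 0. */
    uint32_t prev;
    do {
        __asm__ volatile("amoswap.w.aq %0, %2, (%1)"
                         : "=r"(prev)
                         : "r"(&l->lock), "r"(1u)
                         : "memory");
    } while (prev != 0);
}

static inline void spin_unlock_irqrestore(spinlock_t *l, uint32_t flags)
{
    /* Release the lock with release ordering, then restore the saved
     * interrupt state on this hart. */
    __asm__ volatile("amoswap.w.rl zero, zero, (%0)"
                     : : "r"(&l->lock) : "memory");
    hal_intr_restore(flags);
}

With primitives along these lines, every entry point touched below follows the same pattern: take task_lock, update the shared scheduler state, release the lock, and only then perform slow work such as freeing memory or calling mo_task_yield().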

File tree: 1 file changed, +33 −29 lines

kernel/task.c (33 additions, 29 deletions)
@@ -6,6 +6,7 @@
  */
 
 #include <hal.h>
+#include <spinlock.h>
 #include <lib/queue.h>
 #include <sys/task.h>
 
@@ -47,6 +48,9 @@ static struct {
 } task_cache[TASK_CACHE_SIZE];
 static uint8_t cache_index = 0;
 
+static spinlock_t task_lock = SPINLOCK_INITIALIZER;
+static uint32_t task_flags = 0;
+
 static inline bool is_valid_task(tcb_t *task)
 {
     return (task && task->stack && task->stack_sz >= MIN_TASK_STACK_SIZE &&
@@ -383,12 +387,12 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     }
 
     /* Minimize critical section duration */
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
 
     if (!kcb->tasks) {
         kcb->tasks = list_create();
         if (!kcb->tasks) {
-            CRITICAL_LEAVE();
+            spin_unlock_irqrestore(&task_lock, task_flags);
             free(tcb->stack);
             free(tcb);
             panic(ERR_KCB_ALLOC);
@@ -397,7 +401,7 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
 
     list_node_t *node = list_pushback(kcb->tasks, tcb);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         free(tcb->stack);
         free(tcb);
         panic(ERR_TCB_ALLOC);
@@ -410,7 +414,7 @@ int32_t mo_task_spawn(void *task_entry, uint16_t stack_size_req)
     if (!kcb->task_current)
         kcb->task_current = node;
 
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     /* Initialize execution context outside critical section */
     hal_context_init(&tcb->context, (size_t) tcb->stack, new_stack_size,
@@ -430,16 +434,16 @@ int32_t mo_task_cancel(uint16_t id)
     if (id == 0 || id == mo_task_id())
         return ERR_TASK_CANT_REMOVE;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *tcb = node->data;
     if (!tcb || tcb->state == TASK_RUNNING) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_CANT_REMOVE;
     }
 
@@ -459,7 +463,7 @@ int32_t mo_task_cancel(uint16_t id)
     if (kcb->last_ready_hint == node)
         kcb->last_ready_hint = NULL;
 
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     /* Free memory outside critical section */
     free(tcb->stack);
@@ -478,16 +482,16 @@ void mo_task_delay(uint16_t ticks)
     if (!ticks)
         return;
 
-    NOSCHED_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     if (unlikely(!kcb || !kcb->task_current || !kcb->task_current->data)) {
-        NOSCHED_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return;
     }
 
     tcb_t *self = kcb->task_current->data;
     self->delay = ticks;
     self->state = TASK_BLOCKED;
-    NOSCHED_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     mo_task_yield();
 }
@@ -497,17 +501,17 @@ int32_t mo_task_suspend(uint16_t id)
     if (id == 0)
         return ERR_TASK_NOT_FOUND;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task || (task->state != TASK_READY && task->state != TASK_RUNNING &&
                   task->state != TASK_BLOCKED)) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_CANT_SUSPEND;
     }
 
@@ -518,7 +522,7 @@ int32_t mo_task_suspend(uint16_t id)
     if (kcb->last_ready_hint == node)
         kcb->last_ready_hint = NULL;
 
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     if (is_current)
         mo_task_yield();
@@ -531,21 +535,21 @@ int32_t mo_task_resume(uint16_t id)
     if (id == 0)
         return ERR_TASK_NOT_FOUND;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
        return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task || task->state != TASK_SUSPENDED) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_CANT_RESUME;
     }
 
     task->state = TASK_READY;
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
     return ERR_OK;
 }
 
@@ -554,22 +558,22 @@ int32_t mo_task_priority(uint16_t id, uint16_t priority)
     if (id == 0 || !is_valid_priority(priority))
         return ERR_TASK_INVALID_PRIO;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     uint8_t base = (uint8_t) (priority >> 8);
     task->prio = ((uint16_t) base << 8) | base;
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     return ERR_OK;
 }
@@ -579,21 +583,21 @@ int32_t mo_task_rt_priority(uint16_t id, void *priority)
     if (id == 0)
         return ERR_TASK_NOT_FOUND;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = find_task_node_by_id(id);
     if (!node) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     tcb_t *task = node->data;
     if (!task) {
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&task_lock, task_flags);
         return ERR_TASK_NOT_FOUND;
     }
 
     task->rt_prio = priority;
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
     return ERR_OK;
 }
 
@@ -609,9 +613,9 @@ int32_t mo_task_idref(void *task_entry)
     if (!task_entry || !kcb->tasks)
         return ERR_TASK_NOT_FOUND;
 
-    CRITICAL_ENTER();
+    spin_lock_irqsave(&task_lock, &task_flags);
     list_node_t *node = list_foreach(kcb->tasks, refcmp, task_entry);
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&task_lock, task_flags);
 
     return node ? ((tcb_t *) node->data)->id : ERR_TASK_NOT_FOUND;
 }
