Skip to content

Commit 7948f50

Browse files
committed
[libcpu-riscv]: [common64 virt64]: Fix normal startup for the SMP architecture.
Tests were conducted on the qemu-virt64-riscv BSP. Currently, the command line cannot start normally, for two reasons: first, the SMP architecture requires scheduler-information update operations; second, context switching within interrupts is not yet supported. Solution: in the two functions rt_hw_context_switch_to and rt_hw_context_switch in context_gcc.S, add a call to rt_cpus_lock_status_restore to update the scheduler information. For the second issue, if scheduling is triggered inside an interrupt, pcpu->irq_switch_flag is set to 1; rt_scheduler_do_irq_switch is then called from interrupt_gcc.S to decide whether to perform a context switch. Signed-off-by: Mengchen Teng <[email protected]>
1 parent 6c0a7d1 commit 7948f50

File tree

4 files changed

+56
-2
lines changed

4 files changed

+56
-2
lines changed

libcpu/risc-v/common64/context_gcc.S

Lines changed: 29 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,14 +69,27 @@
6969
.endm
7070

7171
/*
72+
* #ifdef RT_USING_SMP
73+
* void rt_hw_context_switch_to(rt_ubase_t to, struct rt_thread *to_thread);
74+
* #else
7275
* void rt_hw_context_switch_to(rt_ubase_t to);
73-
*
74-
* a0 --> to SP pointer
76+
* #endif
77+
* a0 --> to
78+
* a1 --> to_thread
7579
*/
7680
.globl rt_hw_context_switch_to
7781
rt_hw_context_switch_to:
7882
LOAD sp, (a0)
7983

84+
#ifdef RT_USING_SMP
85+
/*
86+
* Pass the previous CPU lock status to
87+
* rt_cpus_lock_status_restore for restoration
88+
*/
89+
mv a0, a1
90+
call rt_cpus_lock_status_restore
91+
#endif
92+
8093
call rt_thread_self
8194
mv s1, a0
8295

@@ -88,10 +101,15 @@ rt_hw_context_switch_to:
88101
sret
89102

90103
/*
104+
* #ifdef RT_USING_SMP
105+
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread);
106+
* #else
91107
* void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to);
108+
* #endif
92109
*
93110
* a0 --> from SP pointer
94111
* a1 --> to SP pointer
112+
* a2 --> to_thread
95113
*
96114
* It should only be used on local interrupt disable
97115
*/
@@ -103,6 +121,15 @@ rt_hw_context_switch:
103121
// restore to thread SP
104122
LOAD sp, (a1)
105123

124+
#ifdef RT_USING_SMP
125+
/*
126+
* Pass the previous CPU lock status to
127+
* rt_cpus_lock_status_restore for restoration
128+
*/
129+
mv a0, a2
130+
call rt_cpus_lock_status_restore
131+
#endif /*RT_USING_SMP*/
132+
106133
// restore Address Space
107134
call rt_thread_self
108135
mv s1, a0

libcpu/risc-v/common64/cpuport.c

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,18 @@ void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to, rt_thread_t
117117

118118
return;
119119
}
120+
#else
121+
void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread)
122+
{
123+
/* Perform architecture-specific context switch. This call will
124+
* restore the target thread context and should not return when a
125+
* switch is performed. The caller (scheduler) invoked this function
126+
* in a context where local IRQs are disabled. */
127+
rt_uint32_t level;
128+
level = rt_hw_local_irq_disable();
129+
rt_hw_context_switch((rt_ubase_t)from, (rt_ubase_t)to, to_thread);
130+
rt_hw_local_irq_enable(level);
131+
}
120132
#endif /* end of RT_USING_SMP */
121133

122134
/** shutdown CPU */

libcpu/risc-v/common64/interrupt_gcc.S

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,10 +60,17 @@ _handle_interrupt_and_exception:
6060
call handle_trap
6161

6262
_interrupt_exit:
63+
#ifndef RT_USING_SMP
6364
la s0, rt_thread_switch_interrupt_flag
6465
lw s2, 0(s0)
6566
beqz s2, _resume_execution
6667
sw zero, 0(s0)
68+
#else
69+
mv a0, sp
70+
call rt_scheduler_do_irq_switch
71+
// if no switch was performed, jump to _resume_execution
72+
j _resume_execution
73+
#endif /* RT_USING_SMP */
6774

6875
_context_switch:
6976
la t0, rt_interrupt_from_thread

libcpu/risc-v/virt64/interrupt.c

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,14 @@ void rt_hw_interrupt_init()
9494
}
9595

9696
#ifdef RT_USING_SMP
97+
rt_bool_t rt_hw_interrupt_is_disabled(void)
98+
{
99+
/* Determine the interrupt enable state */
100+
rt_ubase_t sstatus;
101+
__asm__ volatile("csrr %0, sstatus" : "=r"(sstatus));
102+
return (sstatus & SSTATUS_SIE) == 0;
103+
}
104+
97105
void rt_hw_spin_lock_init(rt_hw_spinlock_t *_lock)
98106
{
99107

0 commit comments

Comments
 (0)