
Commit 8f5cb44

atishp04 authored and palmer-dabbelt committed
RISC-V: KVM: Support sstc extension
The Sstc extension allows the guest to program the vstimecmp CSR directly, instead of making an SBI call to the hypervisor to program the next event. In this case, the timer interrupt is also injected directly into the guest by the hardware. To maintain backward compatibility, the hypervisor also updates vstimecmp in an SBI set_time call if the hardware supports it. Thus, older guest kernels also take advantage of the sstc extension.

Reviewed-by: Anup Patel <[email protected]>
Signed-off-by: Atish Patra <[email protected]>
Acked-by: Anup Patel <[email protected]>
Link: https://lore.kernel.org/all/CAAhSdy2mb6wyqy0NAn9BcTWKMYEc0Z4zU3s3j7oNqBz6eDQ9sg@mail.gmail.com/
Signed-off-by: Palmer Dabbelt <[email protected]>
1 parent 9801002 commit 8f5cb44
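
The backward-compatibility story in the message above comes down to a single function pointer: kvm_riscv_vcpu_timer_init() selects timer_next_event once, pointing it at either the direct vstimecmp CSR write or the old hrtimer path, so the SBI set_time handler never has to know whether sstc exists. Below is a minimal stand-alone C sketch of that dispatch; the mock types, the csr_write() stub, and main() are illustrative assumptions for this sketch, not the kernel's definitions.

/*
 * Stand-alone illustration of the timer_next_event dispatch added by this
 * commit. Types and csr_write() are simplified mocks, not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct kvm_vcpu;                        /* opaque here, as in the real API */

struct kvm_vcpu_timer {
	bool sstc_enabled;
	/* Selected once at vcpu timer init, used for every SBI set_time */
	int (*timer_next_event)(struct kvm_vcpu *vcpu, uint64_t ncycles);
};

struct kvm_vcpu {
	struct kvm_vcpu_timer timer;
};

static void csr_write(const char *csr, uint64_t val)   /* mock CSR access */
{
	printf("csr_write(%s, 0x%llx)\n", csr, (unsigned long long)val);
}

/* sstc path: program the next event straight into vstimecmp */
static int update_vstimecmp(struct kvm_vcpu *vcpu, uint64_t ncycles)
{
	(void)vcpu;
	csr_write("CSR_VSTIMECMP", ncycles);
	return 0;
}

/* legacy path: arm a host hrtimer and inject the interrupt in software */
static int update_hrtimer(struct kvm_vcpu *vcpu, uint64_t ncycles)
{
	(void)vcpu;
	printf("hrtimer armed for guest cycle 0x%llx\n",
	       (unsigned long long)ncycles);
	return 0;
}

/*
 * Mirrors kvm_riscv_vcpu_timer_next_event(): the SBI set_time handler calls
 * this without knowing which backend is active.
 */
static int timer_next_event(struct kvm_vcpu *vcpu, uint64_t ncycles)
{
	return vcpu->timer.timer_next_event(vcpu, ncycles);
}

int main(void)
{
	struct kvm_vcpu vcpu = { 0 };

	/* As in kvm_riscv_vcpu_timer_init(): pick the backend once */
	vcpu.timer.sstc_enabled = true;     /* pretend hardware has sstc */
	vcpu.timer.timer_next_event = vcpu.timer.sstc_enabled ?
				      update_vstimecmp : update_hrtimer;

	timer_next_event(&vcpu, 0x1234);    /* guest's SBI set_time call */
	return 0;
}

Because the selection happens at init rather than per call, a guest that still issues SBI set_time calls transparently gets the vstimecmp path on sstc hardware, which is the backward-compatibility behavior the commit message describes.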

4 files changed, 153 insertions(+), 7 deletions(-)

arch/riscv/include/asm/kvm_vcpu_timer.h

Lines changed: 7 additions & 0 deletions
@@ -28,6 +28,11 @@ struct kvm_vcpu_timer {
 	u64 next_cycles;
 	/* Underlying hrtimer instance */
 	struct hrtimer hrt;
+
+	/* Flag to check if sstc is enabled or not */
+	bool sstc_enabled;
+	/* A function pointer to switch between stimecmp or hrtimer at runtime */
+	int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles);
 };
 
 int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles);
@@ -40,5 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
 void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
 
 #endif

arch/riscv/include/uapi/asm/kvm.h

Lines changed: 1 addition & 0 deletions
@@ -97,6 +97,7 @@ enum KVM_RISCV_ISA_EXT_ID {
 	KVM_RISCV_ISA_EXT_I,
 	KVM_RISCV_ISA_EXT_M,
 	KVM_RISCV_ISA_EXT_SVPBMT,
+	KVM_RISCV_ISA_EXT_SSTC,
 	KVM_RISCV_ISA_EXT_MAX,
 };

arch/riscv/kvm/vcpu.c

Lines changed: 7 additions & 1 deletion
@@ -52,6 +52,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
 	RISCV_ISA_EXT_i,
 	RISCV_ISA_EXT_m,
 	RISCV_ISA_EXT_SVPBMT,
+	RISCV_ISA_EXT_SSTC,
 };
 
 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
@@ -85,6 +86,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
 	case KVM_RISCV_ISA_EXT_C:
 	case KVM_RISCV_ISA_EXT_I:
 	case KVM_RISCV_ISA_EXT_M:
+	case KVM_RISCV_ISA_EXT_SSTC:
 		return false;
 	default:
 		break;
@@ -203,7 +205,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
+	return kvm_riscv_vcpu_timer_pending(vcpu);
 }
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -785,6 +787,8 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
 	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT))
 		henvcfg |= ENVCFG_PBMTE;
 
+	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SSTC))
+		henvcfg |= ENVCFG_STCE;
 	csr_write(CSR_HENVCFG, henvcfg);
 #ifdef CONFIG_32BIT
 	csr_write(CSR_HENVCFGH, henvcfg >> 32);
@@ -828,6 +832,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 					 vcpu->arch.isa);
 	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
 
+	kvm_riscv_vcpu_timer_save(vcpu);
+
 	csr->vsstatus = csr_read(CSR_VSSTATUS);
 	csr->vsie = csr_read(CSR_VSIE);
 	csr->vstvec = csr_read(CSR_VSTVEC);

arch/riscv/kvm/vcpu_timer.c

Lines changed: 138 additions & 6 deletions
@@ -69,7 +69,18 @@ static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
 	return 0;
 }
 
-int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+#if defined(CONFIG_32BIT)
+	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
+	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
+#else
+	csr_write(CSR_VSTIMECMP, ncycles);
+#endif
+	return 0;
+}
+
+static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
 {
 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
@@ -88,6 +99,65 @@ int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
 	return 0;
 }
 
+int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	return t->timer_next_event(vcpu, ncycles);
+}
+
+static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
+{
+	u64 delta_ns;
+	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
+	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
+		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
+		return HRTIMER_RESTART;
+	}
+
+	t->next_set = false;
+	kvm_vcpu_kick(vcpu);
+
+	return HRTIMER_NORESTART;
+}
+
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
+	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
+		return true;
+	else
+		return false;
+}
+
+static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+	u64 delta_ns;
+
+	if (!t->init_done)
+		return;
+
+	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+	if (delta_ns) {
+		hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+		t->next_set = true;
+	}
+}
+
+static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
+{
+	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
+}
+
 int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
 				 const struct kvm_one_reg *reg)
 {
@@ -180,10 +250,20 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 
 	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
 	t->init_done = true;
 	t->next_set = false;
 
+	/* Enable sstc for every vcpu if available in hardware */
+	if (riscv_isa_extension_available(NULL, SSTC)) {
+		t->sstc_enabled = true;
+		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
+		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
+	} else {
+		t->sstc_enabled = false;
+		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
+		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
+	}
+
 	return 0;
 }
 
@@ -199,21 +279,73 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
 
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
 {
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	t->next_cycles = -1ULL;
 	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
 }
 
-void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
 {
 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
 
-#ifdef CONFIG_64BIT
-	csr_write(CSR_HTIMEDELTA, gt->time_delta);
-#else
+#if defined(CONFIG_32BIT)
 	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
 	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
+#else
+	csr_write(CSR_HTIMEDELTA, gt->time_delta);
 #endif
 }
 
+void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr;
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	kvm_riscv_vcpu_update_timedelta(vcpu);
+
+	if (!t->sstc_enabled)
+		return;
+
+	csr = &vcpu->arch.guest_csr;
+#if defined(CONFIG_32BIT)
+	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
+	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
+#else
+	csr_write(CSR_VSTIMECMP, t->next_cycles);
+#endif
+
+	/* timer should be enabled for the remaining operations */
+	if (unlikely(!t->init_done))
+		return;
+
+	kvm_riscv_vcpu_timer_unblocking(vcpu);
+}
+
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr;
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	if (!t->sstc_enabled)
+		return;
+
+	csr = &vcpu->arch.guest_csr;
+	t = &vcpu->arch.timer;
+#if defined(CONFIG_32BIT)
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+#else
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+#endif
+	/* timer should be enabled for the remaining operations */
+	if (unlikely(!t->init_done))
+		return;
+
+	if (kvm_vcpu_is_blocking(vcpu))
+		kvm_riscv_vcpu_timer_blocking(vcpu);
+}
+
 void kvm_riscv_guest_timer_init(struct kvm *kvm)
 {
 	struct kvm_guest_timer *gt = &kvm->arch.timer;