
Commit 709ea4a

Merge tag 'kvm-riscv-fixes-6.16-2' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv fixes for 6.16, take #2

- Disable vstimecmp before exiting to user-space
- Move HGEI[E|P] CSR access to IMSIC virtualization
2 parents be85438 + 4cec89d commit 709ea4a

6 files changed, 74 insertions(+), 55 deletions(-)

arch/riscv/include/asm/kvm_aia.h

Lines changed: 3 additions & 1 deletion
@@ -87,6 +87,9 @@ DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
 
 extern struct kvm_device_ops kvm_riscv_aia_device_ops;
 
+bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);
 
@@ -161,7 +164,6 @@ void kvm_riscv_aia_destroy_vm(struct kvm *kvm);
 int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
                              void __iomem **hgei_va, phys_addr_t *hgei_pa);
 void kvm_riscv_aia_free_hgei(int cpu, int hgei);
-void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);
 
 void kvm_riscv_aia_enable(void);
 void kvm_riscv_aia_disable(void);

arch/riscv/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
@@ -306,6 +306,9 @@ static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
         return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
 }
 
+static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+
 #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
 
 void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
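Note: with kvm_riscv_aia_wakeon_hgei() removed (see the aia.c and vcpu.c hunks below), the RISC-V blocking/unblocking hooks have nothing left to do, so they become empty inline stubs here. A rough, hypothetical sketch of where these hooks sit, assuming the usual generic-KVM halt flow (names other than the two hooks are invented; this is not the actual kvm_main.c code):

/* Informal sketch only. */
static void example_halt_flow(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_blocking(vcpu);    /* now a no-op on RISC-V */
        /* ... generic code waits until the vCPU becomes runnable ... */
        kvm_arch_vcpu_unblocking(vcpu);  /* also a no-op on RISC-V */
}

The HGEIE-based wake-up these hooks used to arm is handled in kvm_riscv_vcpu_aia_imsic_put()/_load() instead (see aia_imsic.c below).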

arch/riscv/kvm/aia.c

Lines changed: 7 additions & 44 deletions
@@ -30,28 +30,6 @@ unsigned int kvm_riscv_aia_nr_hgei;
 unsigned int kvm_riscv_aia_max_ids;
 DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
 
-static int aia_find_hgei(struct kvm_vcpu *owner)
-{
-        int i, hgei;
-        unsigned long flags;
-        struct aia_hgei_control *hgctrl = get_cpu_ptr(&aia_hgei);
-
-        raw_spin_lock_irqsave(&hgctrl->lock, flags);
-
-        hgei = -1;
-        for (i = 1; i <= kvm_riscv_aia_nr_hgei; i++) {
-                if (hgctrl->owners[i] == owner) {
-                        hgei = i;
-                        break;
-                }
-        }
-
-        raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
-
-        put_cpu_ptr(&aia_hgei);
-        return hgei;
-}
-
 static inline unsigned long aia_hvictl_value(bool ext_irq_pending)
 {
         unsigned long hvictl;
@@ -95,7 +73,6 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
 
 bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
 {
-        int hgei;
         unsigned long seip;
 
         if (!kvm_riscv_aia_available())
@@ -114,11 +91,7 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
         if (!kvm_riscv_aia_initialized(vcpu->kvm) || !seip)
                 return false;
 
-        hgei = aia_find_hgei(vcpu);
-        if (hgei > 0)
-                return !!(ncsr_read(CSR_HGEIP) & BIT(hgei));
-
-        return false;
+        return kvm_riscv_vcpu_aia_imsic_has_interrupt(vcpu);
 }
 
 void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu)
@@ -164,6 +137,9 @@ void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu)
                 csr_write(CSR_HVIPRIO2H, csr->hviprio2h);
 #endif
         }
+
+        if (kvm_riscv_aia_initialized(vcpu->kvm))
+                kvm_riscv_vcpu_aia_imsic_load(vcpu, cpu);
 }
 
 void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
@@ -174,6 +150,9 @@ void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu)
         if (!kvm_riscv_aia_available())
                 return;
 
+        if (kvm_riscv_aia_initialized(vcpu->kvm))
+                kvm_riscv_vcpu_aia_imsic_put(vcpu);
+
         if (kvm_riscv_nacl_available()) {
                 nsh = nacl_shmem();
                 csr->vsiselect = nacl_csr_read(nsh, CSR_VSISELECT);
@@ -472,22 +451,6 @@ void kvm_riscv_aia_free_hgei(int cpu, int hgei)
         raw_spin_unlock_irqrestore(&hgctrl->lock, flags);
 }
 
-void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable)
-{
-        int hgei;
-
-        if (!kvm_riscv_aia_available())
-                return;
-
-        hgei = aia_find_hgei(owner);
-        if (hgei > 0) {
-                if (enable)
-                        csr_set(CSR_HGEIE, BIT(hgei));
-                else
-                        csr_clear(CSR_HGEIE, BIT(hgei));
-        }
-}
-
 static irqreturn_t hgei_interrupt(int irq, void *dev_id)
 {
         int i;
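Note: the removed aia_find_hgei() had to take the per-CPU aia_hgei lock and scan the owners[] table just to recover which HGEI line a vCPU holds. The IMSIC per-vCPU state already caches that mapping, which is why the pending-interrupt check and the HGEIE handling can move into aia_imsic.c (next hunk). An informal comparison, drawn from the hunks above and below rather than from new kernel code:

/*
 * Old: raw_spin_lock_irqsave() plus a linear scan of hgctrl->owners[]
 *      to find the hgei index, then test BIT(hgei) against HGEIP/HGEIE.
 * New: the IMSIC context records vsfile_cpu and vsfile_hgei, so the
 *      helpers only take the vsfile read lock:
 *
 *      read_lock_irqsave(&imsic->vsfile_lock, flags);
 *      if (imsic->vsfile_cpu > -1)
 *              ... test or set BIT(imsic->vsfile_hgei) in HGEIP/HGEIE ...
 *      read_unlock_irqrestore(&imsic->vsfile_lock, flags);
 */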

arch/riscv/kvm/aia_imsic.c

Lines changed: 45 additions & 0 deletions
@@ -676,6 +676,48 @@ static void imsic_swfile_update(struct kvm_vcpu *vcpu,
         imsic_swfile_extirq_update(vcpu);
 }
 
+bool kvm_riscv_vcpu_aia_imsic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+        unsigned long flags;
+        bool ret = false;
+
+        /*
+         * The IMSIC SW-file directly injects interrupt via hvip so
+         * only check for interrupt when IMSIC VS-file is being used.
+         */
+
+        read_lock_irqsave(&imsic->vsfile_lock, flags);
+        if (imsic->vsfile_cpu > -1)
+                ret = !!(csr_read(CSR_HGEIP) & BIT(imsic->vsfile_hgei));
+        read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+
+        return ret;
+}
+
+void kvm_riscv_vcpu_aia_imsic_load(struct kvm_vcpu *vcpu, int cpu)
+{
+        /*
+         * No need to explicitly clear HGEIE CSR bits because the
+         * hgei interrupt handler (aka hgei_interrupt()) will always
+         * clear it for us.
+         */
+}
+
+void kvm_riscv_vcpu_aia_imsic_put(struct kvm_vcpu *vcpu)
+{
+        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+        unsigned long flags;
+
+        if (!kvm_vcpu_is_blocking(vcpu))
+                return;
+
+        read_lock_irqsave(&imsic->vsfile_lock, flags);
+        if (imsic->vsfile_cpu > -1)
+                csr_set(CSR_HGEIE, BIT(imsic->vsfile_hgei));
+        read_unlock_irqrestore(&imsic->vsfile_lock, flags);
+}
+
 void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
 {
         unsigned long flags;
@@ -781,6 +823,9 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
          * producers to the new IMSIC VS-file.
          */
 
+        /* Ensure HGEIE CSR bit is zero before using the new IMSIC VS-file */
+        csr_clear(CSR_HGEIE, BIT(new_vsfile_hgei));
+
         /* Zero-out new IMSIC VS-file */
         imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);
 
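Note: taken together, the new helpers arm the HGEIE bit for a vCPU's VS-file only while that vCPU is blocked, and rely on the existing hgei interrupt handler to clear it again. A condensed, informal sequence (not kernel code) of how the pieces in this hunk are meant to interact:

/*
 * 1. The vCPU blocks (e.g. on a guest WFI). When it is put,
 *    kvm_riscv_vcpu_aia_imsic_put() sees kvm_vcpu_is_blocking() and
 *    sets HGEIE for the vCPU's VS-file.
 * 2. An MSI lands in that VS-file: the HGEIP bit rises and, with HGEIE
 *    armed, hgei_interrupt() runs, clears that HGEIE bit and wakes the
 *    owning vCPU.
 * 3. On the next kvm_riscv_vcpu_aia_imsic_load() there is therefore
 *    nothing left to clear, as its comment above notes.
 */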

arch/riscv/kvm/vcpu.c

Lines changed: 0 additions & 10 deletions
@@ -207,16 +207,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
         return kvm_riscv_vcpu_timer_pending(vcpu);
 }
 
-void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
-{
-        kvm_riscv_aia_wakeon_hgei(vcpu, true);
-}
-
-void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
-{
-        kvm_riscv_aia_wakeon_hgei(vcpu, false);
-}
-
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
         return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&

arch/riscv/kvm/vcpu_timer.c

Lines changed: 16 additions & 0 deletions
@@ -345,8 +345,24 @@ void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
         /*
          * The vstimecmp CSRs are saved by kvm_riscv_vcpu_timer_sync()
          * upon every VM exit so no need to save here.
+         *
+         * If VS-timer expires when no VCPU running on a host CPU then
+         * WFI executed by such host CPU will be effective NOP resulting
+         * in no power savings. This is because as-per RISC-V Privileged
+         * specificaiton: "WFI is also required to resume execution for
+         * locally enabled interrupts pending at any privilege level,
+         * regardless of the global interrupt enable at each privilege
+         * level."
+         *
+         * To address the above issue, vstimecmp CSR must be set to -1UL
+         * over here when VCPU is scheduled-out or exits to user space.
          */
 
+        csr_write(CSR_VSTIMECMP, -1UL);
+#if defined(CONFIG_32BIT)
+        csr_write(CSR_VSTIMECMPH, -1UL);
+#endif
+
         /* timer should be enabled for the remaining operations */
         if (unlikely(!t->init_done))
                 return;
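Note: writing the all-ones value parks the VS-level timer while no vCPU is resident. A short informal sketch of why this works, assuming the Sstc comparison rule and the csr_write()/CSR_VSTIMECMP definitions already used in the hunk above (the helper name is invented):

/*
 * With Sstc, the VS-level timer interrupt is asserted while
 * (time + htimedelta) >= vstimecmp, compared as unsigned values, so
 * -1UL (all ones) pushes the interrupt about as far into the future as
 * the hardware allows and lets the host CPU's WFI actually idle.
 */
static void example_park_vs_timer(void)         /* hypothetical helper */
{
        csr_write(CSR_VSTIMECMP, -1UL);
#if defined(CONFIG_32BIT)
        csr_write(CSR_VSTIMECMPH, -1UL);        /* high half on rv32 */
#endif
}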
