3 changes: 3 additions & 0 deletions arch/riscv/include/asm/kvm_vcpu_pmu.h
@@ -98,6 +98,9 @@ void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
unsigned long saddr_high, unsigned long flags,
struct kvm_vcpu_sbi_return *retdata);
int kvm_riscv_vcpu_pmu_event_info(struct kvm_vcpu *vcpu, unsigned long saddr_low,
unsigned long saddr_high, unsigned long num_events,
unsigned long flags, struct kvm_vcpu_sbi_return *retdata);
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);

2 changes: 1 addition & 1 deletion arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -11,7 +11,7 @@

#define KVM_SBI_IMPID 3

#define KVM_SBI_VERSION_MAJOR 2
#define KVM_SBI_VERSION_MAJOR 3
#define KVM_SBI_VERSION_MINOR 0

enum kvm_riscv_sbi_ext_status {
13 changes: 13 additions & 0 deletions arch/riscv/include/asm/sbi.h
@@ -135,6 +135,7 @@ enum sbi_ext_pmu_fid {
SBI_EXT_PMU_COUNTER_FW_READ,
SBI_EXT_PMU_COUNTER_FW_READ_HI,
SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
SBI_EXT_PMU_EVENT_GET_INFO,
};

union sbi_pmu_ctr_info {
@@ -158,9 +159,20 @@ struct riscv_pmu_snapshot_data {
u64 reserved[447];
};

struct riscv_pmu_event_info {
	u32 event_idx;
	u32 output;
	u64 event_data;
};

#define RISCV_PMU_EVENT_INFO_OUTPUT_MASK 0x01

Comment on lines +162 to +169
⚠️ Potential issue

Guarantee ABI-stable layout for struct riscv_pmu_event_info.

This structure is exchanged with firmware, so its in-memory layout must be identical on 32- and 64-bit kernels and across compilers.
Because the two u32 members are followed by a u64, the final size and any padding depend on the ABI's alignment rules for 64-bit types: the structure may stay at 16 bytes or grow to 24 bytes depending on compiler and ABI. Any mismatch between what the kernel and the firmware expect will corrupt the buffer consumed by SBI.

Please make the layout explicit:

 struct riscv_pmu_event_info {
 	u32 event_idx;
 	u32 output;
 	u64 event_data;
-};
+} __packed;		/* or __aligned(4) */
+
+static_assert(sizeof(struct riscv_pmu_event_info) == 16, "SBI ABI break!");

Alternatively, use BUILD_BUG_ON() in a C file if you would rather avoid <linux/build_bug.h> in the UAPI header.
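For illustration, a minimal sketch of that BUILD_BUG_ON() alternative; the helper name and its placement in arch/riscv/kvm/vcpu_pmu.c are assumptions made for this example, not part of the PR:

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <asm/sbi.h>

/* Hypothetical compile-time layout check: fails the build if the structure
 * shared with firmware ever changes size or field placement. */
static inline void riscv_pmu_event_info_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct riscv_pmu_event_info) != 16);
	BUILD_BUG_ON(offsetof(struct riscv_pmu_event_info, event_data) != 8);
}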

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In arch/riscv/include/asm/sbi.h around lines 162 to 169, the struct
riscv_pmu_event_info has an ABI stability issue due to potential padding
differences between 32- and 64-bit kernels. To fix this, explicitly control the
struct layout by adding explicit padding fields or using compiler attributes
like __packed__ to ensure the struct size and alignment are consistent across
architectures. Alternatively, add a BUILD_BUG_ON() check in a C file to verify
the struct size at compile time and prevent ABI mismatches.

#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
#define RISCV_PMU_PLAT_FW_EVENT_MASK GENMASK_ULL(61, 0)
/* SBI v3.0 allows extended hpmeventX width value */
#define RISCV_PMU_RAW_EVENT_V2_MASK GENMASK_ULL(55, 0)
#define RISCV_PMU_RAW_EVENT_IDX 0x20000
#define RISCV_PMU_RAW_EVENT_V2_IDX 0x30000
#define RISCV_PLAT_FW_EVENT 0xFFFF

/** General pmu event codes specified in SBI PMU extension */
@@ -218,6 +230,7 @@ enum sbi_pmu_event_type {
SBI_PMU_EVENT_TYPE_HW = 0x0,
SBI_PMU_EVENT_TYPE_CACHE = 0x1,
SBI_PMU_EVENT_TYPE_RAW = 0x2,
SBI_PMU_EVENT_TYPE_RAW_V2 = 0x3,
SBI_PMU_EVENT_TYPE_FW = 0xf,
};

75 changes: 71 additions & 4 deletions arch/riscv/kvm/vcpu_pmu.c
@@ -60,6 +60,7 @@ static u32 kvm_pmu_get_perf_event_type(unsigned long eidx)
type = PERF_TYPE_HW_CACHE;
break;
case SBI_PMU_EVENT_TYPE_RAW:
case SBI_PMU_EVENT_TYPE_RAW_V2:
case SBI_PMU_EVENT_TYPE_FW:
type = PERF_TYPE_RAW;
break;
@@ -128,6 +129,9 @@ static u64 kvm_pmu_get_perf_event_config(unsigned long eidx, uint64_t evt_data)
case SBI_PMU_EVENT_TYPE_RAW:
config = evt_data & RISCV_PMU_RAW_EVENT_MASK;
break;
case SBI_PMU_EVENT_TYPE_RAW_V2:
config = evt_data & RISCV_PMU_RAW_EVENT_V2_MASK;
break;
case SBI_PMU_EVENT_TYPE_FW:
if (ecode < SBI_PMU_FW_MAX)
config = (1ULL << 63) | ecode;
@@ -405,8 +409,6 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s
int snapshot_area_size = sizeof(struct riscv_pmu_snapshot_data);
int sbiret = 0;
gpa_t saddr;
unsigned long hva;
bool writable;

if (!kvpmu || flags) {
sbiret = SBI_ERR_INVALID_PARAM;
@@ -428,8 +430,7 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s
goto out;
}

hva = kvm_vcpu_gfn_to_hva_prot(vcpu, saddr >> PAGE_SHIFT, &writable);
if (kvm_is_error_hva(hva) || !writable) {
if (kvm_vcpu_validate_gpa_range(vcpu, saddr, PAGE_SIZE, true)) {
sbiret = SBI_ERR_INVALID_ADDRESS;
goto out;
}
Comment on lines +433 to 436
🛠️ Refactor suggestion

Potential memory leak on repeated snapshot-set calls

kvpmu->sdata is (re)allocated unconditionally, but the old buffer is only freed when the caller passes SBI_SHMEM_DISABLE.
A guest can therefore leak memory by calling SNAPSHOT_SET_SHMEM twice with different addresses.

Diff suggestion:

-    kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC);
+    kfree(kvpmu->sdata);
+    kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC);

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In arch/riscv/kvm/vcpu_pmu.c around lines 433 to 436, the code reallocates
kvpmu->sdata without freeing the old buffer unless SBI_SHMEM_DISABLE is passed,
causing a memory leak on repeated SNAPSHOT_SET_SHMEM calls. To fix this, before
reallocating kvpmu->sdata, check if it is already allocated and free the
existing buffer to prevent leaks when the guest calls SNAPSHOT_SET_SHMEM
multiple times with different addresses.
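As a rough sketch of the fix being asked for (a fragment, not a committable patch: the allocation-failure handling shown here is an assumption, and GFP_ATOMIC simply mirrors the existing call):

	/* Drop any buffer left over from a previous SNAPSHOT_SET_SHMEM call;
	 * kfree(NULL) is a no-op, so this is safe on the first invocation. */
	kfree(kvpmu->sdata);
	kvpmu->sdata = kzalloc(snapshot_area_size, GFP_ATOMIC);
	if (!kvpmu->sdata) {
		sbiret = SBI_ERR_FAILURE;
		goto out;
	}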

@@ -452,6 +453,72 @@ int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long s
return 0;
}

int kvm_riscv_vcpu_pmu_event_info(struct kvm_vcpu *vcpu, unsigned long saddr_low,
				  unsigned long saddr_high, unsigned long num_events,
				  unsigned long flags, struct kvm_vcpu_sbi_return *retdata)
{
	struct riscv_pmu_event_info *einfo;
	int shmem_size = num_events * sizeof(*einfo);
	gpa_t shmem;
	u32 eidx, etype;
	u64 econfig;
	int ret;

	if (flags != 0 || (saddr_low & (SZ_16 - 1))) {
		ret = SBI_ERR_INVALID_PARAM;
		goto out;
	}

	shmem = saddr_low;
	if (saddr_high != 0) {
		if (IS_ENABLED(CONFIG_32BIT)) {
			shmem |= ((gpa_t)saddr_high << 32);
		} else {
			ret = SBI_ERR_INVALID_ADDRESS;
			goto out;
		}
	}

	if (kvm_vcpu_validate_gpa_range(vcpu, shmem, shmem_size, true)) {
		ret = SBI_ERR_INVALID_ADDRESS;
		goto out;
	}

	einfo = kzalloc(shmem_size, GFP_KERNEL);
	if (!einfo)
		return -ENOMEM;

	ret = kvm_vcpu_read_guest(vcpu, shmem, einfo, shmem_size);
	if (ret) {
		ret = SBI_ERR_FAILURE;
		goto free_mem;
	}

	for (int i = 0; i < num_events; i++) {
		eidx = einfo[i].event_idx;
		etype = kvm_pmu_get_perf_event_type(eidx);
		econfig = kvm_pmu_get_perf_event_config(eidx, einfo[i].event_data);
		ret = riscv_pmu_get_event_info(etype, econfig, NULL);
		if (ret > 0)
			einfo[i].output = 1;
		else
			einfo[i].output = 0;
	}

	kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size);
	if (ret) {
		ret = SBI_ERR_FAILURE;
		goto free_mem;
	}

free_mem:
	kfree(einfo);
out:
	retdata->err_val = ret;

	return 0;
}
Comment on lines +456 to +520
⚠️ Potential issue

Multiple functional bugs in kvm_riscv_vcpu_pmu_event_info()

  1. ret is uninitialised on the success path – we may return a random value to the guest.
  2. The return value of kvm_vcpu_write_guest() is ignored, so write failures go unnoticed.
  3. Heap allocation uses GFP_KERNEL; this path executes in the vCPU exit handler and may run in atomic context. Existing code in this file allocates with GFP_ATOMIC.
  4. An earlier failure in the loop is overwritten by the next iteration; we should report any error, not just the last one.

Proposed fix:

@@
-	struct riscv_pmu_event_info *einfo;
-	int shmem_size = num_events * sizeof(*einfo);
+	struct riscv_pmu_event_info *einfo;
+	const int shmem_size = num_events * sizeof(*einfo);
 	gpa_t shmem;
 	u32 eidx, etype;
 	u64 econfig;
-	int ret;
+	int ret = 0;
+	int err_seen = 0;
@@
-	einfo = kzalloc(shmem_size, GFP_KERNEL);
+	einfo = kzalloc(shmem_size, GFP_ATOMIC);
@@
-		ret = riscv_pmu_get_event_info(etype, econfig, NULL);
-		if (ret > 0)
+		ret = riscv_pmu_get_event_info(etype, econfig, NULL);
+		if (ret > 0)
 			einfo[i].output = 1;
 		else
 			einfo[i].output = 0;
-	}
-
-	kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size);
-	if (ret) {
-		ret = SBI_ERR_FAILURE;
-		goto free_mem;
+		if (ret < 0)
+			err_seen = 1;
+	}
+
+	ret = kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size);
+	if (ret) {
+		ret = SBI_ERR_FAILURE;
+		goto free_mem;
 	}
-
-free_mem:
-	kfree(einfo);
-out:
-	retdata->err_val = ret;
-
-	return 0;
+free_mem:
+	kfree(einfo);
+out:
+	if (err_seen && !ret)
+		ret = SBI_ERR_FAILURE;
+	retdata->err_val = ret;
+
+	return 0;
 }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
int kvm_riscv_vcpu_pmu_event_info(struct kvm_vcpu *vcpu, unsigned long saddr_low,
				  unsigned long saddr_high, unsigned long num_events,
				  unsigned long flags, struct kvm_vcpu_sbi_return *retdata)
{
	struct riscv_pmu_event_info *einfo;
	const int shmem_size = num_events * sizeof(*einfo);
	gpa_t shmem;
	u32 eidx, etype;
	u64 econfig;
	int ret = 0;
	int err_seen = 0;

	if (flags != 0 || (saddr_low & (SZ_16 - 1))) {
		ret = SBI_ERR_INVALID_PARAM;
		goto out;
	}

	shmem = saddr_low;
	if (saddr_high != 0) {
		if (IS_ENABLED(CONFIG_32BIT)) {
			shmem |= ((gpa_t)saddr_high << 32);
		} else {
			ret = SBI_ERR_INVALID_ADDRESS;
			goto out;
		}
	}

	if (kvm_vcpu_validate_gpa_range(vcpu, shmem, shmem_size, true)) {
		ret = SBI_ERR_INVALID_ADDRESS;
		goto out;
	}

	einfo = kzalloc(shmem_size, GFP_ATOMIC);
	if (!einfo)
		return -ENOMEM;

	ret = kvm_vcpu_read_guest(vcpu, shmem, einfo, shmem_size);
	if (ret) {
		ret = SBI_ERR_FAILURE;
		goto free_mem;
	}

	for (int i = 0; i < num_events; i++) {
		eidx = einfo[i].event_idx;
		etype = kvm_pmu_get_perf_event_type(eidx);
		econfig = kvm_pmu_get_perf_event_config(eidx, einfo[i].event_data);
		ret = riscv_pmu_get_event_info(etype, econfig, NULL);
		if (ret > 0)
			einfo[i].output = 1;
		else
			einfo[i].output = 0;
		if (ret < 0)
			err_seen = 1;
	}

	ret = kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size);
	if (ret) {
		ret = SBI_ERR_FAILURE;
		goto free_mem;
	}

free_mem:
	kfree(einfo);
out:
	if (err_seen && !ret)
		ret = SBI_ERR_FAILURE;
	retdata->err_val = ret;

	return 0;
}
🤖 Prompt for AI Agents
In arch/riscv/kvm/vcpu_pmu.c from lines 456 to 520, initialize the variable ret
to 0 at the start to avoid returning uninitialized values on success. Check and
handle the return value of kvm_vcpu_write_guest to detect write failures and set
ret accordingly. Replace GFP_KERNEL with GFP_ATOMIC in the kzalloc call to
ensure safe allocation in atomic context. Modify the loop to preserve any error
returned by riscv_pmu_get_event_info instead of overwriting ret on each
iteration, so that any error is reported properly.

Comment on lines +456 to +520
🛠️ Refactor suggestion

⚠️ Potential issue

Several correctness issues in kvm_riscv_vcpu_pmu_event_info()

  1. Integer-overflow & sign issues
    int shmem_size = num_events * sizeof(*einfo);
    num_events comes from the guest and may be arbitrarily large.
    • The multiplication is done in unsigned long and then truncated to a signed int.
    • This can overflow or wrap negative, leading to over-allocation or an
      out-of-bounds size when validating the GPA range.

  2. Use-after-success semantics
    The same ret variable is reused for
    – the result of kvm_vcpu_read_guest(),
    – every iteration of riscv_pmu_get_event_info(),
    – the check that follows kvm_vcpu_write_guest().
    A successful, positive return value from the last iteration is then treated
    as failure, because the final if (ret) check interprets any non-zero value
    as an error.

  3. Event-0 handled as “not present”
    output is set only when ret > 0. If the PMU maps an event to index 0,
    output will be cleared even though the event is valid.

  4. Memory allocation style
    kzalloc(shmem_size, GFP_KERNEL) does not protect against the overflow
    mentioned in (1). Prefer kvcalloc() / kvmalloc_array() which has a
    built-in overflow check.

A robust rewrite could look like:

-    int shmem_size = num_events * sizeof(*einfo);
+    size_t shmem_size;
+
+    if (!num_events ||
+        check_mul_overflow(num_events, sizeof(*einfo), &shmem_size)) {
+        ret = SBI_ERR_INVALID_PARAM;
+        goto out;
+    }

-    einfo = kzalloc(shmem_size, GFP_KERNEL);
+    einfo = kvmalloc_array(num_events, sizeof(*einfo), GFP_KERNEL | __GFP_ZERO);

@@
-    ret = kvm_vcpu_read_guest(vcpu, shmem, einfo, shmem_size);
-    if (ret) {
+    if (kvm_vcpu_read_guest(vcpu, shmem, einfo, shmem_size)) {
         ret = SBI_ERR_FAILURE;
         goto free_mem;
     }
@@
-        ret = riscv_pmu_get_event_info(etype, econfig, NULL);
-        if (ret > 0)
+        int rc = riscv_pmu_get_event_info(etype, econfig, NULL);
+        if (rc >= 0)
             einfo[i].output = 1;
         else
             einfo[i].output = 0;
     }
@@
-    kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size);
-    if (ret) {
+    if (kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size)) {
         ret = SBI_ERR_FAILURE;
         goto free_mem;
     }

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In arch/riscv/kvm/vcpu_pmu.c around lines 456 to 520, fix integer overflow by
changing shmem_size to an unsigned long or size_t and use a safe allocation
function like kvcalloc or kvmalloc_array to allocate einfo, which checks for
overflow. Separate the ret variable usage by introducing a new variable for the
loop over riscv_pmu_get_event_info() results to avoid overwriting the error
status from kvm_vcpu_read_guest() and kvm_vcpu_write_guest(). Adjust the logic
that sets einfo[i].output to treat ret >= 0 as valid (not just ret > 0) so event
index 0 is correctly handled as present. Finally, ensure GPA range validation
uses the correct size type matching shmem_size to prevent out-of-bounds errors.
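To make the size handling concrete, here is a hedged sketch of the overflow-checked allocation (check_mul_overflow() comes from <linux/overflow.h>, kvcalloc()/kvfree() from <linux/mm.h>); variable names follow the PR, but this is an illustration of the suggested pattern rather than a final patch, and whether GFP_KERNEL is acceptable here depends on the calling context discussed above:

	size_t shmem_size;

	/* Reject zero and bail out if num_events * sizeof(*einfo) would
	 * overflow; check_mul_overflow() returns true on overflow. */
	if (!num_events ||
	    check_mul_overflow(num_events, sizeof(*einfo), &shmem_size)) {
		retdata->err_val = SBI_ERR_INVALID_PARAM;
		return 0;
	}

	/* kvcalloc() zeroes the buffer and has its own overflow check. */
	einfo = kvcalloc(num_events, sizeof(*einfo), GFP_KERNEL);
	if (!einfo)
		return -ENOMEM;

	/* ... read the guest buffer, fill in einfo[i].output, write it back ... */

	kvfree(einfo);	/* pair kvcalloc() with kvfree(), not kfree() */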

Comment on lines +456 to +520
⚠️ Potential issue

Uninitialised ret, ignored error paths, and GFP context issues in kvm_riscv_vcpu_pmu_event_info

Several correctness problems can surface here:

  1. ret is not initialised on the success path. If num_events == 0, ret remains uninitialised and we propagate stack garbage to the guest via retdata->err_val.
  2. Return value of kvm_vcpu_write_guest() is ignored; failures after populating einfo are silently lost.
  3. ret is reused for two unrelated purposes (value from riscv_pmu_get_event_info() and from the final HVA write) which can mask an earlier guest-write failure.
  4. kzalloc(shmem_size, GFP_KERNEL) may sleep inside the vcpu exit path; the snapshot helper above correctly uses GFP_ATOMIC.
  5. No sanity cap on num_events; an attacker can request an unbounded allocation. RISCV_KVM_MAX_COUNTERS or a PAGE-based limit should be applied.
  6. No overflow check: num_events * sizeof(*einfo) can overflow the signed int used for shmem_size.

Proposed minimal fix:

@@
-    struct riscv_pmu_event_info *einfo;
-    int shmem_size = num_events * sizeof(*einfo);
+    struct riscv_pmu_event_info *einfo;
+    size_t shmem_size;
+    int ret = 0;          /* default: success */
+
+    if (num_events == 0 || num_events > RISCV_KVM_MAX_COUNTERS)
+        return retdata->err_val = SBI_ERR_INVALID_PARAM, 0;
+
+    if (check_mul_overflow(num_events, sizeof(*einfo), &shmem_size))
+        return retdata->err_val = SBI_ERR_INVALID_PARAM, 0;
@@
-    einfo = kzalloc(shmem_size, GFP_KERNEL);
+    einfo = kzalloc(shmem_size, GFP_ATOMIC);
@@
-        ret = riscv_pmu_get_event_info(etype, econfig, NULL);
-        if (ret > 0)
+        int rc = riscv_pmu_get_event_info(etype, econfig, NULL);
+        if (rc > 0)
             einfo[i].output = 1;
         else
             einfo[i].output = 0;
@@
-    kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size);
-    if (ret) {
-        ret = SBI_ERR_FAILURE;
+    if (kvm_vcpu_write_guest(vcpu, shmem, einfo, shmem_size)) {
+        ret = SBI_ERR_FAILURE;
         goto free_mem;
     }

(The full patch will need appropriate headers for check_mul_overflow().)

These issues can cause guest-visible mis-reports and, in the worst case, host memory exhaustion.

🤖 Prompt for AI Agents
In arch/riscv/kvm/vcpu_pmu.c around lines 456 to 520, fix uninitialized ret by
initializing it to 0 at the start to avoid propagating garbage on success; add a
sanity check to cap num_events using RISCV_KVM_MAX_COUNTERS or a PAGE-based
limit and use check_mul_overflow() to prevent integer overflow in shmem_size
calculation; replace kzalloc with GFP_ATOMIC to avoid sleeping in vcpu exit
path; check and handle the return value of kvm_vcpu_write_guest to detect and
propagate errors properly; use separate variables for return values from
riscv_pmu_get_event_info and kvm_vcpu_write_guest to avoid masking errors;
ensure all error paths set ret appropriately before assigning retdata->err_val.


int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu,
struct kvm_vcpu_sbi_return *retdata)
{
3 changes: 3 additions & 0 deletions arch/riscv/kvm/vcpu_sbi_pmu.c
@@ -73,6 +73,9 @@ static int kvm_sbi_ext_pmu_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
case SBI_EXT_PMU_SNAPSHOT_SET_SHMEM:
ret = kvm_riscv_vcpu_pmu_snapshot_set_shmem(vcpu, cp->a0, cp->a1, cp->a2, retdata);
break;
case SBI_EXT_PMU_EVENT_GET_INFO:
ret = kvm_riscv_vcpu_pmu_event_info(vcpu, cp->a0, cp->a1, cp->a2, cp->a3, retdata);
break;
default:
retdata->err_val = SBI_ERR_NOT_SUPPORTED;
}
6 changes: 2 additions & 4 deletions arch/riscv/kvm/vcpu_sbi_sta.c
@@ -85,8 +85,6 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
unsigned long shmem_phys_hi = cp->a1;
u32 flags = cp->a2;
struct sbi_sta_struct zero_sta = {0};
unsigned long hva;
bool writable;
gpa_t shmem;
int ret;

@@ -111,8 +109,8 @@ static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
return SBI_ERR_INVALID_ADDRESS;
}

Copilot AI May 22, 2025

Although the comment mentions 64-byte alignment, there's no explicit check for alignment here; add '(shmem & 63) == 0' validation before range checking.

Suggested change
	/* Ensure the shmem is 64-byte aligned as required by the spec. */
	if (shmem & (SZ_64 - 1))
		return SBI_ERR_INVALID_PARAM;


hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
if (kvm_is_error_hva(hva) || !writable)
/* The spec requires the shmem to be 64-byte aligned. */
Copilot AI May 22, 2025

Although you note the 64-byte alignment requirement, there is no check before calling kvm_vcpu_validate_gpa_range. Add an explicit alignment check, for example: if (shmem & 63) return SBI_ERR_INVALID_ADDRESS;.

Suggested change
	/* The spec requires the shmem to be 64-byte aligned. */
	if (shmem & (SZ_64 - 1))
		return SBI_ERR_INVALID_ADDRESS;


if (kvm_vcpu_validate_gpa_range(vcpu, shmem, 64, true))
return SBI_ERR_INVALID_ADDRESS;

ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));