// SPDX-License-Identifier: GPL-1.0-or-later
/*
 * AMD Processor P-state Frequency Driver Unit Test
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Meng Li <[email protected]>
 *
 * The AMD P-State Unit Test is a test module for the amd-pstate driver.
 * 1) It helps users verify that their processor supports the driver
 * (SBIOS/firmware and hardware). 2) It gives the kernel a basic
 * functional test to catch regressions across updates. 3) More
 * functional and performance tests can be added later and compared
 * against these results, which benefits power and performance scaling
 * optimization.
 *
 * This module implements a basic framework; additional test cases are
 * planned to improve the depth and coverage of the testing.
 *
 * See the "Unit Tests for amd-pstate" section in
 * Documentation/admin-guide/pm/amd-pstate.rst for more details.
 */

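/*
 * Typical usage (assuming the module is built as amd-pstate-ut.ko):
 *   # modprobe amd-pstate-ut
 *   # dmesg | grep amd_pstate_ut
 * Each test case logs "success" or "fail" to the kernel log.
 */
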
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
#include <linux/cpufreq.h>
#include <linux/amd-pstate.h>

#include <acpi/cppc_acpi.h>
/*
 * Abbreviations:
 * amd_pstate_ut: used as a short form of "AMD P-State unit test".
 * It helps to keep variable names shorter and simpler.
 */
enum amd_pstate_ut_result {
	AMD_PSTATE_UT_RESULT_PASS,
	AMD_PSTATE_UT_RESULT_FAIL,
};

struct amd_pstate_ut_struct {
	const char *name;
	void (*func)(u32 index);
	enum amd_pstate_ut_result result;
};

/*
 * Test cases for the amd-pstate driver, run by this kernel module.
 */
static void amd_pstate_ut_acpi_cpc_valid(u32 index);
static void amd_pstate_ut_check_enabled(u32 index);
static void amd_pstate_ut_check_perf(u32 index);
static void amd_pstate_ut_check_freq(u32 index);

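/* Table of test cases; each case stores its result at its own index. */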
static struct amd_pstate_ut_struct amd_pstate_ut_cases[] = {
	{"amd_pstate_ut_acpi_cpc_valid", amd_pstate_ut_acpi_cpc_valid },
	{"amd_pstate_ut_check_enabled", amd_pstate_ut_check_enabled },
	{"amd_pstate_ut_check_perf", amd_pstate_ut_check_perf },
	{"amd_pstate_ut_check_freq", amd_pstate_ut_check_freq }
};

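/*
 * Determine whether the amd-pstate driver is using the shared-memory
 * (ACPI CPPC) interface: on CPUs without the CPPC MSRs
 * (X86_FEATURE_CPPC), read the driver's "shared_mem" module parameter
 * from sysfs, which reads back "Y" when shared-memory support is enabled.
 */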
static bool get_shared_mem(void)
{
	bool result = false;
	char path[] = "/sys/module/amd_pstate/parameters/shared_mem";
	char buf[5] = {0};
	struct file *filp = NULL;
	loff_t pos = 0;
	ssize_t ret;

	if (!boot_cpu_has(X86_FEATURE_CPPC)) {
		filp = filp_open(path, O_RDONLY, 0);
		if (IS_ERR(filp))
			pr_err("%s unable to open %s file!\n", __func__, path);
		else {
			ret = kernel_read(filp, buf, sizeof(buf), &pos);
			if (ret < 0)
				pr_err("%s read %s file fail ret=%ld!\n",
					__func__, path, (long)ret);
			filp_close(filp, NULL);
		}

		if (*buf == 'Y')
			result = true;
	}

	return result;
}

/*
 * Check that the _CPC object is present in the SBIOS.
 */
static void amd_pstate_ut_acpi_cpc_valid(u32 index)
{
	if (acpi_cpc_valid())
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
	else {
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
		pr_err("%s the _CPC object is not present in SBIOS!\n", __func__);
	}
}

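/*
 * Verify that CPPC has been enabled by reading the MSR_AMD_CPPC_ENABLE
 * register; a zero value means the driver did not enable the P-state
 * interface.
 */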
static void amd_pstate_ut_pstate_enable(u32 index)
{
	int ret = 0;
	u64 cppc_enable = 0;

	ret = rdmsrl_safe(MSR_AMD_CPPC_ENABLE, &cppc_enable);
	if (ret) {
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
		pr_err("%s rdmsrl_safe MSR_AMD_CPPC_ENABLE ret=%d error!\n", __func__, ret);
		return;
	}
	if (cppc_enable)
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
	else {
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
		pr_err("%s amd pstate must be enabled!\n", __func__);
	}
}

/*
 * Check if the amd-pstate driver is enabled.
 * In shared-memory mode there is no CPPC enable MSR to check, so the
 * case passes; otherwise verify MSR_AMD_CPPC_ENABLE.
 */
static void amd_pstate_ut_check_enabled(u32 index)
{
	if (get_shared_mem())
		amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
	else
		amd_pstate_ut_pstate_enable(index);
}

/*
 * Check if performance values are reasonable.
 * highest_perf >= nominal_perf > lowest_nonlinear_perf > lowest_perf > 0
 */
static void amd_pstate_ut_check_perf(u32 index)
{
	int cpu = 0, ret = 0;
	u32 highest_perf = 0, nominal_perf = 0, lowest_nonlinear_perf = 0, lowest_perf = 0;
	u64 cap1 = 0;
	struct cppc_perf_caps cppc_perf;
	struct cpufreq_policy *policy = NULL;
	struct amd_cpudata *cpudata = NULL;

	highest_perf = amd_get_highest_perf();

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			break;
		cpudata = policy->driver_data;

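		/*
		 * Read the perf capabilities reported by firmware/hardware:
		 * through the ACPI CPPC library in shared-memory mode, or
		 * from the CPPC capability MSR otherwise.
		 */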
		if (get_shared_mem()) {
			ret = cppc_get_perf_caps(cpu, &cppc_perf);
			if (ret) {
				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
				pr_err("%s cppc_get_perf_caps ret=%d error!\n", __func__, ret);
				return;
			}

			nominal_perf = cppc_perf.nominal_perf;
			lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;
			lowest_perf = cppc_perf.lowest_perf;
		} else {
			ret = rdmsrl_safe_on_cpu(cpu, MSR_AMD_CPPC_CAP1, &cap1);
			if (ret) {
				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
				pr_err("%s read CPPC_CAP1 ret=%d error!\n", __func__, ret);
				return;
			}

			nominal_perf = AMD_CPPC_NOMINAL_PERF(cap1);
			lowest_nonlinear_perf = AMD_CPPC_LOWNONLIN_PERF(cap1);
			lowest_perf = AMD_CPPC_LOWEST_PERF(cap1);
		}

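		/* The driver's cached per-CPU values should match the values just read. */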
		if ((highest_perf != READ_ONCE(cpudata->highest_perf)) ||
			(nominal_perf != READ_ONCE(cpudata->nominal_perf)) ||
			(lowest_nonlinear_perf != READ_ONCE(cpudata->lowest_nonlinear_perf)) ||
			(lowest_perf != READ_ONCE(cpudata->lowest_perf))) {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d highest=%d %d nominal=%d %d lowest_nonlinear=%d %d lowest=%d %d, they should be equal!\n",
				__func__, cpu, highest_perf, cpudata->highest_perf,
				nominal_perf, cpudata->nominal_perf,
				lowest_nonlinear_perf, cpudata->lowest_nonlinear_perf,
				lowest_perf, cpudata->lowest_perf);
			return;
		}

		if (!((highest_perf >= nominal_perf) &&
			(nominal_perf > lowest_nonlinear_perf) &&
			(lowest_nonlinear_perf > lowest_perf) &&
			(lowest_perf > 0))) {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d highest=%d >= nominal=%d > lowest_nonlinear=%d > lowest=%d > 0, the formula is incorrect!\n",
				__func__, cpu, highest_perf, nominal_perf,
				lowest_nonlinear_perf, lowest_perf);
			return;
		}
	}

	amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
}

/*
 * Check if frequency values are reasonable.
 * max_freq >= nominal_freq > lowest_nonlinear_freq > min_freq > 0
 * Also check max_freq when boost mode is supported.
 */
static void amd_pstate_ut_check_freq(u32 index)
{
	int cpu = 0;
	struct cpufreq_policy *policy = NULL;
	struct amd_cpudata *cpudata = NULL;

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			break;
		cpudata = policy->driver_data;

		if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
			(cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
			(cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
			(cpudata->min_freq > 0))) {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
				__func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
				cpudata->lowest_nonlinear_freq, cpudata->min_freq);
			return;
		}

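		/* The driver is expected to initialize policy->min to its min_freq. */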
		if (cpudata->min_freq != policy->min) {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d cpudata_min_freq=%d policy_min=%d, they should be equal!\n",
				__func__, cpu, cpudata->min_freq, policy->min);
			return;
		}

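		/*
		 * With boost supported, policy->max should match either the
		 * boosted max frequency or the nominal frequency, depending
		 * on whether boost is currently enabled.
		 */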
		if (cpudata->boost_supported) {
			if ((policy->max == cpudata->max_freq) ||
				(policy->max == cpudata->nominal_freq))
				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
			else {
				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
				pr_err("%s cpu%d policy_max=%d should be equal to cpu_max=%d or cpu_nominal=%d!\n",
					__func__, cpu, policy->max, cpudata->max_freq,
					cpudata->nominal_freq);
				return;
			}
		} else {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d must support boost!\n", __func__, cpu);
			return;
		}
	}

	amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
}

static int __init amd_pstate_ut_init(void)
{
	u32 i = 0, arr_size = ARRAY_SIZE(amd_pstate_ut_cases);

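	/* Run every test case in order and log PASS/FAIL to the kernel log. */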
	for (i = 0; i < arr_size; i++) {
		amd_pstate_ut_cases[i].func(i);
		switch (amd_pstate_ut_cases[i].result) {
		case AMD_PSTATE_UT_RESULT_PASS:
			pr_info("%-4d %-20s\t success!\n", i + 1, amd_pstate_ut_cases[i].name);
			break;
		case AMD_PSTATE_UT_RESULT_FAIL:
		default:
			pr_info("%-4d %-20s\t fail!\n", i + 1, amd_pstate_ut_cases[i].name);
			break;
		}
	}

	return 0;
}

static void __exit amd_pstate_ut_exit(void)
{
}

module_init(amd_pstate_ut_init);
module_exit(amd_pstate_ut_exit);

MODULE_AUTHOR("Meng Li <[email protected]>");
MODULE_DESCRIPTION("AMD P-state driver Test module");
MODULE_LICENSE("GPL");