
Commit fc4b8fc

Kan Liang authored and Peter Zijlstra committed
perf/x86: Hybrid PMU support for intel_ctrl
The intel_ctrl is the counter mask of a PMU. The PMU counter information may differ among hybrid PMUs, so each hybrid PMU should use its own intel_ctrl to check and access its counters.

When handling a given hybrid PMU, apply the intel_ctrl from the corresponding hybrid PMU.

When checking hardware existence, apply the PMU and the number of counters from the corresponding hybrid PMU as well. Perf will check hardware existence for each hybrid PMU before registration. Expose check_hw_exists() for a later patch.

Signed-off-by: Kan Liang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Andi Kleen <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent d0946a8 commit fc4b8fc
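To make the change easier to follow, here is a minimal, self-contained user-space sketch of the per-PMU mask lookup the patch introduces. The struct name hybrid_ctx, the helper names, and the counter counts are invented for illustration; only the mask layout (general-purpose counters in the low bits, fixed counters starting at INTEL_PMC_IDX_FIXED) and the fixed_counter_disabled() test follow the kernel code in this commit.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED 32

/* Stand-in for struct x86_hybrid_pmu: each hybrid PMU carries its own mask. */
struct hybrid_ctx {
	const char *name;
	uint64_t intel_ctrl;	/* counter mask of this hybrid PMU */
};

/* Stand-in for the global x86_pmu.intel_ctrl used on non-hybrid parts. */
static uint64_t global_intel_ctrl;

/* GP counters occupy the low bits, fixed counters start at bit 32. */
static uint64_t make_intel_ctrl(int num_counters, int num_counters_fixed)
{
	return ((1ULL << num_counters) - 1) |
	       (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
}

/* Stand-in for hybrid(pmu, intel_ctrl): per-PMU value if given, else global. */
static uint64_t lookup_intel_ctrl(const struct hybrid_ctx *pmu)
{
	return pmu ? pmu->intel_ctrl : global_intel_ctrl;
}

/* Same test the patched fixed_counter_disabled() performs. */
static bool fixed_counter_disabled(int i, const struct hybrid_ctx *pmu)
{
	return !(lookup_intel_ctrl(pmu) >> (i + INTEL_PMC_IDX_FIXED));
}

int main(void)
{
	/* Illustrative counter counts only. */
	struct hybrid_ctx big   = { "big-core PMU",   make_intel_ctrl(8, 4) };
	struct hybrid_ctx small = { "small-core PMU", make_intel_ctrl(6, 3) };

	global_intel_ctrl = make_intel_ctrl(4, 3);

	printf("%s mask %#llx, fixed counter 3 disabled: %d\n", big.name,
	       (unsigned long long)lookup_intel_ctrl(&big),
	       fixed_counter_disabled(3, &big));
	printf("%s mask %#llx, fixed counter 3 disabled: %d\n", small.name,
	       (unsigned long long)lookup_intel_ctrl(&small),
	       fixed_counter_disabled(3, &small));
	return 0;
}

With per-PMU masks, the same fixed counter index can be valid on one hybrid PMU and disabled on another, which is exactly why the kernel helpers below now take a struct pmu pointer.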

3 files changed, 24 insertions(+), 14 deletions(-)

arch/x86/events/core.c (7 additions, 7 deletions)

@@ -231,7 +231,7 @@ static void release_pmc_hardware(void) {}
 
 #endif
 
-static bool check_hw_exists(void)
+bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed)
 {
 	u64 val, val_fail = -1, val_new= ~0;
 	int i, reg, reg_fail = -1, ret = 0;
@@ -242,7 +242,7 @@ static bool check_hw_exists(void)
 	 * Check to see if the BIOS enabled any of the counters, if so
 	 * complain and bail.
 	 */
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		reg = x86_pmu_config_addr(i);
 		ret = rdmsrl_safe(reg, &val);
 		if (ret)
@@ -256,13 +256,13 @@ static bool check_hw_exists(void)
 		}
 	}
 
-	if (x86_pmu.num_counters_fixed) {
+	if (num_counters_fixed) {
 		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
 		ret = rdmsrl_safe(reg, &val);
 		if (ret)
 			goto msr_fail;
-		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
-			if (fixed_counter_disabled(i))
+		for (i = 0; i < num_counters_fixed; i++) {
+			if (fixed_counter_disabled(i, pmu))
 				continue;
 			if (val & (0x03 << i*4)) {
 				bios_fail = 1;
@@ -1547,7 +1547,7 @@ void perf_event_print_debug(void)
 			cpu, idx, prev_left);
 	}
 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
-		if (fixed_counter_disabled(idx))
+		if (fixed_counter_disabled(idx, cpuc->pmu))
 			continue;
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
 
@@ -1992,7 +1992,7 @@ static int __init init_hw_perf_events(void)
 	pmu_check_apic();
 
 	/* sanity check that the hardware exists or is emulated */
-	if (!check_hw_exists())
+	if (!check_hw_exists(&pmu, x86_pmu.num_counters, x86_pmu.num_counters_fixed))
 		return 0;
 
 	pr_cont("%s PMU driver.\n", x86_pmu.name);

arch/x86/events/intel/core.c (9 additions, 5 deletions)

@@ -2153,10 +2153,11 @@ static void intel_pmu_disable_all(void)
 static void __intel_pmu_enable_all(int added, bool pmi)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 
 	intel_pmu_lbr_enable_all(pmi);
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
-	       x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
+	       intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
 		struct perf_event *event =
@@ -2709,6 +2710,7 @@ int intel_pmu_save_and_restart(struct perf_event *event)
 static void intel_pmu_reset(void)
 {
 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	unsigned long flags;
 	int idx;
 
@@ -2724,7 +2726,7 @@ static void intel_pmu_reset(void)
 		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
 	}
 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
-		if (fixed_counter_disabled(idx))
+		if (fixed_counter_disabled(idx, cpuc->pmu))
 			continue;
 		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
 	}
@@ -2753,6 +2755,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int bit;
 	int handled = 0;
+	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 
 	inc_irq_stat(apic_perf_irqs);
 
@@ -2798,7 +2801,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 
 		handled++;
 		x86_pmu.drain_pebs(regs, &data);
-		status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+		status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
 
 		/*
 		 * PMI throttle may be triggered, which stops the PEBS event.
@@ -3804,10 +3807,11 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
+	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 
 	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
-	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
-	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
+	arr[0].host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
+	arr[0].guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask;
 	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
 		arr[0].guest &= ~cpuc->pebs_enabled;
 	else

arch/x86/events/perf_event.h (8 additions, 2 deletions)

@@ -634,6 +634,7 @@ enum {
 struct x86_hybrid_pmu {
 	struct pmu			pmu;
 	union perf_capabilities		intel_cap;
+	u64				intel_ctrl;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
@@ -998,6 +999,9 @@ static inline int x86_pmu_rdpmc_index(int index)
 	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
 }
 
+bool check_hw_exists(struct pmu *pmu, int num_counters,
+		     int num_counters_fixed);
+
 int x86_add_exclusive(unsigned int what);
 
 void x86_del_exclusive(unsigned int what);
@@ -1102,9 +1106,11 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
			     char *page);
 
-static inline bool fixed_counter_disabled(int i)
+static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
 {
-	return !(x86_pmu.intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
+	u64 intel_ctrl = hybrid(pmu, intel_ctrl);
+
+	return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
 }
 
 #ifdef CONFIG_CPU_SUP_AMD
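The commit only exposes check_hw_exists(); the per-hybrid-PMU probing it enables arrives in a later patch. The sketch below is a hypothetical illustration of such a caller, not code from this series as posted: the hybrid_pmu array, num_hybrid_pmus, and the per-PMU num_counters/num_counters_fixed fields are assumptions about the follow-up patches.

/*
 * Hypothetical follow-up (not part of this commit): walk every hybrid
 * PMU and verify that its counters exist before registering it.
 */
static __init bool all_hybrid_pmus_exist(void)
{
	struct x86_hybrid_pmu *h;
	int i;

	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {	/* assumed field */
		h = &x86_pmu.hybrid_pmu[i];		/* assumed array */
		if (!check_hw_exists(&h->pmu, h->num_counters,
				     h->num_counters_fixed))
			return false;
	}
	return true;
}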
