@@ -185,16 +185,29 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
+static inline int get_possible_num_counters(void)
+{
+	int i, num_counters = x86_pmu.num_counters;
+
+	if (!is_hybrid())
+		return num_counters;
+
+	for (i = 0; i < x86_pmu.num_hybrid_pmus; i++)
+		num_counters = max_t(int, num_counters, x86_pmu.hybrid_pmu[i].num_counters);
+
+	return num_counters;
+}
+
 static bool reserve_pmc_hardware(void)
 {
-	int i;
+	int i, num_counters = get_possible_num_counters();
 
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
 			goto perfctr_fail;
 	}
 
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
 			goto eventsel_fail;
 	}
@@ -205,7 +218,7 @@ static bool reserve_pmc_hardware(void)
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu_config_addr(i));
 
-	i = x86_pmu.num_counters;
+	i = num_counters;
 
 perfctr_fail:
 	for (i--; i >= 0; i--)
@@ -216,9 +229,9 @@ static bool reserve_pmc_hardware(void)
 
 static void release_pmc_hardware(void)
 {
-	int i;
+	int i, num_counters = get_possible_num_counters();
 
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		release_perfctr_nmi(x86_pmu_event_addr(i));
 		release_evntsel_nmi(x86_pmu_config_addr(i));
 	}
@@ -946,6 +959,7 @@ EXPORT_SYMBOL_GPL(perf_assign_events);
 
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 {
+	int num_counters = hybrid(cpuc->pmu, num_counters);
 	struct event_constraint *c;
 	struct perf_event *e;
 	int n0, i, wmin, wmax, unsched = 0;
@@ -1021,7 +1035,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 
 	/* slow path */
 	if (i != n) {
-		int gpmax = x86_pmu.num_counters;
+		int gpmax = num_counters;
 
 		/*
 		 * Do not allow scheduling of more than half the available
@@ -1042,7 +1056,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 		 * the extra Merge events needed by large increment events.
 		 */
 		if (x86_pmu.flags & PMU_FL_PAIR) {
-			gpmax = x86_pmu.num_counters - cpuc->n_pair;
+			gpmax = num_counters - cpuc->n_pair;
 			WARN_ON(gpmax <= 0);
 		}
 
@@ -1129,10 +1143,12 @@ static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
  */
 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
 {
+	int num_counters = hybrid(cpuc->pmu, num_counters);
+	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
 	struct perf_event *event;
 	int n, max_count;
 
-	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
+	max_count = num_counters + num_counters_fixed;
 
 	/* current number of events already accepted */
 	n = cpuc->n_events;
@@ -1499,18 +1515,18 @@ void perf_event_print_debug(void)
 {
 	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
 	u64 pebs, debugctl;
-	struct cpu_hw_events *cpuc;
+	int cpu = smp_processor_id();
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	int num_counters = hybrid(cpuc->pmu, num_counters);
+	int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
 	unsigned long flags;
-	int cpu, idx;
+	int idx;
 
-	if (!x86_pmu.num_counters)
+	if (!num_counters)
 		return;
 
 	local_irq_save(flags);
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_events, cpu);
-
 	if (x86_pmu.version >= 2) {
 		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
 		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
@@ -1533,7 +1549,7 @@ void perf_event_print_debug(void)
 	}
 	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for (idx = 0; idx < num_counters; idx++) {
 		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
 		rdmsrl(x86_pmu_event_addr(idx), pmc_count);
 
@@ -1546,7 +1562,7 @@ void perf_event_print_debug(void)
 		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
 			cpu, idx, prev_left);
 	}
-	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+	for (idx = 0; idx < num_counters_fixed; idx++) {
 		if (fixed_counter_disabled(idx, cpuc->pmu))
 			continue;
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
@@ -2781,6 +2797,11 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
 void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
 {
 	cap->version = x86_pmu.version;
+	/*
+	 * KVM doesn't support the hybrid PMU yet.
+	 * Return the common value in global x86_pmu,
+	 * which is available for all cores.
+	 */
 	cap->num_counters_gp = x86_pmu.num_counters;
 	cap->num_counters_fixed = x86_pmu.num_counters_fixed;
 	cap->bit_width_gp = x86_pmu.cntval_bits;
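For readers without the surrounding file, here is a minimal, self-contained sketch of the idea behind the new get_possible_num_counters() helper: on a hybrid part each PMU type (e.g. big and small cores) carries its own number of general-purpose counters, so the reserve/release loops must cover the largest of those values rather than the single global x86_pmu.num_counters. The stub structures, field values, and function names below are hypothetical stand-ins for illustration only, not the kernel's actual types.

/* Standalone sketch, assuming hypothetical stub types; builds with any C compiler. */
#include <stdio.h>

struct hybrid_pmu_stub {                /* stand-in for a per-core-type PMU descriptor */
	int num_counters;
	int num_counters_fixed;
};

struct x86_pmu_stub {                   /* stand-in for the global PMU descriptor */
	int num_counters;
	int num_counters_fixed;
	int num_hybrid_pmus;
	struct hybrid_pmu_stub hybrid_pmu[2];
};

static struct x86_pmu_stub pmu = {
	.num_counters       = 4,        /* common baseline value */
	.num_counters_fixed = 3,
	.num_hybrid_pmus    = 2,
	.hybrid_pmu = {
		{ .num_counters = 8, .num_counters_fixed = 4 }, /* e.g. big core  */
		{ .num_counters = 6, .num_counters_fixed = 3 }, /* e.g. small core */
	},
};

/* Mirrors the spirit of get_possible_num_counters(): take the maximum
 * counter count across all hybrid PMUs so that counter reservation
 * covers the widest case. */
static int possible_num_counters(void)
{
	int i, n = pmu.num_counters;

	for (i = 0; i < pmu.num_hybrid_pmus; i++)
		if (pmu.hybrid_pmu[i].num_counters > n)
			n = pmu.hybrid_pmu[i].num_counters;

	return n;
}

int main(void)
{
	printf("reserve up to %d general-purpose counters\n",
	       possible_num_counters());
	return 0;
}

Run on the sample values above, this prints 8, i.e. the reservation loops size themselves for the PMU type with the most counters, which is exactly why reserve_pmc_hardware() and release_pmc_hardware() switch from x86_pmu.num_counters to the helper in the diff.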