@@ -62,8 +62,6 @@
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
-#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define INIT_OPS_HASH(opsname)	\
 	.func_hash = &opsname.local_hash,	\
@@ -113,11 +111,9 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
-static struct ftrace_ops control_ops;
 
 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
 				   struct ftrace_ops *op, struct pt_regs *regs);
@@ -203,24 +199,27 @@ void clear_ftrace_function(void)
 	ftrace_trace_function = ftrace_stub;
 }
 
-static void control_ops_disable_all(struct ftrace_ops *ops)
+static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu)
 		*per_cpu_ptr(ops->disabled, cpu) = 1;
 }
 
-static int control_ops_alloc(struct ftrace_ops *ops)
+static int per_cpu_ops_alloc(struct ftrace_ops *ops)
 {
 	int __percpu *disabled;
 
+	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
+		return -EINVAL;
+
 	disabled = alloc_percpu(int);
 	if (!disabled)
 		return -ENOMEM;
 
 	ops->disabled = disabled;
-	control_ops_disable_all(ops);
+	per_cpu_ops_disable_all(ops);
 	return 0;
 }
 
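
For reference, the per-CPU "disabled" counter allocated by per_cpu_ops_alloc() is the one the ftrace_function_local_*() helpers operate on. A rough sketch of that contract, with hypothetical example_* names standing in for the real inline helpers in include/linux/ftrace.h:

#include <linux/ftrace.h>
#include <linux/percpu.h>

/* Sketch only: approximates how the per-CPU counter set up above is used.
 * A non-zero count means "do not call this ops' func on this CPU". */
static inline void example_local_disable(struct ftrace_ops *ops)
{
	(*this_cpu_ptr(ops->disabled))++;
}

static inline void example_local_enable(struct ftrace_ops *ops)
{
	(*this_cpu_ptr(ops->disabled))--;
}

static inline int example_local_disabled(struct ftrace_ops *ops)
{
	return *this_cpu_ptr(ops->disabled);	/* checked by the list func */
}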
@@ -256,10 +255,11 @@ static inline void update_function_graph_func(void) { }
 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 {
 	/*
-	 * If this is a dynamic ops or we force list func,
+	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
 	 * then it needs to call the list anyway.
 	 */
-	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
+	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
+			  FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
 		return ftrace_ops_list_func;
 
 	return ftrace_ops_get_func(ops);
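
In other words, any DYNAMIC, PER_CPU, or RCU ops is now forced through ftrace_ops_list_func() so that the per-call checks added later in this patch can run. A condensed restatement of the predicate, as an illustrative helper only:

#include <linux/ftrace.h>

/* Sketch: mirrors the test in ftrace_ops_get_list_func() above. */
static bool example_needs_list_func(struct ftrace_ops *ops)
{
	return (ops->flags & (FTRACE_OPS_FL_DYNAMIC |
			      FTRACE_OPS_FL_PER_CPU |
			      FTRACE_OPS_FL_RCU)) ||
	       FTRACE_FORCE_LIST_FUNC;
}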
@@ -383,26 +383,6 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 	return 0;
 }
 
-static void add_ftrace_list_ops(struct ftrace_ops **list,
-				struct ftrace_ops *main_ops,
-				struct ftrace_ops *ops)
-{
-	int first = *list == &ftrace_list_end;
-	add_ftrace_ops(list, ops);
-	if (first)
-		add_ftrace_ops(&ftrace_ops_list, main_ops);
-}
-
-static int remove_ftrace_list_ops(struct ftrace_ops **list,
-				  struct ftrace_ops *main_ops,
-				  struct ftrace_ops *ops)
-{
-	int ret = remove_ftrace_ops(list, ops);
-	if (!ret && *list == &ftrace_list_end)
-		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
-	return ret;
-}
-
 static void ftrace_update_trampoline(struct ftrace_ops *ops);
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
@@ -430,14 +410,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (!core_kernel_data((unsigned long)ops))
 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
-	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
-		if (control_ops_alloc(ops))
+	if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
+		if (per_cpu_ops_alloc(ops))
 			return -ENOMEM;
-		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
-		/* The control_ops needs the trampoline update */
-		ops = &control_ops;
-	} else
-		add_ftrace_ops(&ftrace_ops_list, ops);
+	}
+
+	add_ftrace_ops(&ftrace_ops_list, ops);
 
 	/* Always save the function, and reset at unregistering */
 	ops->saved_func = ops->func;
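
With the control list gone, a PER_CPU ops is registered the same way as any other ops; only the per-CPU allocation is extra. A minimal, hypothetical usage sketch (the callback and ops names are invented for illustration):

#include <linux/ftrace.h>

/* Hypothetical callback: invoked for every traced function that passes
 * the RCU/PER_CPU/filter checks in __ftrace_ops_list_func(). */
static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs)
{
	/* tracing work goes here */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_callback,
	.flags	= FTRACE_OPS_FL_PER_CPU,
};

static int my_tracer_init(void)
{
	/* __register_ftrace_function() sees FTRACE_OPS_FL_PER_CPU, calls
	 * per_cpu_ops_alloc() and then puts my_ops on ftrace_ops_list. */
	return register_ftrace_function(&my_ops);
}

static void my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}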
@@ -460,11 +438,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
 		return -EBUSY;
 
-	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
-		ret = remove_ftrace_list_ops(&ftrace_control_list,
-					     &control_ops, ops);
-	} else
-		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
+	ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
 	if (ret < 0)
 		return ret;
@@ -2630,7 +2604,7 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
 {
 }
 
-static void control_ops_free(struct ftrace_ops *ops)
+static void per_cpu_ops_free(struct ftrace_ops *ops)
 {
 	free_percpu(ops->disabled);
 }
@@ -2731,13 +2705,13 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
 	if (!command || !ftrace_enabled) {
 		/*
-		 * If these are control ops, they still need their
+		 * If these are per_cpu ops, they still need their
 		 * per_cpu field freed. Since, function tracing is
 		 * not currently active, we can just free them
 		 * without synchronizing all CPUs.
 		 */
-		if (ops->flags & FTRACE_OPS_FL_CONTROL)
-			control_ops_free(ops);
+		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
+			per_cpu_ops_free(ops);
 		return 0;
 	}
 
@@ -2778,7 +2752,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
-	 * The same goes for freeing the per_cpu data of the control
+	 * The same goes for freeing the per_cpu data of the per_cpu
 	 * ops.
 	 *
 	 * Again, normal synchronize_sched() is not good enough.
@@ -2789,13 +2763,13 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	 * infrastructure to do the synchronization, thus we must do it
 	 * ourselves.
 	 */
-	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
 		schedule_on_each_cpu(ftrace_sync);
 
 		arch_ftrace_trampoline_free(ops);
 
-		if (ops->flags & FTRACE_OPS_FL_CONTROL)
-			control_ops_free(ops);
+		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
+			per_cpu_ops_free(ops);
 	}
 
 	return 0;
@@ -5185,44 +5159,6 @@ void ftrace_reset_array_ops(struct trace_array *tr)
 	tr->ops->func = ftrace_stub;
 }
 
-static void
-ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
-			struct ftrace_ops *op, struct pt_regs *regs)
-{
-	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
-		return;
-
-	/*
-	 * Some of the ops may be dynamically allocated,
-	 * they must be freed after a synchronize_sched().
-	 */
-	preempt_disable_notrace();
-	trace_recursion_set(TRACE_CONTROL_BIT);
-
-	/*
-	 * Control funcs (perf) uses RCU. Only trace if
-	 * RCU is currently active.
-	 */
-	if (!rcu_is_watching())
-		goto out;
-
-	do_for_each_ftrace_op(op, ftrace_control_list) {
-		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
-		    !ftrace_function_local_disabled(op) &&
-		    ftrace_ops_test(op, ip, regs))
-			op->func(ip, parent_ip, op, regs);
-	} while_for_each_ftrace_op(op);
- out:
-	trace_recursion_clear(TRACE_CONTROL_BIT);
-	preempt_enable_notrace();
-}
-
-static struct ftrace_ops control_ops = {
-	.func	= ftrace_ops_control_func,
-	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-	INIT_OPS_HASH(control_ops)
-};
-
 static inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *ignored, struct pt_regs *regs)
@@ -5239,8 +5175,22 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	 * they must be freed after a synchronize_sched().
 	 */
 	preempt_disable_notrace();
+
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (ftrace_ops_test(op, ip, regs)) {
+		/*
+		 * Check the following for each ops before calling their func:
+		 *  if RCU flag is set, then rcu_is_watching() must be true
+		 *  if PER_CPU is set, then ftrace_function_local_disable()
+		 *                          must be false
+		 *  Otherwise test if the ip matches the ops filter
+		 *
+		 * If any of the above fails then the op->func() is not executed.
+		 */
+		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
+		    (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
+		     !ftrace_function_local_disabled(op)) &&
+		    ftrace_ops_test(op, ip, regs)) {
+
 			if (FTRACE_WARN_ON(!op->func)) {
 				pr_warn("op=%p %pS\n", op, op);
 				goto out;
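
This combined test replaces the deleted ftrace_ops_control_func(): RCU-flagged ops only run while rcu_is_watching(), PER_CPU ops are skipped on CPUs where they are locally disabled, and the ip must still match the ops filter. Restated as a stand-alone predicate for illustration (not a function in the kernel source):

#include <linux/ftrace.h>

/* Illustrative only: the conditions __ftrace_ops_list_func() now checks
 * before invoking op->func() for each ops on ftrace_ops_list. */
static bool example_ops_may_run(struct ftrace_ops *op, unsigned long ip,
				struct pt_regs *regs)
{
	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
		return false;	/* RCU ops trace only while RCU is watching */
	if ((op->flags & FTRACE_OPS_FL_PER_CPU) &&
	    ftrace_function_local_disabled(op))
		return false;	/* locally disabled on this CPU */
	return ftrace_ops_test(op, ip, regs);	/* ip must match the filters */
}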