
Commit c68c0fa

ftrace: Have ftrace_ops_get_func() handle RCU and PER_CPU flags too
Jiri Olsa noted that the change to replace the control_ops did not update the trampoline for when running perf on a single CPU and with CONFIG_PREEMPT disabled (where dynamic ops, like perf, can use trampolines directly). The result was that the perf function could be called when RCU is not watching, and that ftrace_local_disable() was not handled.

Modify ftrace_ops_get_func() to also check the RCU and PER_CPU ops flags and use the recursive function if they are set. The recursive function is modified to check those flags and execute the appropriate checks if they are set.

Link: http://lkml.kernel.org/r/[email protected]

Reported-by: Jiri Olsa <[email protected]>
Patch-fixed-up-by: Jiri Olsa <[email protected]>
Tested-by: Jiri Olsa <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
1 parent ba27f2b commit c68c0fa

File tree

1 file changed: +18 -12 lines changed

kernel/trace/ftrace.c

Lines changed: 18 additions & 12 deletions
@@ -115,9 +115,6 @@ static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 
-static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
-                                   struct ftrace_ops *op, struct pt_regs *regs);
-
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs);
@@ -5231,20 +5228,29 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 
 /*
  * If there's only one function registered but it does not support
- * recursion, this function will be called by the mcount trampoline.
- * This function will handle recursion protection.
+ * recursion, needs RCU protection and/or requires per cpu handling, then
+ * this function will be called by the mcount trampoline.
  */
-static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
+static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
                                    struct ftrace_ops *op, struct pt_regs *regs)
 {
        int bit;
 
+       if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
+               return;
+
        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
        if (bit < 0)
                return;
 
-       op->func(ip, parent_ip, op, regs);
+       preempt_disable_notrace();
 
+       if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
+           !ftrace_function_local_disabled(op)) {
+               op->func(ip, parent_ip, op, regs);
+       }
+
+       preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
 
@@ -5262,12 +5268,12 @@ static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
 {
        /*
-        * If the func handles its own recursion, call it directly.
-        * Otherwise call the recursion protected function that
-        * will call the ftrace ops function.
+        * If the function does not handle recursion, needs to be RCU safe,
+        * or does per cpu logic, then we need to call the assist handler.
         */
-       if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
-               return ftrace_ops_recurs_func;
+       if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
+           ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
+               return ftrace_ops_assist_func;
 
        return ops->func;
 }
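
For context, a minimal sketch (not part of this commit) of the kind of consumer that relies on this path: an ftrace_ops registered without FTRACE_OPS_FL_RECURSION_SAFE is, when ftrace can call it directly, handed ftrace_ops_assist_func() by ftrace_ops_get_func(), so the callback gets recursion protection (and, if the RCU/PER_CPU flags were set, the checks added by this patch) without doing anything itself. The module and function names below are hypothetical and the snippet only assumes the ftrace_ops API of this kernel series (register_ftrace_function()/unregister_ftrace_function()).

#include <linux/module.h>
#include <linux/ftrace.h>

/*
 * Hypothetical callback, invoked for every traced function. Because the
 * ops below does not set FTRACE_OPS_FL_RECURSION_SAFE, the trampoline
 * reaches it through ftrace_ops_assist_func(), which supplies the
 * recursion protection on its behalf.
 */
static void example_trace_func(unsigned long ip, unsigned long parent_ip,
                               struct ftrace_ops *op, struct pt_regs *regs)
{
        /* tracing work goes here */
}

static struct ftrace_ops example_ops = {
        .func   = example_trace_func,
        .flags  = 0,    /* no RECURSION_SAFE: routed via the assist function */
};

static int __init example_init(void)
{
        return register_ftrace_function(&example_ops);
}

static void __exit example_exit(void)
{
        unregister_ftrace_function(&example_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");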
