Skip to content

Commit a21ee60

Browse files
author
Peter Zijlstra
committed
lockdep: Change hardirq{s_enabled,_context} to per-cpu variables
Currently all IRQ-tracking state is in task_struct; this means that task_struct needs to be defined before we use it. Especially for lockdep_assert_irq*() this can lead to header hell. Move the hardirq state into per-cpu variables to avoid the task_struct dependency. Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Ingo Molnar <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent a634291 commit a21ee60

File tree

6 files changed

+52
-43
lines changed

6 files changed

+52
-43
lines changed

include/linux/irqflags.h

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
#include <linux/typecheck.h>
1616
#include <asm/irqflags.h>
17+
#include <asm/percpu.h>
1718

1819
/* Currently lockdep_softirqs_on/off is used only by lockdep */
1920
#ifdef CONFIG_PROVE_LOCKING
@@ -31,26 +32,30 @@
3132
#endif
3233

3334
#ifdef CONFIG_TRACE_IRQFLAGS
35+
36+
DECLARE_PER_CPU(int, hardirqs_enabled);
37+
DECLARE_PER_CPU(int, hardirq_context);
38+
3439
extern void trace_hardirqs_on_prepare(void);
3540
extern void trace_hardirqs_off_finish(void);
3641
extern void trace_hardirqs_on(void);
3742
extern void trace_hardirqs_off(void);
38-
# define lockdep_hardirq_context(p) ((p)->hardirq_context)
43+
# define lockdep_hardirq_context(p) (this_cpu_read(hardirq_context))
3944
# define lockdep_softirq_context(p) ((p)->softirq_context)
40-
# define lockdep_hardirqs_enabled(p) ((p)->hardirqs_enabled)
45+
# define lockdep_hardirqs_enabled(p) (this_cpu_read(hardirqs_enabled))
4146
# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
42-
# define lockdep_hardirq_enter() \
43-
do { \
44-
if (!current->hardirq_context++) \
45-
current->hardirq_threaded = 0; \
47+
# define lockdep_hardirq_enter() \
48+
do { \
49+
if (this_cpu_inc_return(hardirq_context) == 1) \
50+
current->hardirq_threaded = 0; \
4651
} while (0)
4752
# define lockdep_hardirq_threaded() \
4853
do { \
4954
current->hardirq_threaded = 1; \
5055
} while (0)
5156
# define lockdep_hardirq_exit() \
5257
do { \
53-
current->hardirq_context--; \
58+
this_cpu_dec(hardirq_context); \
5459
} while (0)
5560
# define lockdep_softirq_enter() \
5661
do { \

include/linux/lockdep.h

Lines changed: 18 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@
1111
#define __LINUX_LOCKDEP_H
1212

1313
#include <linux/lockdep_types.h>
14+
#include <asm/percpu.h>
1415

1516
struct task_struct;
1617

@@ -529,28 +530,29 @@ do { \
529530
lock_release(&(lock)->dep_map, _THIS_IP_); \
530531
} while (0)
531532

532-
#define lockdep_assert_irqs_enabled() do { \
533-
WARN_ONCE(debug_locks && !current->lockdep_recursion && \
534-
!current->hardirqs_enabled, \
535-
"IRQs not enabled as expected\n"); \
536-
} while (0)
533+
DECLARE_PER_CPU(int, hardirqs_enabled);
534+
DECLARE_PER_CPU(int, hardirq_context);
537535

538-
#define lockdep_assert_irqs_disabled() do { \
539-
WARN_ONCE(debug_locks && !current->lockdep_recursion && \
540-
current->hardirqs_enabled, \
541-
"IRQs not disabled as expected\n"); \
542-
} while (0)
536+
#define lockdep_assert_irqs_enabled() \
537+
do { \
538+
WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirqs_enabled)); \
539+
} while (0)
543540

544-
#define lockdep_assert_in_irq() do { \
545-
WARN_ONCE(debug_locks && !current->lockdep_recursion && \
546-
!current->hardirq_context, \
547-
"Not in hardirq as expected\n"); \
548-
} while (0)
541+
#define lockdep_assert_irqs_disabled() \
542+
do { \
543+
WARN_ON_ONCE(debug_locks && this_cpu_read(hardirqs_enabled)); \
544+
} while (0)
545+
546+
#define lockdep_assert_in_irq() \
547+
do { \
548+
WARN_ON_ONCE(debug_locks && !this_cpu_read(hardirq_context)); \
549+
} while (0)
549550

550551
#else
551552
# define might_lock(lock) do { } while (0)
552553
# define might_lock_read(lock) do { } while (0)
553554
# define might_lock_nested(lock, subclass) do { } while (0)
555+
554556
# define lockdep_assert_irqs_enabled() do { } while (0)
555557
# define lockdep_assert_irqs_disabled() do { } while (0)
556558
# define lockdep_assert_in_irq() do { } while (0)
@@ -560,7 +562,7 @@ do { \
560562

561563
# define lockdep_assert_RT_in_threaded_ctx() do { \
562564
WARN_ONCE(debug_locks && !current->lockdep_recursion && \
563-
current->hardirq_context && \
565+
lockdep_hardirq_context(current) && \
564566
!(current->hardirq_threaded || current->irq_config), \
565567
"Not in threaded context on PREEMPT_RT as expected\n"); \
566568
} while (0)

include/linux/sched.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -990,8 +990,6 @@ struct task_struct {
990990
unsigned long hardirq_disable_ip;
991991
unsigned int hardirq_enable_event;
992992
unsigned int hardirq_disable_event;
993-
int hardirqs_enabled;
994-
int hardirq_context;
995993
u64 hardirq_chain_key;
996994
unsigned long softirq_disable_ip;
997995
unsigned long softirq_enable_ip;

kernel/fork.c

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1954,8 +1954,8 @@ static __latent_entropy struct task_struct *copy_process(
19541954

19551955
rt_mutex_init_task(p);
19561956

1957+
lockdep_assert_irqs_enabled();
19571958
#ifdef CONFIG_PROVE_LOCKING
1958-
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
19591959
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
19601960
#endif
19611961
retval = -EAGAIN;
@@ -2036,7 +2036,6 @@ static __latent_entropy struct task_struct *copy_process(
20362036
#endif
20372037
#ifdef CONFIG_TRACE_IRQFLAGS
20382038
p->irq_events = 0;
2039-
p->hardirqs_enabled = 0;
20402039
p->hardirq_enable_ip = 0;
20412040
p->hardirq_enable_event = 0;
20422041
p->hardirq_disable_ip = _THIS_IP_;
@@ -2046,7 +2045,6 @@ static __latent_entropy struct task_struct *copy_process(
20462045
p->softirq_enable_event = 0;
20472046
p->softirq_disable_ip = 0;
20482047
p->softirq_disable_event = 0;
2049-
p->hardirq_context = 0;
20502048
p->softirq_context = 0;
20512049
#endif
20522050

kernel/locking/lockdep.c

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -2062,9 +2062,9 @@ print_bad_irq_dependency(struct task_struct *curr,
20622062
pr_warn("-----------------------------------------------------\n");
20632063
pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
20642064
curr->comm, task_pid_nr(curr),
2065-
curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
2065+
lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
20662066
curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
2067-
curr->hardirqs_enabled,
2067+
lockdep_hardirqs_enabled(curr),
20682068
curr->softirqs_enabled);
20692069
print_lock(next);
20702070

@@ -3658,7 +3658,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
36583658
if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
36593659
return;
36603660

3661-
if (unlikely(current->hardirqs_enabled)) {
3661+
if (unlikely(lockdep_hardirqs_enabled(current))) {
36623662
/*
36633663
* Neither irq nor preemption are disabled here
36643664
* so this is racy by nature but losing one hit
@@ -3686,7 +3686,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
36863686
* Can't allow enabling interrupts while in an interrupt handler,
36873687
* that's general bad form and such. Recursion, limited stack etc..
36883688
*/
3689-
if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
3689+
if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context(current)))
36903690
return;
36913691

36923692
current->hardirq_chain_key = current->curr_chain_key;
@@ -3724,7 +3724,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
37243724
if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
37253725
return;
37263726

3727-
if (curr->hardirqs_enabled) {
3727+
if (lockdep_hardirqs_enabled(curr)) {
37283728
/*
37293729
* Neither irq nor preemption are disabled here
37303730
* so this is racy by nature but losing one hit
@@ -3751,7 +3751,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
37513751

37523752
skip_checks:
37533753
/* we'll do an OFF -> ON transition: */
3754-
curr->hardirqs_enabled = 1;
3754+
this_cpu_write(hardirqs_enabled, 1);
37553755
curr->hardirq_enable_ip = ip;
37563756
curr->hardirq_enable_event = ++curr->irq_events;
37573757
debug_atomic_inc(hardirqs_on_events);
@@ -3783,11 +3783,11 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
37833783
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
37843784
return;
37853785

3786-
if (curr->hardirqs_enabled) {
3786+
if (lockdep_hardirqs_enabled(curr)) {
37873787
/*
37883788
* We have done an ON -> OFF transition:
37893789
*/
3790-
curr->hardirqs_enabled = 0;
3790+
this_cpu_write(hardirqs_enabled, 0);
37913791
curr->hardirq_disable_ip = ip;
37923792
curr->hardirq_disable_event = ++curr->irq_events;
37933793
debug_atomic_inc(hardirqs_off_events);
@@ -3832,7 +3832,7 @@ void lockdep_softirqs_on(unsigned long ip)
38323832
* usage bit for all held locks, if hardirqs are
38333833
* enabled too:
38343834
*/
3835-
if (curr->hardirqs_enabled)
3835+
if (lockdep_hardirqs_enabled(curr))
38363836
mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
38373837
lockdep_recursion_finish();
38383838
}
@@ -3881,7 +3881,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
38813881
*/
38823882
if (!hlock->trylock) {
38833883
if (hlock->read) {
3884-
if (curr->hardirq_context)
3884+
if (lockdep_hardirq_context(curr))
38853885
if (!mark_lock(curr, hlock,
38863886
LOCK_USED_IN_HARDIRQ_READ))
38873887
return 0;
@@ -3890,7 +3890,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
38903890
LOCK_USED_IN_SOFTIRQ_READ))
38913891
return 0;
38923892
} else {
3893-
if (curr->hardirq_context)
3893+
if (lockdep_hardirq_context(curr))
38943894
if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
38953895
return 0;
38963896
if (curr->softirq_context)
@@ -3928,7 +3928,7 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
39283928

39293929
static inline unsigned int task_irq_context(struct task_struct *task)
39303930
{
3931-
return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context +
3931+
return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context(task) +
39323932
LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
39333933
}
39343934

@@ -4021,7 +4021,7 @@ static inline short task_wait_context(struct task_struct *curr)
40214021
* Set appropriate wait type for the context; for IRQs we have to take
40224022
* into account force_irqthread as that is implied by PREEMPT_RT.
40234023
*/
4024-
if (curr->hardirq_context) {
4024+
if (lockdep_hardirq_context(curr)) {
40254025
/*
40264026
* Check if force_irqthreads will run us threaded.
40274027
*/
@@ -4864,11 +4864,11 @@ static void check_flags(unsigned long flags)
48644864
return;
48654865

48664866
if (irqs_disabled_flags(flags)) {
4867-
if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
4867+
if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled(current))) {
48684868
printk("possible reason: unannotated irqs-off.\n");
48694869
}
48704870
} else {
4871-
if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
4871+
if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled(current))) {
48724872
printk("possible reason: unannotated irqs-on.\n");
48734873
}
48744874
}

kernel/softirq.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,12 @@ static bool ksoftirqd_running(unsigned long pending)
107107
* where hardirqs are disabled legitimately:
108108
*/
109109
#ifdef CONFIG_TRACE_IRQFLAGS
110+
111+
DEFINE_PER_CPU(int, hardirqs_enabled);
112+
DEFINE_PER_CPU(int, hardirq_context);
113+
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
114+
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
115+
110116
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
111117
{
112118
unsigned long flags;

0 commit comments

Comments
 (0)