Skip to content

Commit 4d00409

Browse files
Peter Zijlstra and Ingo Molnar
Peter Zijlstra
authored and
Ingo Molnar
committed
lockdep: Fix lockdep recursion
Steve reported that lockdep_assert*irq*(), when nested inside lockdep itself, will trigger a false positive. One example is the stack-trace code: called from inside lockdep, it triggers tracing, which in turn calls RCU, which then uses lockdep_assert_irqs_disabled().

Fixes: a21ee60 ("lockdep: Change hardirq{s_enabled,_context} to per-cpu variables")
Reported-by: Steven Rostedt <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 2bb8945 commit 4d00409

File tree

2 files changed

+67
-45
lines changed

2 files changed

+67
-45
lines changed

include/linux/lockdep.h

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -534,6 +534,7 @@ do { \
534534

535535
DECLARE_PER_CPU(int, hardirqs_enabled);
536536
DECLARE_PER_CPU(int, hardirq_context);
537+
DECLARE_PER_CPU(unsigned int, lockdep_recursion);
537538

538539
/*
539540
* The below lockdep_assert_*() macros use raw_cpu_read() to access the above
@@ -543,33 +544,35 @@ DECLARE_PER_CPU(int, hardirq_context);
543544
* read the value from our previous CPU.
544545
*/
545546

547+
#define __lockdep_enabled (debug_locks && !raw_cpu_read(lockdep_recursion))
548+
546549
#define lockdep_assert_irqs_enabled() \
547550
do { \
548-
WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \
551+
WARN_ON_ONCE(__lockdep_enabled && !raw_cpu_read(hardirqs_enabled)); \
549552
} while (0)
550553

551554
#define lockdep_assert_irqs_disabled() \
552555
do { \
553-
WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \
556+
WARN_ON_ONCE(__lockdep_enabled && raw_cpu_read(hardirqs_enabled)); \
554557
} while (0)
555558

556559
#define lockdep_assert_in_irq() \
557560
do { \
558-
WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \
561+
WARN_ON_ONCE(__lockdep_enabled && !raw_cpu_read(hardirq_context)); \
559562
} while (0)
560563

561564
#define lockdep_assert_preemption_enabled() \
562565
do { \
563566
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
564-
debug_locks && \
567+
__lockdep_enabled && \
565568
(preempt_count() != 0 || \
566569
!raw_cpu_read(hardirqs_enabled))); \
567570
} while (0)
568571

569572
#define lockdep_assert_preemption_disabled() \
570573
do { \
571574
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
572-
debug_locks && \
575+
__lockdep_enabled && \
573576
(preempt_count() == 0 && \
574577
raw_cpu_read(hardirqs_enabled))); \
575578
} while (0)

kernel/locking/lockdep.c

Lines changed: 59 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,23 @@ module_param(lock_stat, int, 0644);
7676
#define lock_stat 0
7777
#endif
7878

79+
DEFINE_PER_CPU(unsigned int, lockdep_recursion);
80+
EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
81+
82+
static inline bool lockdep_enabled(void)
83+
{
84+
if (!debug_locks)
85+
return false;
86+
87+
if (raw_cpu_read(lockdep_recursion))
88+
return false;
89+
90+
if (current->lockdep_recursion)
91+
return false;
92+
93+
return true;
94+
}
95+
7996
/*
8097
* lockdep_lock: protects the lockdep graph, the hashes and the
8198
* class/list/hash allocators.
@@ -93,15 +110,15 @@ static inline void lockdep_lock(void)
93110

94111
arch_spin_lock(&__lock);
95112
__owner = current;
96-
current->lockdep_recursion++;
113+
__this_cpu_inc(lockdep_recursion);
97114
}
98115

99116
static inline void lockdep_unlock(void)
100117
{
101118
if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
102119
return;
103120

104-
current->lockdep_recursion--;
121+
__this_cpu_dec(lockdep_recursion);
105122
__owner = NULL;
106123
arch_spin_unlock(&__lock);
107124
}
@@ -393,10 +410,15 @@ void lockdep_init_task(struct task_struct *task)
393410
task->lockdep_recursion = 0;
394411
}
395412

413+
static __always_inline void lockdep_recursion_inc(void)
414+
{
415+
__this_cpu_inc(lockdep_recursion);
416+
}
417+
396418
static __always_inline void lockdep_recursion_finish(void)
397419
{
398-
if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
399-
current->lockdep_recursion = 0;
420+
if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
421+
__this_cpu_write(lockdep_recursion, 0);
400422
}
401423

402424
void lockdep_set_selftest_task(struct task_struct *task)
@@ -3659,7 +3681,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
36593681
if (unlikely(in_nmi()))
36603682
return;
36613683

3662-
if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
3684+
if (unlikely(__this_cpu_read(lockdep_recursion)))
36633685
return;
36643686

36653687
if (unlikely(lockdep_hardirqs_enabled())) {
@@ -3695,7 +3717,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
36953717

36963718
current->hardirq_chain_key = current->curr_chain_key;
36973719

3698-
current->lockdep_recursion++;
3720+
lockdep_recursion_inc();
36993721
__trace_hardirqs_on_caller();
37003722
lockdep_recursion_finish();
37013723
}
@@ -3728,7 +3750,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
37283750
goto skip_checks;
37293751
}
37303752

3731-
if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
3753+
if (unlikely(__this_cpu_read(lockdep_recursion)))
37323754
return;
37333755

37343756
if (lockdep_hardirqs_enabled()) {
@@ -3781,7 +3803,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
37813803
if (in_nmi()) {
37823804
if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
37833805
return;
3784-
} else if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
3806+
} else if (__this_cpu_read(lockdep_recursion))
37853807
return;
37863808

37873809
/*
@@ -3814,7 +3836,7 @@ void lockdep_softirqs_on(unsigned long ip)
38143836
{
38153837
struct irqtrace_events *trace = &current->irqtrace;
38163838

3817-
if (unlikely(!debug_locks || current->lockdep_recursion))
3839+
if (unlikely(!lockdep_enabled()))
38183840
return;
38193841

38203842
/*
@@ -3829,7 +3851,7 @@ void lockdep_softirqs_on(unsigned long ip)
38293851
return;
38303852
}
38313853

3832-
current->lockdep_recursion++;
3854+
lockdep_recursion_inc();
38333855
/*
38343856
* We'll do an OFF -> ON transition:
38353857
*/
@@ -3852,7 +3874,7 @@ void lockdep_softirqs_on(unsigned long ip)
38523874
*/
38533875
void lockdep_softirqs_off(unsigned long ip)
38543876
{
3855-
if (unlikely(!debug_locks || current->lockdep_recursion))
3877+
if (unlikely(!lockdep_enabled()))
38563878
return;
38573879

38583880
/*
@@ -4233,11 +4255,11 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
42334255
if (subclass) {
42344256
unsigned long flags;
42354257

4236-
if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
4258+
if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
42374259
return;
42384260

42394261
raw_local_irq_save(flags);
4240-
current->lockdep_recursion++;
4262+
lockdep_recursion_inc();
42414263
register_lock_class(lock, subclass, 1);
42424264
lockdep_recursion_finish();
42434265
raw_local_irq_restore(flags);
@@ -4920,11 +4942,11 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
49204942
{
49214943
unsigned long flags;
49224944

4923-
if (unlikely(current->lockdep_recursion))
4945+
if (unlikely(!lockdep_enabled()))
49244946
return;
49254947

49264948
raw_local_irq_save(flags);
4927-
current->lockdep_recursion++;
4949+
lockdep_recursion_inc();
49284950
check_flags(flags);
49294951
if (__lock_set_class(lock, name, key, subclass, ip))
49304952
check_chain_key(current);
@@ -4937,11 +4959,11 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
49374959
{
49384960
unsigned long flags;
49394961

4940-
if (unlikely(current->lockdep_recursion))
4962+
if (unlikely(!lockdep_enabled()))
49414963
return;
49424964

49434965
raw_local_irq_save(flags);
4944-
current->lockdep_recursion++;
4966+
lockdep_recursion_inc();
49454967
check_flags(flags);
49464968
if (__lock_downgrade(lock, ip))
49474969
check_chain_key(current);
@@ -4979,7 +5001,7 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock
49795001

49805002
static bool lockdep_nmi(void)
49815003
{
4982-
if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
5004+
if (raw_cpu_read(lockdep_recursion))
49835005
return false;
49845006

49855007
if (!in_nmi())
@@ -5000,7 +5022,10 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
50005022

50015023
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
50025024

5003-
if (unlikely(current->lockdep_recursion)) {
5025+
if (!debug_locks)
5026+
return;
5027+
5028+
if (unlikely(!lockdep_enabled())) {
50045029
/* XXX allow trylock from NMI ?!? */
50055030
if (lockdep_nmi() && !trylock) {
50065031
struct held_lock hlock;
@@ -5023,7 +5048,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
50235048
raw_local_irq_save(flags);
50245049
check_flags(flags);
50255050

5026-
current->lockdep_recursion++;
5051+
lockdep_recursion_inc();
50275052
__lock_acquire(lock, subclass, trylock, read, check,
50285053
irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
50295054
lockdep_recursion_finish();
@@ -5037,13 +5062,13 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
50375062

50385063
trace_lock_release(lock, ip);
50395064

5040-
if (unlikely(current->lockdep_recursion))
5065+
if (unlikely(!lockdep_enabled()))
50415066
return;
50425067

50435068
raw_local_irq_save(flags);
50445069
check_flags(flags);
50455070

5046-
current->lockdep_recursion++;
5071+
lockdep_recursion_inc();
50475072
if (__lock_release(lock, ip))
50485073
check_chain_key(current);
50495074
lockdep_recursion_finish();
@@ -5056,13 +5081,13 @@ noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
50565081
unsigned long flags;
50575082
int ret = 0;
50585083

5059-
if (unlikely(current->lockdep_recursion))
5084+
if (unlikely(!lockdep_enabled()))
50605085
return 1; /* avoid false negative lockdep_assert_held() */
50615086

50625087
raw_local_irq_save(flags);
50635088
check_flags(flags);
50645089

5065-
current->lockdep_recursion++;
5090+
lockdep_recursion_inc();
50665091
ret = __lock_is_held(lock, read);
50675092
lockdep_recursion_finish();
50685093
raw_local_irq_restore(flags);
@@ -5077,13 +5102,13 @@ struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
50775102
struct pin_cookie cookie = NIL_COOKIE;
50785103
unsigned long flags;
50795104

5080-
if (unlikely(current->lockdep_recursion))
5105+
if (unlikely(!lockdep_enabled()))
50815106
return cookie;
50825107

50835108
raw_local_irq_save(flags);
50845109
check_flags(flags);
50855110

5086-
current->lockdep_recursion++;
5111+
lockdep_recursion_inc();
50875112
cookie = __lock_pin_lock(lock);
50885113
lockdep_recursion_finish();
50895114
raw_local_irq_restore(flags);
@@ -5096,13 +5121,13 @@ void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
50965121
{
50975122
unsigned long flags;
50985123

5099-
if (unlikely(current->lockdep_recursion))
5124+
if (unlikely(!lockdep_enabled()))
51005125
return;
51015126

51025127
raw_local_irq_save(flags);
51035128
check_flags(flags);
51045129

5105-
current->lockdep_recursion++;
5130+
lockdep_recursion_inc();
51065131
__lock_repin_lock(lock, cookie);
51075132
lockdep_recursion_finish();
51085133
raw_local_irq_restore(flags);
@@ -5113,13 +5138,13 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
51135138
{
51145139
unsigned long flags;
51155140

5116-
if (unlikely(current->lockdep_recursion))
5141+
if (unlikely(!lockdep_enabled()))
51175142
return;
51185143

51195144
raw_local_irq_save(flags);
51205145
check_flags(flags);
51215146

5122-
current->lockdep_recursion++;
5147+
lockdep_recursion_inc();
51235148
__lock_unpin_lock(lock, cookie);
51245149
lockdep_recursion_finish();
51255150
raw_local_irq_restore(flags);
@@ -5249,15 +5274,12 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
52495274

52505275
trace_lock_acquired(lock, ip);
52515276

5252-
if (unlikely(!lock_stat || !debug_locks))
5253-
return;
5254-
5255-
if (unlikely(current->lockdep_recursion))
5277+
if (unlikely(!lock_stat || !lockdep_enabled()))
52565278
return;
52575279

52585280
raw_local_irq_save(flags);
52595281
check_flags(flags);
5260-
current->lockdep_recursion++;
5282+
lockdep_recursion_inc();
52615283
__lock_contended(lock, ip);
52625284
lockdep_recursion_finish();
52635285
raw_local_irq_restore(flags);
@@ -5270,15 +5292,12 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
52705292

52715293
trace_lock_contended(lock, ip);
52725294

5273-
if (unlikely(!lock_stat || !debug_locks))
5274-
return;
5275-
5276-
if (unlikely(current->lockdep_recursion))
5295+
if (unlikely(!lock_stat || !lockdep_enabled()))
52775296
return;
52785297

52795298
raw_local_irq_save(flags);
52805299
check_flags(flags);
5281-
current->lockdep_recursion++;
5300+
lockdep_recursion_inc();
52825301
__lock_acquired(lock, ip);
52835302
lockdep_recursion_finish();
52845303
raw_local_irq_restore(flags);

0 commit comments

Comments (0)