@@ -76,6 +76,23 @@ module_param(lock_stat, int, 0644);
 #define lock_stat 0
 #endif

+DEFINE_PER_CPU(unsigned int, lockdep_recursion);
+EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
+
+static inline bool lockdep_enabled(void)
+{
+	if (!debug_locks)
+		return false;
+
+	if (raw_cpu_read(lockdep_recursion))
+		return false;
+
+	if (current->lockdep_recursion)
+		return false;
+
+	return true;
+}
+
 /*
  * lockdep_lock: protects the lockdep graph, the hashes and the
  * class/list/hash allocators.
@@ -93,15 +110,15 @@ static inline void lockdep_lock(void)

 	arch_spin_lock(&__lock);
 	__owner = current;
-	current->lockdep_recursion++;
+	__this_cpu_inc(lockdep_recursion);
 }

 static inline void lockdep_unlock(void)
 {
 	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
 		return;

-	current->lockdep_recursion--;
+	__this_cpu_dec(lockdep_recursion);
 	__owner = NULL;
 	arch_spin_unlock(&__lock);
 }
@@ -393,10 +410,15 @@ void lockdep_init_task(struct task_struct *task)
 	task->lockdep_recursion = 0;
 }

+static __always_inline void lockdep_recursion_inc(void)
+{
+	__this_cpu_inc(lockdep_recursion);
+}
+
 static __always_inline void lockdep_recursion_finish(void)
 {
-	if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
-		current->lockdep_recursion = 0;
+	if (WARN_ON_ONCE(__this_cpu_dec_return(lockdep_recursion)))
+		__this_cpu_write(lockdep_recursion, 0);
 }

 void lockdep_set_selftest_task(struct task_struct *task)
@@ -3659,7 +3681,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
 	if (unlikely(in_nmi()))
 		return;

-	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
+	if (unlikely(__this_cpu_read(lockdep_recursion)))
 		return;

 	if (unlikely(lockdep_hardirqs_enabled())) {
@@ -3695,7 +3717,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)

 	current->hardirq_chain_key = current->curr_chain_key;

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__trace_hardirqs_on_caller();
 	lockdep_recursion_finish();
 }
@@ -3728,7 +3750,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
 		goto skip_checks;
 	}

-	if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
+	if (unlikely(__this_cpu_read(lockdep_recursion)))
 		return;

 	if (lockdep_hardirqs_enabled()) {
@@ -3781,7 +3803,7 @@ void noinstr lockdep_hardirqs_off(unsigned long ip)
 	if (in_nmi()) {
 		if (!IS_ENABLED(CONFIG_TRACE_IRQFLAGS_NMI))
 			return;
-	} else if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
+	} else if (__this_cpu_read(lockdep_recursion))
 		return;

 	/*
@@ -3814,7 +3836,7 @@ void lockdep_softirqs_on(unsigned long ip)
 {
 	struct irqtrace_events *trace = &current->irqtrace;

-	if (unlikely(!debug_locks || current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	/*
@@ -3829,7 +3851,7 @@ void lockdep_softirqs_on(unsigned long ip)
 		return;
 	}

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	/*
 	 * We'll do an OFF -> ON transition:
 	 */
@@ -3852,7 +3874,7 @@ void lockdep_softirqs_on(unsigned long ip)
  */
 void lockdep_softirqs_off(unsigned long ip)
 {
-	if (unlikely(!debug_locks || current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	/*
@@ -4233,11 +4255,11 @@ void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
 	if (subclass) {
 		unsigned long flags;

-		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+		if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
 			return;

 		raw_local_irq_save(flags);
-		current->lockdep_recursion++;
+		lockdep_recursion_inc();
 		register_lock_class(lock, subclass, 1);
 		lockdep_recursion_finish();
 		raw_local_irq_restore(flags);
@@ -4920,11 +4942,11 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
 {
 	unsigned long flags;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	check_flags(flags);
 	if (__lock_set_class(lock, name, key, subclass, ip))
 		check_chain_key(current);
@@ -4937,11 +4959,11 @@ void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	check_flags(flags);
 	if (__lock_downgrade(lock, ip))
 		check_chain_key(current);
@@ -4979,7 +5001,7 @@ static void verify_lock_unused(struct lockdep_map *lock, struct held_lock *hlock

 static bool lockdep_nmi(void)
 {
-	if (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)
+	if (raw_cpu_read(lockdep_recursion))
 		return false;

 	if (!in_nmi())
@@ -5000,7 +5022,10 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,

 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);

-	if (unlikely(current->lockdep_recursion)) {
+	if (!debug_locks)
+		return;
+
+	if (unlikely(!lockdep_enabled())) {
 		/* XXX allow trylock from NMI ?!? */
 		if (lockdep_nmi() && !trylock) {
 			struct held_lock hlock;
@@ -5023,7 +5048,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__lock_acquire(lock, subclass, trylock, read, check,
 		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
 	lockdep_recursion_finish();
@@ -5037,13 +5062,13 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)

 	trace_lock_release(lock, ip);

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	if (__lock_release(lock, ip))
 		check_chain_key(current);
 	lockdep_recursion_finish();
@@ -5056,13 +5081,13 @@ noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
 	unsigned long flags;
 	int ret = 0;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return 1; /* avoid false negative lockdep_assert_held() */

 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	ret = __lock_is_held(lock, read);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5077,13 +5102,13 @@ struct pin_cookie lock_pin_lock(struct lockdep_map *lock)
 	struct pin_cookie cookie = NIL_COOKIE;
 	unsigned long flags;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return cookie;

 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	cookie = __lock_pin_lock(lock);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5096,13 +5121,13 @@ void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	unsigned long flags;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__lock_repin_lock(lock, cookie);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5113,13 +5138,13 @@ void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
 {
 	unsigned long flags;

-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);

-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__lock_unpin_lock(lock, cookie);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5249,15 +5274,12 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)

 	trace_lock_acquired(lock, ip);

-	if (unlikely(!lock_stat || !debug_locks))
-		return;
-
-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lock_stat || !lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);
-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__lock_contended(lock, ip);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
@@ -5270,15 +5292,12 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)

 	trace_lock_contended(lock, ip);

-	if (unlikely(!lock_stat || !debug_locks))
-		return;
-
-	if (unlikely(current->lockdep_recursion))
+	if (unlikely(!lock_stat || !lockdep_enabled()))
 		return;

 	raw_local_irq_save(flags);
 	check_flags(flags);
-	current->lockdep_recursion++;
+	lockdep_recursion_inc();
 	__lock_acquired(lock, ip);
 	lockdep_recursion_finish();
 	raw_local_irq_restore(flags);
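
Taken together, the patch routes lockdep's internal recursion accounting through a per-CPU counter: lockdep_enabled() becomes the single predicate that checks debug_locks, the new per-CPU lockdep_recursion, and the remaining per-task count, while lockdep_recursion_inc()/lockdep_recursion_finish() bracket every excursion into the checker. Below is a minimal user-space sketch of this recursion-guard pattern, with _Thread_local standing in for the per-CPU variable (all names are illustrative stand-ins, not kernel API):

/* Build with: cc -std=c11 guard.c */
#include <stdbool.h>
#include <stdio.h>

static _Thread_local unsigned int recursion;	/* stand-in for the per-CPU lockdep_recursion */
static bool debug_enabled = true;		/* stand-in for debug_locks */

/* Mirrors lockdep_enabled(): refuse to track when disabled or re-entered. */
static bool tracking_enabled(void)
{
	return debug_enabled && !recursion;
}

static void track_event(const char *what)
{
	if (!tracking_enabled())
		return;				/* re-entry from our own hooks bails out here */

	recursion++;				/* cf. lockdep_recursion_inc() */
	printf("tracking: %s\n", what);		/* work that might itself re-enter track_event() */
	recursion--;				/* cf. lockdep_recursion_finish() */
}

int main(void)
{
	track_event("lock A acquired");
	return 0;
}

The move to per-CPU (rather than purely per-task) state matters because, as the hunks in lockdep_hardirqs_on()/off() and lockdep_nmi() suggest, the checker can be re-entered from interrupt or NMI context on the same CPU while still running in the same task, so the guard has to live with the CPU, not the task.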