1 file changed: 20 insertions(+), 1 deletion(-)

@@ -161,9 +161,24 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
161
161
return - EBUSY ;
162
162
}
163
163
164
+	/*
+	 * The lock may be taken in both NMI and non-NMI contexts.
+	 * There is a false lockdep warning (inconsistent lock state)
+	 * if lockdep is enabled. The potential deadlock happens when
+	 * the lock is contended from the same cpu: map_locked rejects
+	 * concurrent access to the same bucket from the same CPU.
+	 * When the lock is contended from a remote cpu, we would like
+	 * the remote cpu to spin and wait, instead of giving up
+	 * immediately, as this gives better throughput. So replacing
+	 * the current raw_spin_lock_irqsave() with trylock would
+	 * sacrifice this performance gain; the atomic map_locked is
+	 * still necessary. lockdep_off() is invoked temporarily to
+	 * suppress the false warning.
+	 */
177
+ lockdep_off ();
164
178
raw_spin_lock_irqsave (& b -> raw_lock , flags );
165
- * pflags = flags ;
179
+ lockdep_on () ;
166
180
181
+ * pflags = flags ;
167
182
return 0 ;
168
183
}
169
184
@@ -172,7 +187,11 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
172
187
unsigned long flags )
173
188
{
174
189
hash = hash & min_t (u32 , HASHTAB_MAP_LOCK_MASK , htab -> n_buckets - 1 );
190
+
191
+ lockdep_off ();
175
192
raw_spin_unlock_irqrestore (& b -> raw_lock , flags );
193
+ lockdep_on ();
194
+
176
195
__this_cpu_dec (* (htab -> map_locked [hash ]));
177
196
preempt_enable ();
178
197
}
0 commit comments