@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2014 Protonic Holland,
  *                    David Jander
- * Copyright (C) 2014-2017 Pengutronix,
+ * Copyright (C) 2014-2021 Pengutronix,
  *                         Marc Kleine-Budde <[email protected]>
  */
 
@@ -174,10 +174,8 @@ can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
 int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
                                          u64 pending)
 {
-        struct sk_buff_head skb_queue;
         unsigned int i;
-
-        __skb_queue_head_init(&skb_queue);
+        int received = 0;
 
         for (i = offload->mb_first;
              can_rx_offload_le(offload, i, offload->mb_last);
@@ -191,26 +189,12 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
                 if (IS_ERR_OR_NULL(skb))
                         continue;
 
-                __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
-        }
-
-        if (!skb_queue_empty(&skb_queue)) {
-                unsigned long flags;
-                u32 queue_len;
-
-                spin_lock_irqsave(&offload->skb_queue.lock, flags);
-                skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
-                spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
-                queue_len = skb_queue_len(&offload->skb_queue);
-                if (queue_len > offload->skb_queue_len_max / 8)
-                        netdev_dbg(offload->dev, "%s: queue_len=%d\n",
-                                   __func__, queue_len);
-
-                can_rx_offload_schedule(offload);
+                __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+                                     can_rx_offload_compare);
+                received++;
         }
 
-        return skb_queue_len(&skb_queue);
+        return received;
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
 
@@ -226,13 +210,10 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
                 if (!skb)
                         break;
 
-                skb_queue_tail(&offload->skb_queue, skb);
+                __skb_queue_tail(&offload->skb_irq_queue, skb);
                 received++;
         }
 
-        if (received)
-                can_rx_offload_schedule(offload);
-
         return received;
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
@@ -241,7 +222,6 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
                                 struct sk_buff *skb, u32 timestamp)
 {
         struct can_rx_offload_cb *cb;
-        unsigned long flags;
 
         if (skb_queue_len(&offload->skb_queue) >
             offload->skb_queue_len_max) {
@@ -252,11 +232,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
         cb = can_rx_offload_get_cb(skb);
         cb->timestamp = timestamp;
 
-        spin_lock_irqsave(&offload->skb_queue.lock, flags);
-        __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
-        spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
-        can_rx_offload_schedule(offload);
+        __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+                             can_rx_offload_compare);
 
         return 0;
 }
@@ -295,13 +272,33 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                 return -ENOBUFS;
         }
 
-        skb_queue_tail(&offload->skb_queue, skb);
-        can_rx_offload_schedule(offload);
+        __skb_queue_tail(&offload->skb_irq_queue, skb);
 
         return 0;
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
 
+void can_rx_offload_irq_finish(struct can_rx_offload *offload)
+{
+        unsigned long flags;
+        int queue_len;
+
+        if (skb_queue_empty_lockless(&offload->skb_irq_queue))
+                return;
+
+        spin_lock_irqsave(&offload->skb_queue.lock, flags);
+        skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
+        spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+        queue_len = skb_queue_len(&offload->skb_queue);
+        if (queue_len > offload->skb_queue_len_max / 8)
+                netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+                           __func__, queue_len);
+
+        can_rx_offload_schedule(offload);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);
+
 static int can_rx_offload_init_queue(struct net_device *dev,
                                      struct can_rx_offload *offload,
                                      unsigned int weight)
@@ -312,6 +309,7 @@ static int can_rx_offload_init_queue(struct net_device *dev,
         offload->skb_queue_len_max = 2 << fls(weight);
         offload->skb_queue_len_max *= 4;
         skb_queue_head_init(&offload->skb_queue);
+        __skb_queue_head_init(&offload->skb_irq_queue);
 
         netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
@@ -373,5 +371,6 @@ void can_rx_offload_del(struct can_rx_offload *offload)
 {
         netif_napi_del(&offload->napi);
         skb_queue_purge(&offload->skb_queue);
+        __skb_queue_purge(&offload->skb_irq_queue);
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_del);
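
Usage note: after this patch, the IRQ-side helpers (can_rx_offload_irq_offload_timestamp(), can_rx_offload_irq_offload_fifo(), can_rx_offload_queue_sorted(), can_rx_offload_queue_tail()) only append to the unlocked skb_irq_queue; a driver must call the new can_rx_offload_irq_finish() once at the end of its interrupt handler to splice that queue into the locked skb_queue and kick NAPI. A minimal sketch of the resulting calling pattern, assuming a hypothetical driver "foo" (struct foo_priv and foo_can_irq() are illustrative, not part of this patch):

/* Minimal sketch of the post-patch ISR calling pattern. Only the
 * rx-offload calls mirror this commit; the "foo" driver pieces are
 * hypothetical placeholders.
 */
#include <linux/can/rx-offload.h>
#include <linux/interrupt.h>

struct foo_priv {
        struct can_rx_offload offload;  /* hypothetical driver state */
};

static irqreturn_t foo_can_irq(int irq, void *dev_id)
{
        struct foo_priv *priv = dev_id;
        int received;

        /* Hard-IRQ context: the helper now queues skbs onto the
         * unlocked skb_irq_queue instead of taking skb_queue.lock
         * once per skb.
         */
        received = can_rx_offload_irq_offload_fifo(&priv->offload);

        /* Splice skb_irq_queue into skb_queue under the lock (one
         * round-trip for the whole batch) and schedule the NAPI poll.
         */
        can_rx_offload_irq_finish(&priv->offload);

        return received ? IRQ_HANDLED : IRQ_NONE;
}

Deferring the skb_queue.lock round-trip and the NAPI kick to a single can_rx_offload_irq_finish() call is what lets the per-mailbox loops above run lock-free in hard-IRQ context: skb_irq_queue has exactly one producer, the ISR, and is only handed over to the NAPI side under the lock.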