3939#include "ibmveth.h"
4040
4141static irqreturn_t ibmveth_interrupt (int irq , void * dev_instance );
42- static void ibmveth_rxq_harvest_buffer (struct ibmveth_adapter * adapter ,
43- bool reuse );
4442static unsigned long ibmveth_get_desired_dma (struct vio_dev * vdev );
4543
4644static struct kobj_type ktype_veth_pool ;
@@ -231,7 +229,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
231229 index = pool -> free_map [free_index ];
232230 skb = NULL ;
233231
234- BUG_ON (index == IBM_VETH_INVALID_MAP );
232+ if (WARN_ON (index == IBM_VETH_INVALID_MAP )) {
233+ schedule_work (& adapter -> work );
234+ goto bad_index_failure ;
235+ }
235236
236237 /* are we allocating a new buffer or recycling an old one */
237238 if (pool -> skbuff [index ])
@@ -300,6 +301,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
300301 DMA_FROM_DEVICE );
301302 dev_kfree_skb_any (pool -> skbuff [index ]);
302303 pool -> skbuff [index ] = NULL ;
304+ bad_index_failure :
303305 adapter -> replenish_add_buff_failure ++ ;
304306
305307 mb ();
@@ -370,20 +372,36 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
370372 }
371373}
372374
373- /* remove a buffer from a pool */
374- static void ibmveth_remove_buffer_from_pool (struct ibmveth_adapter * adapter ,
375- u64 correlator , bool reuse )
375+ /**
376+ * ibmveth_remove_buffer_from_pool - remove a buffer from a pool
377+ * @adapter: adapter instance
378+ * @correlator: identifies pool and index
379+ * @reuse: whether to reuse buffer
380+ *
381+ * Return:
382+ * * %0 - success
383+ * * %-EINVAL - correlator maps to an invalid pool or an out-of-range index
384+ * * %-EFAULT - pool and index map to null skb
385+ */
386+ static int ibmveth_remove_buffer_from_pool (struct ibmveth_adapter * adapter ,
387+ u64 correlator , bool reuse )
376388{
377389 unsigned int pool = correlator >> 32 ;
378390 unsigned int index = correlator & 0xffffffffUL ;
379391 unsigned int free_index ;
380392 struct sk_buff * skb ;
381393
382- BUG_ON (pool >= IBMVETH_NUM_BUFF_POOLS );
383- BUG_ON (index >= adapter -> rx_buff_pool [pool ].size );
394+ if (WARN_ON (pool >= IBMVETH_NUM_BUFF_POOLS ) ||
395+ WARN_ON (index >= adapter -> rx_buff_pool [pool ].size )) {
396+ schedule_work (& adapter -> work );
397+ return - EINVAL ;
398+ }
384399
385400 skb = adapter -> rx_buff_pool [pool ].skbuff [index ];
386- BUG_ON (skb == NULL );
401+ if (WARN_ON (!skb )) {
402+ schedule_work (& adapter -> work );
403+ return - EFAULT ;
404+ }
387405
388406 /* if we are going to reuse the buffer then keep the pointers around
389407 * but mark index as available. replenish will see the skb pointer and
@@ -411,6 +429,8 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
411429 mb ();
412430
413431 atomic_dec (& (adapter -> rx_buff_pool [pool ].available ));
432+
433+ return 0 ;
414434}
415435
416436/* get the current buffer on the rx queue */
@@ -420,24 +440,44 @@ static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *ada
420440 unsigned int pool = correlator >> 32 ;
421441 unsigned int index = correlator & 0xffffffffUL ;
422442
423- BUG_ON (pool >= IBMVETH_NUM_BUFF_POOLS );
424- BUG_ON (index >= adapter -> rx_buff_pool [pool ].size );
443+ if (WARN_ON (pool >= IBMVETH_NUM_BUFF_POOLS ) ||
444+ WARN_ON (index >= adapter -> rx_buff_pool [pool ].size )) {
445+ schedule_work (& adapter -> work );
446+ return NULL ;
447+ }
425448
426449 return adapter -> rx_buff_pool [pool ].skbuff [index ];
427450}
428451
429- static void ibmveth_rxq_harvest_buffer (struct ibmveth_adapter * adapter ,
430- bool reuse )
452+ /**
453+ * ibmveth_rxq_harvest_buffer - Harvest buffer from pool
454+ *
455+ * @adapter: pointer to adapter
456+ * @reuse: whether to reuse buffer
457+ *
458+ * Context: called from ibmveth_poll
459+ *
460+ * Return:
461+ * * %0 - success
462+ * * other - non-zero return from ibmveth_remove_buffer_from_pool
463+ */
464+ static int ibmveth_rxq_harvest_buffer (struct ibmveth_adapter * adapter ,
465+ bool reuse )
431466{
432467 u64 cor ;
468+ int rc ;
433469
/* correlator of the rx-queue entry at the current index encodes the
 * pool number (upper 32 bits) and buffer index (lower 32 bits).
 */
434470 cor = adapter -> rx_queue .queue_addr [adapter -> rx_queue .index ].correlator ;
435- ibmveth_remove_buffer_from_pool (adapter , cor , reuse );
471+ rc = ibmveth_remove_buffer_from_pool (adapter , cor , reuse );
/* Propagate the error without advancing the ring index: a non-zero rc
 * means the correlator was invalid and a reset has already been
 * scheduled by ibmveth_remove_buffer_from_pool.
 */
472+ if (unlikely (rc ))
473+ return rc ;
436474
/* wrap the ring index; the toggle bit flips on every wrap-around */
437475 if (++ adapter -> rx_queue .index == adapter -> rx_queue .num_slots ) {
438476 adapter -> rx_queue .index = 0 ;
439477 adapter -> rx_queue .toggle = !adapter -> rx_queue .toggle ;
440478 }
479+
480+ return 0 ;
441481}
442482
443483static void ibmveth_free_tx_ltb (struct ibmveth_adapter * adapter , int idx )
@@ -709,6 +749,35 @@ static int ibmveth_close(struct net_device *netdev)
709749 return 0 ;
710750}
711751
752+ /**
753+ * ibmveth_reset - Handle scheduled reset work
754+ *
755+ * @w: pointer to work_struct embedded in adapter structure
756+ *
757+ * Context: This routine acquires rtnl_mutex and disables its NAPI through
758+ * ibmveth_close. It can't be called directly in a context that has
759+ * already acquired rtnl_mutex or disabled its NAPI, or directly from
760+ * a poll routine.
761+ *
762+ * Return: void
763+ */
764+ static void ibmveth_reset (struct work_struct * w )
765+ {
/* recover the adapter from the embedded work_struct */
766+ struct ibmveth_adapter * adapter = container_of (w , struct ibmveth_adapter , work );
767+ struct net_device * netdev = adapter -> netdev ;
768+
769+ netdev_dbg (netdev , "reset starting\n" );
770+
771+ rtnl_lock ();
772+
/* full down/up cycle under RTNL to re-register buffers with the
 * hypervisor after a WARN_ON-detected inconsistency.
 * NOTE(review): dev_open()'s return value is ignored — reset appears to
 * be best-effort; confirm the device is intentionally left down if
 * reopen fails.
 */
773+ dev_close (adapter -> netdev );
774+ dev_open (adapter -> netdev , NULL );
775+
776+ rtnl_unlock ();
777+
778+ netdev_dbg (netdev , "reset complete\n" );
779+ }
780+
712781static int ibmveth_set_link_ksettings (struct net_device * dev ,
713782 const struct ethtool_link_ksettings * cmd )
714783{
@@ -1324,7 +1393,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
13241393 wmb (); /* suggested by larson1 */
13251394 adapter -> rx_invalid_buffer ++ ;
13261395 netdev_dbg (netdev , "recycling invalid buffer\n" );
1327- ibmveth_rxq_harvest_buffer (adapter , true);
1396+ if (unlikely (ibmveth_rxq_harvest_buffer (adapter , true)))
1397+ break ;
13281398 } else {
13291399 struct sk_buff * skb , * new_skb ;
13301400 int length = ibmveth_rxq_frame_length (adapter );
@@ -1334,6 +1404,8 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
13341404 __sum16 iph_check = 0 ;
13351405
13361406 skb = ibmveth_rxq_get_buffer (adapter );
1407+ if (unlikely (!skb ))
1408+ break ;
13371409
13381410 /* if the large packet bit is set in the rx queue
13391411 * descriptor, the mss will be written by PHYP eight
@@ -1357,10 +1429,12 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
13571429 if (rx_flush )
13581430 ibmveth_flush_buffer (skb -> data ,
13591431 length + offset );
1360- ibmveth_rxq_harvest_buffer (adapter , true);
1432+ if (unlikely (ibmveth_rxq_harvest_buffer (adapter , true)))
1433+ break ;
13611434 skb = new_skb ;
13621435 } else {
1363- ibmveth_rxq_harvest_buffer (adapter , false);
1436+ if (unlikely (ibmveth_rxq_harvest_buffer (adapter , false)))
1437+ break ;
13641438 skb_reserve (skb , offset );
13651439 }
13661440
@@ -1407,7 +1481,10 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
14071481 * then check once more to make sure we are done.
14081482 */
14091483 lpar_rc = h_vio_signal (adapter -> vdev -> unit_address , VIO_IRQ_ENABLE );
1410- BUG_ON (lpar_rc != H_SUCCESS );
1484+ if (WARN_ON (lpar_rc != H_SUCCESS )) {
1485+ schedule_work (& adapter -> work );
1486+ goto out ;
1487+ }
14111488
14121489 if (ibmveth_rxq_pending_buffer (adapter ) && napi_schedule (napi )) {
14131490 lpar_rc = h_vio_signal (adapter -> vdev -> unit_address ,
@@ -1428,7 +1505,7 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
14281505 if (napi_schedule_prep (& adapter -> napi )) {
14291506 lpar_rc = h_vio_signal (adapter -> vdev -> unit_address ,
14301507 VIO_IRQ_DISABLE );
1431- BUG_ON (lpar_rc != H_SUCCESS );
1508+ WARN_ON (lpar_rc != H_SUCCESS );
14321509 __napi_schedule (& adapter -> napi );
14331510 }
14341511 return IRQ_HANDLED ;
@@ -1670,6 +1747,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
16701747
16711748 adapter -> vdev = dev ;
16721749 adapter -> netdev = netdev ;
1750+ INIT_WORK (& adapter -> work , ibmveth_reset );
16731751 adapter -> mcastFilterSize = be32_to_cpu (* mcastFilterSize_p );
16741752 ibmveth_init_link_settings (netdev );
16751753
@@ -1762,6 +1840,8 @@ static void ibmveth_remove(struct vio_dev *dev)
17621840 struct ibmveth_adapter * adapter = netdev_priv (netdev );
17631841 int i ;
17641842
1843+ cancel_work_sync (& adapter -> work );
1844+
17651845 for (i = 0 ; i < IBMVETH_NUM_BUFF_POOLS ; i ++ )
17661846 kobject_put (& adapter -> rx_buff_pool [i ].kobj );
17671847
0 commit comments