@@ -36,7 +36,7 @@ static const struct smcd_ops ism_ops;
 static struct ism_client *clients[MAX_CLIENTS];	/* use an array rather than */
 						/* a list for fast mapping  */
 static u8 max_client;
-static DEFINE_SPINLOCK(clients_lock);
+static DEFINE_MUTEX(clients_lock);
 struct ism_dev_list {
 	struct list_head list;
 	struct mutex mutex; /* protects ism device list */
@@ -47,14 +47,22 @@ static struct ism_dev_list ism_dev_list = {
 	.mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
 };
 
+static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ism->lock, flags);
+	ism->subs[client->id] = client;
+	spin_unlock_irqrestore(&ism->lock, flags);
+}
+
 int ism_register_client(struct ism_client *client)
 {
 	struct ism_dev *ism;
-	unsigned long flags;
 	int i, rc = -ENOSPC;
 
 	mutex_lock(&ism_dev_list.mutex);
-	spin_lock_irqsave(&clients_lock, flags);
+	mutex_lock(&clients_lock);
 	for (i = 0; i < MAX_CLIENTS; ++i) {
 		if (!clients[i]) {
 			clients[i] = client;
@@ -65,12 +73,14 @@ int ism_register_client(struct ism_client *client)
 			break;
 		}
 	}
-	spin_unlock_irqrestore(&clients_lock, flags);
+	mutex_unlock(&clients_lock);
+
 	if (i < MAX_CLIENTS) {
 		/* initialize with all devices that we got so far */
 		list_for_each_entry(ism, &ism_dev_list.list, list) {
 			ism->priv[i] = NULL;
 			client->add(ism);
+			ism_setup_forwarding(client, ism);
 		}
 	}
 	mutex_unlock(&ism_dev_list.mutex);
@@ -86,25 +96,32 @@ int ism_unregister_client(struct ism_client *client)
 	int rc = 0;
 
 	mutex_lock(&ism_dev_list.mutex);
-	spin_lock_irqsave(&clients_lock, flags);
-	clients[client->id] = NULL;
-	if (client->id + 1 == max_client)
-		max_client--;
-	spin_unlock_irqrestore(&clients_lock, flags);
 	list_for_each_entry(ism, &ism_dev_list.list, list) {
+		spin_lock_irqsave(&ism->lock, flags);
+		/* Stop forwarding IRQs and events */
+		ism->subs[client->id] = NULL;
 		for (int i = 0; i < ISM_NR_DMBS; ++i) {
 			if (ism->sba_client_arr[i] == client->id) {
-				pr_err("%s: attempt to unregister client '%s'"
-				       "with registered dmb(s)\n", __func__,
-				       client->name);
+				WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
+				     __func__, client->name);
 				rc = -EBUSY;
-				goto out;
+				goto err_reg_dmb;
 			}
 		}
+		spin_unlock_irqrestore(&ism->lock, flags);
 	}
-out:
 	mutex_unlock(&ism_dev_list.mutex);
 
+	mutex_lock(&clients_lock);
+	clients[client->id] = NULL;
+	if (client->id + 1 == max_client)
+		max_client--;
+	mutex_unlock(&clients_lock);
+	return rc;
+
+err_reg_dmb:
+	spin_unlock_irqrestore(&ism->lock, flags);
+	mutex_unlock(&ism_dev_list.mutex);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(ism_unregister_client);
@@ -328,6 +345,7 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
 			     struct ism_client *client)
 {
 	union ism_reg_dmb cmd;
+	unsigned long flags;
 	int ret;
 
 	ret = ism_alloc_dmb(ism, dmb);
@@ -351,7 +369,9 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
 		goto out;
 	}
 	dmb->dmb_tok = cmd.response.dmb_tok;
+	spin_lock_irqsave(&ism->lock, flags);
 	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
+	spin_unlock_irqrestore(&ism->lock, flags);
 out:
 	return ret;
 }
@@ -360,6 +380,7 @@ EXPORT_SYMBOL_GPL(ism_register_dmb);
 int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
 	union ism_unreg_dmb cmd;
+	unsigned long flags;
 	int ret;
 
 	memset(&cmd, 0, sizeof(cmd));
@@ -368,7 +389,9 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 
 	cmd.request.dmb_tok = dmb->dmb_tok;
 
+	spin_lock_irqsave(&ism->lock, flags);
 	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
+	spin_unlock_irqrestore(&ism->lock, flags);
 
 	ret = ism_cmd(ism, &cmd);
 	if (ret && ret != ISM_ERROR)
@@ -491,6 +514,7 @@ static u16 ism_get_chid(struct ism_dev *ism)
 static void ism_handle_event(struct ism_dev *ism)
 {
 	struct ism_event *entry;
+	struct ism_client *clt;
 	int i;
 
 	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
@@ -499,21 +523,21 @@ static void ism_handle_event(struct ism_dev *ism)
 
 		entry = &ism->ieq->entry[ism->ieq_idx];
 		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
-		spin_lock(&clients_lock);
-		for (i = 0; i < max_client; ++i)
-			if (clients[i])
-				clients[i]->handle_event(ism, entry);
-		spin_unlock(&clients_lock);
+		for (i = 0; i < max_client; ++i) {
+			clt = ism->subs[i];
+			if (clt)
+				clt->handle_event(ism, entry);
+		}
 	}
 }
 
 static irqreturn_t ism_handle_irq(int irq, void *data)
 {
 	struct ism_dev *ism = data;
-	struct ism_client *clt;
 	unsigned long bit, end;
 	unsigned long *bv;
 	u16 dmbemask;
+	u8 client_id;
 
 	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
 	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
@@ -530,8 +554,10 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
 		dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
 		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
 		barrier();
-		clt = clients[ism->sba_client_arr[bit]];
-		clt->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
+		client_id = ism->sba_client_arr[bit];
+		if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
+			continue;
+		ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
 	}
 
 	if (ism->sba->e) {
@@ -548,20 +574,9 @@ static u64 ism_get_local_gid(struct ism_dev *ism)
 	return ism->local_gid;
 }
 
-static void ism_dev_add_work_func(struct work_struct *work)
-{
-	struct ism_client *client = container_of(work, struct ism_client,
-						 add_work);
-
-	client->add(client->tgt_ism);
-	atomic_dec(&client->tgt_ism->add_dev_cnt);
-	wake_up(&client->tgt_ism->waitq);
-}
-
 static int ism_dev_init(struct ism_dev *ism)
 {
 	struct pci_dev *pdev = ism->pdev;
-	unsigned long flags;
 	int i, ret;
 
 	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
@@ -594,25 +609,16 @@ static int ism_dev_init(struct ism_dev *ism)
 		/* hardware is V2 capable */
 		ism_create_system_eid();
 
-	init_waitqueue_head(&ism->waitq);
-	atomic_set(&ism->free_clients_cnt, 0);
-	atomic_set(&ism->add_dev_cnt, 0);
-
-	wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
-	spin_lock_irqsave(&clients_lock, flags);
-	for (i = 0; i < max_client; ++i)
+	mutex_lock(&ism_dev_list.mutex);
+	mutex_lock(&clients_lock);
+	for (i = 0; i < max_client; ++i) {
 		if (clients[i]) {
-			INIT_WORK(&clients[i]->add_work,
-				  ism_dev_add_work_func);
-			clients[i]->tgt_ism = ism;
-			atomic_inc(&ism->add_dev_cnt);
-			schedule_work(&clients[i]->add_work);
+			clients[i]->add(ism);
+			ism_setup_forwarding(clients[i], ism);
 		}
-	spin_unlock_irqrestore(&clients_lock, flags);
-
-	wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
+	}
+	mutex_unlock(&clients_lock);
 
-	mutex_lock(&ism_dev_list.mutex);
 	list_add(&ism->list, &ism_dev_list.list);
 	mutex_unlock(&ism_dev_list.mutex);
 
@@ -687,36 +693,24 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	return ret;
 }
 
-static void ism_dev_remove_work_func(struct work_struct *work)
-{
-	struct ism_client *client = container_of(work, struct ism_client,
-						 remove_work);
-
-	client->remove(client->tgt_ism);
-	atomic_dec(&client->tgt_ism->free_clients_cnt);
-	wake_up(&client->tgt_ism->waitq);
-}
-
-/* Callers must hold ism_dev_list.mutex */
 static void ism_dev_exit(struct ism_dev *ism)
 {
 	struct pci_dev *pdev = ism->pdev;
 	unsigned long flags;
 	int i;
 
-	wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
-	spin_lock_irqsave(&clients_lock, flags);
+	spin_lock_irqsave(&ism->lock, flags);
 	for (i = 0; i < max_client; ++i)
-		if (clients[i]) {
-			INIT_WORK(&clients[i]->remove_work,
-				  ism_dev_remove_work_func);
-			clients[i]->tgt_ism = ism;
-			atomic_inc(&ism->free_clients_cnt);
-			schedule_work(&clients[i]->remove_work);
-		}
-	spin_unlock_irqrestore(&clients_lock, flags);
+		ism->subs[i] = NULL;
+	spin_unlock_irqrestore(&ism->lock, flags);
 
-	wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
+	mutex_lock(&ism_dev_list.mutex);
+	mutex_lock(&clients_lock);
+	for (i = 0; i < max_client; ++i) {
+		if (clients[i])
+			clients[i]->remove(ism);
+	}
+	mutex_unlock(&clients_lock);
 
 	if (SYSTEM_EID.serial_number[0] != '0' ||
 	    SYSTEM_EID.type[0] != '0')
@@ -727,15 +721,14 @@ static void ism_dev_exit(struct ism_dev *ism)
 	kfree(ism->sba_client_arr);
 	pci_free_irq_vectors(pdev);
 	list_del_init(&ism->list);
+	mutex_unlock(&ism_dev_list.mutex);
 }
 
 static void ism_remove(struct pci_dev *pdev)
 {
 	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
 
-	mutex_lock(&ism_dev_list.mutex);
 	ism_dev_exit(ism);
-	mutex_unlock(&ism_dev_list.mutex);
 
 	pci_release_mem_regions(pdev);
 	pci_disable_device(pdev);
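
Note: as a rough userspace sketch of the locking scheme this diff arrives at — not the driver code itself, and every identifier below is invented for illustration — slow-path registration serializes on a clients_lock mutex, forwarding is enabled per device by publishing the client into a subs[] table under that device's spinlock, and the IRQ hot path reads subs[] without taking any global lock, simply dropping interrupts whose slot is empty:

/*
 * Illustrative pattern only; compile with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_CLIENTS 8

struct client {
	int id;
	void (*handle_irq)(int dmb_bit);
};

struct dev {
	pthread_spinlock_t lock;           /* protects writers of subs[] */
	struct client *subs[MAX_CLIENTS];  /* forwarding table, indexed by client id */
};

static pthread_mutex_t clients_lock = PTHREAD_MUTEX_INITIALIZER;
static struct client *clients[MAX_CLIENTS];

/* Slow path: may sleep, so a mutex replaces the old global spinlock. */
static void register_client(struct dev *d, struct client *c)
{
	pthread_mutex_lock(&clients_lock);
	clients[c->id] = c;
	pthread_mutex_unlock(&clients_lock);

	/* Enable forwarding only once the client is fully set up. */
	pthread_spin_lock(&d->lock);
	d->subs[c->id] = c;
	pthread_spin_unlock(&d->lock);
}

/* Hot path: no global lock; a NULL slot means "not subscribed", drop it. */
static void handle_irq(struct dev *d, int client_id, int dmb_bit)
{
	struct client *c = d->subs[client_id];

	if (c)
		c->handle_irq(dmb_bit);
}

static void echo_irq(int dmb_bit) { printf("irq on dmb %d\n", dmb_bit); }

int main(void)
{
	struct dev d = { 0 };
	struct client c = { .id = 0, .handle_irq = echo_irq };

	pthread_spin_init(&d.lock, PTHREAD_PROCESS_PRIVATE);
	register_client(&d, &c);
	handle_irq(&d, 0, 3);	/* forwarded to the subscriber */
	handle_irq(&d, 1, 5);	/* no subscriber: silently dropped */
	return 0;
}

The point of the split is that the interrupt handler never contends with sleeping registration paths: clearing a device's subs[] slot under the per-device lock is enough to stop forwarding before a client is torn down.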