Commit bbffab6

Merge branch 's390-ism-fixes'
Niklas Schnelle says:

====================
s390/ism: Fixes to client handling

This is v2 of the patch previously titled "s390/ism: Detangle ISM client
IRQ and event forwarding". As suggested by Paolo Abeni I split the patch
up. While doing so I noticed another problem, fixed by this patch,
concerning the way the workqueues access the client structs. This means
the second patch, which turns the workqueues into simple direct calls,
also fixes a problem. Finally I split off a third patch just for fixing
ism_unregister_client()'s error path.

The code after these 3 patches is identical to the result of the v1 patch,
except that I also turned the dev_err() for still registered DMBs into a
WARN().
====================

Signed-off-by: David S. Miller <[email protected]>
2 parents c329b26 + 266deee commit bbffab6
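
For readers skimming the diff, here is a minimal, compilable userspace sketch of the pattern the series moves to: each ism_dev carries its own subs[] forwarding table protected by the device lock, and the IRQ path looks up the owning client by id and bails out on NO_CLIENT instead of reaching into the global clients[] array. The struct layouts, the pthread mutex, and the demo handler are illustrative stand-ins invented for this sketch, not the kernel's types or API.

/* Userspace analogue of the per-device forwarding pattern in the diff
 * below: register publishes the client in the device's subs[] table under
 * the device lock, and the IRQ path skips unclaimed DMBs instead of
 * dereferencing a possibly stale global clients[] entry.
 */
#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

#define MAX_CLIENTS 8
#define NO_CLIENT   0xff

struct ism_client {
        const char *name;
        unsigned char id;
        void (*handle_irq)(struct ism_client *clt, unsigned int bit);
};

struct ism_dev {
        pthread_mutex_t lock;                 /* stands in for ism->lock */
        struct ism_client *subs[MAX_CLIENTS]; /* per-device forwarding table */
        unsigned char sba_client_arr[16];     /* DMB index -> owning client id */
};

/* Mirrors ism_setup_forwarding(): publish the client under the device lock. */
static void setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
        pthread_mutex_lock(&ism->lock);
        ism->subs[client->id] = client;
        pthread_mutex_unlock(&ism->lock);
}

/* Mirrors the reworked IRQ loop: look up the owner, skip if unclaimed. */
static void handle_irq_bit(struct ism_dev *ism, unsigned int bit)
{
        unsigned char client_id = ism->sba_client_arr[bit];

        if (client_id == NO_CLIENT || !ism->subs[client_id])
                return;                       /* nobody registered this DMB */
        ism->subs[client_id]->handle_irq(ism->subs[client_id], bit);
}

static void demo_irq(struct ism_client *clt, unsigned int bit)
{
        printf("%s handles DMB bit %u\n", clt->name, bit);
}

int main(void)
{
        struct ism_dev dev = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct ism_client smc = { .name = "smc", .id = 0, .handle_irq = demo_irq };

        for (size_t i = 0; i < sizeof(dev.sba_client_arr); i++)
                dev.sba_client_arr[i] = NO_CLIENT;

        setup_forwarding(&smc, &dev);
        dev.sba_client_arr[3] = smc.id;       /* as ism_register_dmb() would do */
        handle_irq_bit(&dev, 3);              /* forwarded to the client */
        handle_irq_bit(&dev, 5);              /* NO_CLIENT: silently skipped */
        return 0;
}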

2 files changed: 67 additions, 79 deletions


drivers/s390/net/ism_drv.c

Lines changed: 66 additions & 73 deletions
@@ -36,7 +36,7 @@ static const struct smcd_ops ism_ops;
 static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
                                                 /* a list for fast mapping  */
 static u8 max_client;
-static DEFINE_SPINLOCK(clients_lock);
+static DEFINE_MUTEX(clients_lock);
 struct ism_dev_list {
         struct list_head list;
         struct mutex mutex; /* protects ism device list */
@@ -47,14 +47,22 @@ static struct ism_dev_list ism_dev_list = {
         .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
 };
 
+static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&ism->lock, flags);
+        ism->subs[client->id] = client;
+        spin_unlock_irqrestore(&ism->lock, flags);
+}
+
 int ism_register_client(struct ism_client *client)
 {
         struct ism_dev *ism;
-        unsigned long flags;
         int i, rc = -ENOSPC;
 
         mutex_lock(&ism_dev_list.mutex);
-        spin_lock_irqsave(&clients_lock, flags);
+        mutex_lock(&clients_lock);
         for (i = 0; i < MAX_CLIENTS; ++i) {
                 if (!clients[i]) {
                         clients[i] = client;
@@ -65,12 +73,14 @@ int ism_register_client(struct ism_client *client)
                         break;
                 }
         }
-        spin_unlock_irqrestore(&clients_lock, flags);
+        mutex_unlock(&clients_lock);
+
         if (i < MAX_CLIENTS) {
                 /* initialize with all devices that we got so far */
                 list_for_each_entry(ism, &ism_dev_list.list, list) {
                         ism->priv[i] = NULL;
                         client->add(ism);
+                        ism_setup_forwarding(client, ism);
                 }
         }
         mutex_unlock(&ism_dev_list.mutex);
@@ -86,25 +96,32 @@ int ism_unregister_client(struct ism_client *client)
         int rc = 0;
 
         mutex_lock(&ism_dev_list.mutex);
-        spin_lock_irqsave(&clients_lock, flags);
-        clients[client->id] = NULL;
-        if (client->id + 1 == max_client)
-                max_client--;
-        spin_unlock_irqrestore(&clients_lock, flags);
         list_for_each_entry(ism, &ism_dev_list.list, list) {
+                spin_lock_irqsave(&ism->lock, flags);
+                /* Stop forwarding IRQs and events */
+                ism->subs[client->id] = NULL;
                 for (int i = 0; i < ISM_NR_DMBS; ++i) {
                         if (ism->sba_client_arr[i] == client->id) {
-                                pr_err("%s: attempt to unregister client '%s'"
-                                       "with registered dmb(s)\n", __func__,
-                                       client->name);
+                                WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
+                                     __func__, client->name);
                                 rc = -EBUSY;
-                                goto out;
+                                goto err_reg_dmb;
                         }
                 }
+                spin_unlock_irqrestore(&ism->lock, flags);
         }
-out:
         mutex_unlock(&ism_dev_list.mutex);
 
+        mutex_lock(&clients_lock);
+        clients[client->id] = NULL;
+        if (client->id + 1 == max_client)
+                max_client--;
+        mutex_unlock(&clients_lock);
+        return rc;
+
+err_reg_dmb:
+        spin_unlock_irqrestore(&ism->lock, flags);
+        mutex_unlock(&ism_dev_list.mutex);
         return rc;
 }
 EXPORT_SYMBOL_GPL(ism_unregister_client);
@@ -328,6 +345,7 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
                      struct ism_client *client)
 {
         union ism_reg_dmb cmd;
+        unsigned long flags;
         int ret;
 
         ret = ism_alloc_dmb(ism, dmb);
@@ -351,7 +369,9 @@ int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
                 goto out;
         }
         dmb->dmb_tok = cmd.response.dmb_tok;
+        spin_lock_irqsave(&ism->lock, flags);
         ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
+        spin_unlock_irqrestore(&ism->lock, flags);
 out:
         return ret;
 }
@@ -360,6 +380,7 @@ EXPORT_SYMBOL_GPL(ism_register_dmb);
 int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
         union ism_unreg_dmb cmd;
+        unsigned long flags;
         int ret;
 
         memset(&cmd, 0, sizeof(cmd));
@@ -368,7 +389,9 @@ int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 
         cmd.request.dmb_tok = dmb->dmb_tok;
 
+        spin_lock_irqsave(&ism->lock, flags);
         ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
+        spin_unlock_irqrestore(&ism->lock, flags);
 
         ret = ism_cmd(ism, &cmd);
         if (ret && ret != ISM_ERROR)
@@ -491,6 +514,7 @@ static u16 ism_get_chid(struct ism_dev *ism)
 static void ism_handle_event(struct ism_dev *ism)
 {
         struct ism_event *entry;
+        struct ism_client *clt;
         int i;
 
         while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
@@ -499,21 +523,21 @@ static void ism_handle_event(struct ism_dev *ism)
 
                 entry = &ism->ieq->entry[ism->ieq_idx];
                 debug_event(ism_debug_info, 2, entry, sizeof(*entry));
-                spin_lock(&clients_lock);
-                for (i = 0; i < max_client; ++i)
-                        if (clients[i])
-                                clients[i]->handle_event(ism, entry);
-                spin_unlock(&clients_lock);
+                for (i = 0; i < max_client; ++i) {
+                        clt = ism->subs[i];
+                        if (clt)
+                                clt->handle_event(ism, entry);
+                }
         }
 }
 
 static irqreturn_t ism_handle_irq(int irq, void *data)
 {
         struct ism_dev *ism = data;
-        struct ism_client *clt;
         unsigned long bit, end;
         unsigned long *bv;
         u16 dmbemask;
+        u8 client_id;
 
         bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
         end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;
@@ -530,8 +554,10 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
                 dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
                 ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
                 barrier();
-                clt = clients[ism->sba_client_arr[bit]];
-                clt->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
+                client_id = ism->sba_client_arr[bit];
+                if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
+                        continue;
+                ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
         }
 
         if (ism->sba->e) {
@@ -548,20 +574,9 @@ static u64 ism_get_local_gid(struct ism_dev *ism)
         return ism->local_gid;
 }
 
-static void ism_dev_add_work_func(struct work_struct *work)
-{
-        struct ism_client *client = container_of(work, struct ism_client,
-                                                 add_work);
-
-        client->add(client->tgt_ism);
-        atomic_dec(&client->tgt_ism->add_dev_cnt);
-        wake_up(&client->tgt_ism->waitq);
-}
-
 static int ism_dev_init(struct ism_dev *ism)
 {
         struct pci_dev *pdev = ism->pdev;
-        unsigned long flags;
         int i, ret;
 
         ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
@@ -594,25 +609,16 @@ static int ism_dev_init(struct ism_dev *ism)
                 /* hardware is V2 capable */
                 ism_create_system_eid();
 
-        init_waitqueue_head(&ism->waitq);
-        atomic_set(&ism->free_clients_cnt, 0);
-        atomic_set(&ism->add_dev_cnt, 0);
-
-        wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
-        spin_lock_irqsave(&clients_lock, flags);
-        for (i = 0; i < max_client; ++i)
+        mutex_lock(&ism_dev_list.mutex);
+        mutex_lock(&clients_lock);
+        for (i = 0; i < max_client; ++i) {
                 if (clients[i]) {
-                        INIT_WORK(&clients[i]->add_work,
-                                  ism_dev_add_work_func);
-                        clients[i]->tgt_ism = ism;
-                        atomic_inc(&ism->add_dev_cnt);
-                        schedule_work(&clients[i]->add_work);
+                        clients[i]->add(ism);
+                        ism_setup_forwarding(clients[i], ism);
                 }
-        spin_unlock_irqrestore(&clients_lock, flags);
-
-        wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
+        }
+        mutex_unlock(&clients_lock);
 
-        mutex_lock(&ism_dev_list.mutex);
         list_add(&ism->list, &ism_dev_list.list);
         mutex_unlock(&ism_dev_list.mutex);
 
@@ -687,36 +693,24 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         return ret;
 }
 
-static void ism_dev_remove_work_func(struct work_struct *work)
-{
-        struct ism_client *client = container_of(work, struct ism_client,
-                                                 remove_work);
-
-        client->remove(client->tgt_ism);
-        atomic_dec(&client->tgt_ism->free_clients_cnt);
-        wake_up(&client->tgt_ism->waitq);
-}
-
-/* Callers must hold ism_dev_list.mutex */
 static void ism_dev_exit(struct ism_dev *ism)
 {
         struct pci_dev *pdev = ism->pdev;
         unsigned long flags;
         int i;
 
-        wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
-        spin_lock_irqsave(&clients_lock, flags);
+        spin_lock_irqsave(&ism->lock, flags);
         for (i = 0; i < max_client; ++i)
-                if (clients[i]) {
-                        INIT_WORK(&clients[i]->remove_work,
-                                  ism_dev_remove_work_func);
-                        clients[i]->tgt_ism = ism;
-                        atomic_inc(&ism->free_clients_cnt);
-                        schedule_work(&clients[i]->remove_work);
-                }
-        spin_unlock_irqrestore(&clients_lock, flags);
+                ism->subs[i] = NULL;
+        spin_unlock_irqrestore(&ism->lock, flags);
 
-        wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
+        mutex_lock(&ism_dev_list.mutex);
+        mutex_lock(&clients_lock);
+        for (i = 0; i < max_client; ++i) {
+                if (clients[i])
+                        clients[i]->remove(ism);
+        }
+        mutex_unlock(&clients_lock);
 
         if (SYSTEM_EID.serial_number[0] != '0' ||
             SYSTEM_EID.type[0] != '0')
@@ -727,15 +721,14 @@ static void ism_dev_exit(struct ism_dev *ism)
         kfree(ism->sba_client_arr);
         pci_free_irq_vectors(pdev);
         list_del_init(&ism->list);
+        mutex_unlock(&ism_dev_list.mutex);
 }
 
 static void ism_remove(struct pci_dev *pdev)
 {
         struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
 
-        mutex_lock(&ism_dev_list.mutex);
         ism_dev_exit(ism);
-        mutex_unlock(&ism_dev_list.mutex);
 
         pci_release_mem_regions(pdev);
         pci_disable_device(pdev);

include/linux/ism.h

Lines changed: 1 addition & 6 deletions
@@ -44,9 +44,7 @@ struct ism_dev {
         u64 local_gid;
         int ieq_idx;
 
-        atomic_t free_clients_cnt;
-        atomic_t add_dev_cnt;
-        wait_queue_head_t waitq;
+        struct ism_client *subs[MAX_CLIENTS];
 };
 
 struct ism_event {
@@ -68,9 +66,6 @@ struct ism_client {
          */
         void (*handle_irq)(struct ism_dev *dev, unsigned int bit, u16 dmbemask);
         /* Private area - don't touch! */
-        struct work_struct remove_work;
-        struct work_struct add_work;
-        struct ism_dev *tgt_ism;
         u8 id;
 };
