Skip to content

Commit e4f4d8c

Browse files
davejiang authored and vinodkoul committed
dmaengine: idxd: Clean up descriptors with fault error
Add code to "complete" a descriptor when the descriptor or its completion address hit a fault error when SVA mode is being used. This error can be triggered due to bad programming by the user. A lock is introduced in order to protect the descriptor completion lists since the fault handler will run from the system work queue after being scheduled in the interrupt handler. Signed-off-by: Dave Jiang <[email protected]> Reviewed-by: Tony Luck <[email protected]> Reviewed-by: Dan Williams <[email protected]> Link: https://lore.kernel.org/r/160382008092.3911367.12766483427643278985.stgit@djiang5-desk3.ch.intel.com Signed-off-by: Vinod Koul <[email protected]>
1 parent 8e50d39 commit e4f4d8c

File tree

3 files changed

+140
-12
lines changed

3 files changed

+140
-12
lines changed

drivers/dma/idxd/idxd.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,11 @@ struct idxd_irq_entry {
3434
int id;
3535
struct llist_head pending_llist;
3636
struct list_head work_list;
37+
/*
38+
* Lock to protect access between irq thread process descriptor
39+
* and irq thread processing error descriptor.
40+
*/
41+
spinlock_t list_lock;
3742
};
3843

3944
struct idxd_group {

drivers/dma/idxd/init.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
9797
for (i = 0; i < msixcnt; i++) {
9898
idxd->irq_entries[i].id = i;
9999
idxd->irq_entries[i].idxd = idxd;
100+
spin_lock_init(&idxd->irq_entries[i].list_lock);
100101
}
101102

102103
msix = &idxd->msix_entries[0];

drivers/dma/idxd/irq.c

Lines changed: 134 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,24 @@
1111
#include "idxd.h"
1212
#include "registers.h"
1313

14+
/* Selects how the descriptor-list walkers treat each descriptor. */
enum irq_work_type {
	IRQ_WORK_NORMAL = 0,		/* complete on device-written status */
	IRQ_WORK_PROCESS_FAULT,		/* complete on fault-address match */
};

/*
 * Deferred fault context, allocated in the interrupt thread and
 * freed by the work handler after processing.
 */
struct idxd_fault {
	struct work_struct work;	/* queued on idxd->wq */
	u64 addr;			/* faulting address from sw_err */
	struct idxd_device *idxd;
};

static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 enum irq_work_type wtype,
				 int *processed, u64 data);
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     enum irq_work_type wtype,
				     int *processed, u64 data);
31+
1432
static void idxd_device_reinit(struct work_struct *work)
1533
{
1634
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -44,6 +62,46 @@ static void idxd_device_reinit(struct work_struct *work)
4462
idxd_device_wqs_clear_state(idxd);
4563
}
4664

65+
static void idxd_device_fault_work(struct work_struct *work)
66+
{
67+
struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
68+
struct idxd_irq_entry *ie;
69+
int i;
70+
int processed;
71+
int irqcnt = fault->idxd->num_wq_irqs + 1;
72+
73+
for (i = 1; i < irqcnt; i++) {
74+
ie = &fault->idxd->irq_entries[i];
75+
irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
76+
&processed, fault->addr);
77+
if (processed)
78+
break;
79+
80+
irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
81+
&processed, fault->addr);
82+
if (processed)
83+
break;
84+
}
85+
86+
kfree(fault);
87+
}
88+
89+
static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
90+
u64 fault_addr)
91+
{
92+
struct idxd_fault *fault;
93+
94+
fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
95+
if (!fault)
96+
return -ENOMEM;
97+
98+
fault->addr = fault_addr;
99+
fault->idxd = idxd;
100+
INIT_WORK(&fault->work, idxd_device_fault_work);
101+
queue_work(idxd->wq, &fault->work);
102+
return 0;
103+
}
104+
47105
irqreturn_t idxd_irq_handler(int vec, void *data)
48106
{
49107
struct idxd_irq_entry *irq_entry = data;
@@ -125,6 +183,15 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
125183
if (!err)
126184
goto out;
127185

186+
/*
187+
* This case should rarely happen and typically is due to software
188+
* programming error by the driver.
189+
*/
190+
if (idxd->sw_err.valid &&
191+
idxd->sw_err.desc_valid &&
192+
idxd->sw_err.fault_addr)
193+
idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);
194+
128195
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
129196
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
130197
idxd->state = IDXD_DEV_HALTED;
@@ -152,57 +219,110 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
152219
return IRQ_HANDLED;
153220
}
154221

222+
/*
 * Complete @desc with IDXD_COMPLETE_DEV_FAIL if either its hardware
 * descriptor or its completion record sits at @fault_addr (the
 * completion address can fault as well as the descriptor itself).
 *
 * Returns true when the descriptor matched and was completed.
 */
static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
{
	bool match = (u64)desc->hw == fault_addr ||
		     (u64)desc->completion == fault_addr;

	if (!match)
		return false;

	idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
	return true;
}
236+
237+
static bool complete_desc(struct idxd_desc *desc)
238+
{
239+
if (desc->completion->status) {
240+
idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
241+
return true;
242+
}
243+
244+
return false;
245+
}
246+
155247
/*
 * Drain the lock-free pending llist and try to complete each entry.
 *
 * @wtype: IRQ_WORK_NORMAL completes on device-written status;
 *         IRQ_WORK_PROCESS_FAULT completes on a match against @data
 *         (the fault address).
 * @processed: out-param, number of descriptors completed and freed.
 *
 * Returns the number of descriptors that were not yet complete;
 * those are moved onto work_list under list_lock so the fault work
 * and the irq thread do not race on list membership.
 */
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
				     enum irq_work_type wtype,
				     int *processed, u64 data)
{
	struct idxd_desc *desc, *t;
	struct llist_node *head;
	int queued = 0;
	bool completed = false;
	unsigned long flags;

	*processed = 0;
	head = llist_del_all(&irq_entry->pending_llist);
	if (!head)
		goto out;

	llist_for_each_entry_safe(desc, t, head, llnode) {
		if (wtype == IRQ_WORK_NORMAL)
			completed = complete_desc(desc);
		else if (wtype == IRQ_WORK_PROCESS_FAULT)
			completed = process_fault(desc, data);

		if (completed) {
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
			/*
			 * A fault matches at most one descriptor.
			 * NOTE(review): entries remaining on the drained
			 * llist chain after this break are not re-queued
			 * anywhere — confirm this is intended.
			 */
			if (wtype == IRQ_WORK_PROCESS_FAULT)
				break;
		} else {
			/* not done yet: park on work_list for later passes */
			spin_lock_irqsave(&irq_entry->list_lock, flags);
			list_add_tail(&desc->list,
				      &irq_entry->work_list);
			spin_unlock_irqrestore(&irq_entry->list_lock, flags);
			queued++;
		}
	}

out:
	return queued;
}
180285

181286
/*
 * Walk work_list and try to complete each descriptor.
 *
 * @wtype: IRQ_WORK_NORMAL completes on device-written status;
 *         IRQ_WORK_PROCESS_FAULT completes on a match against @data
 *         (the fault address).
 * @processed: out-param, number of descriptors completed and freed.
 *
 * Returns the number of descriptors left on the list. list_lock
 * guards list membership; it is released around the completion
 * callbacks and re-taken before the list is touched again.
 *
 * NOTE(review): the iterator's @next pointer is captured before the
 * lock is dropped for the callback — confirm no other context can
 * modify work_list in that window.
 */
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
				 enum irq_work_type wtype,
				 int *processed, u64 data)
{
	struct list_head *node, *next;
	int queued = 0;
	bool completed = false;
	unsigned long flags;

	*processed = 0;
	spin_lock_irqsave(&irq_entry->list_lock, flags);
	if (list_empty(&irq_entry->work_list))
		goto out;

	list_for_each_safe(node, next, &irq_entry->work_list) {
		struct idxd_desc *desc =
			container_of(node, struct idxd_desc, list);

		/* drop the lock while the completion callback runs */
		spin_unlock_irqrestore(&irq_entry->list_lock, flags);
		if (wtype == IRQ_WORK_NORMAL)
			completed = complete_desc(desc);
		else if (wtype == IRQ_WORK_PROCESS_FAULT)
			completed = process_fault(desc, data);

		if (completed) {
			/* retake the lock only to unlink the node */
			spin_lock_irqsave(&irq_entry->list_lock, flags);
			list_del(&desc->list);
			spin_unlock_irqrestore(&irq_entry->list_lock, flags);
			idxd_free_desc(desc->wq, desc);
			(*processed)++;
			/* fault processing stops at the first match */
			if (wtype == IRQ_WORK_PROCESS_FAULT)
				return queued;
		} else {
			queued++;
		}
		/* reacquire before the iterator advances */
		spin_lock_irqsave(&irq_entry->list_lock, flags);
	}

out:
	spin_unlock_irqrestore(&irq_entry->list_lock, flags);
	return queued;
}
208328

@@ -230,12 +350,14 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
230350
* 5. Repeat until no more descriptors.
231351
*/
232352
do {
233-
rc = irq_process_work_list(irq_entry, &processed);
353+
rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
354+
&processed, 0);
234355
total += processed;
235356
if (rc != 0)
236357
continue;
237358

238-
rc = irq_process_pending_llist(irq_entry, &processed);
359+
rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
360+
&processed, 0);
239361
total += processed;
240362
} while (rc != 0);
241363

0 commit comments

Comments
 (0)