 #include "idxd.h"
 #include "registers.h"
 
+enum irq_work_type {
+        IRQ_WORK_NORMAL = 0,
+        IRQ_WORK_PROCESS_FAULT,
+};
+
+struct idxd_fault {
+        struct work_struct work;
+        u64 addr;
+        struct idxd_device *idxd;
+};
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+                                 enum irq_work_type wtype,
+                                 int *processed, u64 data);
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+                                     enum irq_work_type wtype,
+                                     int *processed, u64 data);
+
 static void idxd_device_reinit(struct work_struct *work)
 {
         struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -44,6 +62,46 @@ static void idxd_device_reinit(struct work_struct *work)
         idxd_device_wqs_clear_state(idxd);
 }
 
+static void idxd_device_fault_work(struct work_struct *work)
+{
+        struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
+        struct idxd_irq_entry *ie;
+        int i;
+        int processed;
+        int irqcnt = fault->idxd->num_wq_irqs + 1;
+
+        for (i = 1; i < irqcnt; i++) {
+                ie = &fault->idxd->irq_entries[i];
+                irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
+                                      &processed, fault->addr);
+                if (processed)
+                        break;
+
+                irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
+                                          &processed, fault->addr);
+                if (processed)
+                        break;
+        }
+
+        kfree(fault);
+}
+
+static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
+                                              u64 fault_addr)
+{
+        struct idxd_fault *fault;
+
+        fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
+        if (!fault)
+                return -ENOMEM;
+
+        fault->addr = fault_addr;
+        fault->idxd = idxd;
+        INIT_WORK(&fault->work, idxd_device_fault_work);
+        queue_work(idxd->wq, &fault->work);
+        return 0;
+}
+
 irqreturn_t idxd_irq_handler(int vec, void *data)
 {
         struct idxd_irq_entry *irq_entry = data;
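The two helpers added above follow the usual deferred-work hand-off: the interrupt thread only records the fault address in a small GFP_ATOMIC allocation and queues it on the device workqueue, and idxd_device_fault_work() later scans the per-vector descriptor lists and frees the allocation itself. The sketch below is a compilable userspace model of that ownership hand-off, not driver code; every name in it (fault_ctx, fault_worker, schedule_fault_process) is hypothetical and a pthread stands in for queue_work().

/*
 * Userspace model of the hand-off in idxd_device_schedule_fault_process():
 * the "interrupt" side allocates a small context, records the fault address
 * and defers the scan; the worker owns the context and frees it when done.
 * All names are hypothetical; a pthread stands in for the driver workqueue.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fault_ctx {
        uint64_t addr;                  /* models idxd_fault::addr */
};

/* models idxd_device_fault_work(): runs later and frees its own context */
static void *fault_worker(void *arg)
{
        struct fault_ctx *ctx = arg;

        printf("scanning descriptor lists for fault address 0x%llx\n",
               (unsigned long long)ctx->addr);
        free(ctx);
        return NULL;
}

/* models idxd_device_schedule_fault_process(): cheap, does no scanning itself */
static int schedule_fault_process(uint64_t fault_addr)
{
        struct fault_ctx *ctx = malloc(sizeof(*ctx));
        pthread_t tid;

        if (!ctx)
                return -1;
        ctx->addr = fault_addr;
        if (pthread_create(&tid, NULL, fault_worker, ctx) != 0) {
                free(ctx);
                return -1;
        }
        return pthread_join(tid, NULL); /* joined only to keep the demo deterministic */
}

int main(void)
{
        return schedule_fault_process(0xdeadbeef) ? 1 : 0;
}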
@@ -125,6 +183,15 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
         if (!err)
                 goto out;
 
+        /*
+         * This case should rarely happen and typically is due to software
+         * programming error by the driver.
+         */
+        if (idxd->sw_err.valid &&
+            idxd->sw_err.desc_valid &&
+            idxd->sw_err.fault_addr)
+                idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);
+
 gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
 if (gensts.state == IDXD_DEVICE_STATE_HALT) {
         idxd->state = IDXD_DEV_HALTED;
@@ -152,57 +219,110 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
         return IRQ_HANDLED;
 }
 
+static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
+{
+        /*
+         * Completion address can be bad as well. Check fault address match for descriptor
+         * and completion address.
+         */
+        if ((u64)desc->hw == fault_addr ||
+            (u64)desc->completion == fault_addr) {
+                idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
+                return true;
+        }
+
+        return false;
+}
+
+static bool complete_desc(struct idxd_desc *desc)
+{
+        if (desc->completion->status) {
+                idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+                return true;
+        }
+
+        return false;
+}
+
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
-                                     int *processed)
+                                     enum irq_work_type wtype,
+                                     int *processed, u64 data)
 {
         struct idxd_desc *desc, *t;
         struct llist_node *head;
         int queued = 0;
+        bool completed = false;
+        unsigned long flags;
 
         *processed = 0;
         head = llist_del_all(&irq_entry->pending_llist);
         if (!head)
-                return 0;
+                goto out;
 
         llist_for_each_entry_safe(desc, t, head, llnode) {
-                if (desc->completion->status) {
-                        idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+                if (wtype == IRQ_WORK_NORMAL)
+                        completed = complete_desc(desc);
+                else if (wtype == IRQ_WORK_PROCESS_FAULT)
+                        completed = process_fault(desc, data);
+
+                if (completed) {
                         idxd_free_desc(desc->wq, desc);
                         (*processed)++;
+                        if (wtype == IRQ_WORK_PROCESS_FAULT)
+                                break;
                 } else {
-                        list_add_tail(&desc->list, &irq_entry->work_list);
+                        spin_lock_irqsave(&irq_entry->list_lock, flags);
+                        list_add_tail(&desc->list,
+                                      &irq_entry->work_list);
+                        spin_unlock_irqrestore(&irq_entry->list_lock, flags);
                         queued++;
                 }
         }
 
+ out:
         return queued;
 }
 
 static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
-                                 int *processed)
+                                 enum irq_work_type wtype,
+                                 int *processed, u64 data)
 {
         struct list_head *node, *next;
         int queued = 0;
+        bool completed = false;
+        unsigned long flags;
 
         *processed = 0;
+        spin_lock_irqsave(&irq_entry->list_lock, flags);
         if (list_empty(&irq_entry->work_list))
-                return 0;
+                goto out;
 
         list_for_each_safe(node, next, &irq_entry->work_list) {
                 struct idxd_desc *desc =
                         container_of(node, struct idxd_desc, list);
 
-                if (desc->completion->status) {
+                spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+                if (wtype == IRQ_WORK_NORMAL)
+                        completed = complete_desc(desc);
+                else if (wtype == IRQ_WORK_PROCESS_FAULT)
+                        completed = process_fault(desc, data);
+
+                if (completed) {
+                        spin_lock_irqsave(&irq_entry->list_lock, flags);
                         list_del(&desc->list);
-                        /* process and callback */
-                        idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+                        spin_unlock_irqrestore(&irq_entry->list_lock, flags);
                         idxd_free_desc(desc->wq, desc);
                         (*processed)++;
+                        if (wtype == IRQ_WORK_PROCESS_FAULT)
+                                return queued;
                 } else {
                         queued++;
                 }
+                spin_lock_irqsave(&irq_entry->list_lock, flags);
         }
 
+ out:
+        spin_unlock_irqrestore(&irq_entry->list_lock, flags);
         return queued;
 }
 
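With the new wtype parameter, both list walkers share one dispatch: IRQ_WORK_NORMAL retires any descriptor whose completion status is set, while IRQ_WORK_PROCESS_FAULT retires only the descriptor whose descriptor address or completion-record address equals the reported fault address, then stops. The sketch below is a compilable userspace model of that dispatch over a plain array, not driver code; the names (struct desc, process_descs, and so on) are hypothetical.

/*
 * Userspace model of the wtype dispatch shared by irq_process_work_list()
 * and irq_process_pending_llist().  Names and types are hypothetical; a
 * plain array stands in for the driver's descriptor lists.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum work_type { WORK_NORMAL, WORK_FAULT };

struct desc {
        uint64_t hw_addr;               /* models (u64)desc->hw */
        uint64_t completion_addr;       /* models (u64)desc->completion */
        uint8_t status;                 /* models desc->completion->status */
};

/* models process_fault(): match either the descriptor or its completion record */
static bool fault_matches(const struct desc *d, uint64_t fault_addr)
{
        return d->hw_addr == fault_addr || d->completion_addr == fault_addr;
}

/* models the shared walker: returns how many descriptors were retired */
static int process_descs(struct desc *descs, int n, enum work_type wtype,
                         uint64_t fault_addr)
{
        int retired = 0;

        for (int i = 0; i < n; i++) {
                bool done = (wtype == WORK_NORMAL) ?
                            descs[i].status != 0 :
                            fault_matches(&descs[i], fault_addr);

                if (!done)
                        continue;
                retired++;
                if (wtype == WORK_FAULT)        /* only one descriptor owns the fault address */
                        break;
        }
        return retired;
}

int main(void)
{
        struct desc ring[2] = {
                { .hw_addr = 0x1000, .completion_addr = 0x2000, .status = 1 },
                { .hw_addr = 0x3000, .completion_addr = 0x4000, .status = 0 },
        };

        printf("normal retired %d, fault retired %d\n",
               process_descs(ring, 2, WORK_NORMAL, 0),
               process_descs(ring, 2, WORK_FAULT, 0x4000));
        return 0;
}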
@@ -230,12 +350,14 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
          * 5. Repeat until no more descriptors.
          */
         do {
-                rc = irq_process_work_list(irq_entry, &processed);
+                rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
+                                           &processed, 0);
                 total += processed;
                 if (rc != 0)
                         continue;
 
-                rc = irq_process_pending_llist(irq_entry, &processed);
+                rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
+                                               &processed, 0);
                 total += processed;
         } while (rc != 0);
 
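On the normal completion path, idxd_desc_process() keeps alternating between the two lists, now passing IRQ_WORK_NORMAL and a dummy data value of 0, so the fault address argument only matters in fault mode. The other notable change in irq_process_work_list() is the locking discipline: list_lock is held while the shared work_list is touched and dropped around the completion callback. The sketch below is a simplified, compilable userspace model of that discipline, not driver code; it detaches one entry at a time instead of walking the list in place, and every name in it is hypothetical.

/*
 * Userspace model of the "hold the lock for list manipulation, drop it for
 * the callback" pattern in irq_process_work_list().  pthread_mutex_t stands
 * in for list_lock; a singly linked list stands in for work_list.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *work_list;

/* models idxd_dma_complete_txd(): must not run with list_lock held */
static void complete_cb(struct node *n)
{
        printf("completing descriptor %d\n", n->id);
}

static void drain_work_list(void)
{
        for (;;) {
                struct node *n;

                /* take the lock only while touching the shared list */
                pthread_mutex_lock(&list_lock);
                n = work_list;
                if (n)
                        work_list = n->next;
                pthread_mutex_unlock(&list_lock);
                if (!n)
                        break;

                /* the callback and the free run without the lock */
                complete_cb(n);
                free(n);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->id = i;
                pthread_mutex_lock(&list_lock);
                n->next = work_list;
                work_list = n;
                pthread_mutex_unlock(&list_lock);
        }
        drain_work_list();
        return 0;
}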