xref: /linux/drivers/dma/ioat/dma.c (revision 19d0070a2792181f79df01277fe00b83b9f7eda7)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel I/OAT DMA Linux driver
4  * Copyright(c) 2004 - 2015 Intel Corporation.
5  */
6 
7 /*
8  * This driver supports an Intel I/OAT DMA engine, which does asynchronous
9  * copy operations.
10  */
11 
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/pci.h>
16 #include <linux/interrupt.h>
17 #include <linux/dmaengine.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/workqueue.h>
21 #include <linux/prefetch.h>
22 #include <linux/sizes.h>
23 #include "dma.h"
24 #include "registers.h"
25 #include "hw.h"
26 
27 #include "../dmaengine.h"
28 
29 int completion_timeout = 200;
30 module_param(completion_timeout, int, 0644);
31 MODULE_PARM_DESC(completion_timeout,
32 		"set ioat completion timeout [msec] (default 200 [msec])");
33 int idle_timeout = 2000;
34 module_param(idle_timeout, int, 0644);
35 MODULE_PARM_DESC(idle_timeout,
36 		"set ioat idle timeout [msec] (default 2000 [msec])");
37 
38 #define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
39 #define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
40 
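/*
 * Usage note (illustrative, assuming the driver is built as the "ioatdma"
 * module): with the defaults above the completion watchdog fires 200 msec
 * after work is issued, and an idle channel is re-checked every 2000 msec.
 * Both values can be overridden at load time, e.g.
 *
 *	modprobe ioatdma completion_timeout=500 idle_timeout=5000
 *
 * or adjusted afterwards through /sys/module/ioatdma/parameters/.
 */
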
41 static char *chanerr_str[] = {
42 	"DMA Transfer Source Address Error",
43 	"DMA Transfer Destination Address Error",
44 	"Next Descriptor Address Error",
45 	"Descriptor Error",
46 	"Chan Address Value Error",
47 	"CHANCMD Error",
48 	"Chipset Uncorrectable Data Integrity Error",
49 	"DMA Uncorrectable Data Integrity Error",
50 	"Read Data Error",
51 	"Write Data Error",
52 	"Descriptor Control Error",
53 	"Descriptor Transfer Size Error",
54 	"Completion Address Error",
55 	"Interrupt Configuration Error",
56 	"Super extended descriptor Address Error",
57 	"Unaffiliated Error",
58 	"CRC or XOR P Error",
59 	"XOR Q Error",
60 	"Descriptor Count Error",
61 	"DIF All F detect Error",
62 	"Guard Tag verification Error",
63 	"Application Tag verification Error",
64 	"Reference Tag verification Error",
65 	"Bundle Bit Error",
66 	"Result DIF All F detect Error",
67 	"Result Guard Tag verification Error",
68 	"Result Application Tag verification Error",
69 	"Result Reference Tag verification Error",
70 };
71 
72 static void ioat_eh(struct ioatdma_chan *ioat_chan);
73 
74 static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
75 {
76 	int i;
77 
78 	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
79 		if ((chanerr >> i) & 1) {
80 			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
81 				i, chanerr_str[i]);
82 		}
83 	}
84 }
85 
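/*
 * Example (hypothetical value): a CHANERR readout of 0x3 would be decoded
 * by ioat_print_chanerrs() as
 *
 *	Err(0): DMA Transfer Source Address Error
 *	Err(1): DMA Transfer Destination Address Error
 *
 * i.e. one line per set bit, indexed into chanerr_str[] above.
 */
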
86 /**
87  * ioat_dma_do_interrupt - handler used for single vector interrupt mode
88  * @irq: interrupt id
89  * @data: interrupt data
90  */
91 irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
92 {
93 	struct ioatdma_device *instance = data;
94 	struct ioatdma_chan *ioat_chan;
95 	unsigned long attnstatus;
96 	int bit;
97 	u8 intrctrl;
98 
99 	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);
100 
101 	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
102 		return IRQ_NONE;
103 
104 	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
105 		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
106 		return IRQ_NONE;
107 	}
108 
109 	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
110 	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
111 		ioat_chan = ioat_chan_by_index(instance, bit);
112 		if (test_bit(IOAT_RUN, &ioat_chan->state))
113 			tasklet_schedule(&ioat_chan->cleanup_task);
114 	}
115 
116 	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
117 	return IRQ_HANDLED;
118 }
119 
120 /**
121  * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
122  * @irq: interrupt id
123  * @data: interrupt data
124  */
125 irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
126 {
127 	struct ioatdma_chan *ioat_chan = data;
128 
129 	if (test_bit(IOAT_RUN, &ioat_chan->state))
130 		tasklet_schedule(&ioat_chan->cleanup_task);
131 
132 	return IRQ_HANDLED;
133 }
134 
135 void ioat_stop(struct ioatdma_chan *ioat_chan)
136 {
137 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
138 	struct pci_dev *pdev = ioat_dma->pdev;
139 	int chan_id = chan_num(ioat_chan);
140 	struct msix_entry *msix;
141 
142 	/* 1/ stop irq from firing tasklets
143 	 * 2/ stop the tasklet from re-arming irqs
144 	 */
145 	clear_bit(IOAT_RUN, &ioat_chan->state);
146 
147 	/* flush inflight interrupts */
148 	switch (ioat_dma->irq_mode) {
149 	case IOAT_MSIX:
150 		msix = &ioat_dma->msix_entries[chan_id];
151 		synchronize_irq(msix->vector);
152 		break;
153 	case IOAT_MSI:
154 	case IOAT_INTX:
155 		synchronize_irq(pdev->irq);
156 		break;
157 	default:
158 		break;
159 	}
160 
161 	/* flush inflight timers */
162 	del_timer_sync(&ioat_chan->timer);
163 
164 	/* flush inflight tasklet runs */
165 	tasklet_kill(&ioat_chan->cleanup_task);
166 
167 	/* final cleanup now that everything is quiesced and can't re-arm */
168 	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
169 }
170 
171 static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
172 {
173 	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
174 	ioat_chan->issued = ioat_chan->head;
175 	writew(ioat_chan->dmacount,
176 	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
177 	dev_dbg(to_dev(ioat_chan),
178 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
179 		__func__, ioat_chan->head, ioat_chan->tail,
180 		ioat_chan->issued, ioat_chan->dmacount);
181 }
182 
183 void ioat_issue_pending(struct dma_chan *c)
184 {
185 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
186 
187 	if (ioat_ring_pending(ioat_chan)) {
188 		spin_lock_bh(&ioat_chan->prep_lock);
189 		__ioat_issue_pending(ioat_chan);
190 		spin_unlock_bh(&ioat_chan->prep_lock);
191 	}
192 }
193 
194 /**
195  * ioat_update_pending - issue pending descriptors once past the watermark
196  * @ioat_chan: ioat channel
197  *
198  * Check if the number of unsubmitted descriptors has exceeded the
199  * watermark.  Called with prep_lock held
200  */
201 static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
202 {
203 	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
204 		__ioat_issue_pending(ioat_chan);
205 }
206 
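/*
 * Note: descriptors prepared on the ring are not visible to the hardware
 * until ioat_issue_pending() runs, unless the number of pending entries
 * crosses ioat_pending_level, in which case ioat_update_pending() pushes
 * the new DMACOUNT to the device immediately from the prep path.
 */
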
207 static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
208 {
209 	struct ioat_ring_ent *desc;
210 	struct ioat_dma_descriptor *hw;
211 
212 	if (ioat_ring_space(ioat_chan) < 1) {
213 		dev_err(to_dev(ioat_chan),
214 			"Unable to start null desc - ring full\n");
215 		return;
216 	}
217 
218 	dev_dbg(to_dev(ioat_chan),
219 		"%s: head: %#x tail: %#x issued: %#x\n",
220 		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
221 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);
222 
223 	hw = desc->hw;
224 	hw->ctl = 0;
225 	hw->ctl_f.null = 1;
226 	hw->ctl_f.int_en = 1;
227 	hw->ctl_f.compl_write = 1;
228 	/* set size to non-zero value (channel returns error when size is 0) */
229 	hw->size = NULL_DESC_BUFFER_SIZE;
230 	hw->src_addr = 0;
231 	hw->dst_addr = 0;
232 	async_tx_ack(&desc->txd);
233 	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
234 	dump_desc_dbg(ioat_chan, desc);
235 	/* make sure descriptors are written before we submit */
236 	wmb();
237 	ioat_chan->head += 1;
238 	__ioat_issue_pending(ioat_chan);
239 }
240 
241 void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
242 {
243 	spin_lock_bh(&ioat_chan->prep_lock);
244 	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
245 		__ioat_start_null_desc(ioat_chan);
246 	spin_unlock_bh(&ioat_chan->prep_lock);
247 }
248 
249 static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
250 {
251 	/* set the tail to be re-issued */
252 	ioat_chan->issued = ioat_chan->tail;
253 	ioat_chan->dmacount = 0;
254 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
255 
256 	dev_dbg(to_dev(ioat_chan),
257 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
258 		__func__, ioat_chan->head, ioat_chan->tail,
259 		ioat_chan->issued, ioat_chan->dmacount);
260 
261 	if (ioat_ring_pending(ioat_chan)) {
262 		struct ioat_ring_ent *desc;
263 
264 		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
265 		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
266 		__ioat_issue_pending(ioat_chan);
267 	} else
268 		__ioat_start_null_desc(ioat_chan);
269 }
270 
271 static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
272 {
273 	unsigned long end = jiffies + tmo;
274 	int err = 0;
275 	u32 status;
276 
277 	status = ioat_chansts(ioat_chan);
278 	if (is_ioat_active(status) || is_ioat_idle(status))
279 		ioat_suspend(ioat_chan);
280 	while (is_ioat_active(status) || is_ioat_idle(status)) {
281 		if (tmo && time_after(jiffies, end)) {
282 			err = -ETIMEDOUT;
283 			break;
284 		}
285 		status = ioat_chansts(ioat_chan);
286 		cpu_relax();
287 	}
288 
289 	return err;
290 }
291 
292 static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
293 {
294 	unsigned long end = jiffies + tmo;
295 	int err = 0;
296 
297 	ioat_reset(ioat_chan);
298 	while (ioat_reset_pending(ioat_chan)) {
299 		if (end && time_after(jiffies, end)) {
300 			err = -ETIMEDOUT;
301 			break;
302 		}
303 		cpu_relax();
304 	}
305 
306 	return err;
307 }
308 
309 static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
310 	__releases(&ioat_chan->prep_lock)
311 {
312 	struct dma_chan *c = tx->chan;
313 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
314 	dma_cookie_t cookie;
315 
316 	cookie = dma_cookie_assign(tx);
317 	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
318 
319 	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
320 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
321 
322 	/* make descriptor updates visible before advancing ioat->head,
323 	 * this is purposefully not smp_wmb() since we are also
324 	 * publishing the descriptor updates to a dma device
325 	 */
326 	wmb();
327 
328 	ioat_chan->head += ioat_chan->produce;
329 
330 	ioat_update_pending(ioat_chan);
331 	spin_unlock_bh(&ioat_chan->prep_lock);
332 
333 	return cookie;
334 }
335 
336 static struct ioat_ring_ent *
337 ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
338 {
339 	struct ioat_dma_descriptor *hw;
340 	struct ioat_ring_ent *desc;
341 	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
342 	int chunk;
343 	dma_addr_t phys;
344 	u8 *pos;
345 	off_t offs;
346 
347 	chunk = idx / IOAT_DESCS_PER_CHUNK;
348 	idx &= (IOAT_DESCS_PER_CHUNK - 1);
349 	offs = idx * IOAT_DESC_SZ;
350 	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
351 	phys = ioat_chan->descs[chunk].hw + offs;
352 	hw = (struct ioat_dma_descriptor *)pos;
353 	memset(hw, 0, sizeof(*hw));
354 
355 	desc = kmem_cache_zalloc(ioat_cache, flags);
356 	if (!desc)
357 		return NULL;
358 
359 	dma_async_tx_descriptor_init(&desc->txd, chan);
360 	desc->txd.tx_submit = ioat_tx_submit_unlock;
361 	desc->hw = hw;
362 	desc->txd.phys = phys;
363 	return desc;
364 }
365 
366 void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
367 {
368 	kmem_cache_free(ioat_cache, desc);
369 }
370 
371 struct ioat_ring_ent **
372 ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
373 {
374 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
375 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
376 	struct ioat_ring_ent **ring;
377 	int total_descs = 1 << order;
378 	int i, chunks;
379 
380 	/* allocate the array to hold the software ring */
381 	ring = kcalloc(total_descs, sizeof(*ring), flags);
382 	if (!ring)
383 		return NULL;
384 
385 	chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
386 	ioat_chan->desc_chunks = chunks;
387 
388 	for (i = 0; i < chunks; i++) {
389 		struct ioat_descs *descs = &ioat_chan->descs[i];
390 
391 		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
392 						 SZ_2M, &descs->hw, flags);
393 		if (!descs->virt) {
394 			int idx;
395 
396 			for (idx = 0; idx < i; idx++) {
397 				descs = &ioat_chan->descs[idx];
398 				dma_free_coherent(to_dev(ioat_chan),
399 						IOAT_CHUNK_SIZE,
400 						descs->virt, descs->hw);
401 				descs->virt = NULL;
402 				descs->hw = 0;
403 			}
404 
405 			ioat_chan->desc_chunks = 0;
406 			kfree(ring);
407 			return NULL;
408 		}
409 	}
410 
411 	for (i = 0; i < total_descs; i++) {
412 		ring[i] = ioat_alloc_ring_ent(c, i, flags);
413 		if (!ring[i]) {
414 			int idx;
415 
416 			while (i--)
417 				ioat_free_ring_ent(ring[i], c);
418 
419 			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
420 				dma_free_coherent(to_dev(ioat_chan),
421 						  IOAT_CHUNK_SIZE,
422 						  ioat_chan->descs[idx].virt,
423 						  ioat_chan->descs[idx].hw);
424 				ioat_chan->descs[idx].virt = NULL;
425 				ioat_chan->descs[idx].hw = 0;
426 			}
427 
428 			ioat_chan->desc_chunks = 0;
429 			kfree(ring);
430 			return NULL;
431 		}
432 		set_desc_id(ring[i], i);
433 	}
434 
435 	/* link descs */
436 	for (i = 0; i < total_descs-1; i++) {
437 		struct ioat_ring_ent *next = ring[i+1];
438 		struct ioat_dma_descriptor *hw = ring[i]->hw;
439 
440 		hw->next = next->txd.phys;
441 	}
442 	ring[i]->hw->next = ring[0]->txd.phys;
443 
444 	/* setup descriptor pre-fetching for v3.4 */
445 	if (ioat_dma->cap & IOAT_CAP_DPS) {
446 		u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;
447 
448 		if (chunks == 1)
449 			drsctl |= IOAT_CHAN_DRS_AUTOWRAP;
450 
451 		writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
452 
453 	}
454 
455 	return ring;
456 }
457 
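/*
 * Sizing sketch (assuming a 64-byte hardware descriptor, IOAT_DESC_SZ, and
 * the 2MB chunk size used for dma_alloc_coherent() above): each coherent
 * chunk holds 2MB / 64B = 32768 descriptors, so, for example, an order-16
 * ring (65536 entries) is carved out of two chunks.  Every descriptor's
 * hw->next points at the next entry and the last one wraps back to ring[0].
 */
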
458 /**
459  * ioat_check_space_lock - verify space and grab ring producer lock
460  * @ioat_chan: ioat channel (ring) to operate on
461  * @num_descs: allocation length
462  */
463 int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
464 	__acquires(&ioat_chan->prep_lock)
465 {
466 	spin_lock_bh(&ioat_chan->prep_lock);
467 	/* never allow the last descriptor to be consumed, we need at
468 	 * least one free at all times to allow for on-the-fly ring
469 	 * resizing.
470 	 */
471 	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
472 		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
473 			__func__, num_descs, ioat_chan->head,
474 			ioat_chan->tail, ioat_chan->issued);
475 		ioat_chan->produce = num_descs;
476 		return 0;  /* with ioat->prep_lock held */
477 	}
478 	spin_unlock_bh(&ioat_chan->prep_lock);
479 
480 	dev_dbg_ratelimited(to_dev(ioat_chan),
481 			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
482 			    __func__, num_descs, ioat_chan->head,
483 			    ioat_chan->tail, ioat_chan->issued);
484 
485 	/* Progress reclaim in the allocation-failure case: we may be
486 	 * called with bottom halves disabled, so we need to trigger the
487 	 * timer event directly.
488 	 */
489 	if (time_is_before_jiffies(ioat_chan->timer.expires)
490 	    && timer_pending(&ioat_chan->timer)) {
491 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
492 		ioat_timer_event(&ioat_chan->timer);
493 	}
494 
495 	return -ENOMEM;
496 }
497 
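/*
 * A minimal sketch of how the prep routines (see prep.c) are expected to use
 * this under the locking rules above (illustrative, not a verbatim copy):
 *
 *	if (ioat_check_space_lock(ioat_chan, num_descs) == 0) {
 *		// prep_lock is held and ioat_chan->produce == num_descs;
 *		// fill ring entries starting at ioat_chan->head and return
 *		// the last descriptor to the client, whose tx_submit call
 *		// (ioat_tx_submit_unlock) advances head and drops the lock.
 *	} else {
 *		// -ENOMEM: ring full, fail or retry the request
 *	}
 */
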
498 static bool desc_has_ext(struct ioat_ring_ent *desc)
499 {
500 	struct ioat_dma_descriptor *hw = desc->hw;
501 
502 	if (hw->ctl_f.op == IOAT_OP_XOR ||
503 	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
504 		struct ioat_xor_descriptor *xor = desc->xor;
505 
506 		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
507 			return true;
508 	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
509 		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
510 		struct ioat_pq_descriptor *pq = desc->pq;
511 
512 		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
513 			return true;
514 	}
515 
516 	return false;
517 }
518 
519 static void
520 ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
521 {
522 	if (!sed)
523 		return;
524 
525 	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
526 	kmem_cache_free(ioat_sed_cache, sed);
527 }
528 
529 static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
530 {
531 	u64 phys_complete;
532 	u64 completion;
533 
534 	completion = *ioat_chan->completion;
535 	phys_complete = ioat_chansts_to_addr(completion);
536 
537 	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
538 		(unsigned long long) phys_complete);
539 
540 	return phys_complete;
541 }
542 
543 static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
544 				   u64 *phys_complete)
545 {
546 	*phys_complete = ioat_get_current_completion(ioat_chan);
547 	if (*phys_complete == ioat_chan->last_completion)
548 		return false;
549 
550 	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
551 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
552 
553 	return true;
554 }
555 
556 static void
557 desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
558 {
559 	struct ioat_dma_descriptor *hw = desc->hw;
560 
561 	switch (hw->ctl_f.op) {
562 	case IOAT_OP_PQ_VAL:
563 	case IOAT_OP_PQ_VAL_16S:
564 	{
565 		struct ioat_pq_descriptor *pq = desc->pq;
566 
567 		/* check if there's error written */
568 		if (!pq->dwbes_f.wbes)
569 			return;
570 
571 		/* need to record chanerr so it can be checked and cleared later */
572 
573 		if (pq->dwbes_f.p_val_err)
574 			*desc->result |= SUM_CHECK_P_RESULT;
575 
576 		if (pq->dwbes_f.q_val_err)
577 			*desc->result |= SUM_CHECK_Q_RESULT;
578 
579 		return;
580 	}
581 	default:
582 		return;
583 	}
584 }
585 
586 /**
587  * __cleanup - reclaim used descriptors
588  * @ioat_chan: ioat channel (ring) to clean
589  */
590 static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
591 {
592 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
593 	struct ioat_ring_ent *desc;
594 	bool seen_current = false;
595 	int idx = ioat_chan->tail, i;
596 	u16 active;
597 
598 	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
599 		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
600 
601 	/*
602 	 * At restart of the channel, the completion address and the
603 	 * channel status will be 0 due to starting a new chain. Since
604 	 * it's a new chain and the first descriptor "fails", there is
605 	 * nothing to clean up. We do not want to reap the entire submitted
606 	 * chain due to this 0 address value and then BUG.
607 	 */
608 	if (!phys_complete)
609 		return;
610 
611 	active = ioat_ring_active(ioat_chan);
612 	for (i = 0; i < active && !seen_current; i++) {
613 		struct dma_async_tx_descriptor *tx;
614 
615 		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
616 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
617 		dump_desc_dbg(ioat_chan, desc);
618 
619 		/* set err stat if we are using dwbes */
620 		if (ioat_dma->cap & IOAT_CAP_DWBES)
621 			desc_get_errstat(ioat_chan, desc);
622 
623 		tx = &desc->txd;
624 		if (tx->cookie) {
625 			dma_cookie_complete(tx);
626 			dma_descriptor_unmap(tx);
627 			dmaengine_desc_get_callback_invoke(tx, NULL);
628 			tx->callback = NULL;
629 			tx->callback_result = NULL;
630 		}
631 
632 		if (tx->phys == phys_complete)
633 			seen_current = true;
634 
635 		/* skip extended descriptors */
636 		if (desc_has_ext(desc)) {
637 			BUG_ON(i + 1 >= active);
638 			i++;
639 		}
640 
641 		/* cleanup super extended descriptors */
642 		if (desc->sed) {
643 			ioat_free_sed(ioat_dma, desc->sed);
644 			desc->sed = NULL;
645 		}
646 	}
647 
648 	/* finish all descriptor reads before incrementing tail */
649 	smp_mb();
650 	ioat_chan->tail = idx + i;
651 	/* no active descs have written a completion? */
652 	BUG_ON(active && !seen_current);
653 	ioat_chan->last_completion = phys_complete;
654 
655 	if (active - i == 0) {
656 		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
657 			__func__);
658 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
659 	}
660 
661 	/* microsecond delay per pending descriptor, configured via sysfs */
662 	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
663 		writew(min((ioat_chan->intr_coalesce * (active - i)),
664 		       IOAT_INTRDELAY_MASK),
665 		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
666 		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
667 	}
668 }
669 
670 static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
671 {
672 	u64 phys_complete;
673 
674 	spin_lock_bh(&ioat_chan->cleanup_lock);
675 
676 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
677 		__cleanup(ioat_chan, phys_complete);
678 
679 	if (is_ioat_halted(*ioat_chan->completion)) {
680 		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
681 
682 		if (chanerr &
683 		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
684 			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
685 			ioat_eh(ioat_chan);
686 		}
687 	}
688 
689 	spin_unlock_bh(&ioat_chan->cleanup_lock);
690 }
691 
692 void ioat_cleanup_event(unsigned long data)
693 {
694 	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
695 
696 	ioat_cleanup(ioat_chan);
697 	if (!test_bit(IOAT_RUN, &ioat_chan->state))
698 		return;
699 	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
700 }
701 
702 static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
703 {
704 	u64 phys_complete;
705 
706 	/* set the completion address register again */
707 	writel(lower_32_bits(ioat_chan->completion_dma),
708 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
709 	writel(upper_32_bits(ioat_chan->completion_dma),
710 	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
711 
712 	ioat_quiesce(ioat_chan, 0);
713 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
714 		__cleanup(ioat_chan, phys_complete);
715 
716 	__ioat_restart_chan(ioat_chan);
717 }
718 
719 
720 static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
721 {
722 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
723 	struct ioat_ring_ent *desc;
724 	u16 active;
725 	int idx = ioat_chan->tail, i;
726 
727 	/*
728 	 * We assume that the failed descriptor has been processed.
729 	 * Now we just return all of the remaining submitted
730 	 * descriptors to the client as aborted.
731 	 */
732 	active = ioat_ring_active(ioat_chan);
733 
734 	/* we skip the failed descriptor that tail points to */
735 	for (i = 1; i < active; i++) {
736 		struct dma_async_tx_descriptor *tx;
737 
738 		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
739 		desc = ioat_get_ring_ent(ioat_chan, idx + i);
740 
741 		tx = &desc->txd;
742 		if (tx->cookie) {
743 			struct dmaengine_result res;
744 
745 			dma_cookie_complete(tx);
746 			dma_descriptor_unmap(tx);
747 			res.result = DMA_TRANS_ABORTED;
748 			dmaengine_desc_get_callback_invoke(tx, &res);
749 			tx->callback = NULL;
750 			tx->callback_result = NULL;
751 		}
752 
753 		/* skip extended descriptors */
754 		if (desc_has_ext(desc)) {
755 			WARN_ON(i + 1 >= active);
756 			i++;
757 		}
758 
759 		/* cleanup super extended descriptors */
760 		if (desc->sed) {
761 			ioat_free_sed(ioat_dma, desc->sed);
762 			desc->sed = NULL;
763 		}
764 	}
765 
766 	smp_mb(); /* finish all descriptor reads before incrementing tail */
767 	ioat_chan->tail = idx + active;
768 
769 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
770 	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
771 }
772 
773 static void ioat_eh(struct ioatdma_chan *ioat_chan)
774 {
775 	struct pci_dev *pdev = to_pdev(ioat_chan);
776 	struct ioat_dma_descriptor *hw;
777 	struct dma_async_tx_descriptor *tx;
778 	u64 phys_complete;
779 	struct ioat_ring_ent *desc;
780 	u32 err_handled = 0;
781 	u32 chanerr_int;
782 	u32 chanerr;
783 	bool abort = false;
784 	struct dmaengine_result res;
785 
786 	/* cleanup so tail points to descriptor that caused the error */
787 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
788 		__cleanup(ioat_chan, phys_complete);
789 
790 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
791 	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
792 
793 	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
794 		__func__, chanerr, chanerr_int);
795 
796 	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
797 	hw = desc->hw;
798 	dump_desc_dbg(ioat_chan, desc);
799 
800 	switch (hw->ctl_f.op) {
801 	case IOAT_OP_XOR_VAL:
802 		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
803 			*desc->result |= SUM_CHECK_P_RESULT;
804 			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
805 		}
806 		break;
807 	case IOAT_OP_PQ_VAL:
808 	case IOAT_OP_PQ_VAL_16S:
809 		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
810 			*desc->result |= SUM_CHECK_P_RESULT;
811 			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
812 		}
813 		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
814 			*desc->result |= SUM_CHECK_Q_RESULT;
815 			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
816 		}
817 		break;
818 	}
819 
820 	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
821 		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
822 			res.result = DMA_TRANS_READ_FAILED;
823 			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
824 		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
825 			res.result = DMA_TRANS_WRITE_FAILED;
826 			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
827 		}
828 
829 		abort = true;
830 	} else
831 		res.result = DMA_TRANS_NOERROR;
832 
833 	/* fault on unhandled error or spurious halt */
834 	if (chanerr ^ err_handled || chanerr == 0) {
835 		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
836 			__func__, chanerr, err_handled);
837 		dev_err(to_dev(ioat_chan), "Errors handled:\n");
838 		ioat_print_chanerrs(ioat_chan, err_handled);
839 		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
840 		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));
841 
842 		BUG();
843 	}
844 
845 	/* cleanup the faulty descriptor since we are continuing */
846 	tx = &desc->txd;
847 	if (tx->cookie) {
848 		dma_cookie_complete(tx);
849 		dma_descriptor_unmap(tx);
850 		dmaengine_desc_get_callback_invoke(tx, &res);
851 		tx->callback = NULL;
852 		tx->callback_result = NULL;
853 	}
854 
855 	/* mark faulting descriptor as complete */
856 	*ioat_chan->completion = desc->txd.phys;
857 
858 	spin_lock_bh(&ioat_chan->prep_lock);
859 	/* we need to abort all descriptors */
860 	if (abort) {
861 		ioat_abort_descs(ioat_chan);
862 		/* clean up the channel, we could be in weird state */
863 		ioat_reset_hw(ioat_chan);
864 	}
865 
866 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
867 	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
868 
869 	ioat_restart_channel(ioat_chan);
870 	spin_unlock_bh(&ioat_chan->prep_lock);
871 }
872 
873 static void check_active(struct ioatdma_chan *ioat_chan)
874 {
875 	if (ioat_ring_active(ioat_chan)) {
876 		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
877 		return;
878 	}
879 
880 	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
881 		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
882 }
883 
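/*
 * Timer state machine in brief: while descriptors are outstanding the
 * watchdog re-arms every COMPLETION_TIMEOUT to detect stalled completions;
 * once the ring drains, check_active() backs the timer off to IDLE_TIMEOUT
 * and clears IOAT_CHAN_ACTIVE so that the next submission re-arms the short
 * timeout (see ioat_tx_submit_unlock()).
 */
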
884 static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
885 {
886 	spin_lock_bh(&ioat_chan->prep_lock);
887 	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
888 	spin_unlock_bh(&ioat_chan->prep_lock);
889 
890 	ioat_abort_descs(ioat_chan);
891 	dev_warn(to_dev(ioat_chan), "Reset channel...\n");
892 	ioat_reset_hw(ioat_chan);
893 	dev_warn(to_dev(ioat_chan), "Restart channel...\n");
894 	ioat_restart_channel(ioat_chan);
895 
896 	spin_lock_bh(&ioat_chan->prep_lock);
897 	clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
898 	spin_unlock_bh(&ioat_chan->prep_lock);
899 }
900 
901 void ioat_timer_event(struct timer_list *t)
902 {
903 	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
904 	dma_addr_t phys_complete;
905 	u64 status;
906 
907 	status = ioat_chansts(ioat_chan);
908 
909 	/* when halted due to errors check for channel
910 	 * programming errors before advancing the completion state
911 	 */
912 	if (is_ioat_halted(status)) {
913 		u32 chanerr;
914 
915 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
916 		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
917 			__func__, chanerr);
918 		dev_err(to_dev(ioat_chan), "Errors:\n");
919 		ioat_print_chanerrs(ioat_chan, chanerr);
920 
921 		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
922 			spin_lock_bh(&ioat_chan->cleanup_lock);
923 			ioat_reboot_chan(ioat_chan);
924 			spin_unlock_bh(&ioat_chan->cleanup_lock);
925 		}
926 
927 		return;
928 	}
929 
930 	spin_lock_bh(&ioat_chan->cleanup_lock);
931 
932 	/* handle the no-actives case */
933 	if (!ioat_ring_active(ioat_chan)) {
934 		spin_lock_bh(&ioat_chan->prep_lock);
935 		check_active(ioat_chan);
936 		spin_unlock_bh(&ioat_chan->prep_lock);
937 		goto unlock_out;
938 	}
939 
940 	/* handle the missed cleanup case */
941 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
942 		/* timer restarted in ioat_cleanup_preamble
943 		 * and IOAT_COMPLETION_ACK cleared
944 		 */
945 		__cleanup(ioat_chan, phys_complete);
946 		goto unlock_out;
947 	}
948 
949 	/* if we haven't made progress and we have already
950 	 * acknowledged a pending completion once, then be more
951 	 * forceful with a restart
952 	 */
953 	if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
954 		u32 chanerr;
955 
956 		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
957 		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
958 			status, chanerr);
959 		dev_err(to_dev(ioat_chan), "Errors:\n");
960 		ioat_print_chanerrs(ioat_chan, chanerr);
961 
962 		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
963 			ioat_ring_active(ioat_chan));
964 
965 		ioat_reboot_chan(ioat_chan);
966 
967 		goto unlock_out;
968 	}
969 
970 	/* handle missed issue pending case */
971 	if (ioat_ring_pending(ioat_chan)) {
972 		dev_warn(to_dev(ioat_chan),
973 			"Completion timeout with pending descriptors\n");
974 		spin_lock_bh(&ioat_chan->prep_lock);
975 		__ioat_issue_pending(ioat_chan);
976 		spin_unlock_bh(&ioat_chan->prep_lock);
977 	}
978 
979 	set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
980 	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
981 unlock_out:
982 	spin_unlock_bh(&ioat_chan->cleanup_lock);
983 }
984 
985 enum dma_status
986 ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
987 		struct dma_tx_state *txstate)
988 {
989 	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
990 	enum dma_status ret;
991 
992 	ret = dma_cookie_status(c, cookie, txstate);
993 	if (ret == DMA_COMPLETE)
994 		return ret;
995 
996 	ioat_cleanup(ioat_chan);
997 
998 	return dma_cookie_status(c, cookie, txstate);
999 }
1000 
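/*
 * From a dmaengine client's point of view this is reached through the
 * generic helpers; a simplified polling loop might look like the following
 * (illustrative only, error handling omitted):
 *
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *	       != DMA_COMPLETE)
 *		cpu_relax();
 */
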
1001 int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
1002 {
1003 	/* throw away whatever the channel was doing and get it
1004 	 * initialized, with ioat3 specific workarounds
1005 	 */
1006 	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
1007 	struct pci_dev *pdev = ioat_dma->pdev;
1008 	u32 chanerr;
1009 	u16 dev_id;
1010 	int err;
1011 
1012 	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));
1013 
1014 	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
1015 	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
1016 
1017 	if (ioat_dma->version < IOAT_VER_3_3) {
1018 		/* clear any pending errors */
1019 		err = pci_read_config_dword(pdev,
1020 				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
1021 		if (err) {
1022 			dev_err(&pdev->dev,
1023 				"channel error register unreachable\n");
1024 			return err;
1025 		}
1026 		pci_write_config_dword(pdev,
1027 				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
1028 
1029 		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1030 		 * (workaround for spurious config parity error after restart)
1031 		 */
1032 		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1033 		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
1034 			pci_write_config_dword(pdev,
1035 					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
1036 					       0x10);
1037 		}
1038 	}
1039 
1040 	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
1041 		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
1042 		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
1043 		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
1044 	}
1045 
1046 
1047 	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
1048 	if (!err) {
1049 		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
1050 			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
1051 			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
1052 			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
1053 		}
1054 	}
1055 
1056 	if (err)
1057 		dev_err(&pdev->dev, "Failed to reset: %d\n", err);
1058 
1059 	return err;
1060 }
1061