xref: /linux/drivers/ntb/hw/mscc/ntb_hw_switchtec.c (revision 3bdab16c55f57a24245c97d707241dd9b48d1a91)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Microsemi Switchtec(tm) PCIe Management Driver
4  * Copyright (c) 2017, Microsemi Corporation
5  */
6 
7 #include <linux/interrupt.h>
8 #include <linux/io-64-nonatomic-lo-hi.h>
9 #include <linux/delay.h>
10 #include <linux/kthread.h>
11 #include <linux/module.h>
12 #include <linux/ntb.h>
13 #include <linux/pci.h>
14 #include <linux/switchtec.h>
15 
16 MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver");
17 MODULE_VERSION("0.1");
18 MODULE_LICENSE("GPL");
19 MODULE_AUTHOR("Microsemi Corporation");
20 
21 static ulong max_mw_size = SZ_2M;
22 module_param(max_mw_size, ulong, 0644);
23 MODULE_PARM_DESC(max_mw_size,
24 	"Max memory window size reported to the upper layer");
25 
26 static bool use_lut_mws;
27 module_param(use_lut_mws, bool, 0644);
28 MODULE_PARM_DESC(use_lut_mws,
29 		 "Enable the use of LUT-based memory windows");
30 
31 #define SWITCHTEC_NTB_MAGIC 0x45CC0001
32 #define MAX_MWS     128
33 
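/*
 * Layout of the buffer exposed to the peer through a reserved LUT window
 * (see switchtec_ntb_init_shared_mw()): a magic/link-status pair, our
 * partition ID, the advertised size of each memory window, and the
 * scratchpad array backing the spad_read()/spad_write() operations.
 */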
34 struct shared_mw {
35 	u32 magic;
36 	u32 link_sta;
37 	u32 partition_id;
38 	u64 mw_sizes[MAX_MWS];
39 	u32 spad[128];
40 };
41 
42 #define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry)
43 #define LUT_SIZE SZ_64K
44 
45 struct switchtec_ntb {
46 	struct ntb_dev ntb;
47 	struct switchtec_dev *stdev;
48 
49 	int self_partition;
50 	int peer_partition;
51 
52 	int doorbell_irq;
53 	int message_irq;
54 
55 	struct ntb_info_regs __iomem *mmio_ntb;
56 	struct ntb_ctrl_regs __iomem *mmio_ctrl;
57 	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;
58 	struct ntb_ctrl_regs __iomem *mmio_self_ctrl;
59 	struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
60 	struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg;
61 	struct ntb_dbmsg_regs __iomem *mmio_peer_dbmsg;
62 
63 	void __iomem *mmio_xlink_win;
64 
65 	struct shared_mw *self_shared;
66 	struct shared_mw __iomem *peer_shared;
67 	dma_addr_t self_shared_dma;
68 
69 	u64 db_mask;
70 	u64 db_valid_mask;
71 	int db_shift;
72 	int db_peer_shift;
73 
74 	/* serialize read-modify-write access to db_mask and the hw mask reg */
75 	spinlock_t db_mask_lock;
76 
77 	int nr_direct_mw;
78 	int nr_lut_mw;
79 	int nr_rsvd_luts;
80 	int direct_mw_to_bar[MAX_DIRECT_MW];
81 
82 	int peer_nr_direct_mw;
83 	int peer_nr_lut_mw;
84 	int peer_direct_mw_to_bar[MAX_DIRECT_MW];
85 
86 	bool link_is_up;
87 	enum ntb_speed link_speed;
88 	enum ntb_width link_width;
89 	struct work_struct link_reinit_work;
90 };
91 
92 static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb)
93 {
94 	return container_of(ntb, struct switchtec_ntb, ntb);
95 }
96 
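/*
 * Issue a partition operation (lock/configure/reset) and poll the
 * partition status register until it leaves the corresponding
 * transitional state (up to roughly 50 seconds in 50 ms steps).
 * Returns 0 once the expected wait_status is reached.
 */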
97 static int switchtec_ntb_part_op(struct switchtec_ntb *sndev,
98 				 struct ntb_ctrl_regs __iomem *ctl,
99 				 u32 op, int wait_status)
100 {
101 	static const char * const op_text[] = {
102 		[NTB_CTRL_PART_OP_LOCK] = "lock",
103 		[NTB_CTRL_PART_OP_CFG] = "configure",
104 		[NTB_CTRL_PART_OP_RESET] = "reset",
105 	};
106 
107 	int i;
108 	u32 ps;
109 	int status;
110 
111 	switch (op) {
112 	case NTB_CTRL_PART_OP_LOCK:
113 		status = NTB_CTRL_PART_STATUS_LOCKING;
114 		break;
115 	case NTB_CTRL_PART_OP_CFG:
116 		status = NTB_CTRL_PART_STATUS_CONFIGURING;
117 		break;
118 	case NTB_CTRL_PART_OP_RESET:
119 		status = NTB_CTRL_PART_STATUS_RESETTING;
120 		break;
121 	default:
122 		return -EINVAL;
123 	}
124 
125 	iowrite32(op, &ctl->partition_op);
126 
127 	for (i = 0; i < 1000; i++) {
128 		if (msleep_interruptible(50) != 0) {
129 			iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op);
130 			return -EINTR;
131 		}
132 
133 		ps = ioread32(&ctl->partition_status) & 0xFFFF;
134 
135 		if (ps != status)
136 			break;
137 	}
138 
139 	if (ps == wait_status)
140 		return 0;
141 
142 	if (ps == status) {
143 		dev_err(&sndev->stdev->dev,
144 			"Timed out while performing %s (%d). (%08x)\n",
145 			op_text[op], op,
146 			ioread32(&ctl->partition_status));
147 
148 		return -ETIMEDOUT;
149 	}
150 
151 	return -EIO;
152 }
153 
154 static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx,
155 				  u32 val)
156 {
157 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_peer_dbmsg->omsg))
158 		return -EINVAL;
159 
160 	iowrite32(val, &sndev->mmio_peer_dbmsg->omsg[idx].msg);
161 
162 	return 0;
163 }
164 
165 static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx)
166 {
167 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
168 	int nr_direct_mw = sndev->peer_nr_direct_mw;
169 	int nr_lut_mw = sndev->peer_nr_lut_mw - sndev->nr_rsvd_luts;
170 
171 	if (pidx != NTB_DEF_PEER_IDX)
172 		return -EINVAL;
173 
174 	if (!use_lut_mws)
175 		nr_lut_mw = 0;
176 
177 	return nr_direct_mw + nr_lut_mw;
178 }
179 
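/*
 * Client-visible memory windows are numbered with the direct windows
 * first and the LUT windows after them; the first nr_rsvd_luts LUT
 * entries are reserved for internal use (shared window and crosslink),
 * so translate a window index into the corresponding LUT entry here.
 */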
180 static int lut_index(struct switchtec_ntb *sndev, int mw_idx)
181 {
182 	return mw_idx - sndev->nr_direct_mw + sndev->nr_rsvd_luts;
183 }
184 
185 static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx)
186 {
187 	return mw_idx - sndev->peer_nr_direct_mw + sndev->nr_rsvd_luts;
188 }
189 
190 static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx,
191 				      int widx, resource_size_t *addr_align,
192 				      resource_size_t *size_align,
193 				      resource_size_t *size_max)
194 {
195 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
196 	int lut;
197 	resource_size_t size;
198 
199 	if (pidx != NTB_DEF_PEER_IDX)
200 		return -EINVAL;
201 
202 	lut = widx >= sndev->peer_nr_direct_mw;
203 	size = ioread64(&sndev->peer_shared->mw_sizes[widx]);
204 
205 	if (size == 0)
206 		return -EINVAL;
207 
208 	if (addr_align)
209 		*addr_align = lut ? size : SZ_4K;
210 
211 	if (size_align)
212 		*size_align = lut ? size : SZ_4K;
213 
214 	if (size_max)
215 		*size_max = size;
216 
217 	return 0;
218 }
219 
220 static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
221 {
222 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
223 	int bar = sndev->peer_direct_mw_to_bar[idx];
224 	u32 ctl_val;
225 
226 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
227 	ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
228 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
229 	iowrite32(0, &ctl->bar_entry[bar].win_size);
230 	iowrite32(0, &ctl->bar_ext_entry[bar].win_size);
231 	iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
232 }
233 
234 static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx)
235 {
236 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
237 
238 	iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]);
239 }
240 
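/*
 * Program a direct (BAR) window.  Judging by the writes below, the low
 * 12 bits of win_size carry log2 of the window size, the upper bits
 * carry the low 32 bits of the size itself (the high 32 bits go to
 * bar_ext_entry), and xlate_addr combines the target partition number
 * with the translated bus address.
 */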
241 static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
242 					dma_addr_t addr, resource_size_t size)
243 {
244 	int xlate_pos = ilog2(size);
245 	int bar = sndev->peer_direct_mw_to_bar[idx];
246 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
247 	u32 ctl_val;
248 
249 	ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
250 	ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
251 
252 	iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
253 	iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
254 		  &ctl->bar_entry[bar].win_size);
255 	iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
256 	iowrite64(sndev->self_partition | addr,
257 		  &ctl->bar_entry[bar].xlate_addr);
258 }
259 
260 static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx,
261 				     dma_addr_t addr, resource_size_t size)
262 {
263 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
264 
265 	iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr),
266 		  &ctl->lut_entry[peer_lut_index(sndev, idx)]);
267 }
268 
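/*
 * Set up (or tear down, when addr and size are zero) the translation
 * for a peer memory window.  The update is bracketed by a partition
 * LOCK operation and a CFG operation; if the hardware rejects the
 * configuration, the window is cleared again before returning.
 */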
269 static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
270 				      dma_addr_t addr, resource_size_t size)
271 {
272 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
273 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl;
274 	int xlate_pos = ilog2(size);
275 	int nr_direct_mw = sndev->peer_nr_direct_mw;
276 	int rc;
277 
278 	if (pidx != NTB_DEF_PEER_IDX)
279 		return -EINVAL;
280 
281 	dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size %pap\n",
282 		widx, pidx, &addr, &size);
283 
284 	if (widx >= switchtec_ntb_mw_count(ntb, pidx))
285 		return -EINVAL;
286 
287 	if (xlate_pos < 12)
288 		return -EINVAL;
289 
290 	if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
291 		/*
292 		 * In certain circumstances we can get a buffer that is
293 		 * not aligned to its size. (Most of the time
294 		 * dma_alloc_coherent() ensures this alignment.) This can
295 		 * happen when using large buffers allocated by the CMA
296 		 * (see CONFIG_CMA_ALIGNMENT).
297 		 */
298 		dev_err(&sndev->stdev->dev,
299 			"ERROR: Memory window address is not aligned to its size!\n");
300 		return -EINVAL;
301 	}
302 
303 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
304 				   NTB_CTRL_PART_STATUS_LOCKED);
305 	if (rc)
306 		return rc;
307 
308 	if (addr == 0 || size == 0) {
309 		if (widx < nr_direct_mw)
310 			switchtec_ntb_mw_clr_direct(sndev, widx);
311 		else
312 			switchtec_ntb_mw_clr_lut(sndev, widx);
313 	} else {
314 		if (widx < nr_direct_mw)
315 			switchtec_ntb_mw_set_direct(sndev, widx, addr, size);
316 		else
317 			switchtec_ntb_mw_set_lut(sndev, widx, addr, size);
318 	}
319 
320 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
321 				   NTB_CTRL_PART_STATUS_NORMAL);
322 
323 	if (rc == -EIO) {
324 		dev_err(&sndev->stdev->dev,
325 			"Hardware reported an error configuring mw %d: %08x\n",
326 			widx, ioread32(&ctl->bar_error));
327 
328 		if (widx < nr_direct_mw)
329 			switchtec_ntb_mw_clr_direct(sndev, widx);
330 		else
331 			switchtec_ntb_mw_clr_lut(sndev, widx);
332 
333 		switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
334 				      NTB_CTRL_PART_STATUS_NORMAL);
335 	}
336 
337 	return rc;
338 }
339 
340 static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb)
341 {
342 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
343 	int nr_lut_mw = sndev->nr_lut_mw - sndev->nr_rsvd_luts;
344 
345 	return sndev->nr_direct_mw + (use_lut_mws ? nr_lut_mw : 0);
346 }
347 
348 static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev,
349 					 int idx, phys_addr_t *base,
350 					 resource_size_t *size)
351 {
352 	int bar = sndev->direct_mw_to_bar[idx];
353 	size_t offset = 0;
354 
355 	if (bar < 0)
356 		return -EINVAL;
357 
358 	if (idx == 0) {
359 		/*
360 		 * This is the direct BAR shared with the LUTs
361 		 * which means the actual window will be offset
362 		 * by the size of all the LUT entries.
363 		 */
364 
365 		offset = LUT_SIZE * sndev->nr_lut_mw;
366 	}
367 
368 	if (base)
369 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
370 
371 	if (size) {
372 		*size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
373 		if (offset && *size > offset)
374 			*size = offset;
375 
376 		if (*size > max_mw_size)
377 			*size = max_mw_size;
378 	}
379 
380 	return 0;
381 }
382 
383 static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev,
384 				      int idx, phys_addr_t *base,
385 				      resource_size_t *size)
386 {
387 	int bar = sndev->direct_mw_to_bar[0];
388 	int offset;
389 
390 	offset = LUT_SIZE * lut_index(sndev, idx);
391 
392 	if (base)
393 		*base = pci_resource_start(sndev->ntb.pdev, bar) + offset;
394 
395 	if (size)
396 		*size = LUT_SIZE;
397 
398 	return 0;
399 }
400 
401 static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
402 					  phys_addr_t *base,
403 					  resource_size_t *size)
404 {
405 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
406 
407 	if (idx < sndev->nr_direct_mw)
408 		return switchtec_ntb_direct_get_addr(sndev, idx, base, size);
409 	else if (idx < switchtec_ntb_peer_mw_count(ntb))
410 		return switchtec_ntb_lut_get_addr(sndev, idx, base, size);
411 	else
412 		return -EINVAL;
413 }
414 
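/*
 * Read the negotiated link speed and width for a partition's vEP from
 * the PFF CSR space.  The dword read below appears to hold the PCIe
 * Link Control/Status pair: speed comes from Link Status bits 3:0 and
 * width from bits 9:4 (hence the shifts by 16 and 20).
 */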
415 static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
416 					  int partition,
417 					  enum ntb_speed *speed,
418 					  enum ntb_width *width)
419 {
420 	struct switchtec_dev *stdev = sndev->stdev;
421 
422 	u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
423 	u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
424 
425 	if (speed)
426 		*speed = (linksta >> 16) & 0xF;
427 
428 	if (width)
429 		*width = (linksta >> 20) & 0x3F;
430 }
431 
432 static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev)
433 {
434 	enum ntb_speed self_speed, peer_speed;
435 	enum ntb_width self_width, peer_width;
436 
437 	if (!sndev->link_is_up) {
438 		sndev->link_speed = NTB_SPEED_NONE;
439 		sndev->link_width = NTB_WIDTH_NONE;
440 		return;
441 	}
442 
443 	switchtec_ntb_part_link_speed(sndev, sndev->self_partition,
444 				      &self_speed, &self_width);
445 	switchtec_ntb_part_link_speed(sndev, sndev->peer_partition,
446 				      &peer_speed, &peer_width);
447 
448 	sndev->link_speed = min(self_speed, peer_speed);
449 	sndev->link_width = min(self_width, peer_width);
450 }
451 
452 static int crosslink_is_enabled(struct switchtec_ntb *sndev)
453 {
454 	struct ntb_info_regs __iomem *inf = sndev->mmio_ntb;
455 
456 	return ioread8(&inf->ntp_info[sndev->peer_partition].xlink_enabled);
457 }
458 
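/*
 * In crosslink mode the peer dbmsg registers live in the far switch, so
 * route each of the peer's incoming message registers to pick up
 * messages from our partition and unmask the outgoing doorbells we
 * intend to use.
 */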
459 static void crosslink_init_dbmsgs(struct switchtec_ntb *sndev)
460 {
461 	int i;
462 	u32 msg_map = 0;
463 
464 	if (!crosslink_is_enabled(sndev))
465 		return;
466 
467 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_peer_dbmsg->imsg); i++) {
468 		int m = i | sndev->self_partition << 2;
469 
470 		msg_map |= m << i * 8;
471 	}
472 
473 	iowrite32(msg_map, &sndev->mmio_peer_dbmsg->msg_map);
474 	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
475 		  &sndev->mmio_peer_dbmsg->odb_mask);
476 }
477 
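/*
 * Message register 0 (LINK_MESSAGE) is used as a link-state channel;
 * the remaining values are the payloads exchanged over it.
 */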
478 enum switchtec_msg {
479 	LINK_MESSAGE = 0,
480 	MSG_LINK_UP = 1,
481 	MSG_LINK_DOWN = 2,
482 	MSG_CHECK_LINK = 3,
483 	MSG_LINK_FORCE_DOWN = 4,
484 };
485 
486 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev);
487 
488 static void link_reinit_work(struct work_struct *work)
489 {
490 	struct switchtec_ntb *sndev;
491 
492 	sndev = container_of(work, struct switchtec_ntb, link_reinit_work);
493 
494 	switchtec_ntb_reinit_peer(sndev);
495 }
496 
497 static void switchtec_ntb_check_link(struct switchtec_ntb *sndev,
498 				     enum switchtec_msg msg)
499 {
500 	int link_sta;
501 	int old = sndev->link_is_up;
502 
503 	if (msg == MSG_LINK_FORCE_DOWN) {
504 		schedule_work(&sndev->link_reinit_work);
505 
506 		if (sndev->link_is_up) {
507 			sndev->link_is_up = 0;
508 			ntb_link_event(&sndev->ntb);
509 			dev_info(&sndev->stdev->dev, "ntb link forced down\n");
510 		}
511 
512 		return;
513 	}
514 
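	/*
	 * If the link is enabled locally, confirm it against the peer: a
	 * single 64-bit read of the peer's shared buffer returns the magic
	 * value in the low 32 bits and the peer's link_sta word in the high
	 * 32 bits, since the two fields are adjacent in struct shared_mw.
	 */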
515 	link_sta = sndev->self_shared->link_sta;
516 	if (link_sta) {
517 		u64 peer = ioread64(&sndev->peer_shared->magic);
518 
519 		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
520 			link_sta = peer >> 32;
521 		else
522 			link_sta = 0;
523 	}
524 
525 	sndev->link_is_up = link_sta;
526 	switchtec_ntb_set_link_speed(sndev);
527 
528 	if (link_sta != old) {
529 		switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK);
530 		ntb_link_event(&sndev->ntb);
531 		dev_info(&sndev->stdev->dev, "ntb link %s\n",
532 			 link_sta ? "up" : "down");
533 
534 		if (link_sta)
535 			crosslink_init_dbmsgs(sndev);
536 	}
537 }
538 
539 static void switchtec_ntb_link_notification(struct switchtec_dev *stdev)
540 {
541 	struct switchtec_ntb *sndev = stdev->sndev;
542 
543 	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
544 }
545 
546 static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb,
547 				    enum ntb_speed *speed,
548 				    enum ntb_width *width)
549 {
550 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
551 
552 	if (speed)
553 		*speed = sndev->link_speed;
554 	if (width)
555 		*width = sndev->link_width;
556 
557 	return sndev->link_is_up;
558 }
559 
560 static int switchtec_ntb_link_enable(struct ntb_dev *ntb,
561 				     enum ntb_speed max_speed,
562 				     enum ntb_width max_width)
563 {
564 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
565 
566 	dev_dbg(&sndev->stdev->dev, "enabling link\n");
567 
568 	sndev->self_shared->link_sta = 1;
569 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP);
570 
571 	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
572 
573 	return 0;
574 }
575 
576 static int switchtec_ntb_link_disable(struct ntb_dev *ntb)
577 {
578 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
579 
580 	dev_dbg(&sndev->stdev->dev, "disabling link\n");
581 
582 	sndev->self_shared->link_sta = 0;
583 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_DOWN);
584 
585 	switchtec_ntb_check_link(sndev, MSG_CHECK_LINK);
586 
587 	return 0;
588 }
589 
590 static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb)
591 {
592 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
593 
594 	return sndev->db_valid_mask;
595 }
596 
597 static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb)
598 {
599 	return 1;
600 }
601 
602 static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
603 {
604 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
605 
606 	if (db_vector < 0 || db_vector > 1)
607 		return 0;
608 
609 	return sndev->db_valid_mask;
610 }
611 
612 static u64 switchtec_ntb_db_read(struct ntb_dev *ntb)
613 {
614 	u64 ret;
615 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
616 
617 	ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift;
618 
619 	return ret & sndev->db_valid_mask;
620 }
621 
622 static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
623 {
624 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
625 
626 	iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb);
627 
628 	return 0;
629 }
630 
631 static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
632 {
633 	unsigned long irqflags;
634 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
635 
636 	if (db_bits & ~sndev->db_valid_mask)
637 		return -EINVAL;
638 
639 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
640 
641 	sndev->db_mask |= db_bits << sndev->db_shift;
642 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
643 
644 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
645 
646 	return 0;
647 }
648 
649 static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
650 {
651 	unsigned long irqflags;
652 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
653 
654 	if (db_bits & ~sndev->db_valid_mask)
655 		return -EINVAL;
656 
657 	spin_lock_irqsave(&sndev->db_mask_lock, irqflags);
658 
659 	sndev->db_mask &= ~(db_bits << sndev->db_shift);
660 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
661 
662 	spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags);
663 
664 	return 0;
665 }
666 
667 static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
668 {
669 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
670 
671 	return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask;
672 }
673 
674 static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
675 				      phys_addr_t *db_addr,
676 				      resource_size_t *db_size,
677 				      u64 *db_data,
678 				      int db_bit)
679 {
680 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
681 	unsigned long offset;
682 
683 	if (unlikely(db_bit >= BITS_PER_LONG_LONG))
684 		return -EINVAL;
685 
686 	offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
687 		(unsigned long)sndev->stdev->mmio;
688 
689 	offset += sndev->db_shift / 8;
690 
691 	if (db_addr)
692 		*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
693 	if (db_size)
694 		*db_size = sizeof(u32);
695 	if (db_data)
696 		*db_data = BIT_ULL(db_bit) << sndev->db_peer_shift;
697 
698 	return 0;
699 }
700 
701 static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
702 {
703 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
704 
705 	iowrite64(db_bits << sndev->db_peer_shift,
706 		  &sndev->mmio_peer_dbmsg->odb);
707 
708 	return 0;
709 }
710 
711 static int switchtec_ntb_spad_count(struct ntb_dev *ntb)
712 {
713 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
714 
715 	return ARRAY_SIZE(sndev->self_shared->spad);
716 }
717 
718 static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx)
719 {
720 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
721 
722 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
723 		return 0;
724 
725 	if (!sndev->self_shared)
726 		return 0;
727 
728 	return sndev->self_shared->spad[idx];
729 }
730 
731 static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
732 {
733 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
734 
735 	if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad))
736 		return -EINVAL;
737 
738 	if (!sndev->self_shared)
739 		return -EIO;
740 
741 	sndev->self_shared->spad[idx] = val;
742 
743 	return 0;
744 }
745 
746 static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx,
747 					int sidx)
748 {
749 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
750 
751 	if (pidx != NTB_DEF_PEER_IDX)
752 		return -EINVAL;
753 
754 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
755 		return 0;
756 
757 	if (!sndev->peer_shared)
758 		return 0;
759 
760 	return ioread32(&sndev->peer_shared->spad[sidx]);
761 }
762 
763 static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx,
764 					 int sidx, u32 val)
765 {
766 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
767 
768 	if (pidx != NTB_DEF_PEER_IDX)
769 		return -EINVAL;
770 
771 	if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad))
772 		return -EINVAL;
773 
774 	if (!sndev->peer_shared)
775 		return -EIO;
776 
777 	iowrite32(val, &sndev->peer_shared->spad[sidx]);
778 
779 	return 0;
780 }
781 
782 static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx,
783 					int sidx, phys_addr_t *spad_addr)
784 {
785 	struct switchtec_ntb *sndev = ntb_sndev(ntb);
786 	unsigned long offset;
787 
788 	if (pidx != NTB_DEF_PEER_IDX)
789 		return -EINVAL;
790 
791 	offset = (unsigned long)&sndev->peer_shared->spad[sidx] -
792 		(unsigned long)sndev->stdev->mmio;
793 
794 	if (spad_addr)
795 		*spad_addr = pci_resource_start(ntb->pdev, 0) + offset;
796 
797 	return 0;
798 }
799 
800 static const struct ntb_dev_ops switchtec_ntb_ops = {
801 	.mw_count		= switchtec_ntb_mw_count,
802 	.mw_get_align		= switchtec_ntb_mw_get_align,
803 	.mw_set_trans		= switchtec_ntb_mw_set_trans,
804 	.peer_mw_count		= switchtec_ntb_peer_mw_count,
805 	.peer_mw_get_addr	= switchtec_ntb_peer_mw_get_addr,
806 	.link_is_up		= switchtec_ntb_link_is_up,
807 	.link_enable		= switchtec_ntb_link_enable,
808 	.link_disable		= switchtec_ntb_link_disable,
809 	.db_valid_mask		= switchtec_ntb_db_valid_mask,
810 	.db_vector_count	= switchtec_ntb_db_vector_count,
811 	.db_vector_mask		= switchtec_ntb_db_vector_mask,
812 	.db_read		= switchtec_ntb_db_read,
813 	.db_clear		= switchtec_ntb_db_clear,
814 	.db_set_mask		= switchtec_ntb_db_set_mask,
815 	.db_clear_mask		= switchtec_ntb_db_clear_mask,
816 	.db_read_mask		= switchtec_ntb_db_read_mask,
817 	.peer_db_addr		= switchtec_ntb_peer_db_addr,
818 	.peer_db_set		= switchtec_ntb_peer_db_set,
819 	.spad_count		= switchtec_ntb_spad_count,
820 	.spad_read		= switchtec_ntb_spad_read,
821 	.spad_write		= switchtec_ntb_spad_write,
822 	.peer_spad_read		= switchtec_ntb_peer_spad_read,
823 	.peer_spad_write	= switchtec_ntb_peer_spad_write,
824 	.peer_spad_addr		= switchtec_ntb_peer_spad_addr,
825 };
826 
827 static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
828 {
829 	u64 tpart_vec;
830 	int self;
831 	u64 part_map;
832 	int bit;
833 
834 	sndev->ntb.pdev = sndev->stdev->pdev;
835 	sndev->ntb.topo = NTB_TOPO_SWITCH;
836 	sndev->ntb.ops = &switchtec_ntb_ops;
837 
838 	INIT_WORK(&sndev->link_reinit_work, link_reinit_work);
839 
840 	sndev->self_partition = sndev->stdev->partition;
841 
842 	sndev->mmio_ntb = sndev->stdev->mmio_ntb;
843 
844 	self = sndev->self_partition;
845 	tpart_vec = ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_high);
846 	tpart_vec <<= 32;
847 	tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
848 
849 	part_map = ioread64(&sndev->mmio_ntb->ep_map);
850 	part_map &= ~(1 << sndev->self_partition);
851 
852 	if (!ffs(tpart_vec)) {
853 		if (sndev->stdev->partition_count != 2) {
854 			dev_err(&sndev->stdev->dev,
855 				"ntb target partition not defined\n");
856 			return -ENODEV;
857 		}
858 
859 		bit = ffs(part_map);
860 		if (!bit) {
861 			dev_err(&sndev->stdev->dev,
862 				"peer partition is not NT partition\n");
863 			return -ENODEV;
864 		}
865 
866 		sndev->peer_partition = bit - 1;
867 	} else {
868 		if (ffs(tpart_vec) != fls(tpart_vec)) {
869 			dev_err(&sndev->stdev->dev,
870 				"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
871 			return -ENODEV;
872 		}
873 
874 		sndev->peer_partition = ffs(tpart_vec) - 1;
875 		if (!(part_map & (1 << sndev->peer_partition))) {
876 			dev_err(&sndev->stdev->dev,
877 				"ntb target partition is not NT partition\n");
878 			return -ENODEV;
879 		}
880 	}
881 
882 	dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d\n",
883 		sndev->self_partition, sndev->stdev->partition_count);
884 
885 	sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb +
886 		SWITCHTEC_NTB_REG_CTRL_OFFSET;
887 	sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb +
888 		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
889 
890 	sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition];
891 	sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition];
892 	sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition];
893 	sndev->mmio_peer_dbmsg = sndev->mmio_self_dbmsg;
894 
895 	return 0;
896 }
897 
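/*
 * Map one of the reserved LUT entries in @ctl so that it translates to
 * @addr in @partition.  This also (re)enables LUT windows on the first
 * direct BAR; the control value written below packs log2 of the LUT
 * entry size at bit 8 and the LUT entry count minus one at bit 14.
 */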
898 static int config_rsvd_lut_win(struct switchtec_ntb *sndev,
899 			       struct ntb_ctrl_regs __iomem *ctl,
900 			       int lut_idx, int partition, u64 addr)
901 {
902 	int peer_bar = sndev->peer_direct_mw_to_bar[0];
903 	u32 ctl_val;
904 	int rc;
905 
906 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
907 				   NTB_CTRL_PART_STATUS_LOCKED);
908 	if (rc)
909 		return rc;
910 
911 	ctl_val = ioread32(&ctl->bar_entry[peer_bar].ctl);
912 	ctl_val &= 0xFF;
913 	ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN;
914 	ctl_val |= ilog2(LUT_SIZE) << 8;
915 	ctl_val |= (sndev->nr_lut_mw - 1) << 14;
916 	iowrite32(ctl_val, &ctl->bar_entry[peer_bar].ctl);
917 
918 	iowrite64((NTB_CTRL_LUT_EN | (partition << 1) | addr),
919 		  &ctl->lut_entry[lut_idx]);
920 
921 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
922 				   NTB_CTRL_PART_STATUS_NORMAL);
923 	if (rc) {
924 		u32 bar_error, lut_error;
925 
926 		bar_error = ioread32(&ctl->bar_error);
927 		lut_error = ioread32(&ctl->lut_error);
928 		dev_err(&sndev->stdev->dev,
929 			"Error setting up reserved lut window: %08x / %08x\n",
930 			bar_error, lut_error);
931 		return rc;
932 	}
933 
934 	return 0;
935 }
936 
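/*
 * Load the requester ID (proxy) table so that TLPs originating from the
 * given requester IDs are accepted and translated by the NT endpoint.
 * ID protection is disabled first and the resulting proxy IDs are
 * logged for debugging.
 */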
937 static int config_req_id_table(struct switchtec_ntb *sndev,
938 			       struct ntb_ctrl_regs __iomem *mmio_ctrl,
939 			       int *req_ids, int count)
940 {
941 	int i, rc = 0;
942 	u32 error;
943 	u32 proxy_id;
944 
945 	if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
946 		dev_err(&sndev->stdev->dev,
947 			"Not enough requester IDs available.\n");
948 		return -EFAULT;
949 	}
950 
951 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
952 				   NTB_CTRL_PART_OP_LOCK,
953 				   NTB_CTRL_PART_STATUS_LOCKED);
954 	if (rc)
955 		return rc;
956 
957 	iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
958 		  &mmio_ctrl->partition_ctrl);
959 
960 	for (i = 0; i < count; i++) {
961 		iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
962 			  &mmio_ctrl->req_id_table[i]);
963 
964 		proxy_id = ioread32(&mmio_ctrl->req_id_table[i]);
965 		dev_dbg(&sndev->stdev->dev,
966 			"Requester ID %02X:%02X.%X -> BB:%02X.%X\n",
967 			req_ids[i] >> 8, (req_ids[i] >> 3) & 0x1F,
968 			req_ids[i] & 0x7, (proxy_id >> 4) & 0x1F,
969 			(proxy_id >> 1) & 0x7);
970 	}
971 
972 	rc = switchtec_ntb_part_op(sndev, mmio_ctrl,
973 				   NTB_CTRL_PART_OP_CFG,
974 				   NTB_CTRL_PART_STATUS_NORMAL);
975 
976 	if (rc == -EIO) {
977 		error = ioread32(&mmio_ctrl->req_id_error);
978 		dev_err(&sndev->stdev->dev,
979 			"Error setting up the requester ID table: %08x\n",
980 			error);
981 	}
982 
983 	return rc;
984 }
985 
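/*
 * Carve the start of the first passed-in crosslink BAR into LUT entries
 * (skipping the entry reserved for the peer dbmsg mapping) and program
 * the remainder of that BAR, plus any further BARs, as direct windows,
 * all translated toward the peer partition.
 */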
986 static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
987 			       u64 *mw_addrs, int mw_count)
988 {
989 	int rc, i;
990 	struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_self_ctrl;
991 	u64 addr;
992 	size_t size, offset;
993 	int bar;
994 	int xlate_pos;
995 	u32 ctl_val;
996 
997 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK,
998 				   NTB_CTRL_PART_STATUS_LOCKED);
999 	if (rc)
1000 		return rc;
1001 
1002 	for (i = 0; i < sndev->nr_lut_mw; i++) {
1003 		if (i == ntb_lut_idx)
1004 			continue;
1005 
1006 		addr = mw_addrs[0] + LUT_SIZE * i;
1007 
1008 		iowrite64((NTB_CTRL_LUT_EN | (sndev->peer_partition << 1) |
1009 			   addr),
1010 			  &ctl->lut_entry[i]);
1011 	}
1012 
1013 	sndev->nr_direct_mw = min_t(int, sndev->nr_direct_mw, mw_count);
1014 
1015 	for (i = 0; i < sndev->nr_direct_mw; i++) {
1016 		bar = sndev->direct_mw_to_bar[i];
1017 		offset = (i == 0) ? LUT_SIZE * sndev->nr_lut_mw : 0;
1018 		addr = mw_addrs[i] + offset;
1019 		size = pci_resource_len(sndev->ntb.pdev, bar) - offset;
1020 		xlate_pos = ilog2(size);
1021 
1022 		if (offset && size > offset)
1023 			size = offset;
1024 
1025 		ctl_val = ioread32(&ctl->bar_entry[bar].ctl);
1026 		ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
1027 
1028 		iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
1029 		iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
1030 			  &ctl->bar_entry[bar].win_size);
1031 		iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
1032 		iowrite64(sndev->peer_partition | addr,
1033 			  &ctl->bar_entry[bar].xlate_addr);
1034 	}
1035 
1036 	rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG,
1037 				   NTB_CTRL_PART_STATUS_NORMAL);
1038 	if (rc) {
1039 		u32 bar_error, lut_error;
1040 
1041 		bar_error = ioread32(&ctl->bar_error);
1042 		lut_error = ioread32(&ctl->lut_error);
1043 		dev_err(&sndev->stdev->dev,
1044 			"Error setting up cross link windows: %08x / %08x\n",
1045 			bar_error, lut_error);
1046 		return rc;
1047 	}
1048 
1049 	return 0;
1050 }
1051 
1052 static int crosslink_setup_req_ids(struct switchtec_ntb *sndev,
1053 	struct ntb_ctrl_regs __iomem *mmio_ctrl)
1054 {
1055 	int req_ids[16];
1056 	int i;
1057 	u32 proxy_id;
1058 
1059 	for (i = 0; i < ARRAY_SIZE(req_ids); i++) {
1060 		proxy_id = ioread32(&sndev->mmio_self_ctrl->req_id_table[i]);
1061 
1062 		if (!(proxy_id & NTB_CTRL_REQ_ID_EN))
1063 			break;
1064 
1065 		req_ids[i] = ((proxy_id >> 1) & 0xFF);
1066 	}
1067 
1068 	return config_req_id_table(sndev, mmio_ctrl, req_ids, i);
1069 }
1070 
1071 /*
1072  * In crosslink configuration there is a virtual partition in the
1073  * middle of the two switches. The BARs in this partition have to be
1074  * enumerated and assigned addresses.
1075  */
1076 static int crosslink_enum_partition(struct switchtec_ntb *sndev,
1077 				    u64 *bar_addrs)
1078 {
1079 	struct part_cfg_regs __iomem *part_cfg =
1080 		&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
1081 	u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
1082 	struct pff_csr_regs __iomem *mmio_pff =
1083 		&sndev->stdev->mmio_pff_csr[pff];
1084 	const u64 bar_space = 0x1000000000LL;
1085 	u64 bar_addr;
1086 	int bar_cnt = 0;
1087 	int i;
1088 
1089 	iowrite16(0x6, &mmio_pff->pcicmd);
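	/* 0x6 == PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER: enable memory
	 * decoding and bus mastering on the virtual EP.
	 */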
1090 
1091 	for (i = 0; i < ARRAY_SIZE(mmio_pff->pci_bar64); i++) {
1092 		iowrite64(bar_space * i, &mmio_pff->pci_bar64[i]);
1093 		bar_addr = ioread64(&mmio_pff->pci_bar64[i]);
1094 		bar_addr &= ~0xf;
1095 
1096 		dev_dbg(&sndev->stdev->dev,
1097 			"Crosslink BAR%d addr: %llx\n",
1098 			i*2, bar_addr);
1099 
1100 		if (bar_addr != bar_space * i)
1101 			continue;
1102 
1103 		bar_addrs[bar_cnt++] = bar_addr;
1104 	}
1105 
1106 	return bar_cnt;
1107 }
1108 
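/*
 * Crosslink setup: enumerate the BARs of the virtual partition between
 * the two switches, point reserved LUT entry 1 at the peer's dbmsg
 * registers, configure the crossed memory windows and requester IDs,
 * and finally iomap that LUT window so the peer dbmsg registers can be
 * reached through mmio_peer_dbmsg.
 */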
1109 static int switchtec_ntb_init_crosslink(struct switchtec_ntb *sndev)
1110 {
1111 	int rc;
1112 	int bar = sndev->direct_mw_to_bar[0];
1113 	const int ntb_lut_idx = 1;
1114 	u64 bar_addrs[6];
1115 	u64 addr;
1116 	int offset;
1117 	int bar_cnt;
1118 
1119 	if (!crosslink_is_enabled(sndev))
1120 		return 0;
1121 
1122 	dev_info(&sndev->stdev->dev, "Using crosslink configuration\n");
1123 	sndev->ntb.topo = NTB_TOPO_CROSSLINK;
1124 
1125 	bar_cnt = crosslink_enum_partition(sndev, bar_addrs);
1126 	if (bar_cnt < sndev->nr_direct_mw + 1) {
1127 		dev_err(&sndev->stdev->dev,
1128 			"Error enumerating crosslink partition\n");
1129 		return -EINVAL;
1130 	}
1131 
1132 	addr = (bar_addrs[0] + SWITCHTEC_GAS_NTB_OFFSET +
1133 		SWITCHTEC_NTB_REG_DBMSG_OFFSET +
1134 		sizeof(struct ntb_dbmsg_regs) * sndev->peer_partition);
1135 
1136 	offset = addr & (LUT_SIZE - 1);
1137 	addr -= offset;
1138 
1139 	rc = config_rsvd_lut_win(sndev, sndev->mmio_self_ctrl, ntb_lut_idx,
1140 				 sndev->peer_partition, addr);
1141 	if (rc)
1142 		return rc;
1143 
1144 	rc = crosslink_setup_mws(sndev, ntb_lut_idx, &bar_addrs[1],
1145 				 bar_cnt - 1);
1146 	if (rc)
1147 		return rc;
1148 
1149 	rc = crosslink_setup_req_ids(sndev, sndev->mmio_peer_ctrl);
1150 	if (rc)
1151 		return rc;
1152 
1153 	sndev->mmio_xlink_win = pci_iomap_range(sndev->stdev->pdev, bar,
1154 						LUT_SIZE, LUT_SIZE);
1155 	if (!sndev->mmio_xlink_win)
1156 		return -ENOMEM;
1159 
1160 	sndev->mmio_peer_dbmsg = sndev->mmio_xlink_win + offset;
1161 	sndev->nr_rsvd_luts++;
1162 
1163 	crosslink_init_dbmsgs(sndev);
1164 
1165 	return 0;
1166 }
1167 
1168 static void switchtec_ntb_deinit_crosslink(struct switchtec_ntb *sndev)
1169 {
1170 	if (sndev->mmio_xlink_win)
1171 		pci_iounmap(sndev->stdev->pdev, sndev->mmio_xlink_win);
1172 }
1173 
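/*
 * Build the mapping from memory-window index to BAR number by scanning
 * the control registers for BARs the hardware marks valid; returns the
 * number of usable direct windows found.
 */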
1174 static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl)
1175 {
1176 	int i;
1177 	int cnt = 0;
1178 
1179 	for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) {
1180 		u32 r = ioread32(&ctrl->bar_entry[i].ctl);
1181 
1182 		if (r & NTB_CTRL_BAR_VALID)
1183 			map[cnt++] = i;
1184 	}
1185 
1186 	return cnt;
1187 }
1188 
1189 static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev)
1190 {
1191 	sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar,
1192 				       sndev->mmio_self_ctrl);
1193 
1194 	sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries);
1195 	sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw);
1196 
1197 	dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut\n",
1198 		sndev->nr_direct_mw, sndev->nr_lut_mw);
1199 
1200 	sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar,
1201 					    sndev->mmio_peer_ctrl);
1202 
1203 	sndev->peer_nr_lut_mw =
1204 		ioread16(&sndev->mmio_peer_ctrl->lut_table_entries);
1205 	sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw);
1206 
1207 	dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut\n",
1208 		sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw);
1209 
1210 }
1211 
1212 /*
1213  * There are 64 doorbells in the switch hardware, but they are
1214  * shared among all partitions, so we must split them in half
1215  * (32 for each partition). However, the message interrupts are
1216  * also shared with the top 4 doorbells, so we just limit this to
1217  * 28 doorbells per partition.
1218  *
1219  * In crosslink mode, each side has its own dbmsg register, so
1220  * each side can use all 60 of the available doorbells.
1221  */
1222 static void switchtec_ntb_init_db(struct switchtec_ntb *sndev)
1223 {
1224 	sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL;
1225 
1226 	if (sndev->mmio_peer_dbmsg != sndev->mmio_self_dbmsg) {
1227 		sndev->db_shift = 0;
1228 		sndev->db_peer_shift = 0;
1229 		sndev->db_valid_mask = sndev->db_mask;
1230 	} else if (sndev->self_partition < sndev->peer_partition) {
1231 		sndev->db_shift = 0;
1232 		sndev->db_peer_shift = 32;
1233 		sndev->db_valid_mask = 0x0FFFFFFF;
1234 	} else {
1235 		sndev->db_shift = 32;
1236 		sndev->db_peer_shift = 0;
1237 		sndev->db_valid_mask = 0x0FFFFFFF;
1238 	}
1239 
1240 	iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask);
1241 	iowrite64(sndev->db_valid_mask << sndev->db_peer_shift,
1242 		  &sndev->mmio_peer_dbmsg->odb_mask);
1243 
1244 	dev_dbg(&sndev->stdev->dev, "dbs: shift %d/%d, mask %016llx\n",
1245 		sndev->db_shift, sndev->db_peer_shift, sndev->db_valid_mask);
1246 }
1247 
1248 static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev)
1249 {
1250 	int i;
1251 	u32 msg_map = 0;
1252 
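	/*
	 * Each byte of msg_map is built from the message index (low two
	 * bits) and the peer partition number shifted left by two; e.g.
	 * with four imsg registers and peer_partition == 1 this works out
	 * to msg_map == 0x07060504.
	 */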
1253 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1254 		int m = i | sndev->peer_partition << 2;
1255 
1256 		msg_map |= m << i * 8;
1257 	}
1258 
1259 	iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map);
1260 
1261 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++)
1262 		iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK,
1263 			  &sndev->mmio_self_dbmsg->imsg[i]);
1264 }
1265 
1266 static int
1267 switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev)
1268 {
1269 	int req_ids[2];
1270 
1271 	/*
1272 	 * Root Complex Requester ID (which is 0:00.0)
1273 	 */
1274 	req_ids[0] = 0;
1275 
1276 	/*
1277 	 * Host Bridge Requester ID (as read from the NTB info registers)
1278 	 */
1279 	req_ids[1] = ioread16(&sndev->mmio_ntb->requester_id);
1280 
1281 	return config_req_id_table(sndev, sndev->mmio_self_ctrl, req_ids,
1282 				   ARRAY_SIZE(req_ids));
1283 }
1284 
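/*
 * Fill in the local shared buffer that the peer reads through its
 * reserved LUT window: the magic value, our partition ID and the size
 * of every memory window we advertise.  The first direct window, which
 * shares its BAR with the LUT region, is capped at LUT_SIZE times the
 * number of LUT windows; each LUT window is a fixed LUT_SIZE.
 */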
1285 static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev)
1286 {
1287 	int i;
1288 
1289 	memset(sndev->self_shared, 0, LUT_SIZE);
1290 	sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC;
1291 	sndev->self_shared->partition_id = sndev->stdev->partition;
1292 
1293 	for (i = 0; i < sndev->nr_direct_mw; i++) {
1294 		int bar = sndev->direct_mw_to_bar[i];
1295 		resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar);
1296 
1297 		if (i == 0)
1298 			sz = min_t(resource_size_t, sz,
1299 				   LUT_SIZE * sndev->nr_lut_mw);
1300 
1301 		sndev->self_shared->mw_sizes[i] = sz;
1302 	}
1303 
1304 	for (i = 0; i < sndev->nr_lut_mw; i++) {
1305 		int idx = sndev->nr_direct_mw + i;
1306 
1307 		sndev->self_shared->mw_sizes[idx] = LUT_SIZE;
1308 	}
1309 }
1310 
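/*
 * Allocate a LUT_SIZE chunk of DMA-coherent memory for the local shared
 * buffer, expose it to the peer through reserved LUT entry 0 of the
 * peer's control registers, and iomap the matching region of our own
 * first BAR so the peer's copy of the shared buffer can be read back.
 */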
1311 static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev)
1312 {
1313 	int self_bar = sndev->direct_mw_to_bar[0];
1314 	int rc;
1315 
1316 	sndev->nr_rsvd_luts++;
1317 	sndev->self_shared = dma_alloc_coherent(&sndev->stdev->pdev->dev,
1318 						LUT_SIZE,
1319 						&sndev->self_shared_dma,
1320 						GFP_KERNEL);
1321 	if (!sndev->self_shared) {
1322 		dev_err(&sndev->stdev->dev,
1323 			"unable to allocate memory for shared mw\n");
1324 		return -ENOMEM;
1325 	}
1326 
1327 	switchtec_ntb_init_shared(sndev);
1328 
1329 	rc = config_rsvd_lut_win(sndev, sndev->mmio_peer_ctrl, 0,
1330 				 sndev->self_partition,
1331 				 sndev->self_shared_dma);
1332 	if (rc)
1333 		goto unalloc_and_exit;
1334 
1335 	sndev->peer_shared = pci_iomap(sndev->stdev->pdev, self_bar, LUT_SIZE);
1336 	if (!sndev->peer_shared) {
1337 		rc = -ENOMEM;
1338 		goto unalloc_and_exit;
1339 	}
1340 
1341 	dev_dbg(&sndev->stdev->dev, "Shared MW Ready\n");
1342 	return 0;
1343 
1344 unalloc_and_exit:
1345 	dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1346 			  sndev->self_shared, sndev->self_shared_dma);
1347 
1348 	return rc;
1349 }
1350 
1351 static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev)
1352 {
1353 	if (sndev->peer_shared)
1354 		pci_iounmap(sndev->stdev->pdev, sndev->peer_shared);
1355 
1356 	if (sndev->self_shared)
1357 		dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE,
1358 				  sndev->self_shared,
1359 				  sndev->self_shared_dma);
1360 	sndev->nr_rsvd_luts--;
1361 }
1362 
1363 static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev)
1364 {
1365 	struct switchtec_ntb *sndev = dev;
1366 
1367 	dev_dbg(&sndev->stdev->dev, "doorbell\n");
1368 
1369 	ntb_db_event(&sndev->ntb, 0);
1370 
1371 	return IRQ_HANDLED;
1372 }
1373 
1374 static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev)
1375 {
1376 	int i;
1377 	struct switchtec_ntb *sndev = dev;
1378 
1379 	for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) {
1380 		u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]);
1381 
1382 		if (msg & NTB_DBMSG_IMSG_STATUS) {
1383 			dev_dbg(&sndev->stdev->dev, "message: %d %08x\n",
1384 				i, (u32)msg);
1385 			iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status);
1386 
1387 			if (i == LINK_MESSAGE)
1388 				switchtec_ntb_check_link(sndev, msg);
1389 		}
1390 	}
1391 
1392 	return IRQ_HANDLED;
1393 }
1394 
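/*
 * Pick the lowest MSI vectors not already used for the event interrupt
 * and register them for doorbells and messages.  All but the last four
 * incoming-doorbell vector map entries are routed to the doorbell
 * interrupt; the last four (which carry the message interrupts) go to
 * the message interrupt.
 */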
1395 static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev)
1396 {
1397 	int i;
1398 	int rc;
1399 	int doorbell_irq = 0;
1400 	int message_irq = 0;
1401 	int event_irq;
1402 	int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map);
1403 
1404 	event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number);
1405 
1406 	while (doorbell_irq == event_irq)
1407 		doorbell_irq++;
1408 	while (message_irq == doorbell_irq ||
1409 	       message_irq == event_irq)
1410 		message_irq++;
1411 
1412 	dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d\n",
1413 		event_irq, doorbell_irq, message_irq);
1414 
1415 	for (i = 0; i < idb_vecs - 4; i++)
1416 		iowrite8(doorbell_irq,
1417 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1418 
1419 	for (; i < idb_vecs; i++)
1420 		iowrite8(message_irq,
1421 			 &sndev->mmio_self_dbmsg->idb_vec_map[i]);
1422 
1423 	sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq);
1424 	sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq);
1425 
1426 	rc = request_irq(sndev->doorbell_irq,
1427 			 switchtec_ntb_doorbell_isr, 0,
1428 			 "switchtec_ntb_doorbell", sndev);
1429 	if (rc)
1430 		return rc;
1431 
1432 	rc = request_irq(sndev->message_irq,
1433 			 switchtec_ntb_message_isr, 0,
1434 			 "switchtec_ntb_message", sndev);
1435 	if (rc) {
1436 		free_irq(sndev->doorbell_irq, sndev);
1437 		return rc;
1438 	}
1439 
1440 	return 0;
1441 }
1442 
1443 static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev)
1444 {
1445 	free_irq(sndev->doorbell_irq, sndev);
1446 	free_irq(sndev->message_irq, sndev);
1447 }
1448 
1449 static int switchtec_ntb_reinit_peer(struct switchtec_ntb *sndev)
1450 {
1451 	dev_info(&sndev->stdev->dev, "peer reinitialized\n");
1452 	switchtec_ntb_deinit_shared_mw(sndev);
1453 	switchtec_ntb_init_mw(sndev);
1454 	return switchtec_ntb_init_shared_mw(sndev);
1455 }
1456 
1457 static int switchtec_ntb_add(struct device *dev,
1458 			     struct class_interface *class_intf)
1459 {
1460 	struct switchtec_dev *stdev = to_stdev(dev);
1461 	struct switchtec_ntb *sndev;
1462 	int rc;
1463 
1464 	stdev->sndev = NULL;
1465 
1466 	if (stdev->pdev->class != (PCI_CLASS_BRIDGE_OTHER << 8))
1467 		return -ENODEV;
1468 
1469 	sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev));
1470 	if (!sndev)
1471 		return -ENOMEM;
1472 
1473 	sndev->stdev = stdev;
1474 	rc = switchtec_ntb_init_sndev(sndev);
1475 	if (rc)
1476 		goto free_and_exit;
1477 
1478 	switchtec_ntb_init_mw(sndev);
1479 
1480 	rc = switchtec_ntb_init_req_id_table(sndev);
1481 	if (rc)
1482 		goto free_and_exit;
1483 
1484 	rc = switchtec_ntb_init_crosslink(sndev);
1485 	if (rc)
1486 		goto free_and_exit;
1487 
1488 	switchtec_ntb_init_db(sndev);
1489 	switchtec_ntb_init_msgs(sndev);
1490 
1491 	rc = switchtec_ntb_init_shared_mw(sndev);
1492 	if (rc)
1493 		goto deinit_crosslink;
1494 
1495 	rc = switchtec_ntb_init_db_msg_irq(sndev);
1496 	if (rc)
1497 		goto deinit_shared_and_exit;
1498 
1499 	/*
1500 	 * If this host crashed, the other host may think the link is
1501 	 * still up. Tell them to force it down (it will go back up
1502 	 * once we register the ntb device).
1503 	 */
1504 	switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_FORCE_DOWN);
1505 
1506 	rc = ntb_register_device(&sndev->ntb);
1507 	if (rc)
1508 		goto deinit_and_exit;
1509 
1510 	stdev->sndev = sndev;
1511 	stdev->link_notifier = switchtec_ntb_link_notification;
1512 	dev_info(dev, "NTB device registered\n");
1513 
1514 	return 0;
1515 
1516 deinit_and_exit:
1517 	switchtec_ntb_deinit_db_msg_irq(sndev);
1518 deinit_shared_and_exit:
1519 	switchtec_ntb_deinit_shared_mw(sndev);
1520 deinit_crosslink:
1521 	switchtec_ntb_deinit_crosslink(sndev);
1522 free_and_exit:
1523 	kfree(sndev);
1524 	dev_err(dev, "failed to register ntb device: %d\n", rc);
1525 	return rc;
1526 }
1527 
1528 static void switchtec_ntb_remove(struct device *dev,
1529 				 struct class_interface *class_intf)
1530 {
1531 	struct switchtec_dev *stdev = to_stdev(dev);
1532 	struct switchtec_ntb *sndev = stdev->sndev;
1533 
1534 	if (!sndev)
1535 		return;
1536 
1537 	stdev->link_notifier = NULL;
1538 	stdev->sndev = NULL;
1539 	ntb_unregister_device(&sndev->ntb);
1540 	switchtec_ntb_deinit_db_msg_irq(sndev);
1541 	switchtec_ntb_deinit_shared_mw(sndev);
1542 	switchtec_ntb_deinit_crosslink(sndev);
1543 	kfree(sndev);
1544 	dev_info(dev, "ntb device unregistered\n");
1545 }
1546 
1547 static struct class_interface switchtec_interface  = {
1548 	.add_dev = switchtec_ntb_add,
1549 	.remove_dev = switchtec_ntb_remove,
1550 };
1551 
1552 static int __init switchtec_ntb_init(void)
1553 {
1554 	switchtec_interface.class = switchtec_class;
1555 	return class_interface_register(&switchtec_interface);
1556 }
1557 module_init(switchtec_ntb_init);
1558 
1559 static void __exit switchtec_ntb_exit(void)
1560 {
1561 	class_interface_unregister(&switchtec_interface);
1562 }
1563 module_exit(switchtec_ntb_exit);
1564