/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Currently supports P5IOC2, IODA (P7IOC) hubs and IODA2 (PHB3) PHBs
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

/* Delay in usec */
#define PCI_RESET_DELAY_US	3000000

#define cfg_dbg(fmt...)	do { } while(0)
//#define cfg_dbg(fmt...)	printk(fmt)

#ifdef CONFIG_PCI_MSI
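/*
 * Allocate and set up MSIs for a PCI device. For each MSI descriptor we
 * allocate a hardware IRQ from the PHB's MSI bitmap, map it to a Linux
 * virq and let the PHB-specific msi_setup() hook compose the MSI message.
 * Devices limited to 32-bit MSIs are rejected unless the PHB supports them.
 */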
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (virq == NO_IRQ) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}

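/*
 * Undo pnv_setup_msi_irqs(): detach the MSI descriptors, return the
 * hardware IRQs to the PHB's MSI bitmap and dispose of the virq mappings.
 */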
void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;

	if (WARN_ON(!phb))
		return;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		irq_set_msi_desc(entry->irq, NULL);
		msi_bitmap_free_hwirqs(&phb->msi_bmp,
			virq_to_hw(entry->irq) - phb->msi_base, 1);
		irq_dispose_mapping(entry->irq);
	}
}
#endif /* CONFIG_PCI_MSI */

static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;
	int i;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus   || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
		/* Convert from big-endian before testing the valid bit,
		 * as the PHB3 variant below already does.
		 */
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}

static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;
	int i;

	data = (struct OpalIoPhb3ErrorData *)common;
	pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir:        %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
				i, be64_to_cpu(data->pestA[i]),
				be64_to_cpu(data->pestB[i]));
	}
}

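/*
 * Dump the PHB diag-data blob returned by OPAL. The common header tells
 * us which layout (P7IOC or PHB3) the blob uses; unknown types are only
 * reported, not dumped.
 */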
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
				unsigned char *log_buff)
{
	struct OpalIoPhbErrorCommon *common;

	if (!hose || !log_buff)
		return;

	common = (struct OpalIoPhbErrorCommon *)log_buff;
	switch (be32_to_cpu(common->ioType)) {
	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
		pnv_pci_dump_p7ioc_diag_data(hose, common);
		break;
	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
		pnv_pci_dump_phb3_diag_data(hose, common);
		break;
	default:
		pr_warn("%s: Unrecognized ioType %d\n",
			__func__, be32_to_cpu(common->ioType));
	}
}

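/*
 * Fetch the PHB diag-data and clear the frozen state of the given PE.
 * PHBs with compound PEs provide an unfreeze_pe() callback; otherwise we
 * go through the generic OPAL freeze-clear call.
 */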
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
					 PNV_PCI_DIAG_BUF_SIZE);
	has_diag = (rc == OPAL_SUCCESS);

	/* If the PHB supports compound PEs, let it handle the unfreeze */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					     pe_no,
					     OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen "
				"PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots.
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);

	spin_unlock_irqrestore(&phb->lock, flags);
}

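/*
 * Called on config accesses when full EEH isn't active: look up the PE's
 * frozen state and, if it is frozen for MMIO and/or DMA, clear the freeze
 * (dumping diag-data on failure) so config space stays usable.
 */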
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8	fstate;
	__be16	pcierr;
	int	pe_no;
	s64	rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not have
	 * set that up yet, so all ER errors should be mapped to the
	 * reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		if (phb->type == PNV_PHB_P5IOC2)
			pe_no = 0;
		else
			pe_no = phb->ioda.reserved_pe;
	}

	/*
	 * Fetch the frozen state. If the PHB supports compound PEs,
	 * we need to handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
		(pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE  ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If the PHB supports compound PEs, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}

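/*
 * Low-level config space accessors. Reads and writes are routed through
 * OPAL; a failed read returns all-ones, which is also the value the EEH
 * path checks for when looking for a frozen PE.
 */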
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		__func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		__func__, pdn->busno, pdn->devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}

#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	struct eeh_dev *edev = NULL;
	struct pnv_phb *phb = pdn->phb->private_data;

	/* EEH not enabled ? */
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		return true;

	/* PE reset or device removed ? */
	edev = pdn->edev;
	if (edev) {
		if (edev->pe &&
		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
			return false;

		if (edev->mode & EEH_DEV_REMOVED)
			return false;
	}

	return true;
}
#else
static inline bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
	return true;
}
#endif /* CONFIG_EEH */

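/*
 * pci_ops backends. With EEH enabled we let eeh_dev_check_failure()
 * examine all-ones reads; without it we fall back to the manual freeze
 * check above after every access.
 */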
static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};

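/*
 * Return a pointer to the TCE entry for @idx. For multi-level tables we
 * walk the intermediate levels: each non-leaf entry holds the real address
 * of the next level, with the low R/W permission bits masked off.
 */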
static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
	__be64 *tmp = ((__be64 *)tbl->it_base);
	int  level = tbl->it_indirect_levels;
	const long shift = ilog2(tbl->it_level_size);
	unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

	while (level) {
		int n = (idx & mask) >> (level * shift);
		unsigned long tce = be64_to_cpu(tmp[n]);

		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
		idx &= ~mask;
		mask >>= shift;
		--level;
	}

	return tmp + idx;
}

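/*
 * Populate @npages TCEs starting at @index with the real page numbers
 * backing @uaddr, with permission bits derived from the DMA direction.
 */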
int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	u64 proto_tce = iommu_direction_to_tce_perm(direction);
	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long newtce = proto_tce |
			((rpn + i) << tbl->it_page_shift);
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
	}

	return 0;
}

#ifdef CONFIG_IOMMU_API
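/*
 * Atomically replace a TCE and return the previous mapping (host physical
 * address and DMA direction) so the caller can release the old page.
 */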
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		unsigned long *hpa, enum dma_data_direction *direction)
{
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *hpa | proto_tce, oldtce;
	unsigned long idx = index - tbl->it_offset;

	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	/* Convert the old TCE once so both the HPA and the direction
	 * are decoded in CPU byte order.
	 */
	oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
	*hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(oldtce);

	return 0;
}
#endif

void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	long i;

	for (i = 0; i < npages; i++) {
		unsigned long idx = index - tbl->it_offset + i;

		*(pnv_tce(tbl, idx)) = cpu_to_be64(0);
	}
}

unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	return be64_to_cpu(*(pnv_tce(tbl, index - tbl->it_offset)));
}

struct iommu_table *pnv_pci_table_alloc(int nid)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);

	return tbl;
}

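/*
 * Attach @tbl to @table_group: record the group on the table's RCU list
 * of attached groups and publish the table in the group's tables[] slot.
 */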
long pnv_pci_link_table_and_group(int node, int num,
		struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	struct iommu_table_group_link *tgl = NULL;

	if (WARN_ON(!tbl || !table_group))
		return -EINVAL;

	tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
			node);
	if (!tgl)
		return -ENOMEM;

	tgl->table_group = table_group;
	list_add_rcu(&tgl->next, &tbl->it_group_list);

	table_group->tables[num] = tbl;

	return 0;
}

static void pnv_iommu_table_group_link_free(struct rcu_head *head)
{
	struct iommu_table_group_link *tgl = container_of(head,
			struct iommu_table_group_link, rcu);

	kfree(tgl);
}

void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	long i;
	bool found;
	struct iommu_table_group_link *tgl;

	if (!tbl || !table_group)
		return;

	/* Remove link to a group from table's list of attached groups */
	found = false;
	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		if (tgl->table_group == table_group) {
			list_del_rcu(&tgl->next);
			call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
			found = true;
			break;
		}
	}
	if (WARN_ON(!found))
		return;

	/* Clear the iommu_table pointer in iommu_table_group::tables[] */
	found = false;
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (table_group->tables[i] == tbl) {
			table_group->tables[i] = NULL;
			found = true;
			break;
		}
	}
	WARN_ON(!found);
}

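/*
 * Fill in the generic iommu_table fields from the TCE memory backing it:
 * it_size is in TCE entries (8 bytes each) and it_offset is the DMA
 * window offset expressed in IOMMU pages.
 */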
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset, unsigned page_shift)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}

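/*
 * Per-device DMA setup hook. For VFs we first fix up the pci_dn PE number
 * by looking up the matching PE in the PHB's PE list, then defer to the
 * PHB-specific dma_dev_setup() callback.
 */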
void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}

u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	if (phb && phb->dma_get_required_mask)
		return phb->dma_get_required_mask(phb, pdev);

	return __dma_get_required_mask(&pdev->dev);
}

void pnv_pci_shutdown(void)
{
	struct pci_controller *hose;

	list_for_each_entry(hose, &hose_list, list_node)
		if (hose->controller_ops.shutdown)
			hose->controller_ops.shutdown(hose);
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);

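/*
 * Probe the IO hubs and PHBs described in the device tree (IODA hubs,
 * p5ioc2 hubs and IODA2 PHB3s), link OF nodes to PHBs and install the
 * IOMMU-based DMA ops.
 */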
void __init pnv_pci_init(void)
{
	struct device_node *np;
	bool found_ioda = false;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, e.g. in a simulator, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	/* Look for IODA IO-Hubs. We don't support mixing IODA
	 * and p5ioc2 due to the need to change some global
	 * probing flags.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
		found_ioda = true;
	}

	/* Look for p5ioc2 IO-Hubs */
	if (!found_ioda)
		for_each_compatible_node(np, NULL, "ibm,p5ioc2")
			pnv_pci_init_p5ioc2_hub(np);

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Setup the linkage between OF nodes and PHBs */
	pci_devs_phb_init();

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}

machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);