// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * Author: Thierry Reding <treding@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <soc/tegra/cpuidle.h>
#include <soc/tegra/pmc.h>

#include "../pci.h"

#define INT_PCI_MSI_NR (8 * 32)
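/*
 * Eight 32-bit AFI_MSI_VEC* status registers, 32 vectors each, give the
 * 8 * 32 = 256 MSI vectors counted above.
 */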

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC0		0x6c
#define AFI_MSI_VEC1		0x70
#define AFI_MSI_VEC2		0x74
#define AFI_MSI_VEC3		0x78
#define AFI_MSI_VEC4		0x7c
#define AFI_MSI_VEC5		0x80
#define AFI_MSI_VEC6		0x84
#define AFI_MSI_VEC7		0x88

#define AFI_MSI_EN_VEC0		0x8c
#define AFI_MSI_EN_VEC1		0x90
#define AFI_MSI_EN_VEC2		0x94
#define AFI_MSI_EN_VEC3		0x98
#define AFI_MSI_EN_VEC4		0x9c
#define AFI_MSI_EN_VEC5		0xa0
#define AFI_MSI_EN_VEC6		0xa4
#define AFI_MSI_EN_VEC7		0xa8

#define AFI_CONFIGURATION		0xac
#define  AFI_CONFIGURATION_EN_FPCI		(1 << 0)
#define  AFI_CONFIGURATION_CLKEN_OVERRIDE	(1 << 31)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define  AFI_INTR_MASK_INT_MASK	(1 << 0)
#define  AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE			0xb8
#define  AFI_INTR_CODE_MASK		0xf
#define  AFI_INTR_INI_SLAVE_ERROR	1
#define  AFI_INTR_INI_DECODE_ERROR	2
#define  AFI_INTR_TARGET_ABORT		3
#define  AFI_INTR_MASTER_ABORT		4
#define  AFI_INTR_INVALID_WRITE		5
#define  AFI_INTR_LEGACY		6
#define  AFI_INTR_FPCI_DECODE_ERROR	7
#define  AFI_INTR_AXI_DECODE_ERROR	8
#define  AFI_INTR_FPCI_TIMEOUT		9
#define  AFI_INTR_PE_PRSNT_SENSE	10
#define  AFI_INTR_PE_CLKREQ_SENSE	11
#define  AFI_INTR_CLKCLAMP_SENSE	12
#define  AFI_INTR_RDY4PD_SENSE		13
#define  AFI_INTR_P2P_ERROR		14

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE		0xc8
#define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define  AFI_INTR_EN_INI_DECERR		(1 << 1)
#define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_PME		0xf0

#define AFI_PCIE_CONFIG					0x0f8
#define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
#define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x)		(1 << ((x) + 29))
#define  AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL		(0x7 << 29)

#define AFI_FUSE			0x104
#define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define  AFI_PEX_CTRL_RST		(1 << 0)
#define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
#define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)

#define AFI_PLLE_CONTROL		0x160
#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)

#define AFI_PEXBIAS_CTRL_0		0x168

#define RP_PRIV_XP_DL		0x00000494
#define  RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD	(0x1ff << 1)

#define RP_RX_HDR_LIMIT		0x00000e00
#define  RP_RX_HDR_LIMIT_PW_MASK	(0xff << 8)
#define  RP_RX_HDR_LIMIT_PW		(0x0e << 8)

#define RP_ECTL_2_R1	0x00000e84
#define  RP_ECTL_2_R1_RX_CTLE_1C_MASK		0xffff

#define RP_ECTL_4_R1	0x00000e8c
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R1	0x00000e90
#define  RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R1	0x00000e94
#define  RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_ECTL_2_R2	0x00000ea4
#define  RP_ECTL_2_R2_RX_CTLE_1C_MASK	0xffff

#define RP_ECTL_4_R2	0x00000eac
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK	(0xffff << 16)
#define  RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT	16

#define RP_ECTL_5_R2	0x00000eb0
#define  RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK	0xffffffff

#define RP_ECTL_6_R2	0x00000eb4
#define  RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK	0xffffffff

#define RP_VEND_XP	0x00000f00
#define  RP_VEND_XP_DL_UP			(1 << 30)
#define  RP_VEND_XP_OPPORTUNISTIC_ACK		(1 << 27)
#define  RP_VEND_XP_OPPORTUNISTIC_UPDATEFC	(1 << 28)
#define  RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK	(0xff << 18)

#define RP_VEND_CTL0	0x00000f44
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK	(0xf << 12)
#define  RP_VEND_CTL0_DSK_RST_PULSE_WIDTH	(0x9 << 12)

#define RP_VEND_CTL1	0x00000f48
#define  RP_VEND_CTL1_ERPT	(1 << 13)

#define RP_VEND_XP_BIST	0x00000f4c
#define  RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE	(1 << 28)

#define RP_VEND_CTL2 0x00000fa8
#define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)

#define RP_PRIV_MISC	0x00000fe0
#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT		(0xe << 0)
#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT		(0xf << 0)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD		(0xf << 16)
#define  RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE		(1 << 23)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK	(0x7f << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD		(0xf << 24)
#define  RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE		(1 << 31)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define RP_LINK_CONTROL_STATUS_2		0x000000b0

#define PADS_CTL_SEL		0x0000009c

#define PADS_CTL		0x000000a0
#define  PADS_CTL_IDDQ_1L	(1 << 0)
#define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20			0x000000b8
#define PADS_PLL_CTL_TEGRA30			0x000000b4
#define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define  PADS_PLL_CTL_LOCKDET			(1 << 8)
#define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
#define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)

#define PADS_REFCLK_CFG0			0x000000c8
#define PADS_REFCLK_CFG1			0x000000cc
#define PADS_REFCLK_BIAS			0x000000d0

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT		7
#define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
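
/*
 * Illustrative sketch (not part of the driver flow): composing one 16-bit
 * PADS_REFCLK_CFG* entry from the field shifts above. Two such entries pack
 * into each 32-bit CFG register, one per PCIe port. The helper name and its
 * field arguments are hypothetical.
 */
static inline u16 pads_refclk_cfg_entry(u8 term, u8 e_term, u8 predi, u8 drvi)
{
	return (u16)((term << PADS_REFCLK_CFG_TERM_SHIFT) |	/* bits 6:2 */
		     (e_term << PADS_REFCLK_CFG_E_TERM_SHIFT) |	/* bit 7 */
		     (predi << PADS_REFCLK_CFG_PREDI_SHIFT) |	/* bits 11:8 */
		     (drvi << PADS_REFCLK_CFG_DRVI_SHIFT));	/* bits 15:12 */
}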

#define PME_ACK_TIMEOUT 10000
#define LINK_RETRAIN_TIMEOUT 100000 /* in usec */

struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct mutex lock;
	void *virt;
	dma_addr_t phys;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_port_soc {
	struct {
		u8 turnoff_bit;
		u8 ack_bit;
	} pme;
};

struct tegra_pcie_soc {
	unsigned int num_ports;
	const struct tegra_pcie_port_soc *ports;
	unsigned int msi_base_shift;
	unsigned long afi_pex2_ctrl;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	u32 pads_refclk_cfg0;
	u32 pads_refclk_cfg1;
	u32 update_fc_threshold;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_cml_clk;
	bool has_gen2;
	bool force_pca_enable;
	bool program_uphy;
	bool update_clamp_threshold;
	bool program_deskew_time;
	bool raw_violation_fixup;
	bool update_fc_timer;
	bool has_cache_bars;
	struct {
		struct {
			u32 rp_ectl_2_r1;
			u32 rp_ectl_4_r1;
			u32 rp_ectl_5_r1;
			u32 rp_ectl_6_r1;
			u32 rp_ectl_2_r2;
			u32 rp_ectl_4_r2;
			u32 rp_ectl_5_r2;
			u32 rp_ectl_6_r2;
		} regs;
		bool enable;
	} ectl;
};

static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	void __iomem *cfg;
	int irq;

	struct resource cs;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;
	struct phy *phy;

	struct tegra_msi msi;

	struct list_head ports;
	u32 xbar_config;

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;
	struct dentry *debugfs;
};

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct device_node *np;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;

	struct phy **phys;

	struct gpio_desc *reset_gpio;
};

struct tegra_pcie_bus {
	struct list_head list;
	unsigned int nr;
};

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However, it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 *
 * To work around this, a 4 KiB region is used to generate the required
 * configuration transaction with the relevant B:D:F and register offset
 * values. This is achieved by dynamically programming the base address and
 * size of the AFI_AXI_BAR used for endpoint config space mapping, to make
 * sure that the address (access to which generates the correct config
 * transaction) falls in this 4 KiB region.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xff);
}
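
/*
 * Worked example (illustrative): for bus 0x01, devfn 0x00 and extended
 * register 0x104, the function above yields
 * (0x100 << 16) | (0x01 << 16) | 0x04 == 0x01010004, i.e. the extended
 * register nibble lands in [27:24], the bus number in [23:16] and the low
 * byte of the register offset in [7:0].
 */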

static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}

static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number == 0)
		return pci_generic_config_read32(bus, devfn, where, size,
						 value);

	return pci_generic_config_read(bus, devfn, where, size, value);
}

static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number == 0)
		return pci_generic_config_write32(bus, devfn, where, size,
						  value);

	return pci_generic_config_write(bus, devfn, where, size, value);
}

static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = soc->afi_pex2_ctrl;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 1);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value &= ~AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}

	usleep_range(1000, 2000);

	if (port->reset_gpio) {
		gpiod_set_value(port->reset_gpio, 0);
	} else {
		value = afi_readl(port->pcie, ctrl);
		value |= AFI_PEX_CTRL_RST;
		afi_writel(port->pcie, value, ctrl);
	}
}

static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/* Enable AER capability */
	value = readl(port->base + RP_VEND_CTL1);
	value |= RP_VEND_CTL1_ERPT;
	writel(value, port->base + RP_VEND_CTL1);

	/* Optimal settings to enhance bandwidth */
	value = readl(port->base + RP_VEND_XP);
	value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
	value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
	writel(value, port->base + RP_VEND_XP);

	/*
	 * LTSSM will wait for DLLP to finish before entering L1 or L2,
	 * to avoid truncation of PM messages which results in receiver errors
	 */
	value = readl(port->base + RP_VEND_XP_BIST);
	value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
	writel(value, port->base + RP_VEND_XP_BIST);

	value = readl(port->base + RP_PRIV_MISC);
	value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
	value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;

	if (soc->update_clamp_threshold) {
		value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
				RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
		value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
			RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
	}

	writel(value, port->base + RP_PRIV_MISC);
}

static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	value = readl(port->base + RP_ECTL_2_R1);
	value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r1;
	writel(value, port->base + RP_ECTL_2_R1);

	value = readl(port->base + RP_ECTL_4_R1);
	value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r1 <<
				RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R1);

	value = readl(port->base + RP_ECTL_5_R1);
	value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r1;
	writel(value, port->base + RP_ECTL_5_R1);

	value = readl(port->base + RP_ECTL_6_R1);
	value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r1;
	writel(value, port->base + RP_ECTL_6_R1);

	value = readl(port->base + RP_ECTL_2_R2);
	value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_2_r2;
	writel(value, port->base + RP_ECTL_2_R2);

	value = readl(port->base + RP_ECTL_4_R2);
	value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_4_r2 <<
				RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
	writel(value, port->base + RP_ECTL_4_R2);

	value = readl(port->base + RP_ECTL_5_R2);
	value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_5_r2;
	writel(value, port->base + RP_ECTL_5_R2);

	value = readl(port->base + RP_ECTL_6_R2);
	value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
	value |= soc->ectl.regs.rp_ectl_6_r2;
	writel(value, port->base + RP_ECTL_6_R2);
}

static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	u32 value;

	/*
	 * Sometimes link speed change from Gen2 to Gen1 fails due to
	 * instability in deskew logic on lane-0. Increase the deskew
	 * retry time to resolve this issue.
	 */
	if (soc->program_deskew_time) {
		value = readl(port->base + RP_VEND_CTL0);
		value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
		value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
		writel(value, port->base + RP_VEND_CTL0);
	}

	/* Fixup for read-after-write violation. */
	if (soc->raw_violation_fixup) {
		value = readl(port->base + RP_RX_HDR_LIMIT);
		value &= ~RP_RX_HDR_LIMIT_PW_MASK;
		value |= RP_RX_HDR_LIMIT_PW;
		writel(value, port->base + RP_RX_HDR_LIMIT);

		value = readl(port->base + RP_PRIV_XP_DL);
		value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD;
		writel(value, port->base + RP_PRIV_XP_DL);

		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	if (soc->update_fc_timer) {
		value = readl(port->base + RP_VEND_XP);
		value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
		value |= soc->update_fc_threshold;
		writel(value, port->base + RP_VEND_XP);
	}

	/*
	 * The PCIe link doesn't come up with a few legacy PCIe endpoints if
	 * the root port advertises both Gen-1 and Gen-2 speeds in Tegra.
	 * Hence, the strategy followed here is to initially advertise only
	 * Gen-1 and, after the link is up, retrain it to Gen-2 speed.
	 */
	value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
	value &= ~PCI_EXP_LNKSTA_CLS;
	value |= PCI_EXP_LNKSTA_CLS_2_5GB;
	writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}

	tegra_pcie_enable_rp_features(port);

	if (soc->ectl.enable)
		tegra_pcie_program_ectl_settings(port);

	tegra_pcie_apply_sw_fixup(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	/* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
	value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}

/* The Tegra PCIe root complex wrongly reports its device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra20 and Tegra30 PCIe require relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int irq;

	tegra_cpuidle_pcie_irqs_in_use();

	irq = of_irq_parse_and_map_pci(pdev, slot, pin);
	if (!irq)
		irq = pcie->irq;

	return irq;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	static const char * const err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Legacy interrupt",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
		"Slot present pin change",
		"Slot clock request change",
		"TMS clock ramp change",
		"TMS ready for power down",
		"Peer2Peer error",
	};
	struct tegra_pcie *pcie = arg;
	struct device *dev = pcie->dev;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute the kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
	else
		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(dev, "  FPCI address: %10llx\n", address);
		else
			dev_err(dev, "  FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
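/*
 * Note: the AFI_FPCI_BAR* registers hold bits [39:8] of a 40-bit FPCI
 * address; tegra_pcie_map_bus() above programs AFI_FPCI_BAR0 with
 * 0xfe100000 (plus a 4 KiB-aligned offset) to point into the type 1
 * extended configuration space at 0xfe10000000.
 */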
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 size;
	struct resource_entry *entry;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	/* BAR 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	resource_list_for_each_entry(entry, &bridge->windows) {
		u32 fpci_bar, axi_address;
		struct resource *res = entry->res;

		size = resource_size(res);

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			/* BAR 1: downstream I/O */
			fpci_bar = 0xfdfc0000;
			axi_address = pci_pio_to_address(res->start);
			afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
			afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
			afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
			break;
		case IORESOURCE_MEM:
			fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
			axi_address = res->start;

			if (res->flags & IORESOURCE_PREFETCH) {
				/* BAR 2: prefetchable memory */
				afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
			} else {
				/* BAR 3: non-prefetchable memory */
				afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
				afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
				afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
			}
			break;
		}
	}

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	if (pcie->soc->has_cache_bars) {
		/* map all upstream transactions as uncached */
		afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
		afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
	}

	/* MSI translations are set up only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}

static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}

static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIe lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs: select PLLE output as refclock and
	 * set the TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}

static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}

static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_on(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int i;
	int err;

	for (i = 0; i < port->lanes; i++) {
		err = phy_power_off(port->phys[i]);
		if (err < 0) {
			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
				err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}

static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
	value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;

	list_for_each_entry(port, &pcie->ports, list) {
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
		value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
	}

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	/* Disable AFI dynamic clock gating and enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->afi_rst);

	clk_disable_unprepare(pcie->pll_e);
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_err(dev, "failed to enable regulators: %d\n", err);

	if (!dev->pm_domain) {
		err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to power ungate: %d\n", err);
			goto regulator_disable;
		}
		err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
		if (err) {
			dev_err(dev, "failed to remove clamp: %d\n", err);
			goto powergate;
		}
	}

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(dev, "failed to enable AFI clock: %d\n", err);
		goto powergate;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(dev, "failed to enable CML clock: %d\n", err);
			goto disable_afi_clk;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
		goto disable_cml_clk;
	}

	reset_control_deassert(pcie->afi_rst);

	return 0;

disable_cml_clk:
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
disable_afi_clk:
	clk_disable_unprepare(pcie->afi_clk);
powergate:
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
regulator_disable:
	regulator_bulk_disable(pcie->num_supplies, pcie->supplies);

	return err;
}
1285 
1286 static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
1287 {
1288 	const struct tegra_pcie_soc *soc = pcie->soc;
1289 
1290 	/* Configure the reference clock driver */
1291 	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
1292 
1293 	if (soc->num_ports > 2)
1294 		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
1295 }
1296 
1297 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1298 {
1299 	struct device *dev = pcie->dev;
1300 	const struct tegra_pcie_soc *soc = pcie->soc;
1301 
1302 	pcie->pex_clk = devm_clk_get(dev, "pex");
1303 	if (IS_ERR(pcie->pex_clk))
1304 		return PTR_ERR(pcie->pex_clk);
1305 
1306 	pcie->afi_clk = devm_clk_get(dev, "afi");
1307 	if (IS_ERR(pcie->afi_clk))
1308 		return PTR_ERR(pcie->afi_clk);
1309 
1310 	pcie->pll_e = devm_clk_get(dev, "pll_e");
1311 	if (IS_ERR(pcie->pll_e))
1312 		return PTR_ERR(pcie->pll_e);
1313 
1314 	if (soc->has_cml_clk) {
1315 		pcie->cml_clk = devm_clk_get(dev, "cml");
1316 		if (IS_ERR(pcie->cml_clk))
1317 			return PTR_ERR(pcie->cml_clk);
1318 	}
1319 
1320 	return 0;
1321 }
1322 
1323 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1324 {
1325 	struct device *dev = pcie->dev;
1326 
1327 	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
1328 	if (IS_ERR(pcie->pex_rst))
1329 		return PTR_ERR(pcie->pex_rst);
1330 
1331 	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
1332 	if (IS_ERR(pcie->afi_rst))
1333 		return PTR_ERR(pcie->afi_rst);
1334 
1335 	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
1336 	if (IS_ERR(pcie->pcie_xrst))
1337 		return PTR_ERR(pcie->pcie_xrst);
1338 
1339 	return 0;
1340 }
1341 
1342 static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
1343 {
1344 	struct device *dev = pcie->dev;
1345 	int err;
1346 
1347 	pcie->phy = devm_phy_optional_get(dev, "pcie");
1348 	if (IS_ERR(pcie->phy)) {
1349 		err = PTR_ERR(pcie->phy);
1350 		dev_err(dev, "failed to get PHY: %d\n", err);
1351 		return err;
1352 	}
1353 
1354 	err = phy_init(pcie->phy);
1355 	if (err < 0) {
1356 		dev_err(dev, "failed to initialize PHY: %d\n", err);
1357 		return err;
1358 	}
1359 
1360 	pcie->legacy_phy = true;
1361 
1362 	return 0;
1363 }
1364 
1365 static struct phy *devm_of_phy_optional_get_index(struct device *dev,
1366 						  struct device_node *np,
1367 						  const char *consumer,
1368 						  unsigned int index)
1369 {
1370 	struct phy *phy;
1371 	char *name;
1372 
1373 	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
1374 	if (!name)
1375 		return ERR_PTR(-ENOMEM);
1376 
1377 	phy = devm_of_phy_get(dev, np, name);
1378 	kfree(name);
1379 
1380 	if (PTR_ERR(phy) == -ENODEV)
1381 		phy = NULL;
1382 
1383 	return phy;
1384 }
1385 
1386 static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1387 {
1388 	struct device *dev = port->pcie->dev;
1389 	struct phy *phy;
1390 	unsigned int i;
1391 	int err;
1392 
1393 	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
1394 	if (!port->phys)
1395 		return -ENOMEM;
1396 
1397 	for (i = 0; i < port->lanes; i++) {
1398 		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1399 		if (IS_ERR(phy)) {
1400 			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1401 				PTR_ERR(phy));
1402 			return PTR_ERR(phy);
1403 		}
1404 
1405 		err = phy_init(phy);
1406 		if (err < 0) {
1407 			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1408 				err);
1409 			return err;
1410 		}
1411 
1412 		port->phys[i] = phy;
1413 	}
1414 
1415 	return 0;
1416 }
1417 
1418 static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
1419 {
1420 	const struct tegra_pcie_soc *soc = pcie->soc;
1421 	struct device_node *np = pcie->dev->of_node;
1422 	struct tegra_pcie_port *port;
1423 	int err;
1424 
1425 	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
1426 		return tegra_pcie_phys_get_legacy(pcie);
1427 
1428 	list_for_each_entry(port, &pcie->ports, list) {
1429 		err = tegra_pcie_port_get_phys(port);
1430 		if (err < 0)
1431 			return err;
1432 	}
1433 
1434 	return 0;
1435 }
1436 
1437 static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
1438 {
1439 	struct tegra_pcie_port *port;
1440 	struct device *dev = pcie->dev;
1441 	int err, i;
1442 
1443 	if (pcie->legacy_phy) {
1444 		err = phy_exit(pcie->phy);
1445 		if (err < 0)
1446 			dev_err(dev, "failed to teardown PHY: %d\n", err);
1447 		return;
1448 	}
1449 
1450 	list_for_each_entry(port, &pcie->ports, list) {
1451 		for (i = 0; i < port->lanes; i++) {
1452 			err = phy_exit(port->phys[i]);
1453 			if (err < 0)
1454 				dev_err(dev, "failed to teardown PHY#%u: %d\n",
1455 					i, err);
1456 		}
1457 	}
1458 }
1459 
1460 
1461 static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1462 {
1463 	struct device *dev = pcie->dev;
1464 	struct platform_device *pdev = to_platform_device(dev);
1465 	struct resource *pads, *afi, *res;
1466 	const struct tegra_pcie_soc *soc = pcie->soc;
1467 	int err;
1468 
1469 	err = tegra_pcie_clocks_get(pcie);
1470 	if (err) {
1471 		dev_err(dev, "failed to get clocks: %d\n", err);
1472 		return err;
1473 	}
1474 
1475 	err = tegra_pcie_resets_get(pcie);
1476 	if (err) {
1477 		dev_err(dev, "failed to get resets: %d\n", err);
1478 		return err;
1479 	}
1480 
1481 	if (soc->program_uphy) {
1482 		err = tegra_pcie_phys_get(pcie);
1483 		if (err < 0) {
1484 			dev_err(dev, "failed to get PHYs: %d\n", err);
1485 			return err;
1486 		}
1487 	}
1488 
1489 	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
1490 	pcie->pads = devm_ioremap_resource(dev, pads);
1491 	if (IS_ERR(pcie->pads)) {
1492 		err = PTR_ERR(pcie->pads);
1493 		goto phys_put;
1494 	}
1495 
1496 	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
1497 	pcie->afi = devm_ioremap_resource(dev, afi);
1498 	if (IS_ERR(pcie->afi)) {
1499 		err = PTR_ERR(pcie->afi);
1500 		goto phys_put;
1501 	}
1502 
1503 	/* request configuration space, but remap later, on demand */
1504 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1505 	if (!res) {
1506 		err = -EADDRNOTAVAIL;
1507 		goto phys_put;
1508 	}
1509 
1510 	pcie->cs = *res;
1511 
1512 	/* constrain configuration space to 4 KiB */
1513 	pcie->cs.end = pcie->cs.start + SZ_4K - 1;
1514 
1515 	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
1516 	if (IS_ERR(pcie->cfg)) {
1517 		err = PTR_ERR(pcie->cfg);
1518 		goto phys_put;
1519 	}
1520 
1521 	/* request interrupt */
1522 	err = platform_get_irq_byname(pdev, "intr");
1523 	if (err < 0) {
1524 		dev_err(dev, "failed to get IRQ: %d\n", err);
1525 		goto phys_put;
1526 	}
1527 
1528 	pcie->irq = err;
1529 
1530 	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1531 	if (err) {
1532 		dev_err(dev, "failed to register IRQ: %d\n", err);
1533 		goto phys_put;
1534 	}
1535 
1536 	return 0;
1537 
1538 phys_put:
1539 	if (soc->program_uphy)
1540 		tegra_pcie_phys_put(pcie);
1541 	return err;
1542 }
1543 
1544 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1545 {
1546 	const struct tegra_pcie_soc *soc = pcie->soc;
1547 
1548 	if (pcie->irq > 0)
1549 		free_irq(pcie->irq, pcie);
1550 
1551 	if (soc->program_uphy)
1552 		tegra_pcie_phys_put(pcie);
1553 
1554 	return 0;
1555 }
1556 
1557 static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
1558 {
1559 	struct tegra_pcie *pcie = port->pcie;
1560 	const struct tegra_pcie_soc *soc = pcie->soc;
1561 	int err;
1562 	u32 val;
1563 	u8 ack_bit;
1564 
1565 	val = afi_readl(pcie, AFI_PCIE_PME);
1566 	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
1567 	afi_writel(pcie, val, AFI_PCIE_PME);
1568 
1569 	ack_bit = soc->ports[port->index].pme.ack_bit;
1570 	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
1571 				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
1572 	if (err)
1573 		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
1574 			port->index);
1575 
1576 	usleep_range(10000, 11000);
1577 
1578 	val = afi_readl(pcie, AFI_PCIE_PME);
1579 	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
1580 	afi_writel(pcie, val, AFI_PCIE_PME);
1581 }
1582 
1583 static int tegra_msi_alloc(struct tegra_msi *chip)
1584 {
1585 	int msi;
1586 
1587 	mutex_lock(&chip->lock);
1588 
1589 	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1590 	if (msi < INT_PCI_MSI_NR)
1591 		set_bit(msi, chip->used);
1592 	else
1593 		msi = -ENOSPC;
1594 
1595 	mutex_unlock(&chip->lock);
1596 
1597 	return msi;
1598 }
1599 
1600 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1601 {
1602 	struct device *dev = chip->chip.dev;
1603 
1604 	mutex_lock(&chip->lock);
1605 
1606 	if (!test_bit(irq, chip->used))
1607 		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1608 	else
1609 		clear_bit(irq, chip->used);
1610 
1611 	mutex_unlock(&chip->lock);
1612 }
1613 
1614 static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1615 {
1616 	struct tegra_pcie *pcie = data;
1617 	struct device *dev = pcie->dev;
1618 	struct tegra_msi *msi = &pcie->msi;
1619 	unsigned int i, processed = 0;
1620 
1621 	for (i = 0; i < 8; i++) {
1622 		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1623 
1624 		while (reg) {
1625 			unsigned int offset = find_first_bit(&reg, 32);
1626 			unsigned int index = i * 32 + offset;
1627 			unsigned int irq;
1628 
1629 			/* clear the interrupt */
1630 			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1631 
1632 			irq = irq_find_mapping(msi->domain, index);
1633 			if (irq) {
1634 				if (test_bit(index, msi->used))
1635 					generic_handle_irq(irq);
1636 				else
1637 					dev_info(dev, "unhandled MSI\n");
1638 			} else {
1639 				/*
1640 				 * that's weird who triggered this?
1641 				 * just clear it
1642 				 */
1643 				dev_info(dev, "unexpected MSI\n");
1644 			}
1645 
1646 			/* see if there's any more pending in this vector */
1647 			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1648 
1649 			processed++;
1650 		}
1651 	}
1652 
1653 	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1654 }
1655 
1656 static int tegra_msi_setup_irq(struct msi_controller *chip,
1657 			       struct pci_dev *pdev, struct msi_desc *desc)
1658 {
1659 	struct tegra_msi *msi = to_tegra_msi(chip);
1660 	struct msi_msg msg;
1661 	unsigned int irq;
1662 	int hwirq;
1663 
1664 	hwirq = tegra_msi_alloc(msi);
1665 	if (hwirq < 0)
1666 		return hwirq;
1667 
1668 	irq = irq_create_mapping(msi->domain, hwirq);
1669 	if (!irq) {
1670 		tegra_msi_free(msi, hwirq);
1671 		return -EINVAL;
1672 	}
1673 
1674 	irq_set_msi_desc(irq, desc);
1675 
1676 	msg.address_lo = lower_32_bits(msi->phys);
1677 	msg.address_hi = upper_32_bits(msi->phys);
1678 	msg.data = hwirq;
1679 
1680 	pci_write_msi_msg(irq, &msg);
1681 
1682 	return 0;
1683 }
1684 
1685 static void tegra_msi_teardown_irq(struct msi_controller *chip,
1686 				   unsigned int irq)
1687 {
1688 	struct tegra_msi *msi = to_tegra_msi(chip);
1689 	struct irq_data *d = irq_get_irq_data(irq);
1690 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
1691 
1692 	irq_dispose_mapping(irq);
1693 	tegra_msi_free(msi, hwirq);
1694 }
1695 
1696 static struct irq_chip tegra_msi_irq_chip = {
1697 	.name = "Tegra PCIe MSI",
1698 	.irq_enable = pci_msi_unmask_irq,
1699 	.irq_disable = pci_msi_mask_irq,
1700 	.irq_mask = pci_msi_mask_irq,
1701 	.irq_unmask = pci_msi_unmask_irq,
1702 };
1703 
1704 static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1705 			 irq_hw_number_t hwirq)
1706 {
1707 	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1708 	irq_set_chip_data(irq, domain->host_data);
1709 
1710 	tegra_cpuidle_pcie_irqs_in_use();
1711 
1712 	return 0;
1713 }
1714 
1715 static const struct irq_domain_ops msi_domain_ops = {
1716 	.map = tegra_msi_map,
1717 };
1718 
1719 static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
1720 {
1721 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1722 	struct platform_device *pdev = to_platform_device(pcie->dev);
1723 	struct tegra_msi *msi = &pcie->msi;
1724 	struct device *dev = pcie->dev;
1725 	int err;
1726 
1727 	mutex_init(&msi->lock);
1728 
1729 	msi->chip.dev = dev;
1730 	msi->chip.setup_irq = tegra_msi_setup_irq;
1731 	msi->chip.teardown_irq = tegra_msi_teardown_irq;
1732 
1733 	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
1734 					    &msi_domain_ops, &msi->chip);
1735 	if (!msi->domain) {
1736 		dev_err(dev, "failed to create IRQ domain\n");
1737 		return -ENOMEM;
1738 	}
1739 
1740 	err = platform_get_irq_byname(pdev, "msi");
1741 	if (err < 0) {
1742 		dev_err(dev, "failed to get IRQ: %d\n", err);
1743 		goto free_irq_domain;
1744 	}
1745 
1746 	msi->irq = err;
1747 
1748 	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1749 			  tegra_msi_irq_chip.name, pcie);
1750 	if (err < 0) {
1751 		dev_err(dev, "failed to request IRQ: %d\n", err);
1752 		goto free_irq_domain;
1753 	}
1754 
1755 	/* Though the PCIe controller can address >32-bit address space, to
1756 	 * facilitate endpoints that support only 32-bit MSI target address,
1757 	 * the mask is set to 32-bit to make sure that MSI target address is
1758 	 * always a 32-bit address
1759 	 */
1760 	err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1761 	if (err < 0) {
1762 		dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1763 		goto free_irq;
1764 	}
1765 
1766 	msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1767 				    DMA_ATTR_NO_KERNEL_MAPPING);
1768 	if (!msi->virt) {
1769 		dev_err(dev, "failed to allocate DMA memory for MSI\n");
1770 		err = -ENOMEM;
1771 		goto free_irq;
1772 	}
1773 
1774 	host->msi = &msi->chip;
1775 
1776 	return 0;
1777 
1778 free_irq:
1779 	free_irq(msi->irq, pcie);
1780 free_irq_domain:
1781 	irq_domain_remove(msi->domain);
1782 	return err;
1783 }
1784 
1785 static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1786 {
1787 	const struct tegra_pcie_soc *soc = pcie->soc;
1788 	struct tegra_msi *msi = &pcie->msi;
1789 	u32 reg;
1790 
1791 	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1792 	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1793 	/* this register is in 4K increments */
1794 	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1795 
1796 	/* enable all MSI vectors */
1797 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1798 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1799 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1800 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1801 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1802 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1803 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1804 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1805 
1806 	/* and unmask the MSI interrupt */
1807 	reg = afi_readl(pcie, AFI_INTR_MASK);
1808 	reg |= AFI_INTR_MASK_MSI_MASK;
1809 	afi_writel(pcie, reg, AFI_INTR_MASK);
1810 }
1811 
1812 static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1813 {
1814 	struct tegra_msi *msi = &pcie->msi;
1815 	unsigned int i, irq;
1816 
1817 	dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1818 		       DMA_ATTR_NO_KERNEL_MAPPING);
1819 
1820 	if (msi->irq > 0)
1821 		free_irq(msi->irq, pcie);
1822 
1823 	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1824 		irq = irq_find_mapping(msi->domain, i);
1825 		if (irq > 0)
1826 			irq_dispose_mapping(irq);
1827 	}
1828 
1829 	irq_domain_remove(msi->domain);
1830 }
1831 
1832 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1833 {
1834 	u32 value;
1835 
1836 	/* mask the MSI interrupt */
1837 	value = afi_readl(pcie, AFI_INTR_MASK);
1838 	value &= ~AFI_INTR_MASK_MSI_MASK;
1839 	afi_writel(pcie, value, AFI_INTR_MASK);
1840 
1841 	/* disable all MSI vectors */
1842 	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1843 	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1844 	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1845 	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1846 	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1847 	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1848 	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1849 	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1850 
1851 	return 0;
1852 }
1853 
1854 static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1855 {
1856 	u32 value;
1857 
1858 	value = afi_readl(pcie, AFI_INTR_MASK);
1859 	value &= ~AFI_INTR_MASK_INT_MASK;
1860 	afi_writel(pcie, value, AFI_INTR_MASK);
1861 }
1862 
1863 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1864 				      u32 *xbar)
1865 {
1866 	struct device *dev = pcie->dev;
1867 	struct device_node *np = dev->of_node;
1868 
1869 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1870 		switch (lanes) {
1871 		case 0x010004:
1872 			dev_info(dev, "4x1, 1x1 configuration\n");
1873 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1874 			return 0;
1875 
1876 		case 0x010102:
1877 			dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
1878 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1879 			return 0;
1880 
1881 		case 0x010101:
1882 			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1883 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1884 			return 0;
1885 
1886 		default:
1887 			dev_info(dev, "invalid lane configuration in DT, switching to default 2x1, 1x1, 1x1 configuration\n");
1890 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1891 			return 0;
1892 		}
1893 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1894 		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1895 		switch (lanes) {
1896 		case 0x0000104:
1897 			dev_info(dev, "4x1, 1x1 configuration\n");
1898 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1899 			return 0;
1900 
1901 		case 0x0000102:
1902 			dev_info(dev, "2x1, 1x1 configuration\n");
1903 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1904 			return 0;
1905 		}
1906 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1907 		switch (lanes) {
1908 		case 0x00000204:
1909 			dev_info(dev, "4x1, 2x1 configuration\n");
1910 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1911 			return 0;
1912 
1913 		case 0x00020202:
1914 			dev_info(dev, "2x3 configuration\n");
1915 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1916 			return 0;
1917 
1918 		case 0x00010104:
1919 			dev_info(dev, "4x1, 1x2 configuration\n");
1920 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1921 			return 0;
1922 		}
1923 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1924 		switch (lanes) {
1925 		case 0x00000004:
1926 			dev_info(dev, "single-mode configuration\n");
1927 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1928 			return 0;
1929 
1930 		case 0x00000202:
1931 			dev_info(dev, "dual-mode configuration\n");
1932 			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1933 			return 0;
1934 		}
1935 	}
1936 
1937 	return -EINVAL;
1938 }
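
/*
 * The lanes argument packs one root port per byte, with the lane count of
 * port N occupying bits [8N+7:8N] (see tegra_pcie_parse_dt()). A Tegra124
 * board with a x4 port 0 and a x1 port 1, for example, encodes as
 * (1 << 8) | 4 == 0x0000104, matching the first Tegra124/Tegra210 case
 * above.
 */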
1939 
1940 /*
1941  * Check whether a given set of supplies is available in a device tree node.
1942  * This is used to check whether the new or the legacy device tree bindings
1943  * should be used.
1944  */
1945 static bool of_regulator_bulk_available(struct device_node *np,
1946 					struct regulator_bulk_data *supplies,
1947 					unsigned int num_supplies)
1948 {
1949 	char property[32];
1950 	unsigned int i;
1951 
1952 	for (i = 0; i < num_supplies; i++) {
1953 		snprintf(property, sizeof(property), "%s-supply", supplies[i].supply);
1954 
1955 		if (of_find_property(np, property, NULL) == NULL)
1956 			return false;
1957 	}
1958 
1959 	return true;
1960 }
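
/*
 * For example, a supply named "vdd-pex" is only considered available if the
 * device tree node carries a matching "vdd-pex-supply" property, e.g.:
 *
 *	pcie-controller {
 *		vdd-pex-supply = <&vdd_pex_reg>;
 *	};
 *
 * where &vdd_pex_reg is a phandle to some board-specific regulator.
 */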
1961 
1962 /*
1963  * Old versions of the device tree binding for this device used a set of power
1964  * supplies that didn't match the hardware inputs. This happened to work for a
1965  * number of cases but is not future-proof. However, to preserve backwards
1966  * compatibility with old device trees, this function will try to use the old
1967  * set of supplies.
1968  */
1969 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1970 {
1971 	struct device *dev = pcie->dev;
1972 	struct device_node *np = dev->of_node;
1973 
1974 	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1975 		pcie->num_supplies = 3;
1976 	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1977 		pcie->num_supplies = 2;
1978 
1979 	if (pcie->num_supplies == 0) {
1980 		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1981 		return -ENODEV;
1982 	}
1983 
1984 	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1985 				      sizeof(*pcie->supplies),
1986 				      GFP_KERNEL);
1987 	if (!pcie->supplies)
1988 		return -ENOMEM;
1989 
1990 	pcie->supplies[0].supply = "pex-clk";
1991 	pcie->supplies[1].supply = "vdd";
1992 
1993 	if (pcie->num_supplies > 2)
1994 		pcie->supplies[2].supply = "avdd";
1995 
1996 	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1997 }
1998 
1999 /*
2000  * Obtains the list of regulators required for a particular generation of the
2001  * IP block.
2002  *
2003  * This would've been nice to do simply by providing static tables for use
2004  * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
2005  * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
2006  * and each pair seems to be optional depending on which ports are being used.
2007  */
2008 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
2009 {
2010 	struct device *dev = pcie->dev;
2011 	struct device_node *np = dev->of_node;
2012 	unsigned int i = 0;
2013 
2014 	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
2015 		pcie->num_supplies = 4;
2016 
2017 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2018 					      sizeof(*pcie->supplies),
2019 					      GFP_KERNEL);
2020 		if (!pcie->supplies)
2021 			return -ENOMEM;
2022 
2023 		pcie->supplies[i++].supply = "dvdd-pex";
2024 		pcie->supplies[i++].supply = "hvdd-pex-pll";
2025 		pcie->supplies[i++].supply = "hvdd-pex";
2026 		pcie->supplies[i++].supply = "vddio-pexctl-aud";
2027 	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
2028 		pcie->num_supplies = 6;
2029 
2030 		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2031 					      sizeof(*pcie->supplies),
2032 					      GFP_KERNEL);
2033 		if (!pcie->supplies)
2034 			return -ENOMEM;
2035 
2036 		pcie->supplies[i++].supply = "avdd-pll-uerefe";
2037 		pcie->supplies[i++].supply = "hvddio-pex";
2038 		pcie->supplies[i++].supply = "dvddio-pex";
2039 		pcie->supplies[i++].supply = "dvdd-pex-pll";
2040 		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
2041 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2042 	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2043 		pcie->num_supplies = 7;
2044 
2045 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2046 					      sizeof(*pcie->supplies),
2047 					      GFP_KERNEL);
2048 		if (!pcie->supplies)
2049 			return -ENOMEM;
2050 
2051 		pcie->supplies[i++].supply = "avddio-pex";
2052 		pcie->supplies[i++].supply = "dvddio-pex";
2053 		pcie->supplies[i++].supply = "avdd-pex-pll";
2054 		pcie->supplies[i++].supply = "hvdd-pex";
2055 		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
2056 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2057 		pcie->supplies[i++].supply = "avdd-pll-erefe";
2058 	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2059 		bool need_pexa = false, need_pexb = false;
2060 
2061 		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2062 		if (lane_mask & 0x0f)
2063 			need_pexa = true;
2064 
2065 		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2066 		if (lane_mask & 0x30)
2067 			need_pexb = true;
2068 
2069 		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2070 					 (need_pexb ? 2 : 0);
2071 
2072 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2073 					      sizeof(*pcie->supplies),
2074 					      GFP_KERNEL);
2075 		if (!pcie->supplies)
2076 			return -ENOMEM;
2077 
2078 		pcie->supplies[i++].supply = "avdd-pex-pll";
2079 		pcie->supplies[i++].supply = "hvdd-pex";
2080 		pcie->supplies[i++].supply = "vddio-pex-ctl";
2081 		pcie->supplies[i++].supply = "avdd-plle";
2082 
2083 		if (need_pexa) {
2084 			pcie->supplies[i++].supply = "avdd-pexa";
2085 			pcie->supplies[i++].supply = "vdd-pexa";
2086 		}
2087 
2088 		if (need_pexb) {
2089 			pcie->supplies[i++].supply = "avdd-pexb";
2090 			pcie->supplies[i++].supply = "vdd-pexb";
2091 		}
2092 	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2093 		pcie->num_supplies = 5;
2094 
2095 		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2096 					      sizeof(*pcie->supplies),
2097 					      GFP_KERNEL);
2098 		if (!pcie->supplies)
2099 			return -ENOMEM;
2100 
2101 		pcie->supplies[0].supply = "avdd-pex";
2102 		pcie->supplies[1].supply = "vdd-pex";
2103 		pcie->supplies[2].supply = "avdd-pex-pll";
2104 		pcie->supplies[3].supply = "avdd-plle";
2105 		pcie->supplies[4].supply = "vddio-pex-clk";
2106 	}
2107 
2108 	if (of_regulator_bulk_available(np, pcie->supplies, pcie->num_supplies))
2110 		return devm_regulator_bulk_get(dev, pcie->num_supplies,
2111 					       pcie->supplies);
2112 
2113 	/*
2114 	 * If not all regulators are available for this new scheme, assume
2115 	 * that the device tree complies with an older version of the device
2116 	 * tree binding.
2117 	 */
2118 	dev_info(dev, "using legacy DT binding for power supplies\n");
2119 
2120 	devm_kfree(dev, pcie->supplies);
2121 	pcie->num_supplies = 0;
2122 
2123 	return tegra_pcie_get_legacy_regulators(pcie);
2124 }
2125 
2126 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2127 {
2128 	struct device *dev = pcie->dev;
2129 	struct device_node *np = dev->of_node, *port;
2130 	const struct tegra_pcie_soc *soc = pcie->soc;
2131 	u32 lanes = 0, mask = 0;
2132 	unsigned int lane = 0;
2133 	int err;
2134 
2135 	/* parse root ports */
2136 	for_each_child_of_node(np, port) {
2137 		struct tegra_pcie_port *rp;
2138 		unsigned int index;
2139 		u32 value;
2140 		char *label;
2141 
2142 		err = of_pci_get_devfn(port);
2143 		if (err < 0) {
2144 			dev_err(dev, "failed to parse address: %d\n", err);
2145 			goto err_node_put;
2146 		}
2147 
2148 		index = PCI_SLOT(err);
2149 
2150 		if (index < 1 || index > soc->num_ports) {
2151 			dev_err(dev, "invalid port number: %d\n", index);
2152 			err = -EINVAL;
2153 			goto err_node_put;
2154 		}
2155 
2156 		index--;
2157 
2158 		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2159 		if (err < 0) {
2160 			dev_err(dev, "failed to parse # of lanes: %d\n",
2161 				err);
2162 			goto err_node_put;
2163 		}
2164 
2165 		if (value > 16) {
2166 			dev_err(dev, "invalid # of lanes: %u\n", value);
2167 			err = -EINVAL;
2168 			goto err_node_put;
2169 		}
2170 
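		/* pack this port's lane count into byte 'index' of lanes */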
2171 		lanes |= value << (index << 3);
2172 
2173 		if (!of_device_is_available(port)) {
2174 			lane += value;
2175 			continue;
2176 		}
2177 
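		/*
		 * Accumulate a bitmask of the physical lanes used by enabled
		 * ports; a x2 port starting at lane 0, for example,
		 * contributes 0x3.
		 */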
2178 		mask |= ((1 << value) - 1) << lane;
2179 		lane += value;
2180 
2181 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2182 		if (!rp) {
2183 			err = -ENOMEM;
2184 			goto err_node_put;
2185 		}
2186 
2187 		err = of_address_to_resource(port, 0, &rp->regs);
2188 		if (err < 0) {
2189 			dev_err(dev, "failed to parse address: %d\n", err);
2190 			goto err_node_put;
2191 		}
2192 
2193 		INIT_LIST_HEAD(&rp->list);
2194 		rp->index = index;
2195 		rp->lanes = value;
2196 		rp->pcie = pcie;
2197 		rp->np = port;
2198 
2199 		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2200 		err = PTR_ERR_OR_ZERO(rp->base);
2201 		if (err)
2202 			goto err_node_put;
2203 		label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2204 		if (!label) {
2205 			err = -ENOMEM;
2206 			goto err_node_put;
2207 		}
2208 
2209 		/*
2210 		 * Returns -ENOENT if reset-gpios property is not populated
2211 		 * and in this case fall back to using AFI per port register
2212 		 * to toggle PERST# SFIO line.
2213 		 */
2214 		rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
2215 							     "reset-gpios", 0,
2216 							     GPIOD_OUT_LOW,
2217 							     label);
2218 		if (IS_ERR(rp->reset_gpio)) {
2219 			if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
2220 				rp->reset_gpio = NULL;
2221 			} else {
2222 				err = PTR_ERR(rp->reset_gpio);
2223 				dev_err(dev, "failed to get reset GPIO: %d\n", err);
2224 				goto err_node_put;
2225 			}
2226 		}
2227 
2228 		list_add_tail(&rp->list, &pcie->ports);
2229 	}
2230 
2231 	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2232 	if (err < 0) {
2233 		dev_err(dev, "invalid lane configuration\n");
2234 		return err;
2235 	}
2236 
2237 	err = tegra_pcie_get_regulators(pcie, mask);
2238 	if (err < 0)
2239 		return err;
2240 
2241 	return 0;
2242 
2243 err_node_put:
2244 	of_node_put(port);
2245 	return err;
2246 }
2247 
2248 /*
2249  * FIXME: If no PCIe cards are attached, calling this function can
2250  * noticeably increase boot time because of its long timeout loops.
2251  */
2253 #define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
2254 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2255 {
2256 	struct device *dev = port->pcie->dev;
2257 	unsigned int retries = 3;
2258 	unsigned long value;
2259 
2260 	/* override presence detection */
2261 	value = readl(port->base + RP_PRIV_MISC);
2262 	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2263 	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2264 	writel(value, port->base + RP_PRIV_MISC);
2265 
2266 	do {
2267 		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2268 
2269 		do {
2270 			value = readl(port->base + RP_VEND_XP);
2271 
2272 			if (value & RP_VEND_XP_DL_UP)
2273 				break;
2274 
2275 			usleep_range(1000, 2000);
2276 		} while (--timeout);
2277 
2278 		if (!timeout) {
2279 			dev_dbg(dev, "link %u down, retrying\n", port->index);
2280 			goto retry;
2281 		}
2282 
2283 		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2284 
2285 		do {
2286 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2287 
2288 			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2289 				return true;
2290 
2291 			usleep_range(1000, 2000);
2292 		} while (--timeout);
2293 
2294 retry:
2295 		tegra_pcie_port_reset(port);
2296 	} while (--retries);
2297 
2298 	return false;
2299 }
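
/*
 * With up to 200 iterations of a 1-2 ms sleep per polling loop and three
 * retries, a dead link is only declared down after roughly 0.6-1.2 seconds,
 * which is what the FIXME above about boot time refers to.
 */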
2300 
2301 static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2302 {
2303 	struct device *dev = pcie->dev;
2304 	struct tegra_pcie_port *port;
2305 	ktime_t deadline;
2306 	u32 value;
2307 
2308 	list_for_each_entry(port, &pcie->ports, list) {
2309 		/*
2310 		 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2311 		 * is not supported by Tegra. tegra_pcie_change_link_speed()
2312 		 * is called only for Tegra chips which support Gen2, so
2313 		 * there is no harm in not verifying the supported link speed.
2314 		 */
2315 		value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2316 		value &= ~PCI_EXP_LNKSTA_CLS;
2317 		value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2318 		writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2319 
2320 		/*
2321 		 * Poll until link comes back from recovery to avoid race
2322 		 * condition.
2323 		 */
2324 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2325 
2326 		while (ktime_before(ktime_get(), deadline)) {
2327 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2328 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2329 				break;
2330 
2331 			usleep_range(2000, 3000);
2332 		}
2333 
2334 		if (value & PCI_EXP_LNKSTA_LT)
2335 			dev_warn(dev, "PCIe port %u link is in recovery\n",
2336 				 port->index);
2337 
2338 		/* Retrain the link */
2339 		value = readl(port->base + RP_LINK_CONTROL_STATUS);
2340 		value |= PCI_EXP_LNKCTL_RL;
2341 		writel(value, port->base + RP_LINK_CONTROL_STATUS);
2342 
2343 		deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2344 
2345 		while (ktime_before(ktime_get(), deadline)) {
2346 			value = readl(port->base + RP_LINK_CONTROL_STATUS);
2347 			if ((value & PCI_EXP_LNKSTA_LT) == 0)
2348 				break;
2349 
2350 			usleep_range(2000, 3000);
2351 		}
2352 
2353 		if (value & PCI_EXP_LNKSTA_LT)
2354 			dev_err(dev, "failed to retrain link of port %u\n",
2355 				port->index);
2356 	}
2357 }
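
/*
 * The sequence above follows the standard software retrain procedure:
 * program the target link speed into the Link Control 2 register, wait for
 * any in-progress training to settle, set PCI_EXP_LNKCTL_RL and then poll
 * PCI_EXP_LNKSTA_LT until it clears again.
 */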
2358 
2359 static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2360 {
2361 	struct device *dev = pcie->dev;
2362 	struct tegra_pcie_port *port, *tmp;
2363 
2364 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2365 		dev_info(dev, "probing port %u, using %u lanes\n",
2366 			 port->index, port->lanes);
2367 
2368 		tegra_pcie_port_enable(port);
2369 	}
2370 
2371 	/* Start LTSSM from Tegra side */
2372 	reset_control_deassert(pcie->pcie_xrst);
2373 
2374 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2375 		if (tegra_pcie_port_check_link(port))
2376 			continue;
2377 
2378 		dev_info(dev, "link %u down, ignoring\n", port->index);
2379 
2380 		tegra_pcie_port_disable(port);
2381 		tegra_pcie_port_free(port);
2382 	}
2383 
2384 	if (pcie->soc->has_gen2)
2385 		tegra_pcie_change_link_speed(pcie);
2386 }
2387 
2388 static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2389 {
2390 	struct tegra_pcie_port *port, *tmp;
2391 
2392 	reset_control_assert(pcie->pcie_xrst);
2393 
2394 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2395 		tegra_pcie_port_disable(port);
2396 }
2397 
2398 static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2399 	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
2400 	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2401 };
2402 
2403 static const struct tegra_pcie_soc tegra20_pcie = {
2404 	.num_ports = 2,
2405 	.ports = tegra20_pcie_ports,
2406 	.msi_base_shift = 0,
2407 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2408 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2409 	.pads_refclk_cfg0 = 0xfa5cfa5c,
2410 	.has_pex_clkreq_en = false,
2411 	.has_pex_bias_ctrl = false,
2412 	.has_intr_prsnt_sense = false,
2413 	.has_cml_clk = false,
2414 	.has_gen2 = false,
2415 	.force_pca_enable = false,
2416 	.program_uphy = true,
2417 	.update_clamp_threshold = false,
2418 	.program_deskew_time = false,
2419 	.raw_violation_fixup = false,
2420 	.update_fc_timer = false,
2421 	.has_cache_bars = true,
2422 	.ectl.enable = false,
2423 };
2424 
2425 static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2426 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2427 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2428 	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2429 };
2430 
2431 static const struct tegra_pcie_soc tegra30_pcie = {
2432 	.num_ports = 3,
2433 	.ports = tegra30_pcie_ports,
2434 	.msi_base_shift = 8,
2435 	.afi_pex2_ctrl = 0x128,
2436 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2437 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2438 	.pads_refclk_cfg0 = 0xfa5cfa5c,
2439 	.pads_refclk_cfg1 = 0xfa5cfa5c,
2440 	.has_pex_clkreq_en = true,
2441 	.has_pex_bias_ctrl = true,
2442 	.has_intr_prsnt_sense = true,
2443 	.has_cml_clk = true,
2444 	.has_gen2 = false,
2445 	.force_pca_enable = false,
2446 	.program_uphy = true,
2447 	.update_clamp_threshold = false,
2448 	.program_deskew_time = false,
2449 	.raw_violation_fixup = false,
2450 	.update_fc_timer = false,
2451 	.has_cache_bars = false,
2452 	.ectl.enable = false,
2453 };
2454 
2455 static const struct tegra_pcie_soc tegra124_pcie = {
2456 	.num_ports = 2,
2457 	.ports = tegra20_pcie_ports,
2458 	.msi_base_shift = 8,
2459 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2460 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2461 	.pads_refclk_cfg0 = 0x44ac44ac,
2462 	/* FC threshold is bit[25:18] */
2463 	.update_fc_threshold = 0x03fc0000,
2464 	.has_pex_clkreq_en = true,
2465 	.has_pex_bias_ctrl = true,
2466 	.has_intr_prsnt_sense = true,
2467 	.has_cml_clk = true,
2468 	.has_gen2 = true,
2469 	.force_pca_enable = false,
2470 	.program_uphy = true,
2471 	.update_clamp_threshold = true,
2472 	.program_deskew_time = false,
2473 	.raw_violation_fixup = true,
2474 	.update_fc_timer = false,
2475 	.has_cache_bars = false,
2476 	.ectl.enable = false,
2477 };
2478 
2479 static const struct tegra_pcie_soc tegra210_pcie = {
2480 	.num_ports = 2,
2481 	.ports = tegra20_pcie_ports,
2482 	.msi_base_shift = 8,
2483 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2484 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2485 	.pads_refclk_cfg0 = 0x90b890b8,
2486 	/* FC threshold is bit[25:18] */
2487 	.update_fc_threshold = 0x01800000,
2488 	.has_pex_clkreq_en = true,
2489 	.has_pex_bias_ctrl = true,
2490 	.has_intr_prsnt_sense = true,
2491 	.has_cml_clk = true,
2492 	.has_gen2 = true,
2493 	.force_pca_enable = true,
2494 	.program_uphy = true,
2495 	.update_clamp_threshold = true,
2496 	.program_deskew_time = true,
2497 	.raw_violation_fixup = false,
2498 	.update_fc_timer = true,
2499 	.has_cache_bars = false,
2500 	.ectl = {
2501 		.regs = {
2502 			.rp_ectl_2_r1 = 0x0000000f,
2503 			.rp_ectl_4_r1 = 0x00000067,
2504 			.rp_ectl_5_r1 = 0x55010000,
2505 			.rp_ectl_6_r1 = 0x00000001,
2506 			.rp_ectl_2_r2 = 0x0000008f,
2507 			.rp_ectl_4_r2 = 0x000000c7,
2508 			.rp_ectl_5_r2 = 0x55010000,
2509 			.rp_ectl_6_r2 = 0x00000001,
2510 		},
2511 		.enable = true,
2512 	},
2513 };
2514 
2515 static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2516 	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
2517 	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
2518 	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2519 };
2520 
2521 static const struct tegra_pcie_soc tegra186_pcie = {
2522 	.num_ports = 3,
2523 	.ports = tegra186_pcie_ports,
2524 	.msi_base_shift = 8,
2525 	.afi_pex2_ctrl = 0x19c,
2526 	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2527 	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2528 	.pads_refclk_cfg0 = 0x80b880b8,
2529 	.pads_refclk_cfg1 = 0x000480b8,
2530 	.has_pex_clkreq_en = true,
2531 	.has_pex_bias_ctrl = true,
2532 	.has_intr_prsnt_sense = true,
2533 	.has_cml_clk = false,
2534 	.has_gen2 = true,
2535 	.force_pca_enable = false,
2536 	.program_uphy = false,
2537 	.update_clamp_threshold = false,
2538 	.program_deskew_time = false,
2539 	.raw_violation_fixup = false,
2540 	.update_fc_timer = false,
2541 	.has_cache_bars = false,
2542 	.ectl.enable = false,
2543 };
2544 
2545 static const struct of_device_id tegra_pcie_of_match[] = {
2546 	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2547 	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2548 	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2549 	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2550 	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2551 	{ },
2552 };
2553 
2554 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2555 {
2556 	struct tegra_pcie *pcie = s->private;
2557 
2558 	if (list_empty(&pcie->ports))
2559 		return NULL;
2560 
2561 	seq_puts(s, "Index  Status\n");
2562 
2563 	return seq_list_start(&pcie->ports, *pos);
2564 }
2565 
2566 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2567 {
2568 	struct tegra_pcie *pcie = s->private;
2569 
2570 	return seq_list_next(v, &pcie->ports, pos);
2571 }
2572 
2573 static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2574 {
2575 }
2576 
2577 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2578 {
2579 	bool up = false, active = false;
2580 	struct tegra_pcie_port *port;
2581 	unsigned int value;
2582 
2583 	port = list_entry(v, struct tegra_pcie_port, list);
2584 
2585 	value = readl(port->base + RP_VEND_XP);
2586 
2587 	if (value & RP_VEND_XP_DL_UP)
2588 		up = true;
2589 
2590 	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2591 
2592 	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2593 		active = true;
2594 
2595 	seq_printf(s, "%2u     ", port->index);
2596 
2597 	if (up)
2598 		seq_puts(s, "up");
2599 
2600 	if (active) {
2601 		if (up)
2602 			seq_puts(s, ", ");
2603 
2604 		seq_puts(s, "active");
2605 	}
2606 
2607 	seq_putc(s, '\n');
2608 	return 0;
2609 }
2610 
2611 static const struct seq_operations tegra_pcie_ports_seq_ops = {
2612 	.start = tegra_pcie_ports_seq_start,
2613 	.next = tegra_pcie_ports_seq_next,
2614 	.stop = tegra_pcie_ports_seq_stop,
2615 	.show = tegra_pcie_ports_seq_show,
2616 };
2617 
2618 static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2619 {
2620 	struct tegra_pcie *pcie = inode->i_private;
2621 	struct seq_file *s;
2622 	int err;
2623 
2624 	err = seq_open(file, &tegra_pcie_ports_seq_ops);
2625 	if (err)
2626 		return err;
2627 
2628 	s = file->private_data;
2629 	s->private = pcie;
2630 
2631 	return 0;
2632 }
2633 
2634 static const struct file_operations tegra_pcie_ports_ops = {
2635 	.owner = THIS_MODULE,
2636 	.open = tegra_pcie_ports_open,
2637 	.read = seq_read,
2638 	.llseek = seq_lseek,
2639 	.release = seq_release,
2640 };
2641 
2642 static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2643 {
2644 	debugfs_remove_recursive(pcie->debugfs);
2645 	pcie->debugfs = NULL;
2646 }
2647 
2648 static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2649 {
2650 	struct dentry *file;
2651 
2652 	pcie->debugfs = debugfs_create_dir("pcie", NULL);
2653 	if (!pcie->debugfs)
2654 		return -ENOMEM;
2655 
2656 	file = debugfs_create_file("ports", 0444, pcie->debugfs, pcie,
2657 				   &tegra_pcie_ports_ops);
2658 	if (!file)
2659 		goto remove;
2660 
2661 	return 0;
2662 
2663 remove:
2664 	tegra_pcie_debugfs_exit(pcie);
2665 	return -ENOMEM;
2666 }
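
/*
 * Reading the resulting file yields one line per remaining port, roughly:
 *
 *	# cat /sys/kernel/debug/pcie/ports
 *	Index  Status
 *	 0     up, active
 *	 1     up
 */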
2667 
2668 static int tegra_pcie_probe(struct platform_device *pdev)
2669 {
2670 	struct device *dev = &pdev->dev;
2671 	struct pci_host_bridge *host;
2672 	struct tegra_pcie *pcie;
2673 	struct pci_bus *child;
2674 	struct resource *bus;
2675 	int err;
2676 
2677 	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2678 	if (!host)
2679 		return -ENOMEM;
2680 
2681 	pcie = pci_host_bridge_priv(host);
2682 	host->sysdata = pcie;
2683 	platform_set_drvdata(pdev, pcie);
2684 
2685 	pcie->soc = of_device_get_match_data(dev);
2686 	INIT_LIST_HEAD(&pcie->ports);
2687 	pcie->dev = dev;
2688 
2689 	err = pci_parse_request_of_pci_ranges(dev, &host->windows, NULL, &bus);
2690 	if (err) {
2691 		dev_err(dev, "failed to get bridge resources: %d\n", err);
2692 		return err;
2693 	}
2694 
2695 	err = tegra_pcie_parse_dt(pcie);
2696 	if (err < 0)
2697 		return err;
2698 
2699 	err = tegra_pcie_get_resources(pcie);
2700 	if (err < 0) {
2701 		dev_err(dev, "failed to request resources: %d\n", err);
2702 		return err;
2703 	}
2704 
2705 	err = tegra_pcie_msi_setup(pcie);
2706 	if (err < 0) {
2707 		dev_err(dev, "failed to enable MSI support: %d\n", err);
2708 		goto put_resources;
2709 	}
2710 
2711 	pm_runtime_enable(pcie->dev);
2712 	err = pm_runtime_get_sync(pcie->dev);
2713 	if (err < 0) {
2714 		dev_err(dev, "failed to enable PCIe controller: %d\n", err);
2715 		goto teardown_msi;
2716 	}
2717 
2718 	host->busnr = bus->start;
2719 	host->dev.parent = &pdev->dev;
2720 	host->ops = &tegra_pcie_ops;
2721 	host->map_irq = tegra_pcie_map_irq;
2722 	host->swizzle_irq = pci_common_swizzle;
2723 
2724 	err = pci_scan_root_bus_bridge(host);
2725 	if (err < 0) {
2726 		dev_err(dev, "failed to register host: %d\n", err);
2727 		goto pm_runtime_put;
2728 	}
2729 
2730 	pci_bus_size_bridges(host->bus);
2731 	pci_bus_assign_resources(host->bus);
2732 
2733 	list_for_each_entry(child, &host->bus->children, node)
2734 		pcie_bus_configure_settings(child);
2735 
2736 	pci_bus_add_devices(host->bus);
2737 
2738 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2739 		err = tegra_pcie_debugfs_init(pcie);
2740 		if (err < 0)
2741 			dev_err(dev, "failed to setup debugfs: %d\n", err);
2742 	}
2743 
2744 	return 0;
2745 
2746 pm_runtime_put:
2747 	pm_runtime_put_sync(pcie->dev);
2748 	pm_runtime_disable(pcie->dev);
2749 teardown_msi:
2750 	tegra_pcie_msi_teardown(pcie);
2751 put_resources:
2752 	tegra_pcie_put_resources(pcie);
2753 	return err;
2754 }
2755 
2756 static int tegra_pcie_remove(struct platform_device *pdev)
2757 {
2758 	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2759 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2760 	struct tegra_pcie_port *port, *tmp;
2761 
2762 	if (IS_ENABLED(CONFIG_DEBUG_FS))
2763 		tegra_pcie_debugfs_exit(pcie);
2764 
2765 	pci_stop_root_bus(host->bus);
2766 	pci_remove_root_bus(host->bus);
2767 	pm_runtime_put_sync(pcie->dev);
2768 	pm_runtime_disable(pcie->dev);
2769 
2770 	if (IS_ENABLED(CONFIG_PCI_MSI))
2771 		tegra_pcie_msi_teardown(pcie);
2772 
2773 	tegra_pcie_put_resources(pcie);
2774 
2775 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2776 		tegra_pcie_port_free(port);
2777 
2778 	return 0;
2779 }
2780 
2781 static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
2782 {
2783 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2784 	struct tegra_pcie_port *port;
2785 	int err;
2786 
2787 	list_for_each_entry(port, &pcie->ports, list)
2788 		tegra_pcie_pme_turnoff(port);
2789 
2790 	tegra_pcie_disable_ports(pcie);
2791 
2792 	/*
2793 	 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2794 	 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2795 	 */
2796 	tegra_pcie_disable_interrupts(pcie);
2797 
2798 	if (pcie->soc->program_uphy) {
2799 		err = tegra_pcie_phy_power_off(pcie);
2800 		if (err < 0)
2801 			dev_err(dev, "failed to power off PHY(s): %d\n", err);
2802 	}
2803 
2804 	reset_control_assert(pcie->pex_rst);
2805 	clk_disable_unprepare(pcie->pex_clk);
2806 
2807 	if (IS_ENABLED(CONFIG_PCI_MSI))
2808 		tegra_pcie_disable_msi(pcie);
2809 
2810 	pinctrl_pm_select_idle_state(dev);
2811 	tegra_pcie_power_off(pcie);
2812 
2813 	return 0;
2814 }
2815 
2816 static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
2817 {
2818 	struct tegra_pcie *pcie = dev_get_drvdata(dev);
2819 	int err;
2820 
2821 	err = tegra_pcie_power_on(pcie);
2822 	if (err) {
2823 		dev_err(dev, "failed to power on PCIe controller: %d\n", err);
2824 		return err;
2825 	}
2826 
2827 	err = pinctrl_pm_select_default_state(dev);
2828 	if (err < 0) {
2829 		dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2830 		goto poweroff;
2831 	}
2832 
2833 	tegra_pcie_enable_controller(pcie);
2834 	tegra_pcie_setup_translations(pcie);
2835 
2836 	if (IS_ENABLED(CONFIG_PCI_MSI))
2837 		tegra_pcie_enable_msi(pcie);
2838 
2839 	err = clk_prepare_enable(pcie->pex_clk);
2840 	if (err) {
2841 		dev_err(dev, "failed to enable PEX clock: %d\n", err);
2842 		goto pex_dpd_enable;
2843 	}
2844 
2845 	reset_control_deassert(pcie->pex_rst);
2846 
2847 	if (pcie->soc->program_uphy) {
2848 		err = tegra_pcie_phy_power_on(pcie);
2849 		if (err < 0) {
2850 			dev_err(dev, "failed to power on PHY(s): %d\n", err);
2851 			goto disable_pex_clk;
2852 		}
2853 	}
2854 
2855 	tegra_pcie_apply_pad_settings(pcie);
2856 	tegra_pcie_enable_ports(pcie);
2857 
2858 	return 0;
2859 
2860 disable_pex_clk:
2861 	reset_control_assert(pcie->pex_rst);
2862 	clk_disable_unprepare(pcie->pex_clk);
2863 pex_dpd_enable:
2864 	pinctrl_pm_select_idle_state(dev);
2865 poweroff:
2866 	tegra_pcie_power_off(pcie);
2867 
2868 	return err;
2869 }
2870 
2871 static const struct dev_pm_ops tegra_pcie_pm_ops = {
2872 	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2873 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
2874 				      tegra_pcie_pm_resume)
2875 };
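
/*
 * The same pair of callbacks serves both runtime PM and the noirq phase of
 * system sleep: the controller is powered down completely whenever it is
 * runtime suspended and re-initialized from scratch on resume.
 */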
2876 
2877 static struct platform_driver tegra_pcie_driver = {
2878 	.driver = {
2879 		.name = "tegra-pcie",
2880 		.of_match_table = tegra_pcie_of_match,
2881 		.suppress_bind_attrs = true,
2882 		.pm = &tegra_pcie_pm_ops,
2883 	},
2884 	.probe = tegra_pcie_probe,
2885 	.remove = tegra_pcie_remove,
2886 };
2887 module_platform_driver(tegra_pcie_driver);
2888 MODULE_LICENSE("GPL");
2889