/*
 * Driver for MMC and SSD cards for Cavium ThunderX SOCs.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cavium Inc.
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include "cavium.h"

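/*
 * All slots share a single eMMC bus, so access is serialized with a
 * semaphore via the acquire/release bus callbacks.
 */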
static void thunder_mmc_acquire_bus(struct cvm_mmc_host *host)
{
	down(&host->mmc_serializer);
}

static void thunder_mmc_release_bus(struct cvm_mmc_host *host)
{
	up(&host->mmc_serializer);
}

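/*
 * Clear any stale bits in MIO_EMM_INT (write-one-to-clear) before
 * enabling them in MIO_EMM_INT_EN_SET.
 */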
static void thunder_mmc_int_enable(struct cvm_mmc_host *host, u64 val)
{
	writeq(val, host->base + MIO_EMM_INT(host));
	writeq(val, host->base + MIO_EMM_INT_EN_SET(host));
}

static int thunder_mmc_register_interrupts(struct cvm_mmc_host *host,
					   struct pci_dev *pdev)
{
	int nvec, ret, i;

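	/*
	 * The controller exposes up to nine MSI-X vectors, one per
	 * interrupt source named in cvm_mmc_irq_names[].
	 */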
	nvec = pci_alloc_irq_vectors(pdev, 1, 9, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;

	/* register interrupts */
	for (i = 0; i < nvec; i++) {
		ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i),
				       cvm_mmc_interrupt,
				       0, cvm_mmc_irq_names[i], host);
		if (ret)
			return ret;
	}
	return 0;
}

static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

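	/* BAR 0 contains both the eMMC and the DMA register blocks */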
	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base)
		return -EINVAL;

	/* On ThunderX these are identical */
	host->dma_base = host->base;

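	/* Fixed offsets of the eMMC and DMA register blocks within BAR 0 */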
	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk))
		return PTR_ERR(host->clk);

	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

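	/* The DMA engine generates 48-bit bus addresses */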
	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from the
	 * bootloader. Writing 1 to a bit clears it; 127 covers the seven
	 * eMMC interrupt bits, 3 the two DMA interrupt bits.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse and devm* require one device per slot.
		 * Create a dummy device per slot and point its of_node at
		 * the slot node; the easiest way to do this is
		 * of_platform_device_create.
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret)
				goto error;
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

error:
	clk_disable_unprepare(host->clk);
	return ret;
}

static void thunder_mmc_remove(struct pci_dev *pdev)
{
	struct cvm_mmc_host *host = pci_get_drvdata(pdev);
	u64 dma_cfg;
	int i;

	for (i = 0; i < CAVIUM_MAX_MMC; i++)
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);

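	/* Disable the DMA engine before shutting off the clock */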
	dma_cfg = readq(host->dma_base + MIO_EMM_DMA_CFG(host));
	dma_cfg &= ~MIO_EMM_DMA_CFG_EN;
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	clk_disable_unprepare(host->clk);
}

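/* Cavium ThunderX eMMC controller, PCI device ID 0xa010 */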
static const struct pci_device_id thunder_mmc_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa010) },
	{ 0, }  /* end of table */
};

static struct pci_driver thunder_mmc_driver = {
	.name = KBUILD_MODNAME,
	.id_table = thunder_mmc_id_table,
	.probe = thunder_mmc_probe,
	.remove = thunder_mmc_remove,
};

module_pci_driver(thunder_mmc_driver);

MODULE_AUTHOR("Cavium Inc.");
MODULE_DESCRIPTION("Cavium ThunderX eMMC Driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, thunder_mmc_id_table);