// SPDX-License-Identifier: GPL-2.0
/* pci_msi.c: Sparc64 MSI support common layer.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include "pci_impl.h"

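/*
 * MSI event queue interrupt handler: drain every pending entry from the
 * queue, dispatch each MSI to its Linux IRQ via generic_handle_irq(),
 * then write back the updated queue head.
 */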
static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
{
	struct sparc64_msiq_cookie *msiq_cookie = cookie;
	struct pci_pbm_info *pbm = msiq_cookie->pbm;
	unsigned long msiqid = msiq_cookie->msiqid;
	const struct sparc64_msiq_ops *ops;
	unsigned long orig_head, head;
	int err;

	ops = pbm->msi_ops;

	err = ops->get_head(pbm, msiqid, &head);
	if (unlikely(err < 0))
		goto err_get_head;

	orig_head = head;
	for (;;) {
		unsigned long msi;

		err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
		if (likely(err > 0)) {
			unsigned int irq;

			irq = pbm->msi_irq_table[msi - pbm->msi_first];
			generic_handle_irq(irq);
		}

		if (unlikely(err < 0))
			goto err_dequeue;

		if (err == 0)
			break;
	}
	if (likely(head != orig_head)) {
		err = ops->set_head(pbm, msiqid, head);
		if (unlikely(err < 0))
			goto err_set_head;
	}
	return IRQ_HANDLED;

err_get_head:
	printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
	       msiqid, err);
	goto err_out;

err_dequeue:
	printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_set_head:
	printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
	       "gives error %d\n",
	       head, msiqid, err);
	goto err_out;

err_out:
	return IRQ_NONE;
}

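/* Round-robin selection of an MSI event queue within this PBM. */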
static u32 pick_msiq(struct pci_pbm_info *pbm)
{
	static DEFINE_SPINLOCK(rotor_lock);
	unsigned long flags;
	u32 ret, rotor;

	spin_lock_irqsave(&rotor_lock, flags);

	rotor = pbm->msiq_rotor;
	ret = pbm->msiq_first + rotor;

	if (++rotor >= pbm->msiq_num)
		rotor = 0;
	pbm->msiq_rotor = rotor;

	spin_unlock_irqrestore(&rotor_lock, flags);

	return ret;
}

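/*
 * MSI number allocation: bits in pbm->msi_bitmap are relative to
 * pbm->msi_first.
 */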
static int alloc_msi(struct pci_pbm_info *pbm)
{
	int i;

	for (i = 0; i < pbm->msi_num; i++) {
		if (!test_and_set_bit(i, pbm->msi_bitmap))
			return i + pbm->msi_first;
	}

	return -ENOENT;
}

static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
	msi_num -= pbm->msi_first;
	clear_bit(msi_num, pbm->msi_bitmap);
}

static struct irq_chip msi_irq = {
	.name		= "PCI-MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_enable	= pci_msi_unmask_irq,
	.irq_disable	= pci_msi_mask_irq,
	/* XXX affinity XXX */
};

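/*
 * Allocate a Linux IRQ and an MSI number for the device, bind the MSI
 * to one of the PBM's event queues, and compose the MSI message from
 * the PBM's 32-bit or 64-bit MSI address range.
 */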
static int sparc64_setup_msi_irq(unsigned int *irq_p,
				 struct pci_dev *pdev,
				 struct msi_desc *entry)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	struct msi_msg msg;
	int msi, err;
	u32 msiqid;

	*irq_p = irq_alloc(0, 0);
	err = -ENOMEM;
	if (!*irq_p)
		goto out_err;

	irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq,
				      "MSI");

	err = alloc_msi(pbm);
	if (unlikely(err < 0))
		goto out_irq_free;

	msi = err;

	msiqid = pick_msiq(pbm);

	err = ops->msi_setup(pbm, msiqid, msi,
			     (entry->pci.msi_attrib.is_64 ? 1 : 0));
	if (err)
		goto out_msi_free;

	pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;

	if (entry->pci.msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi;

	irq_set_msi_desc(*irq_p, entry);
	pci_write_msi_msg(*irq_p, &msg);

	return 0;

out_msi_free:
	free_msi(pbm, msi);

out_irq_free:
	irq_set_chip(*irq_p, NULL);
	irq_free(*irq_p);
	*irq_p = 0;

out_err:
	return err;
}

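/*
 * Undo sparc64_setup_msi_irq(): find the MSI bound to this IRQ, tear it
 * down at the hardware layer, then release the MSI number and the IRQ.
 */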
static void sparc64_teardown_msi_irq(unsigned int irq,
				     struct pci_dev *pdev)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	unsigned int msi_num;
	int i, err;

	for (i = 0; i < pbm->msi_num; i++) {
		if (pbm->msi_irq_table[i] == irq)
			break;
	}
	if (i >= pbm->msi_num) {
		pci_err(pdev, "%s: teardown: No MSI for irq %u\n", pbm->name,
			irq);
		return;
	}

	msi_num = pbm->msi_first + i;
	pbm->msi_irq_table[i] = ~0U;

	err = ops->msi_teardown(pbm, msi_num);
	if (err) {
		pci_err(pdev, "%s: teardown: ops->teardown() on MSI %u, "
			"irq %u, gives error %d\n", pbm->name, msi_num, irq,
			err);
		return;
	}

	free_msi(pbm, msi_num);

	irq_set_chip(irq, NULL);
	irq_free(irq);
}

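/* The MSI bitmap is sized up to a whole number of unsigned longs. */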
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
	unsigned long size, bits_per_ulong;

	bits_per_ulong = sizeof(unsigned long) * 8;
	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;
	BUG_ON(size % sizeof(unsigned long));

	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_bitmap)
		return -ENOMEM;

	return 0;
}

static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msi_bitmap);
	pbm->msi_bitmap = NULL;
}

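/*
 * Allocate the per-queue cookies passed to the MSIQ interrupt handler
 * and the MSI-number-to-IRQ lookup table.
 */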
static int msi_table_alloc(struct pci_pbm_info *pbm)
{
	int size, i;

	size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
	pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
	if (!pbm->msiq_irq_cookies)
		return -ENOMEM;

	for (i = 0; i < pbm->msiq_num; i++) {
		struct sparc64_msiq_cookie *p;

		p = &pbm->msiq_irq_cookies[i];
		p->pbm = pbm;
		p->msiqid = pbm->msiq_first + i;
	}

	size = pbm->msi_num * sizeof(unsigned int);
	pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_irq_table) {
		kfree(pbm->msiq_irq_cookies);
		pbm->msiq_irq_cookies = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void msi_table_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msiq_irq_cookies);
	pbm->msiq_irq_cookies = NULL;

	kfree(pbm->msi_irq_table);
	pbm->msi_irq_table = NULL;
}

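/*
 * Build the IRQ for one MSI event queue, bind its affinity to the PBM's
 * NUMA node when one is known, and install sparc64_msiq_interrupt() as
 * the handler.
 */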
static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
				 const struct sparc64_msiq_ops *ops,
				 unsigned long msiqid,
				 unsigned long devino)
{
	int irq = ops->msiq_build_irq(pbm, msiqid, devino);
	int err, nid;

	if (irq < 0)
		return irq;

	nid = pbm->numa_node;
	if (nid != -1) {
		cpumask_t numa_mask;

		cpumask_copy(&numa_mask, cpumask_of_node(nid));
		irq_set_affinity(irq, &numa_mask);
	}
	err = request_irq(irq, sparc64_msiq_interrupt, 0,
			  "MSIQ",
			  &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
	if (err)
		return err;

	return 0;
}

static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
				      const struct sparc64_msiq_ops *ops)
{
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = i + pbm->msiq_first;
		unsigned long devino = i + pbm->msiq_first_devino;
		int err;

		err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
		if (err)
			return err;
	}

	return 0;
}

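/*
 * Read the OF properties describing this PBM's MSI and MSI event queue
 * resources, allocate the supporting tables, and bring up the event
 * queues.  Any failure simply leaves the PBM without MSI support.
 */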
void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
			  const struct sparc64_msiq_ops *ops)
{
	const u32 *val;
	int len;

	val = of_get_property(pbm->op->dev.of_node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		const struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		const struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		const struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->op->dev.of_node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		mqp = of_get_property(pbm->op->dev.of_node,
				      "msi-eq-to-devino", &len);
		if (!mqp)
			mqp = of_get_property(pbm->op->dev.of_node,
					      "msi-eq-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->op->dev.of_node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->op->dev.of_node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->op->dev.of_node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->op->dev.of_node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->op->dev.of_node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_table_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (ops->msiq_alloc(pbm)) {
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (sparc64_bringup_msi_queues(pbm, ops)) {
			ops->msiq_free(pbm);
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%llx:0x%x] "
		       "addr64[0x%llx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
		       pbm->name,
		       __pa(pbm->msi_queues));

		pbm->msi_ops = ops;
		pbm->setup_msi_irq = sparc64_setup_msi_irq;
		pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
	}
	return;

no_msi:
	pbm->msiq_num = 0;
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}