/*
 * linux/arch/alpha/kernel/sys_marvel.c
 *
 * Marvel / IO7 support
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_marvel.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include <asm/vga.h>
#include <asm/rtc.h>

#include "proto.h"
#include "err_impl.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"

#if NR_IRQS < MARVEL_NR_IRQS
# error NR_IRQS < MARVEL_NR_IRQS !!!
#endif


/*
 * Interrupt handling.
 */
static void
io7_device_interrupt(unsigned long vector)
{
	unsigned int pid;
	unsigned int irq;

	/*
	 * Vector is 0x800 + (interrupt)
	 *
	 * where (interrupt) is:
	 *
	 *	...16|15 14|13     4|3 0
	 *	-----+-----+--------+---
	 *	  PE |  0  |   irq  | 0
	 *
	 * where (irq) is
	 *
	 *	 0x0800 - 0x0ff0	 - 0x0800 + (LSI id << 4)
	 *	 0x1000 - 0x2ff0	 - 0x1000 + (MSI_DAT<8:0> << 4)
	 */
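	/*
	 * Example: LSI 0x02 from the IO7 at PE 1 arrives as vector
	 * 0x10820 -> pid = 1, device irq = 0x02, which is then biased
	 * by 16 and merged with the PE number below.
	 */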
	pid = vector >> 16;
	irq = ((vector & 0xffff) - 0x800) >> 4;

	irq += 16;				/* offset for legacy */
	irq &= MARVEL_IRQ_VEC_IRQ_MASK;		/* not too many bits */
	irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT;	/* merge the pid     */

	handle_irq(irq);
}

static volatile unsigned long *
io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
{
	volatile unsigned long *ctl;
	unsigned int pid;
	struct io7 *io7;

	pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT;

	if (!(io7 = marvel_find_io7(pid))) {
		printk(KERN_ERR
		       "%s for nonexistent io7 -- vec %x, pid %d\n",
		       __func__, irq, pid);
		return NULL;
	}

	irq &= MARVEL_IRQ_VEC_IRQ_MASK;	/* isolate the vector    */
	irq -= 16;			/* subtract legacy bias  */

	if (irq >= 0x180) {
		printk(KERN_ERR
		       "%s for invalid irq -- pid %d adjusted irq %x\n",
		       __func__, pid, irq);
		return NULL;
	}

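	/*
	 * Adjusted irqs below 0x80 are LSIs, one PO7_LSI_CTL register
	 * each; 0x80 and above are MSIs, with one PO7_MSI_CTL register
	 * covering each block of 32 consecutive MSI data values.
	 */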
	ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr; /* assume LSI */
	if (irq >= 0x80)		/* MSI */
		ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr;

	if (pio7) *pio7 = io7;
	return ctl;
}

static void
io7_enable_irq(struct irq_data *d)
{
	volatile unsigned long *ctl;
	unsigned int irq = d->irq;
	struct io7 *io7;

	ctl = io7_get_irq_ctl(irq, &io7);
	if (!ctl || !io7) {
		printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
		       __func__, irq);
		return;
	}

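	/*
	 * Bit 24 of the LSI/MSI CTL register is the interrupt enable bit;
	 * the dummy read back after the mb() flushes the posted write to
	 * the IO7.
	 */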
	spin_lock(&io7->irq_lock);
	*ctl |= 1UL << 24;
	mb();
	*ctl;
	spin_unlock(&io7->irq_lock);
}

static void
io7_disable_irq(struct irq_data *d)
{
	volatile unsigned long *ctl;
	unsigned int irq = d->irq;
	struct io7 *io7;

	ctl = io7_get_irq_ctl(irq, &io7);
	if (!ctl || !io7) {
		printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
		       __func__, irq);
		return;
	}

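	/* Clear the enable bit (see io7_enable_irq above).  */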
	spin_lock(&io7->irq_lock);
	*ctl &= ~(1UL << 24);
	mb();
	*ctl;
	spin_unlock(&io7->irq_lock);
}

static void
marvel_irq_noop(struct irq_data *d)
{
	return;
}

static struct irq_chip marvel_legacy_irq_type = {
	.name		= "LEGACY",
	.irq_mask	= marvel_irq_noop,
	.irq_unmask	= marvel_irq_noop,
};

static struct irq_chip io7_lsi_irq_type = {
	.name		= "LSI",
	.irq_unmask	= io7_enable_irq,
	.irq_mask	= io7_disable_irq,
	.irq_mask_ack	= io7_disable_irq,
};

static struct irq_chip io7_msi_irq_type = {
	.name		= "MSI",
	.irq_unmask	= io7_enable_irq,
	.irq_mask	= io7_disable_irq,
	.irq_ack	= marvel_irq_noop,
};

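/*
 * io7_redirect_irq() handles the per-IO7 error interrupt CSRs (HLT, HPI,
 * CRD, STV, HEI), which keep the target PID at bit 24; the LSI/MSI CTL
 * registers keep it at bit 14, hence the separate helpers below.
 */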
static void
io7_redirect_irq(struct io7 *io7,
		 volatile unsigned long *csr,
		 unsigned int where)
{
	unsigned long val;

	val = *csr;
	val &= ~(0x1ffUL << 24);		/* clear the target pid   */
	val |= ((unsigned long)where << 24);	/* set the new target pid */

	*csr = val;
	mb();
	*csr;
}

static void
io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned int where)
{
	unsigned long val;

	/*
	 * LSI_CTL has target PID @ 14
	 */
	val = io7->csrs->PO7_LSI_CTL[which].csr;
	val &= ~(0x1ffUL << 14);		/* clear the target pid */
	val |= ((unsigned long)where << 14);	/* set the new target pid */

	io7->csrs->PO7_LSI_CTL[which].csr = val;
	mb();
	io7->csrs->PO7_LSI_CTL[which].csr;
}

static void
io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where)
{
	unsigned long val;

	/*
	 * MSI_CTL has target PID @ 14
	 */
	val = io7->csrs->PO7_MSI_CTL[which].csr;
	val &= ~(0x1ffUL << 14);		/* clear the target pid */
	val |= ((unsigned long)where << 14);	/* set the new target pid */

	io7->csrs->PO7_MSI_CTL[which].csr = val;
	mb();
	io7->csrs->PO7_MSI_CTL[which].csr;
}

static void __init
init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where)
{
	/*
	 * LSI_CTL has target PID @ 14
	 */
	io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14);
	mb();
	io7->csrs->PO7_LSI_CTL[which].csr;
}

static void __init
init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
{
	/*
	 * MSI_CTL has target PID @ 14
	 */
	io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14);
	mb();
	io7->csrs->PO7_MSI_CTL[which].csr;
}

static void __init
init_io7_irqs(struct io7 *io7,
	      struct irq_chip *lsi_ops,
	      struct irq_chip *msi_ops)
{
	long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
	long i;

	printk("Initializing interrupts for IO7 at PE %u - base %lx\n",
		io7->pe, base);

	/*
	 * Where should interrupts from this IO7 go?
	 *
	 * They really should be sent to the local CPU to avoid having to
	 * traverse the mesh, but if it's not an SMP kernel, they have to
	 * go to the boot CPU. Send them all to the boot CPU for now; as
	 * each secondary starts, it can redirect its local device
	 * interrupts.
	 */
	printk("  Interrupts reported to CPU at PE %u\n", boot_cpuid);

	spin_lock(&io7->irq_lock);

	/* set up the error irqs */
	io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid);

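	/*
	 * Per-IO7 irq layout: base .. base+127 are the LSIs and
	 * base+128 .. base+639 the MSIs, where base biases the PE's
	 * vector space by 16 to skip the legacy irqs.
	 */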
	/* Set up the lsi irqs.  */
	for (i = 0; i < 128; ++i) {
		irq_set_chip_and_handler(base + i, lsi_ops, handle_level_irq);
		irq_set_status_flags(base + i, IRQ_LEVEL);
	}

	/* Disable the implemented irqs in hardware.  */
	for (i = 0; i < 0x60; ++i)
		init_one_io7_lsi(io7, i, boot_cpuid);

	init_one_io7_lsi(io7, 0x74, boot_cpuid);
	init_one_io7_lsi(io7, 0x75, boot_cpuid);

	/* Set up the msi irqs.  */
	for (i = 128; i < (128 + 512); ++i) {
		irq_set_chip_and_handler(base + i, msi_ops, handle_level_irq);
		irq_set_status_flags(base + i, IRQ_LEVEL);
	}

	for (i = 0; i < 16; ++i)
		init_one_io7_msi(io7, i, boot_cpuid);

	spin_unlock(&io7->irq_lock);
}

static void __init
marvel_init_irq(void)
{
	int i;
	struct io7 *io7 = NULL;

	/* Reserve the legacy irqs.  */
	for (i = 0; i < 16; ++i) {
		irq_set_chip_and_handler(i, &marvel_legacy_irq_type,
					 handle_level_irq);
	}

	/* Init the io7 irqs.  */
	for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
		init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type);
}

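/*
 * Map a device interrupt to a Linux irq: start from the PCI intline
 * (an IO7 LSI number) or, if the device has MSI enabled, from
 * MSI_DAT<8:0> offset past the 128 LSIs; then apply the legacy bias of
 * 16 and merge in the PE number, mirroring io7_device_interrupt above.
 */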
static int
marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pci_controller *hose = dev->sysdata;
	struct io7_port *io7_port = hose->sysdata;
	struct io7 *io7 = io7_port->io7;
	int msi_loc, msi_data_off;
	u16 msg_ctl;
	u16 msg_dat;
	u8 intline;
	int irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
	irq = intline;

	msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
	msg_ctl = 0;
	if (msi_loc)
		pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);

	if (msg_ctl & PCI_MSI_FLAGS_ENABLE) {
		msi_data_off = PCI_MSI_DATA_32;
		if (msg_ctl & PCI_MSI_FLAGS_64BIT)
			msi_data_off = PCI_MSI_DATA_64;
		pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat);

		irq = msg_dat & 0x1ff;		/* we use msg_data<8:0>  */
		irq += 0x80;			/* offset past the LSIs  */

#if 1
		printk("PCI:%d:%d:%d (hose %d) is using MSI\n",
		       dev->bus->number,
		       PCI_SLOT(dev->devfn),
		       PCI_FUNC(dev->devfn),
		       hose->index);
		printk("  %d message(s) from 0x%04x\n",
		       1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
		       msg_dat);
		printk("  reporting on %d IRQ(s) from %d (0x%x)\n",
		       1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
		       (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT),
		       (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT));
#endif

#if 0
		pci_write_config_word(dev, msi_loc + PCI_MSI_FLAGS,
				      msg_ctl & ~PCI_MSI_FLAGS_ENABLE);
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
		irq = intline;

		printk("  forcing LSI interrupt on irq %d [0x%x]\n", irq, irq);
#endif
	}

	irq += 16;					/* offset for legacy */
	irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT;	/* merge the pid     */

	return irq;
}

static void __init
marvel_init_pci(void)
{
	struct io7 *io7;

	marvel_register_error_handlers();

	pci_probe_only = 1;
	common_init_pci();
	locate_and_init_vga(NULL);

	/* Clear any io7 errors.  */
	for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
		io7_clear_errors(io7);
}

static void __init
marvel_init_rtc(void)
{
	init_rtc_irq();
}

struct marvel_rtc_time {
	struct rtc_time *time;
	int retval;
};

#ifdef CONFIG_SMP
static void
smp_get_rtc_time(void *data)
{
	struct marvel_rtc_time *mrt = data;
	mrt->retval = __get_rtc_time(mrt->time);
}

static void
smp_set_rtc_time(void *data)
{
	struct marvel_rtc_time *mrt = data;
	mrt->retval = __set_rtc_time(mrt->time);
}
#endif

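/*
 * RTC accesses are funnelled to the boot CPU; a request issued on any
 * other CPU is forwarded there via smp_call_function_single().
 */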
static unsigned int
marvel_get_rtc_time(struct rtc_time *time)
{
#ifdef CONFIG_SMP
	struct marvel_rtc_time mrt;

	if (smp_processor_id() != boot_cpuid) {
		mrt.time = time;
		smp_call_function_single(boot_cpuid, smp_get_rtc_time, &mrt, 1);
		return mrt.retval;
	}
#endif
	return __get_rtc_time(time);
}

static int
marvel_set_rtc_time(struct rtc_time *time)
{
#ifdef CONFIG_SMP
	struct marvel_rtc_time mrt;

	if (smp_processor_id() != boot_cpuid) {
		mrt.time = time;
		smp_call_function_single(boot_cpuid, smp_set_rtc_time, &mrt, 1);
		return mrt.retval;
	}
#endif
	return __set_rtc_time(time);
}

static void
marvel_smp_callin(void)
{
	int cpuid = hard_smp_processor_id();
	struct io7 *io7 = marvel_find_io7(cpuid);
	unsigned int i;

	if (!io7)
		return;

	/*
	 * There is a local IO7 - redirect all of its interrupts here.
	 */
	printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid);

	/* Redirect the error IRQS here.  */
	io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid);

	/* Redirect the implemented LSIs here.  */
	for (i = 0; i < 0x60; ++i)
		io7_redirect_one_lsi(io7, i, cpuid);

	io7_redirect_one_lsi(io7, 0x74, cpuid);
	io7_redirect_one_lsi(io7, 0x75, cpuid);

	/* Redirect the MSIs here.  */
	for (i = 0; i < 16; ++i)
		io7_redirect_one_msi(io7, i, cpuid);
}

/*
 * System Vectors
 */
struct alpha_machine_vector marvel_ev7_mv __initmv = {
	.vector_name		= "MARVEL/EV7",
	DO_EV7_MMU,
	.rtc_port		= 0x70,
	.rtc_get_time		= marvel_get_rtc_time,
	.rtc_set_time		= marvel_set_rtc_time,
	DO_MARVEL_IO,
	.machine_check		= marvel_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= DEFAULT_MEM_BASE,
	.pci_dac_offset		= IO7_DAC_OFFSET,

	.nr_irqs		= MARVEL_NR_IRQS,
	.device_interrupt	= io7_device_interrupt,

	.agp_info		= marvel_agp_info,

	.smp_callin		= marvel_smp_callin,
	.init_arch		= marvel_init_arch,
	.init_irq		= marvel_init_irq,
	.init_rtc		= marvel_init_rtc,
	.init_pci		= marvel_init_pci,
	.kill_arch		= marvel_kill_arch,
	.pci_map_irq		= marvel_map_irq,
	.pci_swizzle		= common_swizzle,

	.pa_to_nid		= marvel_pa_to_nid,
	.cpuid_to_nid		= marvel_cpuid_to_nid,
	.node_mem_start		= marvel_node_mem_start,
	.node_mem_size		= marvel_node_mem_size,
};
ALIAS_MV(marvel_ev7)