1 /* 2 * Intel IO-APIC support for multi-Pentium hosts. 3 * 4 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo 5 * 6 * Many thanks to Stig Venaas for trying out countless experimental 7 * patches and reporting/debugging problems patiently! 8 * 9 * (c) 1999, Multiple IO-APIC support, developed by 10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and 11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>, 12 * further tested and cleaned up by Zach Brown <zab@redhat.com> 13 * and Ingo Molnar <mingo@redhat.com> 14 * 15 * Fixes 16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs; 17 * thanks to Eric Gilmore 18 * and Rolf G. Tews 19 * for testing these extensively 20 * Paul Diefenbaugh : Added full ACPI support 21 */ 22 23 #include <linux/mm.h> 24 #include <linux/interrupt.h> 25 #include <linux/init.h> 26 #include <linux/delay.h> 27 #include <linux/sched.h> 28 #include <linux/pci.h> 29 #include <linux/mc146818rtc.h> 30 #include <linux/compiler.h> 31 #include <linux/acpi.h> 32 #include <linux/module.h> 33 #include <linux/sysdev.h> 34 #include <linux/msi.h> 35 #include <linux/htirq.h> 36 #include <linux/freezer.h> 37 #include <linux/kthread.h> 38 #include <linux/jiffies.h> /* time_after() */ 39 #include <linux/slab.h> 40 #ifdef CONFIG_ACPI 41 #include <acpi/acpi_bus.h> 42 #endif 43 #include <linux/bootmem.h> 44 #include <linux/dmar.h> 45 #include <linux/hpet.h> 46 47 #include <asm/idle.h> 48 #include <asm/io.h> 49 #include <asm/smp.h> 50 #include <asm/cpu.h> 51 #include <asm/desc.h> 52 #include <asm/proto.h> 53 #include <asm/acpi.h> 54 #include <asm/dma.h> 55 #include <asm/timer.h> 56 #include <asm/i8259.h> 57 #include <asm/nmi.h> 58 #include <asm/msidef.h> 59 #include <asm/hypertransport.h> 60 #include <asm/setup.h> 61 #include <asm/irq_remapping.h> 62 #include <asm/hpet.h> 63 #include <asm/hw_irq.h> 64 65 #include <asm/apic.h> 66 67 #define __apicdebuginit(type) static type __init 68 #define for_each_irq_pin(entry, head) \ 69 for (entry = head; entry; entry = entry->next) 70 71 /* 72 * Is the SiS APIC rmw bug present ? 
73 * -1 = don't know, 0 = no, 1 = yes 74 */ 75 int sis_apic_bug = -1; 76 77 static DEFINE_RAW_SPINLOCK(ioapic_lock); 78 static DEFINE_RAW_SPINLOCK(vector_lock); 79 80 /* 81 * # of IRQ routing registers 82 */ 83 int nr_ioapic_registers[MAX_IO_APICS]; 84 85 /* I/O APIC entries */ 86 struct mpc_ioapic mp_ioapics[MAX_IO_APICS]; 87 int nr_ioapics; 88 89 /* IO APIC gsi routing info */ 90 struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS]; 91 92 /* The one past the highest gsi number used */ 93 u32 gsi_top; 94 95 /* MP IRQ source entries */ 96 struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES]; 97 98 /* # of MP IRQ source entries */ 99 int mp_irq_entries; 100 101 /* GSI interrupts */ 102 static int nr_irqs_gsi = NR_IRQS_LEGACY; 103 104 #if defined (CONFIG_MCA) || defined (CONFIG_EISA) 105 int mp_bus_id_to_type[MAX_MP_BUSSES]; 106 #endif 107 108 DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES); 109 110 int skip_ioapic_setup; 111 112 void arch_disable_smp_support(void) 113 { 114 #ifdef CONFIG_PCI 115 noioapicquirk = 1; 116 noioapicreroute = -1; 117 #endif 118 skip_ioapic_setup = 1; 119 } 120 121 static int __init parse_noapic(char *str) 122 { 123 /* disable IO-APIC */ 124 arch_disable_smp_support(); 125 return 0; 126 } 127 early_param("noapic", parse_noapic); 128 129 struct irq_pin_list { 130 int apic, pin; 131 struct irq_pin_list *next; 132 }; 133 134 static struct irq_pin_list *alloc_irq_pin_list(int node) 135 { 136 return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node); 137 } 138 139 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */ 140 #ifdef CONFIG_SPARSE_IRQ 141 static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY]; 142 #else 143 static struct irq_cfg irq_cfgx[NR_IRQS]; 144 #endif 145 146 int __init arch_early_irq_init(void) 147 { 148 struct irq_cfg *cfg; 149 int count, node, i; 150 151 if (!legacy_pic->nr_legacy_irqs) { 152 nr_irqs_gsi = 0; 153 io_apic_irqs = ~0UL; 154 } 155 156 cfg = irq_cfgx; 157 count = ARRAY_SIZE(irq_cfgx); 158 node = cpu_to_node(0); 159 160 /* Make sure the legacy interrupts are marked in the bitmap */ 161 irq_reserve_irqs(0, legacy_pic->nr_legacy_irqs); 162 163 for (i = 0; i < count; i++) { 164 set_irq_chip_data(i, &cfg[i]); 165 zalloc_cpumask_var_node(&cfg[i].domain, GFP_KERNEL, node); 166 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_KERNEL, node); 167 /* 168 * For legacy IRQ's, start with assigning irq0 to irq15 to 169 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0. 170 */ 171 if (i < legacy_pic->nr_legacy_irqs) { 172 cfg[i].vector = IRQ0_VECTOR + i; 173 cpumask_set_cpu(0, cfg[i].domain); 174 } 175 } 176 177 return 0; 178 } 179 180 #ifdef CONFIG_SPARSE_IRQ 181 static struct irq_cfg *irq_cfg(unsigned int irq) 182 { 183 return get_irq_chip_data(irq); 184 } 185 186 static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) 187 { 188 struct irq_cfg *cfg; 189 190 cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node); 191 if (!cfg) 192 return NULL; 193 if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) 194 goto out_cfg; 195 if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node)) 196 goto out_domain; 197 return cfg; 198 out_domain: 199 free_cpumask_var(cfg->domain); 200 out_cfg: 201 kfree(cfg); 202 return NULL; 203 } 204 205 static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) 206 { 207 if (!cfg) 208 return; 209 set_irq_chip_data(at, NULL); 210 free_cpumask_var(cfg->domain); 211 free_cpumask_var(cfg->old_domain); 212 kfree(cfg); 213 } 214 215 #else 216 217 struct irq_cfg *irq_cfg(unsigned int irq) 218 { 219 return irq < nr_irqs ? 
irq_cfgx + irq : NULL; 220 } 221 222 static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node) 223 { 224 return irq_cfgx + irq; 225 } 226 227 static inline void free_irq_cfg(unsigned int at, struct irq_cfg *cfg) { } 228 229 #endif 230 231 static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node) 232 { 233 int res = irq_alloc_desc_at(at, node); 234 struct irq_cfg *cfg; 235 236 if (res < 0) { 237 if (res != -EEXIST) 238 return NULL; 239 cfg = get_irq_chip_data(at); 240 if (cfg) 241 return cfg; 242 } 243 244 cfg = alloc_irq_cfg(at, node); 245 if (cfg) 246 set_irq_chip_data(at, cfg); 247 else 248 irq_free_desc(at); 249 return cfg; 250 } 251 252 static int alloc_irq_from(unsigned int from, int node) 253 { 254 return irq_alloc_desc_from(from, node); 255 } 256 257 static void free_irq_at(unsigned int at, struct irq_cfg *cfg) 258 { 259 free_irq_cfg(at, cfg); 260 irq_free_desc(at); 261 } 262 263 struct io_apic { 264 unsigned int index; 265 unsigned int unused[3]; 266 unsigned int data; 267 unsigned int unused2[11]; 268 unsigned int eoi; 269 }; 270 271 static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx) 272 { 273 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx) 274 + (mp_ioapics[idx].apicaddr & ~PAGE_MASK); 275 } 276 277 static inline void io_apic_eoi(unsigned int apic, unsigned int vector) 278 { 279 struct io_apic __iomem *io_apic = io_apic_base(apic); 280 writel(vector, &io_apic->eoi); 281 } 282 283 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) 284 { 285 struct io_apic __iomem *io_apic = io_apic_base(apic); 286 writel(reg, &io_apic->index); 287 return readl(&io_apic->data); 288 } 289 290 static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value) 291 { 292 struct io_apic __iomem *io_apic = io_apic_base(apic); 293 writel(reg, &io_apic->index); 294 writel(value, &io_apic->data); 295 } 296 297 /* 298 * Re-write a value: to be used for read-modify-write 299 * cycles where the read already set up the index register. 300 * 301 * Older SiS APIC requires we rewrite the index register 302 */ 303 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value) 304 { 305 struct io_apic __iomem *io_apic = io_apic_base(apic); 306 307 if (sis_apic_bug) 308 writel(reg, &io_apic->index); 309 writel(value, &io_apic->data); 310 } 311 312 static bool io_apic_level_ack_pending(struct irq_cfg *cfg) 313 { 314 struct irq_pin_list *entry; 315 unsigned long flags; 316 317 raw_spin_lock_irqsave(&ioapic_lock, flags); 318 for_each_irq_pin(entry, cfg->irq_2_pin) { 319 unsigned int reg; 320 int pin; 321 322 pin = entry->pin; 323 reg = io_apic_read(entry->apic, 0x10 + pin*2); 324 /* Is the remote IRR bit set? */ 325 if (reg & IO_APIC_REDIR_REMOTE_IRR) { 326 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 327 return true; 328 } 329 } 330 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 331 332 return false; 333 } 334 335 union entry_union { 336 struct { u32 w1, w2; }; 337 struct IO_APIC_route_entry entry; 338 }; 339 340 static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin) 341 { 342 union entry_union eu; 343 unsigned long flags; 344 raw_spin_lock_irqsave(&ioapic_lock, flags); 345 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); 346 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); 347 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 348 return eu.entry; 349 } 350 351 /* 352 * When we write a new IO APIC routing entry, we need to write the high 353 * word first! 
If the mask bit in the low word is clear, we will enable 354 * the interrupt, and we need to make sure the entry is fully populated 355 * before that happens. 356 */ 357 static void 358 __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 359 { 360 union entry_union eu = {{0, 0}}; 361 362 eu.entry = e; 363 io_apic_write(apic, 0x11 + 2*pin, eu.w2); 364 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 365 } 366 367 static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 368 { 369 unsigned long flags; 370 raw_spin_lock_irqsave(&ioapic_lock, flags); 371 __ioapic_write_entry(apic, pin, e); 372 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 373 } 374 375 /* 376 * When we mask an IO APIC routing entry, we need to write the low 377 * word first, in order to set the mask bit before we change the 378 * high bits! 379 */ 380 static void ioapic_mask_entry(int apic, int pin) 381 { 382 unsigned long flags; 383 union entry_union eu = { .entry.mask = 1 }; 384 385 raw_spin_lock_irqsave(&ioapic_lock, flags); 386 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 387 io_apic_write(apic, 0x11 + 2*pin, eu.w2); 388 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 389 } 390 391 /* 392 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are 393 * shared ISA-space IRQs, so we have to support them. We are super 394 * fast in the common case, and fast for shared ISA-space IRQs. 395 */ 396 static int 397 __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 398 { 399 struct irq_pin_list **last, *entry; 400 401 /* don't allow duplicates */ 402 last = &cfg->irq_2_pin; 403 for_each_irq_pin(entry, cfg->irq_2_pin) { 404 if (entry->apic == apic && entry->pin == pin) 405 return 0; 406 last = &entry->next; 407 } 408 409 entry = alloc_irq_pin_list(node); 410 if (!entry) { 411 printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n", 412 node, apic, pin); 413 return -ENOMEM; 414 } 415 entry->apic = apic; 416 entry->pin = pin; 417 418 *last = entry; 419 return 0; 420 } 421 422 static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin) 423 { 424 if (__add_pin_to_irq_node(cfg, node, apic, pin)) 425 panic("IO-APIC: failed to add irq-pin. Can not proceed\n"); 426 } 427 428 /* 429 * Reroute an IRQ to a different pin. 430 */ 431 static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node, 432 int oldapic, int oldpin, 433 int newapic, int newpin) 434 { 435 struct irq_pin_list *entry; 436 437 for_each_irq_pin(entry, cfg->irq_2_pin) { 438 if (entry->apic == oldapic && entry->pin == oldpin) { 439 entry->apic = newapic; 440 entry->pin = newpin; 441 /* every one is different, right? 
*/ 442 return; 443 } 444 } 445 446 /* old apic/pin didn't exist, so just add new ones */ 447 add_pin_to_irq_node(cfg, node, newapic, newpin); 448 } 449 450 static void __io_apic_modify_irq(struct irq_pin_list *entry, 451 int mask_and, int mask_or, 452 void (*final)(struct irq_pin_list *entry)) 453 { 454 unsigned int reg, pin; 455 456 pin = entry->pin; 457 reg = io_apic_read(entry->apic, 0x10 + pin * 2); 458 reg &= mask_and; 459 reg |= mask_or; 460 io_apic_modify(entry->apic, 0x10 + pin * 2, reg); 461 if (final) 462 final(entry); 463 } 464 465 static void io_apic_modify_irq(struct irq_cfg *cfg, 466 int mask_and, int mask_or, 467 void (*final)(struct irq_pin_list *entry)) 468 { 469 struct irq_pin_list *entry; 470 471 for_each_irq_pin(entry, cfg->irq_2_pin) 472 __io_apic_modify_irq(entry, mask_and, mask_or, final); 473 } 474 475 static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry) 476 { 477 __io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER, 478 IO_APIC_REDIR_MASKED, NULL); 479 } 480 481 static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry) 482 { 483 __io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED, 484 IO_APIC_REDIR_LEVEL_TRIGGER, NULL); 485 } 486 487 static void io_apic_sync(struct irq_pin_list *entry) 488 { 489 /* 490 * Synchronize the IO-APIC and the CPU by doing 491 * a dummy read from the IO-APIC 492 */ 493 struct io_apic __iomem *io_apic; 494 io_apic = io_apic_base(entry->apic); 495 readl(&io_apic->data); 496 } 497 498 static void mask_ioapic(struct irq_cfg *cfg) 499 { 500 unsigned long flags; 501 502 raw_spin_lock_irqsave(&ioapic_lock, flags); 503 io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync); 504 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 505 } 506 507 static void mask_ioapic_irq(struct irq_data *data) 508 { 509 mask_ioapic(data->chip_data); 510 } 511 512 static void __unmask_ioapic(struct irq_cfg *cfg) 513 { 514 io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL); 515 } 516 517 static void unmask_ioapic(struct irq_cfg *cfg) 518 { 519 unsigned long flags; 520 521 raw_spin_lock_irqsave(&ioapic_lock, flags); 522 __unmask_ioapic(cfg); 523 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 524 } 525 526 static void unmask_ioapic_irq(struct irq_data *data) 527 { 528 unmask_ioapic(data->chip_data); 529 } 530 531 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) 532 { 533 struct IO_APIC_route_entry entry; 534 535 /* Check delivery_mode to be sure we're not clearing an SMI pin */ 536 entry = ioapic_read_entry(apic, pin); 537 if (entry.delivery_mode == dest_SMI) 538 return; 539 /* 540 * Disable it in the IO-APIC irq-routing table: 541 */ 542 ioapic_mask_entry(apic, pin); 543 } 544 545 static void clear_IO_APIC (void) 546 { 547 int apic, pin; 548 549 for (apic = 0; apic < nr_ioapics; apic++) 550 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) 551 clear_IO_APIC_pin(apic, pin); 552 } 553 554 #ifdef CONFIG_X86_32 555 /* 556 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to 557 * specific CPU-side IRQs. 558 */ 559 560 #define MAX_PIRQS 8 561 static int pirq_entries[MAX_PIRQS] = { 562 [0 ... 
MAX_PIRQS - 1] = -1 563 }; 564 565 static int __init ioapic_pirq_setup(char *str) 566 { 567 int i, max; 568 int ints[MAX_PIRQS+1]; 569 570 get_options(str, ARRAY_SIZE(ints), ints); 571 572 apic_printk(APIC_VERBOSE, KERN_INFO 573 "PIRQ redirection, working around broken MP-BIOS.\n"); 574 max = MAX_PIRQS; 575 if (ints[0] < MAX_PIRQS) 576 max = ints[0]; 577 578 for (i = 0; i < max; i++) { 579 apic_printk(APIC_VERBOSE, KERN_DEBUG 580 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]); 581 /* 582 * PIRQs are mapped upside down, usually. 583 */ 584 pirq_entries[MAX_PIRQS-i-1] = ints[i+1]; 585 } 586 return 1; 587 } 588 589 __setup("pirq=", ioapic_pirq_setup); 590 #endif /* CONFIG_X86_32 */ 591 592 struct IO_APIC_route_entry **alloc_ioapic_entries(void) 593 { 594 int apic; 595 struct IO_APIC_route_entry **ioapic_entries; 596 597 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, 598 GFP_KERNEL); 599 if (!ioapic_entries) 600 return 0; 601 602 for (apic = 0; apic < nr_ioapics; apic++) { 603 ioapic_entries[apic] = 604 kzalloc(sizeof(struct IO_APIC_route_entry) * 605 nr_ioapic_registers[apic], GFP_KERNEL); 606 if (!ioapic_entries[apic]) 607 goto nomem; 608 } 609 610 return ioapic_entries; 611 612 nomem: 613 while (--apic >= 0) 614 kfree(ioapic_entries[apic]); 615 kfree(ioapic_entries); 616 617 return 0; 618 } 619 620 /* 621 * Saves all the IO-APIC RTE's 622 */ 623 int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) 624 { 625 int apic, pin; 626 627 if (!ioapic_entries) 628 return -ENOMEM; 629 630 for (apic = 0; apic < nr_ioapics; apic++) { 631 if (!ioapic_entries[apic]) 632 return -ENOMEM; 633 634 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) 635 ioapic_entries[apic][pin] = 636 ioapic_read_entry(apic, pin); 637 } 638 639 return 0; 640 } 641 642 /* 643 * Mask all IO APIC entries. 644 */ 645 void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) 646 { 647 int apic, pin; 648 649 if (!ioapic_entries) 650 return; 651 652 for (apic = 0; apic < nr_ioapics; apic++) { 653 if (!ioapic_entries[apic]) 654 break; 655 656 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 657 struct IO_APIC_route_entry entry; 658 659 entry = ioapic_entries[apic][pin]; 660 if (!entry.mask) { 661 entry.mask = 1; 662 ioapic_write_entry(apic, pin, entry); 663 } 664 } 665 } 666 } 667 668 /* 669 * Restore IO APIC entries which was saved in ioapic_entries. 670 */ 671 int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries) 672 { 673 int apic, pin; 674 675 if (!ioapic_entries) 676 return -ENOMEM; 677 678 for (apic = 0; apic < nr_ioapics; apic++) { 679 if (!ioapic_entries[apic]) 680 return -ENOMEM; 681 682 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) 683 ioapic_write_entry(apic, pin, 684 ioapic_entries[apic][pin]); 685 } 686 return 0; 687 } 688 689 void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries) 690 { 691 int apic; 692 693 for (apic = 0; apic < nr_ioapics; apic++) 694 kfree(ioapic_entries[apic]); 695 696 kfree(ioapic_entries); 697 } 698 699 /* 700 * Find the IRQ entry number of a certain pin. 
701 */ 702 static int find_irq_entry(int apic, int pin, int type) 703 { 704 int i; 705 706 for (i = 0; i < mp_irq_entries; i++) 707 if (mp_irqs[i].irqtype == type && 708 (mp_irqs[i].dstapic == mp_ioapics[apic].apicid || 709 mp_irqs[i].dstapic == MP_APIC_ALL) && 710 mp_irqs[i].dstirq == pin) 711 return i; 712 713 return -1; 714 } 715 716 /* 717 * Find the pin to which IRQ[irq] (ISA) is connected 718 */ 719 static int __init find_isa_irq_pin(int irq, int type) 720 { 721 int i; 722 723 for (i = 0; i < mp_irq_entries; i++) { 724 int lbus = mp_irqs[i].srcbus; 725 726 if (test_bit(lbus, mp_bus_not_pci) && 727 (mp_irqs[i].irqtype == type) && 728 (mp_irqs[i].srcbusirq == irq)) 729 730 return mp_irqs[i].dstirq; 731 } 732 return -1; 733 } 734 735 static int __init find_isa_irq_apic(int irq, int type) 736 { 737 int i; 738 739 for (i = 0; i < mp_irq_entries; i++) { 740 int lbus = mp_irqs[i].srcbus; 741 742 if (test_bit(lbus, mp_bus_not_pci) && 743 (mp_irqs[i].irqtype == type) && 744 (mp_irqs[i].srcbusirq == irq)) 745 break; 746 } 747 if (i < mp_irq_entries) { 748 int apic; 749 for(apic = 0; apic < nr_ioapics; apic++) { 750 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic) 751 return apic; 752 } 753 } 754 755 return -1; 756 } 757 758 #if defined(CONFIG_EISA) || defined(CONFIG_MCA) 759 /* 760 * EISA Edge/Level control register, ELCR 761 */ 762 static int EISA_ELCR(unsigned int irq) 763 { 764 if (irq < legacy_pic->nr_legacy_irqs) { 765 unsigned int port = 0x4d0 + (irq >> 3); 766 return (inb(port) >> (irq & 7)) & 1; 767 } 768 apic_printk(APIC_VERBOSE, KERN_INFO 769 "Broken MPtable reports ISA irq %d\n", irq); 770 return 0; 771 } 772 773 #endif 774 775 /* ISA interrupts are always polarity zero edge triggered, 776 * when listed as conforming in the MP table. */ 777 778 #define default_ISA_trigger(idx) (0) 779 #define default_ISA_polarity(idx) (0) 780 781 /* EISA interrupts are always polarity zero and can be edge or level 782 * trigger depending on the ELCR value. If an interrupt is listed as 783 * EISA conforming in the MP table, that means its trigger type must 784 * be read in from the ELCR */ 785 786 #define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].srcbusirq)) 787 #define default_EISA_polarity(idx) default_ISA_polarity(idx) 788 789 /* PCI interrupts are always polarity one level triggered, 790 * when listed as conforming in the MP table. */ 791 792 #define default_PCI_trigger(idx) (1) 793 #define default_PCI_polarity(idx) (1) 794 795 /* MCA interrupts are always polarity zero level triggered, 796 * when listed as conforming in the MP table. */ 797 798 #define default_MCA_trigger(idx) (1) 799 #define default_MCA_polarity(idx) default_ISA_polarity(idx) 800 801 static int MPBIOS_polarity(int idx) 802 { 803 int bus = mp_irqs[idx].srcbus; 804 int polarity; 805 806 /* 807 * Determine IRQ line polarity (high active or low active): 808 */ 809 switch (mp_irqs[idx].irqflag & 3) 810 { 811 case 0: /* conforms, ie. 
bus-type dependent polarity */ 812 if (test_bit(bus, mp_bus_not_pci)) 813 polarity = default_ISA_polarity(idx); 814 else 815 polarity = default_PCI_polarity(idx); 816 break; 817 case 1: /* high active */ 818 { 819 polarity = 0; 820 break; 821 } 822 case 2: /* reserved */ 823 { 824 printk(KERN_WARNING "broken BIOS!!\n"); 825 polarity = 1; 826 break; 827 } 828 case 3: /* low active */ 829 { 830 polarity = 1; 831 break; 832 } 833 default: /* invalid */ 834 { 835 printk(KERN_WARNING "broken BIOS!!\n"); 836 polarity = 1; 837 break; 838 } 839 } 840 return polarity; 841 } 842 843 static int MPBIOS_trigger(int idx) 844 { 845 int bus = mp_irqs[idx].srcbus; 846 int trigger; 847 848 /* 849 * Determine IRQ trigger mode (edge or level sensitive): 850 */ 851 switch ((mp_irqs[idx].irqflag>>2) & 3) 852 { 853 case 0: /* conforms, ie. bus-type dependent */ 854 if (test_bit(bus, mp_bus_not_pci)) 855 trigger = default_ISA_trigger(idx); 856 else 857 trigger = default_PCI_trigger(idx); 858 #if defined(CONFIG_EISA) || defined(CONFIG_MCA) 859 switch (mp_bus_id_to_type[bus]) { 860 case MP_BUS_ISA: /* ISA pin */ 861 { 862 /* set before the switch */ 863 break; 864 } 865 case MP_BUS_EISA: /* EISA pin */ 866 { 867 trigger = default_EISA_trigger(idx); 868 break; 869 } 870 case MP_BUS_PCI: /* PCI pin */ 871 { 872 /* set before the switch */ 873 break; 874 } 875 case MP_BUS_MCA: /* MCA pin */ 876 { 877 trigger = default_MCA_trigger(idx); 878 break; 879 } 880 default: 881 { 882 printk(KERN_WARNING "broken BIOS!!\n"); 883 trigger = 1; 884 break; 885 } 886 } 887 #endif 888 break; 889 case 1: /* edge */ 890 { 891 trigger = 0; 892 break; 893 } 894 case 2: /* reserved */ 895 { 896 printk(KERN_WARNING "broken BIOS!!\n"); 897 trigger = 1; 898 break; 899 } 900 case 3: /* level */ 901 { 902 trigger = 1; 903 break; 904 } 905 default: /* invalid */ 906 { 907 printk(KERN_WARNING "broken BIOS!!\n"); 908 trigger = 0; 909 break; 910 } 911 } 912 return trigger; 913 } 914 915 static inline int irq_polarity(int idx) 916 { 917 return MPBIOS_polarity(idx); 918 } 919 920 static inline int irq_trigger(int idx) 921 { 922 return MPBIOS_trigger(idx); 923 } 924 925 static int pin_2_irq(int idx, int apic, int pin) 926 { 927 int irq; 928 int bus = mp_irqs[idx].srcbus; 929 930 /* 931 * Debugging check, we are in big trouble if this message pops up! 932 */ 933 if (mp_irqs[idx].dstirq != pin) 934 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n"); 935 936 if (test_bit(bus, mp_bus_not_pci)) { 937 irq = mp_irqs[idx].srcbusirq; 938 } else { 939 u32 gsi = mp_gsi_routing[apic].gsi_base + pin; 940 941 if (gsi >= NR_IRQS_LEGACY) 942 irq = gsi; 943 else 944 irq = gsi_top + gsi; 945 } 946 947 #ifdef CONFIG_X86_32 948 /* 949 * PCI IRQ command line redirection. Yes, limits are hardcoded. 950 */ 951 if ((pin >= 16) && (pin <= 23)) { 952 if (pirq_entries[pin-16] != -1) { 953 if (!pirq_entries[pin-16]) { 954 apic_printk(APIC_VERBOSE, KERN_DEBUG 955 "disabling PIRQ%d\n", pin-16); 956 } else { 957 irq = pirq_entries[pin-16]; 958 apic_printk(APIC_VERBOSE, KERN_DEBUG 959 "using PIRQ%d -> IRQ %d\n", 960 pin-16, irq); 961 } 962 } 963 } 964 #endif 965 966 return irq; 967 } 968 969 /* 970 * Find a specific PCI IRQ entry. 
971 * Not an __init, possibly needed by modules 972 */ 973 int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin, 974 struct io_apic_irq_attr *irq_attr) 975 { 976 int apic, i, best_guess = -1; 977 978 apic_printk(APIC_DEBUG, 979 "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n", 980 bus, slot, pin); 981 if (test_bit(bus, mp_bus_not_pci)) { 982 apic_printk(APIC_VERBOSE, 983 "PCI BIOS passed nonexistent PCI bus %d!\n", bus); 984 return -1; 985 } 986 for (i = 0; i < mp_irq_entries; i++) { 987 int lbus = mp_irqs[i].srcbus; 988 989 for (apic = 0; apic < nr_ioapics; apic++) 990 if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic || 991 mp_irqs[i].dstapic == MP_APIC_ALL) 992 break; 993 994 if (!test_bit(lbus, mp_bus_not_pci) && 995 !mp_irqs[i].irqtype && 996 (bus == lbus) && 997 (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) { 998 int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq); 999 1000 if (!(apic || IO_APIC_IRQ(irq))) 1001 continue; 1002 1003 if (pin == (mp_irqs[i].srcbusirq & 3)) { 1004 set_io_apic_irq_attr(irq_attr, apic, 1005 mp_irqs[i].dstirq, 1006 irq_trigger(i), 1007 irq_polarity(i)); 1008 return irq; 1009 } 1010 /* 1011 * Use the first all-but-pin matching entry as a 1012 * best-guess fuzzy result for broken mptables. 1013 */ 1014 if (best_guess < 0) { 1015 set_io_apic_irq_attr(irq_attr, apic, 1016 mp_irqs[i].dstirq, 1017 irq_trigger(i), 1018 irq_polarity(i)); 1019 best_guess = irq; 1020 } 1021 } 1022 } 1023 return best_guess; 1024 } 1025 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); 1026 1027 void lock_vector_lock(void) 1028 { 1029 /* Used to the online set of cpus does not change 1030 * during assign_irq_vector. 1031 */ 1032 raw_spin_lock(&vector_lock); 1033 } 1034 1035 void unlock_vector_lock(void) 1036 { 1037 raw_spin_unlock(&vector_lock); 1038 } 1039 1040 static int 1041 __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) 1042 { 1043 /* 1044 * NOTE! The local APIC isn't very good at handling 1045 * multiple interrupts at the same interrupt level. 1046 * As the interrupt level is determined by taking the 1047 * vector number and shifting that right by 4, we 1048 * want to spread these out a bit so that they don't 1049 * all fall in the same interrupt level. 1050 * 1051 * Also, we've got to be careful not to trash gate 1052 * 0x80, because int 0x80 is hm, kind of importantish. ;) 1053 */ 1054 static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; 1055 static int current_offset = VECTOR_OFFSET_START % 8; 1056 unsigned int old_vector; 1057 int cpu, err; 1058 cpumask_var_t tmp_mask; 1059 1060 if (cfg->move_in_progress) 1061 return -EBUSY; 1062 1063 if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) 1064 return -ENOMEM; 1065 1066 old_vector = cfg->vector; 1067 if (old_vector) { 1068 cpumask_and(tmp_mask, mask, cpu_online_mask); 1069 cpumask_and(tmp_mask, cfg->domain, tmp_mask); 1070 if (!cpumask_empty(tmp_mask)) { 1071 free_cpumask_var(tmp_mask); 1072 return 0; 1073 } 1074 } 1075 1076 /* Only try and allocate irqs on cpus that are present */ 1077 err = -ENOSPC; 1078 for_each_cpu_and(cpu, mask, cpu_online_mask) { 1079 int new_cpu; 1080 int vector, offset; 1081 1082 apic->vector_allocation_domain(cpu, tmp_mask); 1083 1084 vector = current_vector; 1085 offset = current_offset; 1086 next: 1087 vector += 8; 1088 if (vector >= first_system_vector) { 1089 /* If out of vectors on large boxen, must share them. 
*/ 1090 offset = (offset + 1) % 8; 1091 vector = FIRST_EXTERNAL_VECTOR + offset; 1092 } 1093 if (unlikely(current_vector == vector)) 1094 continue; 1095 1096 if (test_bit(vector, used_vectors)) 1097 goto next; 1098 1099 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) 1100 if (per_cpu(vector_irq, new_cpu)[vector] != -1) 1101 goto next; 1102 /* Found one! */ 1103 current_vector = vector; 1104 current_offset = offset; 1105 if (old_vector) { 1106 cfg->move_in_progress = 1; 1107 cpumask_copy(cfg->old_domain, cfg->domain); 1108 } 1109 for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) 1110 per_cpu(vector_irq, new_cpu)[vector] = irq; 1111 cfg->vector = vector; 1112 cpumask_copy(cfg->domain, tmp_mask); 1113 err = 0; 1114 break; 1115 } 1116 free_cpumask_var(tmp_mask); 1117 return err; 1118 } 1119 1120 int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask) 1121 { 1122 int err; 1123 unsigned long flags; 1124 1125 raw_spin_lock_irqsave(&vector_lock, flags); 1126 err = __assign_irq_vector(irq, cfg, mask); 1127 raw_spin_unlock_irqrestore(&vector_lock, flags); 1128 return err; 1129 } 1130 1131 static void __clear_irq_vector(int irq, struct irq_cfg *cfg) 1132 { 1133 int cpu, vector; 1134 1135 BUG_ON(!cfg->vector); 1136 1137 vector = cfg->vector; 1138 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) 1139 per_cpu(vector_irq, cpu)[vector] = -1; 1140 1141 cfg->vector = 0; 1142 cpumask_clear(cfg->domain); 1143 1144 if (likely(!cfg->move_in_progress)) 1145 return; 1146 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { 1147 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1148 vector++) { 1149 if (per_cpu(vector_irq, cpu)[vector] != irq) 1150 continue; 1151 per_cpu(vector_irq, cpu)[vector] = -1; 1152 break; 1153 } 1154 } 1155 cfg->move_in_progress = 0; 1156 } 1157 1158 void __setup_vector_irq(int cpu) 1159 { 1160 /* Initialize vector_irq on a new cpu */ 1161 int irq, vector; 1162 struct irq_cfg *cfg; 1163 1164 /* 1165 * vector_lock will make sure that we don't run into irq vector 1166 * assignments that might be happening on another cpu in parallel, 1167 * while we setup our initial vector to irq mappings. 1168 */ 1169 raw_spin_lock(&vector_lock); 1170 /* Mark the inuse vectors */ 1171 for_each_active_irq(irq) { 1172 cfg = get_irq_chip_data(irq); 1173 if (!cfg) 1174 continue; 1175 /* 1176 * If it is a legacy IRQ handled by the legacy PIC, this cpu 1177 * will be part of the irq_cfg's domain. 
1178 */ 1179 if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq)) 1180 cpumask_set_cpu(cpu, cfg->domain); 1181 1182 if (!cpumask_test_cpu(cpu, cfg->domain)) 1183 continue; 1184 vector = cfg->vector; 1185 per_cpu(vector_irq, cpu)[vector] = irq; 1186 } 1187 /* Mark the free vectors */ 1188 for (vector = 0; vector < NR_VECTORS; ++vector) { 1189 irq = per_cpu(vector_irq, cpu)[vector]; 1190 if (irq < 0) 1191 continue; 1192 1193 cfg = irq_cfg(irq); 1194 if (!cpumask_test_cpu(cpu, cfg->domain)) 1195 per_cpu(vector_irq, cpu)[vector] = -1; 1196 } 1197 raw_spin_unlock(&vector_lock); 1198 } 1199 1200 static struct irq_chip ioapic_chip; 1201 static struct irq_chip ir_ioapic_chip; 1202 1203 #define IOAPIC_AUTO -1 1204 #define IOAPIC_EDGE 0 1205 #define IOAPIC_LEVEL 1 1206 1207 #ifdef CONFIG_X86_32 1208 static inline int IO_APIC_irq_trigger(int irq) 1209 { 1210 int apic, idx, pin; 1211 1212 for (apic = 0; apic < nr_ioapics; apic++) { 1213 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 1214 idx = find_irq_entry(apic, pin, mp_INT); 1215 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin))) 1216 return irq_trigger(idx); 1217 } 1218 } 1219 /* 1220 * nonexistent IRQs are edge default 1221 */ 1222 return 0; 1223 } 1224 #else 1225 static inline int IO_APIC_irq_trigger(int irq) 1226 { 1227 return 1; 1228 } 1229 #endif 1230 1231 static void ioapic_register_intr(unsigned int irq, unsigned long trigger) 1232 { 1233 1234 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1235 trigger == IOAPIC_LEVEL) 1236 irq_set_status_flags(irq, IRQ_LEVEL); 1237 else 1238 irq_clear_status_flags(irq, IRQ_LEVEL); 1239 1240 if (irq_remapped(get_irq_chip_data(irq))) { 1241 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 1242 if (trigger) 1243 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, 1244 handle_fasteoi_irq, 1245 "fasteoi"); 1246 else 1247 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip, 1248 handle_edge_irq, "edge"); 1249 return; 1250 } 1251 1252 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) || 1253 trigger == IOAPIC_LEVEL) 1254 set_irq_chip_and_handler_name(irq, &ioapic_chip, 1255 handle_fasteoi_irq, 1256 "fasteoi"); 1257 else 1258 set_irq_chip_and_handler_name(irq, &ioapic_chip, 1259 handle_edge_irq, "edge"); 1260 } 1261 1262 static int setup_ioapic_entry(int apic_id, int irq, 1263 struct IO_APIC_route_entry *entry, 1264 unsigned int destination, int trigger, 1265 int polarity, int vector, int pin) 1266 { 1267 /* 1268 * add it to the IO-APIC irq-routing table: 1269 */ 1270 memset(entry,0,sizeof(*entry)); 1271 1272 if (intr_remapping_enabled) { 1273 struct intel_iommu *iommu = map_ioapic_to_ir(apic_id); 1274 struct irte irte; 1275 struct IR_IO_APIC_route_entry *ir_entry = 1276 (struct IR_IO_APIC_route_entry *) entry; 1277 int index; 1278 1279 if (!iommu) 1280 panic("No mapping iommu for ioapic %d\n", apic_id); 1281 1282 index = alloc_irte(iommu, irq, 1); 1283 if (index < 0) 1284 panic("Failed to allocate IRTE for ioapic %d\n", apic_id); 1285 1286 prepare_irte(&irte, vector, destination); 1287 1288 /* Set source-id of interrupt request */ 1289 set_ioapic_sid(&irte, apic_id); 1290 1291 modify_irte(irq, &irte); 1292 1293 ir_entry->index2 = (index >> 15) & 0x1; 1294 ir_entry->zero = 0; 1295 ir_entry->format = 1; 1296 ir_entry->index = (index & 0x7fff); 1297 /* 1298 * IO-APIC RTE will be configured with virtual vector. 1299 * irq handler will do the explicit EOI to the io-apic. 
1300 */ 1301 ir_entry->vector = pin; 1302 } else { 1303 entry->delivery_mode = apic->irq_delivery_mode; 1304 entry->dest_mode = apic->irq_dest_mode; 1305 entry->dest = destination; 1306 entry->vector = vector; 1307 } 1308 1309 entry->mask = 0; /* enable IRQ */ 1310 entry->trigger = trigger; 1311 entry->polarity = polarity; 1312 1313 /* Mask level triggered irqs. 1314 * Use IRQ_DELAYED_DISABLE for edge triggered irqs. 1315 */ 1316 if (trigger) 1317 entry->mask = 1; 1318 return 0; 1319 } 1320 1321 static void setup_ioapic_irq(int apic_id, int pin, unsigned int irq, 1322 struct irq_cfg *cfg, int trigger, int polarity) 1323 { 1324 struct IO_APIC_route_entry entry; 1325 unsigned int dest; 1326 1327 if (!IO_APIC_IRQ(irq)) 1328 return; 1329 /* 1330 * For legacy irqs, cfg->domain starts with cpu 0 for legacy 1331 * controllers like 8259. Now that IO-APIC can handle this irq, update 1332 * the cfg->domain. 1333 */ 1334 if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain)) 1335 apic->vector_allocation_domain(0, cfg->domain); 1336 1337 if (assign_irq_vector(irq, cfg, apic->target_cpus())) 1338 return; 1339 1340 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 1341 1342 apic_printk(APIC_VERBOSE,KERN_DEBUG 1343 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> " 1344 "IRQ %d Mode:%i Active:%i)\n", 1345 apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector, 1346 irq, trigger, polarity); 1347 1348 1349 if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry, 1350 dest, trigger, polarity, cfg->vector, pin)) { 1351 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n", 1352 mp_ioapics[apic_id].apicid, pin); 1353 __clear_irq_vector(irq, cfg); 1354 return; 1355 } 1356 1357 ioapic_register_intr(irq, trigger); 1358 if (irq < legacy_pic->nr_legacy_irqs) 1359 legacy_pic->mask(irq); 1360 1361 ioapic_write_entry(apic_id, pin, entry); 1362 } 1363 1364 static struct { 1365 DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1); 1366 } mp_ioapic_routing[MAX_IO_APICS]; 1367 1368 static void __init setup_IO_APIC_irqs(void) 1369 { 1370 int apic_id, pin, idx, irq, notcon = 0; 1371 int node = cpu_to_node(0); 1372 struct irq_cfg *cfg; 1373 1374 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n"); 1375 1376 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) 1377 for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) { 1378 idx = find_irq_entry(apic_id, pin, mp_INT); 1379 if (idx == -1) { 1380 if (!notcon) { 1381 notcon = 1; 1382 apic_printk(APIC_VERBOSE, 1383 KERN_DEBUG " %d-%d", 1384 mp_ioapics[apic_id].apicid, pin); 1385 } else 1386 apic_printk(APIC_VERBOSE, " %d-%d", 1387 mp_ioapics[apic_id].apicid, pin); 1388 continue; 1389 } 1390 if (notcon) { 1391 apic_printk(APIC_VERBOSE, 1392 " (apicid-pin) not connected\n"); 1393 notcon = 0; 1394 } 1395 1396 irq = pin_2_irq(idx, apic_id, pin); 1397 1398 if ((apic_id > 0) && (irq > 16)) 1399 continue; 1400 1401 /* 1402 * Skip the timer IRQ if there's a quirk handler 1403 * installed and if it returns 1: 1404 */ 1405 if (apic->multi_timer_check && 1406 apic->multi_timer_check(apic_id, irq)) 1407 continue; 1408 1409 cfg = alloc_irq_and_cfg_at(irq, node); 1410 if (!cfg) 1411 continue; 1412 1413 add_pin_to_irq_node(cfg, node, apic_id, pin); 1414 /* 1415 * don't mark it in pin_programmed, so later acpi could 1416 * set it correctly when irq < 16 1417 */ 1418 setup_ioapic_irq(apic_id, pin, irq, cfg, irq_trigger(idx), 1419 irq_polarity(idx)); 1420 } 1421 1422 if (notcon) 1423 apic_printk(APIC_VERBOSE, 1424 " (apicid-pin) not 
connected\n"); 1425 } 1426 1427 /* 1428 * for the gsit that is not in first ioapic 1429 * but could not use acpi_register_gsi() 1430 * like some special sci in IBM x3330 1431 */ 1432 void setup_IO_APIC_irq_extra(u32 gsi) 1433 { 1434 int apic_id = 0, pin, idx, irq, node = cpu_to_node(0); 1435 struct irq_cfg *cfg; 1436 1437 /* 1438 * Convert 'gsi' to 'ioapic.pin'. 1439 */ 1440 apic_id = mp_find_ioapic(gsi); 1441 if (apic_id < 0) 1442 return; 1443 1444 pin = mp_find_ioapic_pin(apic_id, gsi); 1445 idx = find_irq_entry(apic_id, pin, mp_INT); 1446 if (idx == -1) 1447 return; 1448 1449 irq = pin_2_irq(idx, apic_id, pin); 1450 1451 /* Only handle the non legacy irqs on secondary ioapics */ 1452 if (apic_id == 0 || irq < NR_IRQS_LEGACY) 1453 return; 1454 1455 cfg = alloc_irq_and_cfg_at(irq, node); 1456 if (!cfg) 1457 return; 1458 1459 add_pin_to_irq_node(cfg, node, apic_id, pin); 1460 1461 if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) { 1462 pr_debug("Pin %d-%d already programmed\n", 1463 mp_ioapics[apic_id].apicid, pin); 1464 return; 1465 } 1466 set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed); 1467 1468 setup_ioapic_irq(apic_id, pin, irq, cfg, 1469 irq_trigger(idx), irq_polarity(idx)); 1470 } 1471 1472 /* 1473 * Set up the timer pin, possibly with the 8259A-master behind. 1474 */ 1475 static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, 1476 int vector) 1477 { 1478 struct IO_APIC_route_entry entry; 1479 1480 if (intr_remapping_enabled) 1481 return; 1482 1483 memset(&entry, 0, sizeof(entry)); 1484 1485 /* 1486 * We use logical delivery to get the timer IRQ 1487 * to the first CPU. 1488 */ 1489 entry.dest_mode = apic->irq_dest_mode; 1490 entry.mask = 0; /* don't mask IRQ for edge */ 1491 entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus()); 1492 entry.delivery_mode = apic->irq_delivery_mode; 1493 entry.polarity = 0; 1494 entry.trigger = 0; 1495 entry.vector = vector; 1496 1497 /* 1498 * The timer IRQ doesn't have to know that behind the 1499 * scene we may have a 8259A-master in AEOI mode ... 1500 */ 1501 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); 1502 1503 /* 1504 * Add it to the IO-APIC irq-routing table: 1505 */ 1506 ioapic_write_entry(apic_id, pin, entry); 1507 } 1508 1509 1510 __apicdebuginit(void) print_IO_APIC(void) 1511 { 1512 int apic, i; 1513 union IO_APIC_reg_00 reg_00; 1514 union IO_APIC_reg_01 reg_01; 1515 union IO_APIC_reg_02 reg_02; 1516 union IO_APIC_reg_03 reg_03; 1517 unsigned long flags; 1518 struct irq_cfg *cfg; 1519 unsigned int irq; 1520 1521 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries); 1522 for (i = 0; i < nr_ioapics; i++) 1523 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n", 1524 mp_ioapics[i].apicid, nr_ioapic_registers[i]); 1525 1526 /* 1527 * We are a bit conservative about what we expect. We have to 1528 * know about every hardware change ASAP. 1529 */ 1530 printk(KERN_INFO "testing the IO APIC.......................\n"); 1531 1532 for (apic = 0; apic < nr_ioapics; apic++) { 1533 1534 raw_spin_lock_irqsave(&ioapic_lock, flags); 1535 reg_00.raw = io_apic_read(apic, 0); 1536 reg_01.raw = io_apic_read(apic, 1); 1537 if (reg_01.bits.version >= 0x10) 1538 reg_02.raw = io_apic_read(apic, 2); 1539 if (reg_01.bits.version >= 0x20) 1540 reg_03.raw = io_apic_read(apic, 3); 1541 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 1542 1543 printk("\n"); 1544 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); 1545 printk(KERN_DEBUG ".... 
register #00: %08X\n", reg_00.raw); 1546 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID); 1547 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type); 1548 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS); 1549 1550 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01); 1551 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries); 1552 1553 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ); 1554 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version); 1555 1556 /* 1557 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02, 1558 * but the value of reg_02 is read as the previous read register 1559 * value, so ignore it if reg_02 == reg_01. 1560 */ 1561 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) { 1562 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw); 1563 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration); 1564 } 1565 1566 /* 1567 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02 1568 * or reg_03, but the value of reg_0[23] is read as the previous read 1569 * register value, so ignore it if reg_03 == reg_0[12]. 1570 */ 1571 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw && 1572 reg_03.raw != reg_01.raw) { 1573 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw); 1574 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT); 1575 } 1576 1577 printk(KERN_DEBUG ".... IRQ redirection table:\n"); 1578 1579 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol" 1580 " Stat Dmod Deli Vect:\n"); 1581 1582 for (i = 0; i <= reg_01.bits.entries; i++) { 1583 struct IO_APIC_route_entry entry; 1584 1585 entry = ioapic_read_entry(apic, i); 1586 1587 printk(KERN_DEBUG " %02x %03X ", 1588 i, 1589 entry.dest 1590 ); 1591 1592 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n", 1593 entry.mask, 1594 entry.trigger, 1595 entry.irr, 1596 entry.polarity, 1597 entry.delivery_status, 1598 entry.dest_mode, 1599 entry.delivery_mode, 1600 entry.vector 1601 ); 1602 } 1603 } 1604 printk(KERN_DEBUG "IRQ to pin mappings:\n"); 1605 for_each_active_irq(irq) { 1606 struct irq_pin_list *entry; 1607 1608 cfg = get_irq_chip_data(irq); 1609 if (!cfg) 1610 continue; 1611 entry = cfg->irq_2_pin; 1612 if (!entry) 1613 continue; 1614 printk(KERN_DEBUG "IRQ%d ", irq); 1615 for_each_irq_pin(entry, cfg->irq_2_pin) 1616 printk("-> %d:%d", entry->apic, entry->pin); 1617 printk("\n"); 1618 } 1619 1620 printk(KERN_INFO ".................................... done.\n"); 1621 1622 return; 1623 } 1624 1625 __apicdebuginit(void) print_APIC_field(int base) 1626 { 1627 int i; 1628 1629 printk(KERN_DEBUG); 1630 1631 for (i = 0; i < 8; i++) 1632 printk(KERN_CONT "%08x", apic_read(base + i*0x10)); 1633 1634 printk(KERN_CONT "\n"); 1635 } 1636 1637 __apicdebuginit(void) print_local_APIC(void *dummy) 1638 { 1639 unsigned int i, v, ver, maxlvt; 1640 u64 icr; 1641 1642 printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n", 1643 smp_processor_id(), hard_smp_processor_id()); 1644 v = apic_read(APIC_ID); 1645 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id()); 1646 v = apic_read(APIC_LVR); 1647 printk(KERN_INFO "... APIC VERSION: %08x\n", v); 1648 ver = GET_APIC_VERSION(v); 1649 maxlvt = lapic_get_maxlvt(); 1650 1651 v = apic_read(APIC_TASKPRI); 1652 printk(KERN_DEBUG "... 
APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); 1653 1654 if (APIC_INTEGRATED(ver)) { /* !82489DX */ 1655 if (!APIC_XAPIC(ver)) { 1656 v = apic_read(APIC_ARBPRI); 1657 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v, 1658 v & APIC_ARBPRI_MASK); 1659 } 1660 v = apic_read(APIC_PROCPRI); 1661 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v); 1662 } 1663 1664 /* 1665 * Remote read supported only in the 82489DX and local APIC for 1666 * Pentium processors. 1667 */ 1668 if (!APIC_INTEGRATED(ver) || maxlvt == 3) { 1669 v = apic_read(APIC_RRR); 1670 printk(KERN_DEBUG "... APIC RRR: %08x\n", v); 1671 } 1672 1673 v = apic_read(APIC_LDR); 1674 printk(KERN_DEBUG "... APIC LDR: %08x\n", v); 1675 if (!x2apic_enabled()) { 1676 v = apic_read(APIC_DFR); 1677 printk(KERN_DEBUG "... APIC DFR: %08x\n", v); 1678 } 1679 v = apic_read(APIC_SPIV); 1680 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v); 1681 1682 printk(KERN_DEBUG "... APIC ISR field:\n"); 1683 print_APIC_field(APIC_ISR); 1684 printk(KERN_DEBUG "... APIC TMR field:\n"); 1685 print_APIC_field(APIC_TMR); 1686 printk(KERN_DEBUG "... APIC IRR field:\n"); 1687 print_APIC_field(APIC_IRR); 1688 1689 if (APIC_INTEGRATED(ver)) { /* !82489DX */ 1690 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ 1691 apic_write(APIC_ESR, 0); 1692 1693 v = apic_read(APIC_ESR); 1694 printk(KERN_DEBUG "... APIC ESR: %08x\n", v); 1695 } 1696 1697 icr = apic_icr_read(); 1698 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr); 1699 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32)); 1700 1701 v = apic_read(APIC_LVTT); 1702 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v); 1703 1704 if (maxlvt > 3) { /* PC is LVT#4. */ 1705 v = apic_read(APIC_LVTPC); 1706 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v); 1707 } 1708 v = apic_read(APIC_LVT0); 1709 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v); 1710 v = apic_read(APIC_LVT1); 1711 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v); 1712 1713 if (maxlvt > 2) { /* ERR is LVT#3. */ 1714 v = apic_read(APIC_LVTERR); 1715 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v); 1716 } 1717 1718 v = apic_read(APIC_TMICT); 1719 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v); 1720 v = apic_read(APIC_TMCCT); 1721 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v); 1722 v = apic_read(APIC_TDCR); 1723 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v); 1724 1725 if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { 1726 v = apic_read(APIC_EFEAT); 1727 maxlvt = (v >> 16) & 0xff; 1728 printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v); 1729 v = apic_read(APIC_ECTRL); 1730 printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v); 1731 for (i = 0; i < maxlvt; i++) { 1732 v = apic_read(APIC_EILVTn(i)); 1733 printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v); 1734 } 1735 } 1736 printk("\n"); 1737 } 1738 1739 __apicdebuginit(void) print_local_APICs(int maxcpu) 1740 { 1741 int cpu; 1742 1743 if (!maxcpu) 1744 return; 1745 1746 preempt_disable(); 1747 for_each_online_cpu(cpu) { 1748 if (cpu >= maxcpu) 1749 break; 1750 smp_call_function_single(cpu, print_local_APIC, NULL, 1); 1751 } 1752 preempt_enable(); 1753 } 1754 1755 __apicdebuginit(void) print_PIC(void) 1756 { 1757 unsigned int v; 1758 unsigned long flags; 1759 1760 if (!legacy_pic->nr_legacy_irqs) 1761 return; 1762 1763 printk(KERN_DEBUG "\nprinting PIC contents\n"); 1764 1765 raw_spin_lock_irqsave(&i8259A_lock, flags); 1766 1767 v = inb(0xa1) << 8 | inb(0x21); 1768 printk(KERN_DEBUG "... PIC IMR: %04x\n", v); 1769 1770 v = inb(0xa0) << 8 | inb(0x20); 1771 printk(KERN_DEBUG "... 
PIC IRR: %04x\n", v); 1772 1773 outb(0x0b,0xa0); 1774 outb(0x0b,0x20); 1775 v = inb(0xa0) << 8 | inb(0x20); 1776 outb(0x0a,0xa0); 1777 outb(0x0a,0x20); 1778 1779 raw_spin_unlock_irqrestore(&i8259A_lock, flags); 1780 1781 printk(KERN_DEBUG "... PIC ISR: %04x\n", v); 1782 1783 v = inb(0x4d1) << 8 | inb(0x4d0); 1784 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v); 1785 } 1786 1787 static int __initdata show_lapic = 1; 1788 static __init int setup_show_lapic(char *arg) 1789 { 1790 int num = -1; 1791 1792 if (strcmp(arg, "all") == 0) { 1793 show_lapic = CONFIG_NR_CPUS; 1794 } else { 1795 get_option(&arg, &num); 1796 if (num >= 0) 1797 show_lapic = num; 1798 } 1799 1800 return 1; 1801 } 1802 __setup("show_lapic=", setup_show_lapic); 1803 1804 __apicdebuginit(int) print_ICs(void) 1805 { 1806 if (apic_verbosity == APIC_QUIET) 1807 return 0; 1808 1809 print_PIC(); 1810 1811 /* don't print out if apic is not there */ 1812 if (!cpu_has_apic && !apic_from_smp_config()) 1813 return 0; 1814 1815 print_local_APICs(show_lapic); 1816 print_IO_APIC(); 1817 1818 return 0; 1819 } 1820 1821 fs_initcall(print_ICs); 1822 1823 1824 /* Where if anywhere is the i8259 connect in external int mode */ 1825 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 }; 1826 1827 void __init enable_IO_APIC(void) 1828 { 1829 int i8259_apic, i8259_pin; 1830 int apic; 1831 1832 if (!legacy_pic->nr_legacy_irqs) 1833 return; 1834 1835 for(apic = 0; apic < nr_ioapics; apic++) { 1836 int pin; 1837 /* See if any of the pins is in ExtINT mode */ 1838 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) { 1839 struct IO_APIC_route_entry entry; 1840 entry = ioapic_read_entry(apic, pin); 1841 1842 /* If the interrupt line is enabled and in ExtInt mode 1843 * I have found the pin where the i8259 is connected. 1844 */ 1845 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) { 1846 ioapic_i8259.apic = apic; 1847 ioapic_i8259.pin = pin; 1848 goto found_i8259; 1849 } 1850 } 1851 } 1852 found_i8259: 1853 /* Look to see what if the MP table has reported the ExtINT */ 1854 /* If we could not find the appropriate pin by looking at the ioapic 1855 * the i8259 probably is not connected the ioapic but give the 1856 * mptable a chance anyway. 1857 */ 1858 i8259_pin = find_isa_irq_pin(0, mp_ExtINT); 1859 i8259_apic = find_isa_irq_apic(0, mp_ExtINT); 1860 /* Trust the MP table if nothing is setup in the hardware */ 1861 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) { 1862 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n"); 1863 ioapic_i8259.pin = i8259_pin; 1864 ioapic_i8259.apic = i8259_apic; 1865 } 1866 /* Complain if the MP table and the hardware disagree */ 1867 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) && 1868 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) 1869 { 1870 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n"); 1871 } 1872 1873 /* 1874 * Do not trust the IO-APIC being empty at bootup 1875 */ 1876 clear_IO_APIC(); 1877 } 1878 1879 /* 1880 * Not an __init, needed by the reboot code 1881 */ 1882 void disable_IO_APIC(void) 1883 { 1884 /* 1885 * Clear the IO-APIC before rebooting: 1886 */ 1887 clear_IO_APIC(); 1888 1889 if (!legacy_pic->nr_legacy_irqs) 1890 return; 1891 1892 /* 1893 * If the i8259 is routed through an IOAPIC 1894 * Put that IOAPIC in virtual wire mode 1895 * so legacy interrupts can be delivered. 
1896 * 1897 * With interrupt-remapping, for now we will use virtual wire A mode, 1898 * as virtual wire B is little complex (need to configure both 1899 * IOAPIC RTE aswell as interrupt-remapping table entry). 1900 * As this gets called during crash dump, keep this simple for now. 1901 */ 1902 if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) { 1903 struct IO_APIC_route_entry entry; 1904 1905 memset(&entry, 0, sizeof(entry)); 1906 entry.mask = 0; /* Enabled */ 1907 entry.trigger = 0; /* Edge */ 1908 entry.irr = 0; 1909 entry.polarity = 0; /* High */ 1910 entry.delivery_status = 0; 1911 entry.dest_mode = 0; /* Physical */ 1912 entry.delivery_mode = dest_ExtINT; /* ExtInt */ 1913 entry.vector = 0; 1914 entry.dest = read_apic_id(); 1915 1916 /* 1917 * Add it to the IO-APIC irq-routing table: 1918 */ 1919 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry); 1920 } 1921 1922 /* 1923 * Use virtual wire A mode when interrupt remapping is enabled. 1924 */ 1925 if (cpu_has_apic || apic_from_smp_config()) 1926 disconnect_bsp_APIC(!intr_remapping_enabled && 1927 ioapic_i8259.pin != -1); 1928 } 1929 1930 #ifdef CONFIG_X86_32 1931 /* 1932 * function to set the IO-APIC physical IDs based on the 1933 * values stored in the MPC table. 1934 * 1935 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999 1936 */ 1937 1938 void __init setup_ioapic_ids_from_mpc(void) 1939 { 1940 union IO_APIC_reg_00 reg_00; 1941 physid_mask_t phys_id_present_map; 1942 int apic_id; 1943 int i; 1944 unsigned char old_id; 1945 unsigned long flags; 1946 1947 if (acpi_ioapic) 1948 return; 1949 /* 1950 * Don't check I/O APIC IDs for xAPIC systems. They have 1951 * no meaning without the serial APIC bus. 1952 */ 1953 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) 1954 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 1955 return; 1956 /* 1957 * This is broken; anything with a real cpu count has to 1958 * circumvent this idiocy regardless. 1959 */ 1960 apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map); 1961 1962 /* 1963 * Set the IOAPIC ID to the value stored in the MPC table. 1964 */ 1965 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { 1966 1967 /* Read the register 0 value */ 1968 raw_spin_lock_irqsave(&ioapic_lock, flags); 1969 reg_00.raw = io_apic_read(apic_id, 0); 1970 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 1971 1972 old_id = mp_ioapics[apic_id].apicid; 1973 1974 if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) { 1975 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n", 1976 apic_id, mp_ioapics[apic_id].apicid); 1977 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n", 1978 reg_00.bits.ID); 1979 mp_ioapics[apic_id].apicid = reg_00.bits.ID; 1980 } 1981 1982 /* 1983 * Sanity check, is the ID really free? Every APIC in a 1984 * system must have a unique ID or we get lots of nice 1985 * 'stuck on smp_invalidate_needed IPI wait' messages. 1986 */ 1987 if (apic->check_apicid_used(&phys_id_present_map, 1988 mp_ioapics[apic_id].apicid)) { 1989 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n", 1990 apic_id, mp_ioapics[apic_id].apicid); 1991 for (i = 0; i < get_physical_broadcast(); i++) 1992 if (!physid_isset(i, phys_id_present_map)) 1993 break; 1994 if (i >= get_physical_broadcast()) 1995 panic("Max APIC ID exceeded!\n"); 1996 printk(KERN_ERR "... fixing up to %d. 
(tell your hw vendor)\n", 1997 i); 1998 physid_set(i, phys_id_present_map); 1999 mp_ioapics[apic_id].apicid = i; 2000 } else { 2001 physid_mask_t tmp; 2002 apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp); 2003 apic_printk(APIC_VERBOSE, "Setting %d in the " 2004 "phys_id_present_map\n", 2005 mp_ioapics[apic_id].apicid); 2006 physids_or(phys_id_present_map, phys_id_present_map, tmp); 2007 } 2008 2009 2010 /* 2011 * We need to adjust the IRQ routing table 2012 * if the ID changed. 2013 */ 2014 if (old_id != mp_ioapics[apic_id].apicid) 2015 for (i = 0; i < mp_irq_entries; i++) 2016 if (mp_irqs[i].dstapic == old_id) 2017 mp_irqs[i].dstapic 2018 = mp_ioapics[apic_id].apicid; 2019 2020 /* 2021 * Read the right value from the MPC table and 2022 * write it into the ID register. 2023 */ 2024 apic_printk(APIC_VERBOSE, KERN_INFO 2025 "...changing IO-APIC physical APIC ID to %d ...", 2026 mp_ioapics[apic_id].apicid); 2027 2028 reg_00.bits.ID = mp_ioapics[apic_id].apicid; 2029 raw_spin_lock_irqsave(&ioapic_lock, flags); 2030 io_apic_write(apic_id, 0, reg_00.raw); 2031 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2032 2033 /* 2034 * Sanity check 2035 */ 2036 raw_spin_lock_irqsave(&ioapic_lock, flags); 2037 reg_00.raw = io_apic_read(apic_id, 0); 2038 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2039 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) 2040 printk("could not set ID!\n"); 2041 else 2042 apic_printk(APIC_VERBOSE, " ok.\n"); 2043 } 2044 } 2045 #endif 2046 2047 int no_timer_check __initdata; 2048 2049 static int __init notimercheck(char *s) 2050 { 2051 no_timer_check = 1; 2052 return 1; 2053 } 2054 __setup("no_timer_check", notimercheck); 2055 2056 /* 2057 * There is a nasty bug in some older SMP boards, their mptable lies 2058 * about the timer IRQ. We do the following to work around the situation: 2059 * 2060 * - timer IRQ defaults to IO-APIC IRQ 2061 * - if this function detects that timer IRQs are defunct, then we fall 2062 * back to ISA timer IRQs 2063 */ 2064 static int __init timer_irq_works(void) 2065 { 2066 unsigned long t1 = jiffies; 2067 unsigned long flags; 2068 2069 if (no_timer_check) 2070 return 1; 2071 2072 local_save_flags(flags); 2073 local_irq_enable(); 2074 /* Let ten ticks pass... */ 2075 mdelay((10 * 1000) / HZ); 2076 local_irq_restore(flags); 2077 2078 /* 2079 * Expect a few ticks at least, to be sure some possible 2080 * glue logic does not lock up after one or two first 2081 * ticks in a non-ExtINT mode. Also the local APIC 2082 * might have cached one ExtINT interrupt. Finally, at 2083 * least one tick may be lost due to delays. 2084 */ 2085 2086 /* jiffies wrap? */ 2087 if (time_after(jiffies, t1 + 4)) 2088 return 1; 2089 return 0; 2090 } 2091 2092 /* 2093 * In the SMP+IOAPIC case it might happen that there are an unspecified 2094 * number of pending IRQ events unhandled. These cases are very rare, 2095 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much 2096 * better to do it this way as thus we do not have to be aware of 2097 * 'pending' interrupts in the IRQ path, except at this point. 2098 */ 2099 /* 2100 * Edge triggered needs to resend any interrupt 2101 * that was delayed but this is now handled in the device 2102 * independent code. 2103 */ 2104 2105 /* 2106 * Starting up a edge-triggered IO-APIC interrupt is 2107 * nasty - we need to make sure that we get the edge. 2108 * If it is already asserted for some reason, we need 2109 * return 1 to indicate that is was pending. 
2110 * 2111 * This is not complete - we should be able to fake 2112 * an edge even if it isn't on the 8259A... 2113 */ 2114 2115 static unsigned int startup_ioapic_irq(struct irq_data *data) 2116 { 2117 int was_pending = 0, irq = data->irq; 2118 unsigned long flags; 2119 2120 raw_spin_lock_irqsave(&ioapic_lock, flags); 2121 if (irq < legacy_pic->nr_legacy_irqs) { 2122 legacy_pic->mask(irq); 2123 if (legacy_pic->irq_pending(irq)) 2124 was_pending = 1; 2125 } 2126 __unmask_ioapic(data->chip_data); 2127 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2128 2129 return was_pending; 2130 } 2131 2132 static int ioapic_retrigger_irq(struct irq_data *data) 2133 { 2134 struct irq_cfg *cfg = data->chip_data; 2135 unsigned long flags; 2136 2137 raw_spin_lock_irqsave(&vector_lock, flags); 2138 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2139 raw_spin_unlock_irqrestore(&vector_lock, flags); 2140 2141 return 1; 2142 } 2143 2144 /* 2145 * Level and edge triggered IO-APIC interrupts need different handling, 2146 * so we use two separate IRQ descriptors. Edge triggered IRQs can be 2147 * handled with the level-triggered descriptor, but that one has slightly 2148 * more overhead. Level-triggered interrupts cannot be handled with the 2149 * edge-triggered handler, without risking IRQ storms and other ugly 2150 * races. 2151 */ 2152 2153 #ifdef CONFIG_SMP 2154 void send_cleanup_vector(struct irq_cfg *cfg) 2155 { 2156 cpumask_var_t cleanup_mask; 2157 2158 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { 2159 unsigned int i; 2160 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask) 2161 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR); 2162 } else { 2163 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask); 2164 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); 2165 free_cpumask_var(cleanup_mask); 2166 } 2167 cfg->move_in_progress = 0; 2168 } 2169 2170 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg) 2171 { 2172 int apic, pin; 2173 struct irq_pin_list *entry; 2174 u8 vector = cfg->vector; 2175 2176 for_each_irq_pin(entry, cfg->irq_2_pin) { 2177 unsigned int reg; 2178 2179 apic = entry->apic; 2180 pin = entry->pin; 2181 /* 2182 * With interrupt-remapping, destination information comes 2183 * from interrupt-remapping table entry. 2184 */ 2185 if (!irq_remapped(cfg)) 2186 io_apic_write(apic, 0x11 + pin*2, dest); 2187 reg = io_apic_read(apic, 0x10 + pin*2); 2188 reg &= ~IO_APIC_REDIR_VECTOR_MASK; 2189 reg |= vector; 2190 io_apic_modify(apic, 0x10 + pin*2, reg); 2191 } 2192 } 2193 2194 /* 2195 * Either sets data->affinity to a valid value, and returns 2196 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and 2197 * leaves data->affinity untouched. 
2198 */ 2199 int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2200 unsigned int *dest_id) 2201 { 2202 struct irq_cfg *cfg = data->chip_data; 2203 2204 if (!cpumask_intersects(mask, cpu_online_mask)) 2205 return -1; 2206 2207 if (assign_irq_vector(data->irq, data->chip_data, mask)) 2208 return -1; 2209 2210 cpumask_copy(data->affinity, mask); 2211 2212 *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain); 2213 return 0; 2214 } 2215 2216 static int 2217 ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2218 bool force) 2219 { 2220 unsigned int dest, irq = data->irq; 2221 unsigned long flags; 2222 int ret; 2223 2224 raw_spin_lock_irqsave(&ioapic_lock, flags); 2225 ret = __ioapic_set_affinity(data, mask, &dest); 2226 if (!ret) { 2227 /* Only the high 8 bits are valid. */ 2228 dest = SET_APIC_LOGICAL_ID(dest); 2229 __target_IO_APIC_irq(irq, dest, data->chip_data); 2230 } 2231 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2232 return ret; 2233 } 2234 2235 #ifdef CONFIG_INTR_REMAP 2236 2237 /* 2238 * Migrate the IO-APIC irq in the presence of intr-remapping. 2239 * 2240 * For both level and edge triggered, irq migration is a simple atomic 2241 * update(of vector and cpu destination) of IRTE and flush the hardware cache. 2242 * 2243 * For level triggered, we eliminate the io-apic RTE modification (with the 2244 * updated vector information), by using a virtual vector (io-apic pin number). 2245 * Real vector that is used for interrupting cpu will be coming from 2246 * the interrupt-remapping table entry. 2247 */ 2248 static int 2249 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2250 bool force) 2251 { 2252 struct irq_cfg *cfg = data->chip_data; 2253 unsigned int dest, irq = data->irq; 2254 struct irte irte; 2255 2256 if (!cpumask_intersects(mask, cpu_online_mask)) 2257 return -EINVAL; 2258 2259 if (get_irte(irq, &irte)) 2260 return -EBUSY; 2261 2262 if (assign_irq_vector(irq, cfg, mask)) 2263 return -EBUSY; 2264 2265 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask); 2266 2267 irte.vector = cfg->vector; 2268 irte.dest_id = IRTE_DEST(dest); 2269 2270 /* 2271 * Modified the IRTE and flushes the Interrupt entry cache. 2272 */ 2273 modify_irte(irq, &irte); 2274 2275 if (cfg->move_in_progress) 2276 send_cleanup_vector(cfg); 2277 2278 cpumask_copy(data->affinity, mask); 2279 return 0; 2280 } 2281 2282 #else 2283 static inline int 2284 ir_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, 2285 bool force) 2286 { 2287 return 0; 2288 } 2289 #endif 2290 2291 asmlinkage void smp_irq_move_cleanup_interrupt(void) 2292 { 2293 unsigned vector, me; 2294 2295 ack_APIC_irq(); 2296 exit_idle(); 2297 irq_enter(); 2298 2299 me = smp_processor_id(); 2300 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { 2301 unsigned int irq; 2302 unsigned int irr; 2303 struct irq_desc *desc; 2304 struct irq_cfg *cfg; 2305 irq = __get_cpu_var(vector_irq)[vector]; 2306 2307 if (irq == -1) 2308 continue; 2309 2310 desc = irq_to_desc(irq); 2311 if (!desc) 2312 continue; 2313 2314 cfg = irq_cfg(irq); 2315 raw_spin_lock(&desc->lock); 2316 2317 /* 2318 * Check if the irq migration is in progress. If so, we 2319 * haven't received the cleanup request yet for this irq. 
2320 */ 2321 if (cfg->move_in_progress) 2322 goto unlock; 2323 2324 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2325 goto unlock; 2326 2327 irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); 2328 /* 2329 * Check if the vector that needs to be cleaned up is 2330 * registered in the cpu's IRR. If so, then this is not 2331 * the best time to clean it up. Let's clean it up on the 2332 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR 2333 * to ourselves. 2334 */ 2335 if (irr & (1 << (vector % 32))) { 2336 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR); 2337 goto unlock; 2338 } 2339 __get_cpu_var(vector_irq)[vector] = -1; 2340 unlock: 2341 raw_spin_unlock(&desc->lock); 2342 } 2343 2344 irq_exit(); 2345 } 2346 2347 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) 2348 { 2349 unsigned me; 2350 2351 if (likely(!cfg->move_in_progress)) 2352 return; 2353 2354 me = smp_processor_id(); 2355 2356 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2357 send_cleanup_vector(cfg); 2358 } 2359 2360 static void irq_complete_move(struct irq_cfg *cfg) 2361 { 2362 __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); 2363 } 2364 2365 void irq_force_complete_move(int irq) 2366 { 2367 struct irq_cfg *cfg = get_irq_chip_data(irq); 2368 2369 if (!cfg) 2370 return; 2371 2372 __irq_complete_move(cfg, cfg->vector); 2373 } 2374 #else 2375 static inline void irq_complete_move(struct irq_cfg *cfg) { } 2376 #endif 2377 2378 static void ack_apic_edge(struct irq_data *data) 2379 { 2380 irq_complete_move(data->chip_data); 2381 move_native_irq(data->irq); 2382 ack_APIC_irq(); 2383 } 2384 2385 atomic_t irq_mis_count; 2386 2387 /* 2388 * IO-APIC versions below 0x20 don't support the EOI register. 2389 * For the record, here is the information about various versions: 2390 * 0Xh 82489DX 2391 * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant 2392 * 2Xh I/O(x)APIC which is PCI 2.2 Compliant 2393 * 30h-FFh Reserved 2394 * 2395 * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic 2396 * version as 0x2. This is a documentation error: these ICH chips 2397 * actually use io-apics of version 0x20. 2398 * 2399 * For IO-APICs with an EOI register, we use it to do an explicit EOI. 2400 * Otherwise, we simulate the EOI message manually by changing the trigger 2401 * mode to edge and then back to level, with the RTE masked during the operation. 2402 */ 2403 static void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg) 2404 { 2405 struct irq_pin_list *entry; 2406 unsigned long flags; 2407 2408 raw_spin_lock_irqsave(&ioapic_lock, flags); 2409 for_each_irq_pin(entry, cfg->irq_2_pin) { 2410 if (mp_ioapics[entry->apic].apicver >= 0x20) { 2411 /* 2412 * Interrupt remapping uses the pin number as the virtual vector 2413 * in the RTE. The actual vector is programmed in the 2414 * interrupt-remapping table entry. Hence for the io-apic 2415 * EOI we use the pin number.
*/ 2417 if (irq_remapped(cfg)) 2418 io_apic_eoi(entry->apic, entry->pin); 2419 else 2420 io_apic_eoi(entry->apic, cfg->vector); 2421 } else { 2422 __mask_and_edge_IO_APIC_irq(entry); 2423 __unmask_and_level_IO_APIC_irq(entry); 2424 } 2425 } 2426 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 2427 } 2428 2429 static void ack_apic_level(struct irq_data *data) 2430 { 2431 struct irq_cfg *cfg = data->chip_data; 2432 int i, do_unmask_irq = 0, irq = data->irq; 2433 struct irq_desc *desc = irq_to_desc(irq); 2434 unsigned long v; 2435 2436 irq_complete_move(cfg); 2437 #ifdef CONFIG_GENERIC_PENDING_IRQ 2438 /* If we are moving the irq we need to mask it */ 2439 if (unlikely(desc->status & IRQ_MOVE_PENDING)) { 2440 do_unmask_irq = 1; 2441 mask_ioapic(cfg); 2442 } 2443 #endif 2444 2445 /* 2446 * It appears there is an erratum which affects at least version 0x11 2447 * of the I/O APIC (that's the 82093AA and cores integrated into various 2448 * chipsets). Under certain conditions a level-triggered interrupt is 2449 * erroneously delivered as an edge-triggered one but the respective IRR 2450 * bit gets set nevertheless. As a result the I/O unit expects an EOI 2451 * message but it will never arrive and further interrupts are blocked 2452 * from the source. The exact reason is so far unknown, but the 2453 * phenomenon was observed when two consecutive interrupt requests 2454 * from a given source get delivered to the same CPU and the source is 2455 * temporarily disabled in between. 2456 * 2457 * A workaround is to simulate an EOI message manually. We achieve it 2458 * by setting the trigger mode to edge and then to level when the edge 2459 * trigger mode gets detected in the TMR of a local APIC for a 2460 * level-triggered interrupt. We mask the source for the time of the 2461 * operation to prevent an edge-triggered interrupt escaping meanwhile. 2462 * The idea is from Manfred Spraul. --macro 2463 * 2464 * Also, when a cpu goes offline, fixup_irqs() will forward 2465 * any unhandled interrupt on the offlined cpu to the new cpu 2466 * destination that is handling the corresponding interrupt. This 2467 * interrupt forwarding is done via IPIs. Hence, in this case a 2468 * level-triggered io-apic interrupt will also be seen as an edge 2469 * interrupt in the IRR. And we can't rely on the cpu's EOI 2470 * being broadcast to the IO-APICs, which would clear the remote IRR 2471 * corresponding to the level-triggered interrupt. Hence on IO-APICs 2472 * supporting the EOI register, we do an explicit EOI to clear the 2473 * remote IRR, and on IO-APICs which don't have an EOI register, 2474 * we use the above logic (mask+edge followed by unmask+level) from 2475 * Manfred Spraul to clear the remote IRR. 2476 */ 2477 i = cfg->vector; 2478 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1)); 2479 2480 /* 2481 * We must acknowledge the irq before we move it or the acknowledge will 2482 * not propagate properly. 2483 */ 2484 ack_APIC_irq(); 2485 2486 /* 2487 * Tail end of clearing the remote IRR bit (either by delivering the EOI 2488 * message via an io-apic EOI register write or by simulating it using the 2489 * mask+edge followed by unmask+level logic) manually when the 2490 * level-triggered interrupt is seen as an edge-triggered interrupt 2491 * at the cpu. 2492 */ 2493 if (!(v & (1 << (i & 0x1f)))) { 2494 atomic_inc(&irq_mis_count); 2495 2496 eoi_ioapic_irq(irq, cfg); 2497 } 2498 2499 /* Now we can move and re-enable the irq */ 2500 if (unlikely(do_unmask_irq)) { 2501 /* Only migrate the irq if the ack has been received.
2502 * 2503 * On rare occasions the broadcast level-triggered ack gets 2504 * delayed going to ioapics, and if we reprogram the 2505 * vector while Remote IRR is still set the irq will never 2506 * fire again. 2507 * 2508 * To prevent this scenario we read the Remote IRR bit 2509 * of the ioapic. This has two effects. 2510 * - On any sane system the read of the ioapic will 2511 * flush writes (and acks) going to the ioapic from 2512 * this cpu. 2513 * - We get to see if the ACK has actually been delivered. 2514 * 2515 * Based on failed experiments of reprogramming the 2516 * ioapic entry from outside of irq context (starting 2517 * with masking the ioapic entry and then polling until 2518 * Remote IRR was clear before reprogramming it), 2519 * I don't trust the Remote IRR bit to be 2520 * completely accurate. 2521 * 2522 * However, there appears to be no other way to plug 2523 * this race, so if the Remote IRR bit is not 2524 * accurate and is causing problems then it is a hardware bug 2525 * and you can go talk to the chipset vendor about it. 2526 */ 2527 if (!io_apic_level_ack_pending(cfg)) 2528 move_masked_irq(irq); 2529 unmask_ioapic(cfg); 2530 } 2531 } 2532 2533 #ifdef CONFIG_INTR_REMAP 2534 static void ir_ack_apic_edge(struct irq_data *data) 2535 { 2536 ack_APIC_irq(); 2537 } 2538 2539 static void ir_ack_apic_level(struct irq_data *data) 2540 { 2541 ack_APIC_irq(); 2542 eoi_ioapic_irq(data->irq, data->chip_data); 2543 } 2544 #endif /* CONFIG_INTR_REMAP */ 2545 2546 static struct irq_chip ioapic_chip __read_mostly = { 2547 .name = "IO-APIC", 2548 .irq_startup = startup_ioapic_irq, 2549 .irq_mask = mask_ioapic_irq, 2550 .irq_unmask = unmask_ioapic_irq, 2551 .irq_ack = ack_apic_edge, 2552 .irq_eoi = ack_apic_level, 2553 #ifdef CONFIG_SMP 2554 .irq_set_affinity = ioapic_set_affinity, 2555 #endif 2556 .irq_retrigger = ioapic_retrigger_irq, 2557 }; 2558 2559 static struct irq_chip ir_ioapic_chip __read_mostly = { 2560 .name = "IR-IO-APIC", 2561 .irq_startup = startup_ioapic_irq, 2562 .irq_mask = mask_ioapic_irq, 2563 .irq_unmask = unmask_ioapic_irq, 2564 #ifdef CONFIG_INTR_REMAP 2565 .irq_ack = ir_ack_apic_edge, 2566 .irq_eoi = ir_ack_apic_level, 2567 #ifdef CONFIG_SMP 2568 .irq_set_affinity = ir_ioapic_set_affinity, 2569 #endif 2570 #endif 2571 .irq_retrigger = ioapic_retrigger_irq, 2572 }; 2573 2574 static inline void init_IO_APIC_traps(void) 2575 { 2576 struct irq_cfg *cfg; 2577 unsigned int irq; 2578 2579 /* 2580 * NOTE! The local APIC isn't very good at handling 2581 * multiple interrupts at the same interrupt level. 2582 * As the interrupt level is determined by taking the 2583 * vector number and shifting that right by 4, we 2584 * want to spread these out a bit so that they don't 2585 * all fall in the same interrupt level. 2586 * 2587 * Also, we've got to be careful not to trash gate 2588 * 0x80, because int 0x80 is hm, kind of importantish. ;) 2589 */ 2590 for_each_active_irq(irq) { 2591 cfg = get_irq_chip_data(irq); 2592 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) { 2593 /* 2594 * Hmm.. We don't have an entry for this, 2595 * so default to an old-fashioned 8259 2596 * interrupt if we can.. 2597 */ 2598 if (irq < legacy_pic->nr_legacy_irqs) 2599 legacy_pic->make_irq(irq); 2600 else 2601 /* Strange. Oh, well..
*/ 2602 set_irq_chip(irq, &no_irq_chip); 2603 } 2604 } 2605 } 2606 2607 /* 2608 * The local APIC irq-chip implementation: 2609 */ 2610 2611 static void mask_lapic_irq(struct irq_data *data) 2612 { 2613 unsigned long v; 2614 2615 v = apic_read(APIC_LVT0); 2616 apic_write(APIC_LVT0, v | APIC_LVT_MASKED); 2617 } 2618 2619 static void unmask_lapic_irq(struct irq_data *data) 2620 { 2621 unsigned long v; 2622 2623 v = apic_read(APIC_LVT0); 2624 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED); 2625 } 2626 2627 static void ack_lapic_irq(struct irq_data *data) 2628 { 2629 ack_APIC_irq(); 2630 } 2631 2632 static struct irq_chip lapic_chip __read_mostly = { 2633 .name = "local-APIC", 2634 .irq_mask = mask_lapic_irq, 2635 .irq_unmask = unmask_lapic_irq, 2636 .irq_ack = ack_lapic_irq, 2637 }; 2638 2639 static void lapic_register_intr(int irq) 2640 { 2641 irq_clear_status_flags(irq, IRQ_LEVEL); 2642 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq, 2643 "edge"); 2644 } 2645 2646 static void __init setup_nmi(void) 2647 { 2648 /* 2649 * Dirty trick to enable the NMI watchdog ... 2650 * We put the 8259A master into AEOI mode and 2651 * unmask on all local APICs LVT0 as NMI. 2652 * 2653 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire') 2654 * is from Maciej W. Rozycki - so we do not have to EOI from 2655 * the NMI handler or the timer interrupt. 2656 */ 2657 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ..."); 2658 2659 enable_NMI_through_LVT0(); 2660 2661 apic_printk(APIC_VERBOSE, " done.\n"); 2662 } 2663 2664 /* 2665 * This looks a bit hackish but it's about the only one way of sending 2666 * a few INTA cycles to 8259As and any associated glue logic. ICR does 2667 * not support the ExtINT mode, unfortunately. We need to send these 2668 * cycles as some i82489DX-based boards have glue logic that keeps the 2669 * 8259A interrupt line asserted until INTA. 
--macro 2670 */ 2671 static inline void __init unlock_ExtINT_logic(void) 2672 { 2673 int apic, pin, i; 2674 struct IO_APIC_route_entry entry0, entry1; 2675 unsigned char save_control, save_freq_select; 2676 2677 pin = find_isa_irq_pin(8, mp_INT); 2678 if (pin == -1) { 2679 WARN_ON_ONCE(1); 2680 return; 2681 } 2682 apic = find_isa_irq_apic(8, mp_INT); 2683 if (apic == -1) { 2684 WARN_ON_ONCE(1); 2685 return; 2686 } 2687 2688 entry0 = ioapic_read_entry(apic, pin); 2689 clear_IO_APIC_pin(apic, pin); 2690 2691 memset(&entry1, 0, sizeof(entry1)); 2692 2693 entry1.dest_mode = 0; /* physical delivery */ 2694 entry1.mask = 0; /* unmask IRQ now */ 2695 entry1.dest = hard_smp_processor_id(); 2696 entry1.delivery_mode = dest_ExtINT; 2697 entry1.polarity = entry0.polarity; 2698 entry1.trigger = 0; 2699 entry1.vector = 0; 2700 2701 ioapic_write_entry(apic, pin, entry1); 2702 2703 save_control = CMOS_READ(RTC_CONTROL); 2704 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); 2705 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6, 2706 RTC_FREQ_SELECT); 2707 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL); 2708 2709 i = 100; 2710 while (i-- > 0) { 2711 mdelay(10); 2712 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF) 2713 i -= 10; 2714 } 2715 2716 CMOS_WRITE(save_control, RTC_CONTROL); 2717 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); 2718 clear_IO_APIC_pin(apic, pin); 2719 2720 ioapic_write_entry(apic, pin, entry0); 2721 } 2722 2723 static int disable_timer_pin_1 __initdata; 2724 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */ 2725 static int __init disable_timer_pin_setup(char *arg) 2726 { 2727 disable_timer_pin_1 = 1; 2728 return 0; 2729 } 2730 early_param("disable_timer_pin_1", disable_timer_pin_setup); 2731 2732 int timer_through_8259 __initdata; 2733 2734 /* 2735 * This code may look a bit paranoid, but it's supposed to cooperate with 2736 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ 2737 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast 2738 * fanatically on his truly buggy board. 2739 * 2740 * FIXME: really need to revamp this for all platforms. 2741 */ 2742 static inline void __init check_timer(void) 2743 { 2744 struct irq_cfg *cfg = get_irq_chip_data(0); 2745 int node = cpu_to_node(0); 2746 int apic1, pin1, apic2, pin2; 2747 unsigned long flags; 2748 int no_pin1 = 0; 2749 2750 local_irq_save(flags); 2751 2752 /* 2753 * get/set the timer IRQ vector: 2754 */ 2755 legacy_pic->mask(0); 2756 assign_irq_vector(0, cfg, apic->target_cpus()); 2757 2758 /* 2759 * As IRQ0 is to be enabled in the 8259A, the virtual 2760 * wire has to be disabled in the local APIC. Also 2761 * timer interrupts need to be acknowledged manually in 2762 * the 8259A for the i82489DX when using the NMI 2763 * watchdog as that APIC treats NMIs as level-triggered. 2764 * The AEOI mode will finish them in the 8259A 2765 * automatically. 
2766 */ 2767 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT); 2768 legacy_pic->init(1); 2769 #ifdef CONFIG_X86_32 2770 { 2771 unsigned int ver; 2772 2773 ver = apic_read(APIC_LVR); 2774 ver = GET_APIC_VERSION(ver); 2775 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver)); 2776 } 2777 #endif 2778 2779 pin1 = find_isa_irq_pin(0, mp_INT); 2780 apic1 = find_isa_irq_apic(0, mp_INT); 2781 pin2 = ioapic_i8259.pin; 2782 apic2 = ioapic_i8259.apic; 2783 2784 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X " 2785 "apic1=%d pin1=%d apic2=%d pin2=%d\n", 2786 cfg->vector, apic1, pin1, apic2, pin2); 2787 2788 /* 2789 * Some BIOS writers are clueless and report the ExtINTA 2790 * I/O APIC input from the cascaded 8259A as the timer 2791 * interrupt input. So just in case, if only one pin 2792 * was found above, try it both directly and through the 2793 * 8259A. 2794 */ 2795 if (pin1 == -1) { 2796 if (intr_remapping_enabled) 2797 panic("BIOS bug: timer not connected to IO-APIC"); 2798 pin1 = pin2; 2799 apic1 = apic2; 2800 no_pin1 = 1; 2801 } else if (pin2 == -1) { 2802 pin2 = pin1; 2803 apic2 = apic1; 2804 } 2805 2806 if (pin1 != -1) { 2807 /* 2808 * Ok, does IRQ0 through the IOAPIC work? 2809 */ 2810 if (no_pin1) { 2811 add_pin_to_irq_node(cfg, node, apic1, pin1); 2812 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector); 2813 } else { 2814 /* for edge trigger, setup_ioapic_irq already 2815 * leave it unmasked. 2816 * so only need to unmask if it is level-trigger 2817 * do we really have level trigger timer? 2818 */ 2819 int idx; 2820 idx = find_irq_entry(apic1, pin1, mp_INT); 2821 if (idx != -1 && irq_trigger(idx)) 2822 unmask_ioapic(cfg); 2823 } 2824 if (timer_irq_works()) { 2825 if (nmi_watchdog == NMI_IO_APIC) { 2826 setup_nmi(); 2827 legacy_pic->unmask(0); 2828 } 2829 if (disable_timer_pin_1 > 0) 2830 clear_IO_APIC_pin(0, pin1); 2831 goto out; 2832 } 2833 if (intr_remapping_enabled) 2834 panic("timer doesn't work through Interrupt-remapped IO-APIC"); 2835 local_irq_disable(); 2836 clear_IO_APIC_pin(apic1, pin1); 2837 if (!no_pin1) 2838 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: " 2839 "8254 timer not connected to IO-APIC\n"); 2840 2841 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer " 2842 "(IRQ0) through the 8259A ...\n"); 2843 apic_printk(APIC_QUIET, KERN_INFO 2844 "..... (found apic %d pin %d) ...\n", apic2, pin2); 2845 /* 2846 * legacy devices should be connected to IO APIC #0 2847 */ 2848 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2); 2849 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector); 2850 legacy_pic->unmask(0); 2851 if (timer_irq_works()) { 2852 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n"); 2853 timer_through_8259 = 1; 2854 if (nmi_watchdog == NMI_IO_APIC) { 2855 legacy_pic->mask(0); 2856 setup_nmi(); 2857 legacy_pic->unmask(0); 2858 } 2859 goto out; 2860 } 2861 /* 2862 * Cleanup, just in case ... 2863 */ 2864 local_irq_disable(); 2865 legacy_pic->mask(0); 2866 clear_IO_APIC_pin(apic2, pin2); 2867 apic_printk(APIC_QUIET, KERN_INFO "....... 
failed.\n"); 2868 } 2869 2870 if (nmi_watchdog == NMI_IO_APIC) { 2871 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work " 2872 "through the IO-APIC - disabling NMI Watchdog!\n"); 2873 nmi_watchdog = NMI_NONE; 2874 } 2875 #ifdef CONFIG_X86_32 2876 timer_ack = 0; 2877 #endif 2878 2879 apic_printk(APIC_QUIET, KERN_INFO 2880 "...trying to set up timer as Virtual Wire IRQ...\n"); 2881 2882 lapic_register_intr(0); 2883 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */ 2884 legacy_pic->unmask(0); 2885 2886 if (timer_irq_works()) { 2887 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2888 goto out; 2889 } 2890 local_irq_disable(); 2891 legacy_pic->mask(0); 2892 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector); 2893 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n"); 2894 2895 apic_printk(APIC_QUIET, KERN_INFO 2896 "...trying to set up timer as ExtINT IRQ...\n"); 2897 2898 legacy_pic->init(0); 2899 legacy_pic->make_irq(0); 2900 apic_write(APIC_LVT0, APIC_DM_EXTINT); 2901 2902 unlock_ExtINT_logic(); 2903 2904 if (timer_irq_works()) { 2905 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n"); 2906 goto out; 2907 } 2908 local_irq_disable(); 2909 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n"); 2910 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a " 2911 "report. Then try booting with the 'noapic' option.\n"); 2912 out: 2913 local_irq_restore(flags); 2914 } 2915 2916 /* 2917 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available 2918 * to devices. However there may be an I/O APIC pin available for 2919 * this interrupt regardless. The pin may be left unconnected, but 2920 * typically it will be reused as an ExtINT cascade interrupt for 2921 * the master 8259A. In the MPS case such a pin will normally be 2922 * reported as an ExtINT interrupt in the MP table. With ACPI 2923 * there is no provision for ExtINT interrupts, and in the absence 2924 * of an override it would be treated as an ordinary ISA I/O APIC 2925 * interrupt, that is edge-triggered and unmasked by default. We 2926 * used to do this, but it caused problems on some systems because 2927 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using 2928 * the same ExtINT cascade interrupt to drive the local APIC of the 2929 * bootstrap processor. Therefore we refrain from routing IRQ2 to 2930 * the I/O APIC in all cases now. No actual device should request 2931 * it anyway. --macro 2932 */ 2933 #define PIC_IRQS (1UL << PIC_CASCADE_IR) 2934 2935 void __init setup_IO_APIC(void) 2936 { 2937 2938 /* 2939 * calling enable_IO_APIC() is moved to setup_local_APIC for BP 2940 */ 2941 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL; 2942 2943 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n"); 2944 /* 2945 * Set up IO-APIC IRQ routing. 2946 */ 2947 x86_init.mpparse.setup_ioapic_ids(); 2948 2949 sync_Arb_IDs(); 2950 setup_IO_APIC_irqs(); 2951 init_IO_APIC_traps(); 2952 if (legacy_pic->nr_legacy_irqs) 2953 check_timer(); 2954 } 2955 2956 /* 2957 * Called after all the initialization is done. 
If we didnt find any 2958 * APIC bugs then we can allow the modify fast path 2959 */ 2960 2961 static int __init io_apic_bug_finalize(void) 2962 { 2963 if (sis_apic_bug == -1) 2964 sis_apic_bug = 0; 2965 return 0; 2966 } 2967 2968 late_initcall(io_apic_bug_finalize); 2969 2970 struct sysfs_ioapic_data { 2971 struct sys_device dev; 2972 struct IO_APIC_route_entry entry[0]; 2973 }; 2974 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS]; 2975 2976 static int ioapic_suspend(struct sys_device *dev, pm_message_t state) 2977 { 2978 struct IO_APIC_route_entry *entry; 2979 struct sysfs_ioapic_data *data; 2980 int i; 2981 2982 data = container_of(dev, struct sysfs_ioapic_data, dev); 2983 entry = data->entry; 2984 for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) 2985 *entry = ioapic_read_entry(dev->id, i); 2986 2987 return 0; 2988 } 2989 2990 static int ioapic_resume(struct sys_device *dev) 2991 { 2992 struct IO_APIC_route_entry *entry; 2993 struct sysfs_ioapic_data *data; 2994 unsigned long flags; 2995 union IO_APIC_reg_00 reg_00; 2996 int i; 2997 2998 data = container_of(dev, struct sysfs_ioapic_data, dev); 2999 entry = data->entry; 3000 3001 raw_spin_lock_irqsave(&ioapic_lock, flags); 3002 reg_00.raw = io_apic_read(dev->id, 0); 3003 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { 3004 reg_00.bits.ID = mp_ioapics[dev->id].apicid; 3005 io_apic_write(dev->id, 0, reg_00.raw); 3006 } 3007 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3008 for (i = 0; i < nr_ioapic_registers[dev->id]; i++) 3009 ioapic_write_entry(dev->id, i, entry[i]); 3010 3011 return 0; 3012 } 3013 3014 static struct sysdev_class ioapic_sysdev_class = { 3015 .name = "ioapic", 3016 .suspend = ioapic_suspend, 3017 .resume = ioapic_resume, 3018 }; 3019 3020 static int __init ioapic_init_sysfs(void) 3021 { 3022 struct sys_device * dev; 3023 int i, size, error; 3024 3025 error = sysdev_class_register(&ioapic_sysdev_class); 3026 if (error) 3027 return error; 3028 3029 for (i = 0; i < nr_ioapics; i++ ) { 3030 size = sizeof(struct sys_device) + nr_ioapic_registers[i] 3031 * sizeof(struct IO_APIC_route_entry); 3032 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL); 3033 if (!mp_ioapic_data[i]) { 3034 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); 3035 continue; 3036 } 3037 dev = &mp_ioapic_data[i]->dev; 3038 dev->id = i; 3039 dev->cls = &ioapic_sysdev_class; 3040 error = sysdev_register(dev); 3041 if (error) { 3042 kfree(mp_ioapic_data[i]); 3043 mp_ioapic_data[i] = NULL; 3044 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i); 3045 continue; 3046 } 3047 } 3048 3049 return 0; 3050 } 3051 3052 device_initcall(ioapic_init_sysfs); 3053 3054 /* 3055 * Dynamic irq allocate and deallocation 3056 */ 3057 unsigned int create_irq_nr(unsigned int from, int node) 3058 { 3059 struct irq_cfg *cfg; 3060 unsigned long flags; 3061 unsigned int ret = 0; 3062 int irq; 3063 3064 if (from < nr_irqs_gsi) 3065 from = nr_irqs_gsi; 3066 3067 irq = alloc_irq_from(from, node); 3068 if (irq < 0) 3069 return 0; 3070 cfg = alloc_irq_cfg(irq, node); 3071 if (!cfg) { 3072 free_irq_at(irq, NULL); 3073 return 0; 3074 } 3075 3076 raw_spin_lock_irqsave(&vector_lock, flags); 3077 if (!__assign_irq_vector(irq, cfg, apic->target_cpus())) 3078 ret = irq; 3079 raw_spin_unlock_irqrestore(&vector_lock, flags); 3080 3081 if (ret) { 3082 set_irq_chip_data(irq, cfg); 3083 irq_clear_status_flags(irq, IRQ_NOREQUEST); 3084 } else { 3085 free_irq_at(irq, cfg); 3086 } 3087 return ret; 3088 } 3089 3090 int create_irq(void) 3091 { 3092 int node = 
cpu_to_node(0); 3093 unsigned int irq_want; 3094 int irq; 3095 3096 irq_want = nr_irqs_gsi; 3097 irq = create_irq_nr(irq_want, node); 3098 3099 if (irq == 0) 3100 irq = -1; 3101 3102 return irq; 3103 } 3104 3105 void destroy_irq(unsigned int irq) 3106 { 3107 struct irq_cfg *cfg = get_irq_chip_data(irq); 3108 unsigned long flags; 3109 3110 irq_set_status_flags(irq, IRQ_NOREQUEST|IRQ_NOPROBE); 3111 3112 if (intr_remapping_enabled) 3113 free_irte(irq); 3114 raw_spin_lock_irqsave(&vector_lock, flags); 3115 __clear_irq_vector(irq, cfg); 3116 raw_spin_unlock_irqrestore(&vector_lock, flags); 3117 free_irq_at(irq, cfg); 3118 } 3119 3120 /* 3121 * MSI message composition 3122 */ 3123 #ifdef CONFIG_PCI_MSI 3124 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, 3125 struct msi_msg *msg, u8 hpet_id) 3126 { 3127 struct irq_cfg *cfg; 3128 int err; 3129 unsigned dest; 3130 3131 if (disable_apic) 3132 return -ENXIO; 3133 3134 cfg = irq_cfg(irq); 3135 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3136 if (err) 3137 return err; 3138 3139 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus()); 3140 3141 if (irq_remapped(get_irq_chip_data(irq))) { 3142 struct irte irte; 3143 int ir_index; 3144 u16 sub_handle; 3145 3146 ir_index = map_irq_to_irte_handle(irq, &sub_handle); 3147 BUG_ON(ir_index == -1); 3148 3149 prepare_irte(&irte, cfg->vector, dest); 3150 3151 /* Set source-id of interrupt request */ 3152 if (pdev) 3153 set_msi_sid(&irte, pdev); 3154 else 3155 set_hpet_sid(&irte, hpet_id); 3156 3157 modify_irte(irq, &irte); 3158 3159 msg->address_hi = MSI_ADDR_BASE_HI; 3160 msg->data = sub_handle; 3161 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT | 3162 MSI_ADDR_IR_SHV | 3163 MSI_ADDR_IR_INDEX1(ir_index) | 3164 MSI_ADDR_IR_INDEX2(ir_index); 3165 } else { 3166 if (x2apic_enabled()) 3167 msg->address_hi = MSI_ADDR_BASE_HI | 3168 MSI_ADDR_EXT_DEST_ID(dest); 3169 else 3170 msg->address_hi = MSI_ADDR_BASE_HI; 3171 3172 msg->address_lo = 3173 MSI_ADDR_BASE_LO | 3174 ((apic->irq_dest_mode == 0) ? 3175 MSI_ADDR_DEST_MODE_PHYSICAL: 3176 MSI_ADDR_DEST_MODE_LOGICAL) | 3177 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3178 MSI_ADDR_REDIRECTION_CPU: 3179 MSI_ADDR_REDIRECTION_LOWPRI) | 3180 MSI_ADDR_DEST_ID(dest); 3181 3182 msg->data = 3183 MSI_DATA_TRIGGER_EDGE | 3184 MSI_DATA_LEVEL_ASSERT | 3185 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3186 MSI_DATA_DELIVERY_FIXED: 3187 MSI_DATA_DELIVERY_LOWPRI) | 3188 MSI_DATA_VECTOR(cfg->vector); 3189 } 3190 return err; 3191 } 3192 3193 #ifdef CONFIG_SMP 3194 static int 3195 msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3196 { 3197 struct irq_cfg *cfg = data->chip_data; 3198 struct msi_msg msg; 3199 unsigned int dest; 3200 3201 if (__ioapic_set_affinity(data, mask, &dest)) 3202 return -1; 3203 3204 __get_cached_msi_msg(data->msi_desc, &msg); 3205 3206 msg.data &= ~MSI_DATA_VECTOR_MASK; 3207 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3208 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3209 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3210 3211 __write_msi_msg(data->msi_desc, &msg); 3212 3213 return 0; 3214 } 3215 #ifdef CONFIG_INTR_REMAP 3216 /* 3217 * Migrate the MSI irq to another cpumask. This migration is 3218 * done in the process context using interrupt-remapping hardware. 
3219 */ 3220 static int 3221 ir_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3222 bool force) 3223 { 3224 struct irq_cfg *cfg = data->chip_data; 3225 unsigned int dest, irq = data->irq; 3226 struct irte irte; 3227 3228 if (get_irte(irq, &irte)) 3229 return -1; 3230 3231 if (__ioapic_set_affinity(data, mask, &dest)) 3232 return -1; 3233 3234 irte.vector = cfg->vector; 3235 irte.dest_id = IRTE_DEST(dest); 3236 3237 /* 3238 * atomically update the IRTE with the new destination and vector. 3239 */ 3240 modify_irte(irq, &irte); 3241 3242 /* 3243 * After this point, all the interrupts will start arriving 3244 * at the new destination. So, time to cleanup the previous 3245 * vector allocation. 3246 */ 3247 if (cfg->move_in_progress) 3248 send_cleanup_vector(cfg); 3249 3250 return 0; 3251 } 3252 3253 #endif 3254 #endif /* CONFIG_SMP */ 3255 3256 /* 3257 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices, 3258 * which implement the MSI or MSI-X Capability Structure. 3259 */ 3260 static struct irq_chip msi_chip = { 3261 .name = "PCI-MSI", 3262 .irq_unmask = unmask_msi_irq, 3263 .irq_mask = mask_msi_irq, 3264 .irq_ack = ack_apic_edge, 3265 #ifdef CONFIG_SMP 3266 .irq_set_affinity = msi_set_affinity, 3267 #endif 3268 .irq_retrigger = ioapic_retrigger_irq, 3269 }; 3270 3271 static struct irq_chip msi_ir_chip = { 3272 .name = "IR-PCI-MSI", 3273 .irq_unmask = unmask_msi_irq, 3274 .irq_mask = mask_msi_irq, 3275 #ifdef CONFIG_INTR_REMAP 3276 .irq_ack = ir_ack_apic_edge, 3277 #ifdef CONFIG_SMP 3278 .irq_set_affinity = ir_msi_set_affinity, 3279 #endif 3280 #endif 3281 .irq_retrigger = ioapic_retrigger_irq, 3282 }; 3283 3284 /* 3285 * Map the PCI dev to the corresponding remapping hardware unit 3286 * and allocate 'nvec' consecutive interrupt-remapping table entries 3287 * in it. 
3288 */ 3289 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec) 3290 { 3291 struct intel_iommu *iommu; 3292 int index; 3293 3294 iommu = map_dev_to_ir(dev); 3295 if (!iommu) { 3296 printk(KERN_ERR 3297 "Unable to map PCI %s to iommu\n", pci_name(dev)); 3298 return -ENOENT; 3299 } 3300 3301 index = alloc_irte(iommu, irq, nvec); 3302 if (index < 0) { 3303 printk(KERN_ERR 3304 "Unable to allocate %d IRTE for PCI %s\n", nvec, 3305 pci_name(dev)); 3306 return -ENOSPC; 3307 } 3308 return index; 3309 } 3310 3311 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq) 3312 { 3313 struct msi_msg msg; 3314 int ret; 3315 3316 ret = msi_compose_msg(dev, irq, &msg, -1); 3317 if (ret < 0) 3318 return ret; 3319 3320 set_irq_msi(irq, msidesc); 3321 write_msi_msg(irq, &msg); 3322 3323 if (irq_remapped(get_irq_chip_data(irq))) { 3324 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3325 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge"); 3326 } else 3327 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge"); 3328 3329 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq); 3330 3331 return 0; 3332 } 3333 3334 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 3335 { 3336 int node, ret, sub_handle, index = 0; 3337 unsigned int irq, irq_want; 3338 struct msi_desc *msidesc; 3339 struct intel_iommu *iommu = NULL; 3340 3341 /* x86 doesn't support multiple MSI yet */ 3342 if (type == PCI_CAP_ID_MSI && nvec > 1) 3343 return 1; 3344 3345 node = dev_to_node(&dev->dev); 3346 irq_want = nr_irqs_gsi; 3347 sub_handle = 0; 3348 list_for_each_entry(msidesc, &dev->msi_list, list) { 3349 irq = create_irq_nr(irq_want, node); 3350 if (irq == 0) 3351 return -1; 3352 irq_want = irq + 1; 3353 if (!intr_remapping_enabled) 3354 goto no_ir; 3355 3356 if (!sub_handle) { 3357 /* 3358 * allocate the consecutive block of IRTE's 3359 * for 'nvec' 3360 */ 3361 index = msi_alloc_irte(dev, irq, nvec); 3362 if (index < 0) { 3363 ret = index; 3364 goto error; 3365 } 3366 } else { 3367 iommu = map_dev_to_ir(dev); 3368 if (!iommu) { 3369 ret = -ENOENT; 3370 goto error; 3371 } 3372 /* 3373 * setup the mapping between the irq and the IRTE 3374 * base index, the sub_handle pointing to the 3375 * appropriate interrupt remap table entry. 
3376 */ 3377 set_irte_irq(irq, iommu, index, sub_handle); 3378 } 3379 no_ir: 3380 ret = setup_msi_irq(dev, msidesc, irq); 3381 if (ret < 0) 3382 goto error; 3383 sub_handle++; 3384 } 3385 return 0; 3386 3387 error: 3388 destroy_irq(irq); 3389 return ret; 3390 } 3391 3392 void arch_teardown_msi_irq(unsigned int irq) 3393 { 3394 destroy_irq(irq); 3395 } 3396 3397 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP) 3398 #ifdef CONFIG_SMP 3399 static int 3400 dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, 3401 bool force) 3402 { 3403 struct irq_cfg *cfg = data->chip_data; 3404 unsigned int dest, irq = data->irq; 3405 struct msi_msg msg; 3406 3407 if (__ioapic_set_affinity(data, mask, &dest)) 3408 return -1; 3409 3410 dmar_msi_read(irq, &msg); 3411 3412 msg.data &= ~MSI_DATA_VECTOR_MASK; 3413 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3414 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3415 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3416 3417 dmar_msi_write(irq, &msg); 3418 3419 return 0; 3420 } 3421 3422 #endif /* CONFIG_SMP */ 3423 3424 static struct irq_chip dmar_msi_type = { 3425 .name = "DMAR_MSI", 3426 .irq_unmask = dmar_msi_unmask, 3427 .irq_mask = dmar_msi_mask, 3428 .irq_ack = ack_apic_edge, 3429 #ifdef CONFIG_SMP 3430 .irq_set_affinity = dmar_msi_set_affinity, 3431 #endif 3432 .irq_retrigger = ioapic_retrigger_irq, 3433 }; 3434 3435 int arch_setup_dmar_msi(unsigned int irq) 3436 { 3437 int ret; 3438 struct msi_msg msg; 3439 3440 ret = msi_compose_msg(NULL, irq, &msg, -1); 3441 if (ret < 0) 3442 return ret; 3443 dmar_msi_write(irq, &msg); 3444 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq, 3445 "edge"); 3446 return 0; 3447 } 3448 #endif 3449 3450 #ifdef CONFIG_HPET_TIMER 3451 3452 #ifdef CONFIG_SMP 3453 static int hpet_msi_set_affinity(struct irq_data *data, 3454 const struct cpumask *mask, bool force) 3455 { 3456 struct irq_cfg *cfg = data->chip_data; 3457 struct msi_msg msg; 3458 unsigned int dest; 3459 3460 if (__ioapic_set_affinity(data, mask, &dest)) 3461 return -1; 3462 3463 hpet_msi_read(data->handler_data, &msg); 3464 3465 msg.data &= ~MSI_DATA_VECTOR_MASK; 3466 msg.data |= MSI_DATA_VECTOR(cfg->vector); 3467 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; 3468 msg.address_lo |= MSI_ADDR_DEST_ID(dest); 3469 3470 hpet_msi_write(data->handler_data, &msg); 3471 3472 return 0; 3473 } 3474 3475 #endif /* CONFIG_SMP */ 3476 3477 static struct irq_chip ir_hpet_msi_type = { 3478 .name = "IR-HPET_MSI", 3479 .irq_unmask = hpet_msi_unmask, 3480 .irq_mask = hpet_msi_mask, 3481 #ifdef CONFIG_INTR_REMAP 3482 .irq_ack = ir_ack_apic_edge, 3483 #ifdef CONFIG_SMP 3484 .irq_set_affinity = ir_msi_set_affinity, 3485 #endif 3486 #endif 3487 .irq_retrigger = ioapic_retrigger_irq, 3488 }; 3489 3490 static struct irq_chip hpet_msi_type = { 3491 .name = "HPET_MSI", 3492 .irq_unmask = hpet_msi_unmask, 3493 .irq_mask = hpet_msi_mask, 3494 .irq_ack = ack_apic_edge, 3495 #ifdef CONFIG_SMP 3496 .irq_set_affinity = hpet_msi_set_affinity, 3497 #endif 3498 .irq_retrigger = ioapic_retrigger_irq, 3499 }; 3500 3501 int arch_setup_hpet_msi(unsigned int irq, unsigned int id) 3502 { 3503 struct msi_msg msg; 3504 int ret; 3505 3506 if (intr_remapping_enabled) { 3507 struct intel_iommu *iommu = map_hpet_to_ir(id); 3508 int index; 3509 3510 if (!iommu) 3511 return -1; 3512 3513 index = alloc_irte(iommu, irq, 1); 3514 if (index < 0) 3515 return -1; 3516 } 3517 3518 ret = msi_compose_msg(NULL, irq, &msg, id); 3519 if (ret < 0) 3520 return ret; 3521 3522 
hpet_msi_write(get_irq_data(irq), &msg); 3523 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); 3524 if (irq_remapped(get_irq_chip_data(irq))) 3525 set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type, 3526 handle_edge_irq, "edge"); 3527 else 3528 set_irq_chip_and_handler_name(irq, &hpet_msi_type, 3529 handle_edge_irq, "edge"); 3530 3531 return 0; 3532 } 3533 #endif 3534 3535 #endif /* CONFIG_PCI_MSI */ 3536 /* 3537 * Hypertransport interrupt support 3538 */ 3539 #ifdef CONFIG_HT_IRQ 3540 3541 #ifdef CONFIG_SMP 3542 3543 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector) 3544 { 3545 struct ht_irq_msg msg; 3546 fetch_ht_irq_msg(irq, &msg); 3547 3548 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK); 3549 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK); 3550 3551 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest); 3552 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest); 3553 3554 write_ht_irq_msg(irq, &msg); 3555 } 3556 3557 static int 3558 ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) 3559 { 3560 struct irq_cfg *cfg = data->chip_data; 3561 unsigned int dest; 3562 3563 if (__ioapic_set_affinity(data, mask, &dest)) 3564 return -1; 3565 3566 target_ht_irq(data->irq, dest, cfg->vector); 3567 return 0; 3568 } 3569 3570 #endif 3571 3572 static struct irq_chip ht_irq_chip = { 3573 .name = "PCI-HT", 3574 .irq_mask = mask_ht_irq, 3575 .irq_unmask = unmask_ht_irq, 3576 .irq_ack = ack_apic_edge, 3577 #ifdef CONFIG_SMP 3578 .irq_set_affinity = ht_set_affinity, 3579 #endif 3580 .irq_retrigger = ioapic_retrigger_irq, 3581 }; 3582 3583 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) 3584 { 3585 struct irq_cfg *cfg; 3586 int err; 3587 3588 if (disable_apic) 3589 return -ENXIO; 3590 3591 cfg = irq_cfg(irq); 3592 err = assign_irq_vector(irq, cfg, apic->target_cpus()); 3593 if (!err) { 3594 struct ht_irq_msg msg; 3595 unsigned dest; 3596 3597 dest = apic->cpu_mask_to_apicid_and(cfg->domain, 3598 apic->target_cpus()); 3599 3600 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest); 3601 3602 msg.address_lo = 3603 HT_IRQ_LOW_BASE | 3604 HT_IRQ_LOW_DEST_ID(dest) | 3605 HT_IRQ_LOW_VECTOR(cfg->vector) | 3606 ((apic->irq_dest_mode == 0) ? 3607 HT_IRQ_LOW_DM_PHYSICAL : 3608 HT_IRQ_LOW_DM_LOGICAL) | 3609 HT_IRQ_LOW_RQEOI_EDGE | 3610 ((apic->irq_delivery_mode != dest_LowestPrio) ? 3611 HT_IRQ_LOW_MT_FIXED : 3612 HT_IRQ_LOW_MT_ARBITRATED) | 3613 HT_IRQ_LOW_IRQ_MASKED; 3614 3615 write_ht_irq_msg(irq, &msg); 3616 3617 set_irq_chip_and_handler_name(irq, &ht_irq_chip, 3618 handle_edge_irq, "edge"); 3619 3620 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq); 3621 } 3622 return err; 3623 } 3624 #endif /* CONFIG_HT_IRQ */ 3625 3626 int __init io_apic_get_redir_entries (int ioapic) 3627 { 3628 union IO_APIC_reg_01 reg_01; 3629 unsigned long flags; 3630 3631 raw_spin_lock_irqsave(&ioapic_lock, flags); 3632 reg_01.raw = io_apic_read(ioapic, 1); 3633 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3634 3635 /* The register returns the maximum index redir index 3636 * supported, which is one less than the total number of redir 3637 * entries. 
3638 */ 3639 return reg_01.bits.entries + 1; 3640 } 3641 3642 void __init probe_nr_irqs_gsi(void) 3643 { 3644 int nr; 3645 3646 nr = gsi_top + NR_IRQS_LEGACY; 3647 if (nr > nr_irqs_gsi) 3648 nr_irqs_gsi = nr; 3649 3650 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3651 } 3652 3653 #ifdef CONFIG_SPARSE_IRQ 3654 int __init arch_probe_nr_irqs(void) 3655 { 3656 int nr; 3657 3658 if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) 3659 nr_irqs = NR_VECTORS * nr_cpu_ids; 3660 3661 nr = nr_irqs_gsi + 8 * nr_cpu_ids; 3662 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ) 3663 /* 3664 * for MSI and HT dyn irq 3665 */ 3666 nr += nr_irqs_gsi * 16; 3667 #endif 3668 if (nr < nr_irqs) 3669 nr_irqs = nr; 3670 3671 return NR_IRQS_LEGACY; 3672 } 3673 #endif 3674 3675 static int __io_apic_set_pci_routing(struct device *dev, int irq, 3676 struct io_apic_irq_attr *irq_attr) 3677 { 3678 struct irq_cfg *cfg; 3679 int node; 3680 int ioapic, pin; 3681 int trigger, polarity; 3682 3683 ioapic = irq_attr->ioapic; 3684 if (!IO_APIC_IRQ(irq)) { 3685 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n", 3686 ioapic); 3687 return -EINVAL; 3688 } 3689 3690 if (dev) 3691 node = dev_to_node(dev); 3692 else 3693 node = cpu_to_node(0); 3694 3695 cfg = alloc_irq_and_cfg_at(irq, node); 3696 if (!cfg) 3697 return 0; 3698 3699 pin = irq_attr->ioapic_pin; 3700 trigger = irq_attr->trigger; 3701 polarity = irq_attr->polarity; 3702 3703 /* 3704 * IRQs < 16 are already in the irq_2_pin[] map 3705 */ 3706 if (irq >= legacy_pic->nr_legacy_irqs) { 3707 if (__add_pin_to_irq_node(cfg, node, ioapic, pin)) { 3708 printk(KERN_INFO "can not add pin %d for irq %d\n", 3709 pin, irq); 3710 return 0; 3711 } 3712 } 3713 3714 setup_ioapic_irq(ioapic, pin, irq, cfg, trigger, polarity); 3715 3716 return 0; 3717 } 3718 3719 int io_apic_set_pci_routing(struct device *dev, int irq, 3720 struct io_apic_irq_attr *irq_attr) 3721 { 3722 int ioapic, pin; 3723 /* 3724 * Avoid pin reprogramming. PRTs typically include entries 3725 * with redundant pin->gsi mappings (but unique PCI devices); 3726 * we only program the IOAPIC on the first. 
3727 */ 3728 ioapic = irq_attr->ioapic; 3729 pin = irq_attr->ioapic_pin; 3730 if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) { 3731 pr_debug("Pin %d-%d already programmed\n", 3732 mp_ioapics[ioapic].apicid, pin); 3733 return 0; 3734 } 3735 set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed); 3736 3737 return __io_apic_set_pci_routing(dev, irq, irq_attr); 3738 } 3739 3740 u8 __init io_apic_unique_id(u8 id) 3741 { 3742 #ifdef CONFIG_X86_32 3743 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && 3744 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid])) 3745 return io_apic_get_unique_id(nr_ioapics, id); 3746 else 3747 return id; 3748 #else 3749 int i; 3750 DECLARE_BITMAP(used, 256); 3751 3752 bitmap_zero(used, 256); 3753 for (i = 0; i < nr_ioapics; i++) { 3754 struct mpc_ioapic *ia = &mp_ioapics[i]; 3755 __set_bit(ia->apicid, used); 3756 } 3757 if (!test_bit(id, used)) 3758 return id; 3759 return find_first_zero_bit(used, 256); 3760 #endif 3761 } 3762 3763 #ifdef CONFIG_X86_32 3764 int __init io_apic_get_unique_id(int ioapic, int apic_id) 3765 { 3766 union IO_APIC_reg_00 reg_00; 3767 static physid_mask_t apic_id_map = PHYSID_MASK_NONE; 3768 physid_mask_t tmp; 3769 unsigned long flags; 3770 int i = 0; 3771 3772 /* 3773 * The P4 platform supports up to 256 APIC IDs on two separate APIC 3774 * buses (one for LAPICs, one for IOAPICs), where predecessors only 3775 * supports up to 16 on one shared APIC bus. 3776 * 3777 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full 3778 * advantage of new APIC bus architecture. 3779 */ 3780 3781 if (physids_empty(apic_id_map)) 3782 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 3783 3784 raw_spin_lock_irqsave(&ioapic_lock, flags); 3785 reg_00.raw = io_apic_read(ioapic, 0); 3786 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3787 3788 if (apic_id >= get_physical_broadcast()) { 3789 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 3790 "%d\n", ioapic, apic_id, reg_00.bits.ID); 3791 apic_id = reg_00.bits.ID; 3792 } 3793 3794 /* 3795 * Every APIC in a system must have a unique ID or we get lots of nice 3796 * 'stuck on smp_invalidate_needed IPI wait' messages. 
3797 */ 3798 if (apic->check_apicid_used(&apic_id_map, apic_id)) { 3799 3800 for (i = 0; i < get_physical_broadcast(); i++) { 3801 if (!apic->check_apicid_used(&apic_id_map, i)) 3802 break; 3803 } 3804 3805 if (i == get_physical_broadcast()) 3806 panic("Max apic_id exceeded!\n"); 3807 3808 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, " 3809 "trying %d\n", ioapic, apic_id, i); 3810 3811 apic_id = i; 3812 } 3813 3814 apic->apicid_to_cpu_present(apic_id, &tmp); 3815 physids_or(apic_id_map, apic_id_map, tmp); 3816 3817 if (reg_00.bits.ID != apic_id) { 3818 reg_00.bits.ID = apic_id; 3819 3820 raw_spin_lock_irqsave(&ioapic_lock, flags); 3821 io_apic_write(ioapic, 0, reg_00.raw); 3822 reg_00.raw = io_apic_read(ioapic, 0); 3823 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3824 3825 /* Sanity check */ 3826 if (reg_00.bits.ID != apic_id) { 3827 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic); 3828 return -1; 3829 } 3830 } 3831 3832 apic_printk(APIC_VERBOSE, KERN_INFO 3833 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id); 3834 3835 return apic_id; 3836 } 3837 #endif 3838 3839 int __init io_apic_get_version(int ioapic) 3840 { 3841 union IO_APIC_reg_01 reg_01; 3842 unsigned long flags; 3843 3844 raw_spin_lock_irqsave(&ioapic_lock, flags); 3845 reg_01.raw = io_apic_read(ioapic, 1); 3846 raw_spin_unlock_irqrestore(&ioapic_lock, flags); 3847 3848 return reg_01.bits.version; 3849 } 3850 3851 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity) 3852 { 3853 int ioapic, pin, idx; 3854 3855 if (skip_ioapic_setup) 3856 return -1; 3857 3858 ioapic = mp_find_ioapic(gsi); 3859 if (ioapic < 0) 3860 return -1; 3861 3862 pin = mp_find_ioapic_pin(ioapic, gsi); 3863 if (pin < 0) 3864 return -1; 3865 3866 idx = find_irq_entry(ioapic, pin, mp_INT); 3867 if (idx < 0) 3868 return -1; 3869 3870 *trigger = irq_trigger(idx); 3871 *polarity = irq_polarity(idx); 3872 return 0; 3873 } 3874 3875 /* 3876 * This function currently is only a helper for the i386 smp boot process where 3877 * we need to reprogram the ioredtbls to cater for the cpus which have come online 3878 * so mask in all cases should simply be apic->target_cpus() 3879 */ 3880 #ifdef CONFIG_SMP 3881 void __init setup_ioapic_dest(void) 3882 { 3883 int pin, ioapic, irq, irq_entry; 3884 struct irq_desc *desc; 3885 const struct cpumask *mask; 3886 3887 if (skip_ioapic_setup == 1) 3888 return; 3889 3890 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) 3891 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) { 3892 irq_entry = find_irq_entry(ioapic, pin, mp_INT); 3893 if (irq_entry == -1) 3894 continue; 3895 irq = pin_2_irq(irq_entry, ioapic, pin); 3896 3897 if ((ioapic > 0) && (irq > 16)) 3898 continue; 3899 3900 desc = irq_to_desc(irq); 3901 3902 /* 3903 * Honour affinities which have been set in early boot 3904 */ 3905 if (desc->status & 3906 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET)) 3907 mask = desc->irq_data.affinity; 3908 else 3909 mask = apic->target_cpus(); 3910 3911 if (intr_remapping_enabled) 3912 ir_ioapic_set_affinity(&desc->irq_data, mask, false); 3913 else 3914 ioapic_set_affinity(&desc->irq_data, mask, false); 3915 } 3916 3917 } 3918 #endif 3919 3920 #define IOAPIC_RESOURCE_NAME_SIZE 11 3921 3922 static struct resource *ioapic_resources; 3923 3924 static struct resource * __init ioapic_setup_resources(int nr_ioapics) 3925 { 3926 unsigned long n; 3927 struct resource *res; 3928 char *mem; 3929 int i; 3930 3931 if (nr_ioapics <= 0) 3932 return NULL; 3933 3934 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); 
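/* One struct resource plus one name buffer per I/O APIC, all carved out of a single bootmem allocation below: */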
3935 n *= nr_ioapics; 3936 3937 mem = alloc_bootmem(n); 3938 res = (void *)mem; 3939 3940 mem += sizeof(struct resource) * nr_ioapics; 3941 3942 for (i = 0; i < nr_ioapics; i++) { 3943 res[i].name = mem; 3944 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; 3945 snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); 3946 mem += IOAPIC_RESOURCE_NAME_SIZE; 3947 } 3948 3949 ioapic_resources = res; 3950 3951 return res; 3952 } 3953 3954 void __init ioapic_init_mappings(void) 3955 { 3956 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0; 3957 struct resource *ioapic_res; 3958 int i; 3959 3960 ioapic_res = ioapic_setup_resources(nr_ioapics); 3961 for (i = 0; i < nr_ioapics; i++) { 3962 if (smp_found_config) { 3963 ioapic_phys = mp_ioapics[i].apicaddr; 3964 #ifdef CONFIG_X86_32 3965 if (!ioapic_phys) { 3966 printk(KERN_ERR 3967 "WARNING: bogus zero IO-APIC " 3968 "address found in MPTABLE, " 3969 "disabling IO/APIC support!\n"); 3970 smp_found_config = 0; 3971 skip_ioapic_setup = 1; 3972 goto fake_ioapic_page; 3973 } 3974 #endif 3975 } else { 3976 #ifdef CONFIG_X86_32 3977 fake_ioapic_page: 3978 #endif 3979 ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); 3980 ioapic_phys = __pa(ioapic_phys); 3981 } 3982 set_fixmap_nocache(idx, ioapic_phys); 3983 apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", 3984 __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), 3985 ioapic_phys); 3986 idx++; 3987 3988 ioapic_res->start = ioapic_phys; 3989 ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; 3990 ioapic_res++; 3991 } 3992 } 3993 3994 void __init ioapic_insert_resources(void) 3995 { 3996 int i; 3997 struct resource *r = ioapic_resources; 3998 3999 if (!r) { 4000 if (nr_ioapics > 0) 4001 printk(KERN_ERR 4002 "IO APIC resources couldn't be allocated.\n"); 4003 return; 4004 } 4005 4006 for (i = 0; i < nr_ioapics; i++) { 4007 insert_resource(&iomem_resource, r); 4008 r++; 4009 } 4010 } 4011 4012 int mp_find_ioapic(u32 gsi) 4013 { 4014 int i = 0; 4015 4016 /* Find the IOAPIC that manages this GSI. 
*/ 4017 for (i = 0; i < nr_ioapics; i++) { 4018 if ((gsi >= mp_gsi_routing[i].gsi_base) 4019 && (gsi <= mp_gsi_routing[i].gsi_end)) 4020 return i; 4021 } 4022 4023 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi); 4024 return -1; 4025 } 4026 4027 int mp_find_ioapic_pin(int ioapic, u32 gsi) 4028 { 4029 if (WARN_ON(ioapic == -1)) 4030 return -1; 4031 if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end)) 4032 return -1; 4033 4034 return gsi - mp_gsi_routing[ioapic].gsi_base; 4035 } 4036 4037 static int bad_ioapic(unsigned long address) 4038 { 4039 if (nr_ioapics >= MAX_IO_APICS) { 4040 printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded " 4041 "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); 4042 return 1; 4043 } 4044 if (!address) { 4045 printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" 4046 " found in table, skipping!\n"); 4047 return 1; 4048 } 4049 return 0; 4050 } 4051 4052 void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) 4053 { 4054 int idx = 0; 4055 int entries; 4056 4057 if (bad_ioapic(address)) 4058 return; 4059 4060 idx = nr_ioapics; 4061 4062 mp_ioapics[idx].type = MP_IOAPIC; 4063 mp_ioapics[idx].flags = MPC_APIC_USABLE; 4064 mp_ioapics[idx].apicaddr = address; 4065 4066 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); 4067 mp_ioapics[idx].apicid = io_apic_unique_id(id); 4068 mp_ioapics[idx].apicver = io_apic_get_version(idx); 4069 4070 /* 4071 * Build basic GSI lookup table to facilitate gsi->io_apic lookups 4072 * and to prevent reprogramming of IOAPIC pins (PCI GSIs). 4073 */ 4074 entries = io_apic_get_redir_entries(idx); 4075 mp_gsi_routing[idx].gsi_base = gsi_base; 4076 mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1; 4077 4078 /* 4079 * The number of IO-APIC IRQ registers (== #pins): 4080 */ 4081 nr_ioapic_registers[idx] = entries; 4082 4083 if (mp_gsi_routing[idx].gsi_end >= gsi_top) 4084 gsi_top = mp_gsi_routing[idx].gsi_end + 1; 4085 4086 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " 4087 "GSI %d-%d\n", idx, mp_ioapics[idx].apicid, 4088 mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr, 4089 mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end); 4090 4091 nr_ioapics++; 4092 } 4093 4094 /* Enable IOAPIC early just for system timer */ 4095 void __init pre_init_apic_IRQ0(void) 4096 { 4097 struct irq_cfg *cfg; 4098 4099 printk(KERN_INFO "Early APIC setup for system timer0\n"); 4100 #ifndef CONFIG_SMP 4101 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); 4102 #endif 4103 /* Make sure the irq descriptor is set up */ 4104 cfg = alloc_irq_and_cfg_at(0, 0); 4105 4106 setup_local_APIC(); 4107 4108 add_pin_to_irq_node(cfg, 0, 0, 0); 4109 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge"); 4110 4111 setup_ioapic_irq(0, 0, 0, cfg, 0, 0); 4112 } 4113
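/*
 * Usage sketch (illustrative only): translating a GSI into an (ioapic, pin)
 * pair with the helpers above, e.g. from ACPI interrupt-override handling:
 *
 *	int ioapic = mp_find_ioapic(gsi);
 *	if (ioapic >= 0) {
 *		int pin = mp_find_ioapic_pin(ioapic, gsi);
 *		pin now indexes the redirection table of mp_ioapics[ioapic]
 *	}
 *
 * The gsi variable is assumed to come from the caller; both helpers return
 * -1 (after a warning) when the GSI is not covered by any registered IO-APIC.
 */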