/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);
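
/*
 * Locking overview (a summary of the scheme used below): the two
 * rwlocks guard the per-core CIU enable registers.  Enables take the
 * lock for reading (each core only touches its own EN register, so
 * concurrent enables cannot conflict), while disables and affinity
 * changes must update every core's EN register and therefore take it
 * for writing to exclude concurrent enables.  The spinlock serializes
 * access to the single shared MSI enable register.
 */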

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}
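
/*
 * Note: Linux logical CPU numbers are not guaranteed to match the
 * hardware core IDs that the CIU registers are indexed by, so all
 * per-cpu register accesses below go through octeon_coreid_for_cpu()
 * above.  The non-SMP case can only ever be running on the current
 * core.
 */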

static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit == 0 || bit == 1)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;

	/* There is a race here.  We should fix it.  */

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}
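
/*
 * The race noted above: nothing prevents another CPU from disabling
 * the interrupt between the IRQ_DISABLED test and the set_c0_status()
 * write, so a source can briefly be unmasked again after a disable.
 * For these per-core lines the window is believed to be harmless,
 * which is presumably why it has been left unfixed.
 */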

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};
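
/*
 * These irq_chips are registered with handle_percpu_irq (see
 * arch_init_irq() below), which in this kernel calls ->ack before
 * running the handler and ->eoi afterwards, without masking the
 * line in between.  That flow is what the ack/eoi pairs in this
 * file rely on.
 */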

static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * A read lock is used here so that multiple cores can update
	 * their own CIU enable bits concurrently; an enable only
	 * touches the local core's register, so enables never
	 * interfere with each other.  A disable takes the write lock,
	 * which excludes any concurrent enable that could re-set a
	 * bit it is clearing.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}
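
/*
 * The W1S ("write one to set") and W1C ("write one to clear") alias
 * registers are what make the v2 paths lockless: a write touches
 * exactly the bits set in the written mask and leaves the rest
 * untouched, so the locked read-modify-write sequence used by the
 * non-v2 functions above is unnecessary.  For example, enabling CIU
 * bit 12 on the local core becomes a single store:
 *
 *	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), 1ull << 12);
 */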

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);

	return 0;
}
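
/*
 * The trailing cvmx_read_csr() in the paths above acts as a write
 * flush: CSR writes are posted, so reading the register back forces
 * the final enable-mask update to complete before the lock is
 * released.  This is the "read after the last update" the in-code
 * comments refer to.
 */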

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_disable_v2,
	.eoi = octeon_irq_ciu0_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};
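
/*
 * Note the v2 ack/eoi pairing: instead of masking IP2 for the whole
 * core the way octeon_irq_chip_ciu0 does, the lockless chip masks
 * just the interrupting source (W1C on ack) and unmasks it again on
 * eoi, so other CIU sources keep flowing while one is handled.
 */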

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.  We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * A read lock is used here so that multiple cores can update
	 * their own CIU enable bits concurrently; an enable only
	 * touches the local core's register, so enables never
	 * interfere with each other.  A disable takes the write lock,
	 * which excludes any concurrent enable that could re-set a
	 * bit it is clearing.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}
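
/*
 * Indexing note: the CIU gives each core two interrupt outputs, IP2
 * and IP3, so its enable registers are indexed by core and line.
 * This code routes the EN0 (CIU_INT_SUM0) sources to IP2, hence the
 * even (coreid * 2) indices in the CIU0 helpers, and the EN1
 * (watchdog and friends) sources to IP3, hence (coreid * 2 + 1)
 * here.
 */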

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.ack = octeon_irq_ciu1_disable_v2,
	.eoi = octeon_irq_ciu1_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI

static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe.  Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}
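
/*
 * Both RCV registers behave as write-one-to-clear latches for the 64
 * MSI bits, so the ack is a single unlocked store of just the bit to
 * drop.  That is why no lock is needed here, while enable/disable
 * below must take octeon_irq_msi_lock around their read-modify-write
 * of the shared ENB0 register.
 */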

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually.  Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved.  MSI
		 * interrupts are always enabled and the ACK is
		 * assumed to be enough.
		 */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;

		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif

void __init arch_init_irq(void)
{
	int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip1;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
	} else {
		chip0 = &octeon_irq_chip_ciu0;
		chip1 = &octeon_irq_chip_ciu1;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
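
	/*
	 * 0x300 << 2 is STATUSF_IP2 | STATUSF_IP3 (0x400 | 0x800):
	 * unmask the two MIPS interrupt lines the CIU is wired to so
	 * that dispatching can begin.  The individual CIU sources are
	 * still gated by their per-core enable bits.
	 */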
	set_c0_status(0x300 << 2);
}

asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}
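
/*
 * Dispatch notes: the loop keeps draining sources until no enabled
 * cause bit remains, and fls64() returns the 1-based position of the
 * highest set bit, so the highest-numbered pending CIU source is
 * serviced first.  The "- 9" in the CP0 path converts a 1-based
 * fls() result on the IM field (bits 8-15) into an offset from
 * MIPS_CPU_IRQ_BASE.
 */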

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < OCTEON_IRQ_WDOG0) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			 (1ull << bit)) >> bit;
	} else {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			 (1ull << bit)) >> bit;
	}
	return isset;
}

void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	/*
	 * Mailbox masking is compiled out: octeon_irq_mailbox_mask()
	 * is not defined in this file.
	 */
#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif

	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}

#endif /* CONFIG_HOTPLUG_CPU */