// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while(0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs all use the same logical irq number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/*
 * A "disabled" interrupt should never fire, to catch problems
 * we set its logical number to this
 */
#define XIVE_BAD_IRQ		0x7fffffff
#define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)
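
/*
 * Note on the event queue format (see xive_read_eq() below): each EQ
 * is a ring of 32-bit big-endian entries. Bit 31 of an entry is a
 * generation bit compared against the software "toggle", which flips
 * each time the ring wraps; the low 31 bits carry the logical (Linux)
 * interrupt number.
 */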
/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}
	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}

/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, returns the most favored pending
 * interrupt if any but doesn't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the pending_count of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the amount of interrupts
 * targeting a queue. When an interrupt is moved away from
 * a queue, we only decrement that queue count once the queue
 * has been observed empty to avoid races.
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio = 0;

	/* Find highest pending priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */
		if (irq) {
			if (just_peek || irq_to_desc(irq))
				break;
			/*
			 * We should never get here; if we do then we must
			 * have failed to synchronize the interrupt properly
			 * when shutting it down.
			 */
			pr_crit("xive: got interrupt %d without descriptor, dropping\n",
				irq);
			WARN_ON(1);
			continue;
		}

		/* Clear pending bits */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);
			if (p) {
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff */
	if (irq == 0)
		prio = 0xff;

	/* Update HW CPPR to match if necessary */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}
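
/*
 * Each source has a 2-bit "PQ" state machine in its ESB: P means an
 * event has been forwarded to a queue and is awaiting an EOI, Q means
 * a further trigger arrived while P was set (coalescing, so a source
 * occupies at most one queue slot). The special MMIO loads used below
 * atomically set PQ to a given value and return the previous state;
 * this driver uses PQ=01 as the masked ("off") state. See
 * asm/xive-regs.h for the offsets.
 */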
/*
 * This is used to perform the magic loads from an ESB
 * described in xive.h
 */
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
	else
		val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
	else
		out_be64(xd->eoi_mmio + offset, data);
}

#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx;

	if (!q->qpage)
		return;
	idx = q->idx;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf("  %s Q T=%d %08x %08x ...\n", name,
		    q->toggle, i0, i1);
}

notrace void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("XIVE state for CPU %d:\n", cpu);
	xmon_printf("  pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
	xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
#ifdef CONFIG_SMP
	{
		u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

		xmon_printf("  IPI state: %x:%c%c\n", xc->hw_ipi,
			    val & XIVE_ESB_VAL_P ? 'P' : 'p',
			    val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
	}
#endif
}
#endif /* CONFIG_XMON */

static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupts.
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless the soft-mask tells us
	 * that a new HW interrupt occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}

/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}

/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type
 */
static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
	xd->stale_p = false;
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		/*
		 * The FW told us to call it. This happens for some
		 * interrupt sources that need additional HW whacking
		 * beyond the ESB manipulation. For example LPC interrupts
		 * on P9 DD1.0 needed a latch to be cleared in the LPC bridge
		 * itself. The Firmware will take care of it.
		 */
		if (WARN_ON_ONCE(!xive_ops->eoi))
			return;
		xive_ops->eoi(hw_irq);
	} else {
		u8 eoi_val;

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software
		 *
		 * For LSIs the HW EOI cycle is used rather than PQ bits,
		 * as they are automatically re-triggered in HW when still
		 * pending.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
		else {
			eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
			DBG_VERBOSE("eoi_val=%x\n", eoi_val);

			/* Re-trigger if needed */
			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
				out_be64(xd->trig_mmio, 0);
		}
	}
}

/* irq_chip eoi callback, called with irq descriptor lock held */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
	    !(xd->flags & XIVE_IRQ_NO_EOI))
		xive_do_source_eoi(irqd_to_hwirq(d), xd);
	else
		xd->stale_p = true;

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue
	 */
	xd->saved_p = false;

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
}

/*
 * Helper used to mask and unmask an interrupt source. This
 * is only called for normal interrupts that do not require
 * masking/unmasking via firmware.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state and use it to restore the
	 * ESB accordingly on unmask.
	 */
	if (mask) {
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
			xd->saved_p = true;
		xd->stale_p = false;
	} else if (xd->saved_p) {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		xd->saved_p = false;
	} else {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
		xd->stale_p = false;
	}
}

/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	int max;

	/*
	 * Calculate max number of interrupts in that queue.
	 *
	 * We leave a gap of 1 just in case...
	 */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}

/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" later when that CPU observes
 * the queue to be empty.
 */
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (WARN_ON(cpu < 0 || !xc)) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/*
	 * We increment the "pending count" which will be used
	 * to decrement the target queue count whenever it's next
	 * processed and found empty. This ensures that we don't
	 * decrement while we still have the interrupt there
	 * in the queue.
	 */
	atomic_inc(&q->pending_count);
}

/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
	first = fuzz % num;

	/* Locate it */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	do {
		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);
		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	} while (cpu != first);

	return -1;
}

/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * CPUs matching the source chip and find a target in there
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		fuzz--;
	}

	/* No chip IDs, fallback to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	xd->saved_p = false;
	xd->stale_p = false;
	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);
#endif

	/* Pick a target */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)
			return -ENXIO;
		pr_warn("irq %d started with broken affinity\n", d->irq);
	}

	/* Sanity check */
	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	xd->target = target;

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and set the target queue
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Unmask the ESB */
	xive_do_source_set_mask(xd, false);

	return 0;
}

/* called with irq descriptor lock held */
static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * Mask the interrupt in HW in the IVT/EAS and set the number
	 * to be the "bad" IRQ number
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, XIVE_BAD_IRQ);

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}

static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9, for
	 * these, we call FW to set the mask. The problems might
	 * be fixed by P9 DD2.0, if that is the case, firmware
	 * will no longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					xive_irq_priority, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9, for
	 * these, we call OPAL to set the mask. The problems might
	 * be fixed by P9 DD2.0, if that is the case, firmware
	 * will no longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					0xff, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, true);
}

static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/* Don't do anything if the interrupt isn't started */
	if (!irqd_is_started(d))
		return IRQ_SET_MASK_OK;

	/*
	 * If existing target is already in the new mask, and is
	 * online then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_devel("  target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than setting
	 * the corresponding descriptor bits mind you but those will in turn
	 * affect the resend function when re-enabling an edge interrupt.
	 *
	 * Set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}

static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * Note: We pass "0" to the hw_irq argument in order to
	 * avoid calling into the backend EOI code which we don't
	 * want to do in the case of a re-trigger. Backends typically
	 * only do EOI for LSIs anyway.
	 */
	xive_do_source_eoi(0, xd);

	return 1;
}

/*
 * Caller holds the irq descriptor lock, so this won't be called
 * concurrently with xive_get_irqchip_state on the same interrupt.
 */
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u8 pq;

	/*
	 * We only support this on interrupts that do not require
	 * firmware calls for masking and unmasking
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
		return -EIO;

	/*
	 * This is called by KVM with state non-NULL for enabling
	 * pass-through or NULL for disabling it
	 */
	if (state) {
		irqd_set_forwarded_to_vcpu(d);

		/* Set it to PQ=10 state to prevent further sends */
		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		if (!xd->stale_p) {
			xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
			xd->stale_p = !xd->saved_p;
		}

		/* No target ? nothing to do */
		if (xd->target == XIVE_INVALID_TARGET) {
			/*
			 * An untargeted interrupt should have been
			 * also masked at the source
			 */
			WARN_ON(xd->saved_p);

			return 0;
		}

		/*
		 * If P was set, adjust state to PQ=11 to indicate
		 * that a resend is needed for the interrupt to reach
		 * the guest. Also remember the value of P.
		 *
		 * This also tells us that it's in flight to a host queue
		 * or has already been fetched but hasn't been EOIed yet
		 * by the host. Thus it's potentially using up a host
		 * queue slot. This is important to know because as long
		 * as this is the case, we must not hard-unmask it when
		 * "returning" that interrupt to the host.
		 *
		 * This saved_p is cleared by the host EOI, when we know
		 * for sure the queue slot is no longer in use.
		 */
		if (xd->saved_p) {
			xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

			/*
			 * Sync the XIVE source HW to ensure the interrupt
			 * has gone through the EAS before we change its
			 * target to the guest. That should guarantee us
			 * that we *will* eventually get an EOI for it on
			 * the host. Otherwise there would be a small window
			 * for P to be seen here but the interrupt going
			 * to the guest queue.
			 */
			if (xive_ops->sync_source)
				xive_ops->sync_source(hw_irq);
		}
	} else {
		irqd_clr_forwarded_to_vcpu(d);

		/* No host target ? hard mask and return */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);
			return 0;
		}

		/*
		 * Sync the XIVE source HW to ensure the interrupt
		 * has gone through the EAS before we change its
		 * target to the host.
		 */
		if (xive_ops->sync_source)
			xive_ops->sync_source(hw_irq);

		/*
		 * By convention we are called with the interrupt in
		 * a PQ=10 or PQ=11 state, i.e., it won't fire and will
		 * have latched in Q whether there's a pending HW
		 * interrupt or not.
		 *
		 * First reconfigure the target.
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);
		if (rc)
			return rc;

		/*
		 * Then if saved_p is not set, effectively re-enable the
		 * interrupt with an EOI. If it is set, we know there is
		 * still a message in a host queue somewhere that will be
		 * EOIed eventually.
		 *
		 * Note: We don't check irqd_irq_disabled(). Effectively,
		 * we *will* let the irq get through even if masked if the
		 * HW is still firing it in order to deal with the whole
		 * saved_p business properly. If the interrupt triggers
		 * while masked, the generic code will re-mask it anyway.
		 */
		if (!xd->saved_p)
			xive_do_source_eoi(hw_irq, xd);
	}
	return 0;
}
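
/*
 * An interrupt is considered "active" while P is set in its ESB (an
 * event was sent and not yet EOIed) or while saved_p says it still
 * occupies a queue slot; stale_p indicates the recorded state is not
 * meaningful and must be ignored.
 */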
/* Called with irq descriptor lock held. */
static int xive_get_irqchip_state(struct irq_data *data,
				  enum irqchip_irq_state which, bool *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);

	switch (which) {
	case IRQCHIP_STATE_ACTIVE:
		*state = !xd->stale_p &&
			 (xd->saved_p ||
			  !!(xive_esb_read(xd, XIVE_ESB_GET) & XIVE_ESB_VAL_P));
		return 0;
	default:
		return -EINVAL;
	}
}

static struct irq_chip xive_irq_chip = {
	.name = "XIVE-IRQ",
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
	.irq_get_irqchip_state = xive_get_irqchip_state,
};

bool is_xive_irq(struct irq_chip *chip)
{
	return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);

void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	if (xd->eoi_mmio) {
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	return 0;
}

static void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;
	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
	kfree(xd);
}

#ifdef CONFIG_SMP
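
/*
 * IPIs use one HW interrupt per CPU (xc->hw_ipi), but all of them are
 * mapped to the same single Linux interrupt (HW number 0 in our
 * domain) and the individual messages are demultiplexed via
 * smp_ipi_demux().
 */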

static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc;
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	xd = &xc->ipi_data;
	if (WARN_ON(!xd->trig_mmio))
		return;
	out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
	return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Handle possible race with unplug and drop stale IPIs */
	if (!xc)
		return;

	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

	xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
	xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
	/*
	 * Nothing to do, we never mask/unmask IPIs, but the callback
	 * has to exist for the struct irq_chip.
	 */
}

static struct irq_chip xive_ipi_chip = {
	.name = "XIVE-IPI",
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
};

static void __init xive_request_ipi(void)
{
	unsigned int virq;

	/*
	 * Initialization failed, move on, we might manage to
	 * reach the point where we display our errors before
	 * the system falls apart
	 */
	if (!xive_irq_domain)
		return;

	virq = irq_create_mapping(xive_irq_domain, 0);
	xive_ipi_irq = virq;

	WARN_ON(request_irq(virq, xive_muxed_ipi_action,
			    IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}

static int xive_setup_cpu_ipi(unsigned int cpu)
{
	struct xive_cpu *xc;
	int rc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != 0)
		return 0;

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc) {
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
		return -EIO;
	}
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
	if (rc) {
		pr_err("Failed to map IPI CPU %d\n", cpu);
		return -EIO;
	}
	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	/* Unmask it */
	xive_do_source_set_mask(&xc->ipi_data, false);

	return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == 0)
		return;

	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: We don't call xive_cleanup_irq_data() to free
	 * the mappings as this is called from an IPI on kexec
	 * which is not a safe environment to call iounmap()
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPIs in the backend */
	xive_ops->put_ipi(cpu, xc);
}

void __init xive_smp_probe(void)
{
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
	xive_request_ipi();

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */

static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	int rc;

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. Will fix that up below if needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

#ifdef CONFIG_SMP
	/* IPIs are special and come up with HW number 0 */
	if (hw == 0) {
		/*
		 * IPIs are marked per-cpu. We use separate HW interrupts under
		 * the hood but associated with the same "linux" interrupt
		 */
		irq_set_chip_and_handler(virq, &xive_ipi_chip,
					 handle_percpu_irq);
		return 0;
	}
#endif

	rc = xive_irq_alloc_data(virq, hw);
	if (rc)
		return rc;

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

	return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *data = irq_get_irq_data(virq);
	unsigned int hw_irq;

	/* XXX Assign BAD number */
	if (WARN_ON(!data))
		return;
	hw_irq = (unsigned int)irqd_to_hwirq(data);
	if (hw_irq)
		xive_irq_free_data(virq);
}

static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second cell,
	 * we assume the LSB indicates a level interrupt.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else
		*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}

static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
{
	return xive_ops->match(node);
}

static const struct irq_domain_ops xive_irq_domain_ops = {
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,
};

static void __init xive_init_host(void)
{
	xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
					       &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
		return;
	irq_set_default_host(xive_irq_domain);
}

static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	int rc = 0;

	/* We set up one queue for now, using a 64k page */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

	return rc;
}

static int xive_prepare_cpu(unsigned int cpu)
{
	struct xive_cpu *xc;

	xc = per_cpu(xive_cpu, cpu);
	if (!xc) {
		struct device_node *np;

		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));
		if (!xc)
			return -ENOMEM;
		np = of_get_cpu_node(cpu, NULL);
		if (np)
			xc->chip_id = of_get_ibm_chip_id(np);
		of_node_put(np);

		per_cpu(xive_cpu, cpu) = xc;
	}

	/* Setup EQs if not already */
	return xive_setup_cpu_queues(cpu, xc);
}

static void xive_setup_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* The backend might have additional things to do */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Set CPPR to 0xff to enable flow of interrupts */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
	pr_devel("SMP setup CPU %d\n", smp_processor_id());

	/* This will have already been done on the boot CPU */
	if (smp_processor_id() != boot_cpuid)
		xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
	int rc;

	/* Allocate per-CPU data and queues */
	rc = xive_prepare_cpu(cpu);
	if (rc)
		return rc;

	/* Allocate and setup IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
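/*
 * Flush anything still sitting in this CPU's queue when it goes
 * offline: affinities have already been migrated away, so any entry
 * still found here is re-triggered towards its new target.
 */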
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

		/*
		 * Ignore anything that isn't a XIVE irq and ignore
		 * IPIs, which can just be dropped.
		 */
		if (d->domain != xive_irq_domain || hw_irq == 0)
			continue;

		/*
		 * The IRQ should have already been re-routed, it's just a
		 * stale entry in the old queue, so re-trigger it in order
		 * to make it reach its new destination.
		 */
#ifdef DEBUG_FLUSH
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);
#endif
		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/*
		 * Clear saved_p to indicate that it's no longer pending
		 */
		xd->saved_p = false;

		/*
		 * For LSIs, we EOI, this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(irqd_to_hwirq(d), xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}

void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from the CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

void xive_teardown_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
	xive_ops->shutdown();
}
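
/*
 * Called once at boot by the platform backend (e.g. the PowerNV
 * "native" or pseries "spapr" code) with its ops, the mapped TIMA
 * and the single priority the driver will use.
 */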
bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
			   u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops = ops;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_devel("Initializing host..\n");
	xive_init_host();

	pr_devel("Initializing boot CPU..\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */
	xive_setup_cpu();

	pr_info("Interrupt handling initialized with %s backend\n",
		xive_ops->name);
	pr_info("Using priority %d for all interrupts\n", max_prio);

	return true;
}
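
/*
 * Helper for the backends: allocate a zeroed queue page of
 * 1 << queue_shift bytes on the target CPU's node. xive_alloc_order()
 * (from xive-internal.h) converts the queue shift to a page
 * allocation order.
 */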
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
	unsigned int alloc_order;
	struct page *pages;
	__be32 *qpage;

	alloc_order = xive_alloc_order(queue_shift);
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	qpage = (__be32 *)page_address(pages);
	memset(qpage, 0, 1 << queue_shift);

	return qpage;
}

static int __init xive_off(char *arg)
{
	xive_cmdline_disabled = true;
	return 0;
}
__setup("xive=off", xive_off);