Linux-libre 5.3.12-gnu (librecmc/linux-libre.git): arch/powerpc/sysdev/xive/native.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"


static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;

int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
        __be64 flags, eoi_page, trig_page;
        __be32 esb_shift, src_chip;
        u64 opal_flags;
        s64 rc;

        memset(data, 0, sizeof(*data));

        rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
                                    &esb_shift, &src_chip);
        if (rc) {
                pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
                       hw_irq, rc);
                return -EINVAL;
        }

        opal_flags = be64_to_cpu(flags);
        if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
                data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
        if (opal_flags & OPAL_XIVE_IRQ_LSI)
                data->flags |= XIVE_IRQ_FLAG_LSI;
        if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
                data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
        if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
                data->flags |= XIVE_IRQ_FLAG_MASK_FW;
        if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
                data->flags |= XIVE_IRQ_FLAG_EOI_FW;
        data->eoi_page = be64_to_cpu(eoi_page);
        data->trig_page = be64_to_cpu(trig_page);
        data->esb_shift = be32_to_cpu(esb_shift);
        data->src_chip = be32_to_cpu(src_chip);

        data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
        if (!data->eoi_mmio) {
                pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
                return -ENOMEM;
        }

        data->hw_irq = hw_irq;

        if (!data->trig_page)
                return 0;
        if (data->trig_page == data->eoi_page) {
                data->trig_mmio = data->eoi_mmio;
                return 0;
        }

        data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
        if (!data->trig_mmio) {
                pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
                return -ENOMEM;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);

int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
        s64 rc;

        for (;;) {
                rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);
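
/*
 * Illustrative pairing of the two exported helpers above (a sketch only;
 * hw_irq, cpu, prio and virq are assumed to be caller-provided values):
 *
 *	struct xive_irq_data data;
 *	int rc;
 *
 *	rc = xive_native_populate_irq_data(hw_irq, &data);
 *	if (!rc)
 *		rc = xive_native_configure_irq(hw_irq,
 *					       get_hard_smp_processor_id(cpu),
 *					       prio, virq);
 */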


/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
                                __be32 *qpage, u32 order, bool can_escalate)
{
        s64 rc = 0;
        __be64 qeoi_page_be;
        __be32 esc_irq_be;
        u64 flags, qpage_phys;

        /* If there's an actual queue page, clean it */
        if (order) {
                if (WARN_ON(!qpage))
                        return -EINVAL;
                qpage_phys = __pa(qpage);
        } else
                qpage_phys = 0;

        /* Initialize the rest of the fields */
        q->msk = order ? ((1u << (order - 2)) - 1) : 0;
        q->idx = 0;
        q->toggle = 0;

        rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
                                      &qeoi_page_be,
                                      &esc_irq_be,
                                      NULL);
        if (rc) {
                pr_err("Error %lld getting queue info prio %d\n", rc, prio);
                rc = -EIO;
                goto fail;
        }
        q->eoi_phys = be64_to_cpu(qeoi_page_be);

        /* Default flags */
        flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

        /* Escalation needed? */
        if (can_escalate) {
                q->esc_irq = be32_to_cpu(esc_irq_be);
                flags |= OPAL_XIVE_EQ_ESCALATE;
        }

        /* Configure and enable the queue in HW */
        for (;;) {
                rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc) {
                pr_err("Error %lld setting queue for prio %d\n", rc, prio);
                rc = -EIO;
        } else {
                /*
                 * KVM code requires all of the above to be visible before
                 * q->qpage is set due to how it manages IPI EOIs
                 */
                wmb();
                q->qpage = qpage;
        }
fail:
        return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);
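
/*
 * Worked example of the sizing above: EQ entries are 4 bytes (hence the
 * "order - 2"), so with order = 16 the 64kB queue page holds
 * 1 << (16 - 2) = 16384 entries and q->msk is 0x3fff, the index wrap mask.
 */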

static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
        s64 rc;

        /* Disable the queue in HW */
        for (;;) {
                rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc)
                pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
        __xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
        struct xive_q *q = &xc->queue[prio];
        __be32 *qpage;

        qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
        if (IS_ERR(qpage))
                return PTR_ERR(qpage);

        return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
                                           q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
        struct xive_q *q = &xc->queue[prio];
        unsigned int alloc_order;

        /*
         * We use the variant with no iounmap as this is called on exec
         * from an IPI and iounmap isn't safe
         */
        __xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
        alloc_order = xive_alloc_order(xive_queue_shift);
        free_pages((unsigned long)q->qpage, alloc_order);
        q->qpage = NULL;
}
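
/*
 * The allocation order above comes from xive_alloc_order(), which (assuming
 * the usual helper in xive-internal.h) is simply the queue shift minus
 * PAGE_SHIFT, clamped at 0: a 64kB queue is order 0 on a 64kB-page kernel
 * and order 4 on a 4kB-page kernel.
 */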

static bool xive_native_match(struct device_node *node)
{
        return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

static s64 opal_xive_allocate_irq(u32 chip_id)
{
        s64 irq = opal_xive_allocate_irq_raw(chip_id);

        /*
         * Old versions of skiboot can incorrectly return 0xffffffff to
         * indicate no space, fix it up here.
         */
        return irq == 0xffffffff ? OPAL_RESOURCE : irq;
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        s64 irq;

        /* Allocate an IPI and populate info about it */
        for (;;) {
                irq = opal_xive_allocate_irq(xc->chip_id);
                if (irq == OPAL_BUSY) {
                        msleep(OPAL_BUSY_DELAY_MS);
                        continue;
                }
                if (irq < 0) {
                        pr_err("Failed to allocate IPI on CPU %d\n", cpu);
                        return -ENXIO;
                }
                xc->hw_ipi = irq;
                break;
        }
        return 0;
}
#endif /* CONFIG_SMP */

u32 xive_native_alloc_irq(void)
{
        s64 rc;

        for (;;) {
                rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc < 0)
                return 0;
        return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq);

void xive_native_free_irq(u32 irq)
{
        for (;;) {
                s64 rc = opal_xive_free_irq(irq);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);
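
/*
 * Illustrative lifecycle for the allocator pair above (a sketch; error
 * handling and the actual interrupt setup are omitted):
 *
 *	u32 hw_irq = xive_native_alloc_irq();
 *
 *	if (hw_irq) {
 *		... populate, configure and use the interrupt ...
 *		xive_native_free_irq(hw_irq);
 *	}
 *
 * A return value of 0 from xive_native_alloc_irq() means the allocation
 * failed.
 */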

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
        s64 rc;

        /* Free the IPI */
        if (!xc->hw_ipi)
                return;
        for (;;) {
                rc = opal_xive_free_irq(xc->hw_ipi);
                if (rc == OPAL_BUSY) {
                        msleep(OPAL_BUSY_DELAY_MS);
                        continue;
                }
                xc->hw_ipi = 0;
                break;
        }
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
        /* Switch the XIVE to emulation mode */
        opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
        u8 he, cppr;
        u16 ack;

        /* Perform the acknowledge hypervisor to register cycle */
        ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

        /* Synchronize subsequent queue accesses */
        mb();

        /*
         * Grab the CPPR and the "HE" field which indicates the source
         * of the hypervisor interrupt (if any)
         */
        cppr = ack & 0xff;
        he = (ack >> 8) >> 6;
        switch(he) {
        case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
                break;
        case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
                if (cppr == 0xff)
                        return;
                /* Mark the priority pending */
                xc->pending_prio |= 1 << cppr;

                /*
                 * A new interrupt should never have a CPPR less favored
                 * than our current one.
                 */
                if (cppr >= xc->cppr)
                        pr_err("CPU %d odd ack CPPR, got %d at %d\n",
                               smp_processor_id(), cppr, xc->cppr);

                /* Update our idea of what the CPPR is */
                xc->cppr = cppr;
                break;
        case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
        case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
                pr_err("CPU %d got unexpected interrupt type HE=%d\n",
                       smp_processor_id(), he);
                return;
        }
}
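
/*
 * Layout of the 16-bit ack word decoded above: the low byte is the CPPR and
 * the top two bits of the high byte (the NSR) encode the "HE" source. For
 * example, an ack of 0x8005 yields cppr = 0x05 and he = 0x80 >> 6 = 2, i.e.
 * TM_QW3_NSR_HE_PHYS, so priority 5 gets marked pending.
 */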

static void xive_native_eoi(u32 hw_irq)
{
        /*
         * Not normally used except if specific interrupts need
         * a workaround on EOI.
         */
        opal_int_eoi(hw_irq);
}

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        s64 rc;
        u32 vp;
        __be64 vp_cam_be;
        u64 vp_cam;

        if (xive_pool_vps == XIVE_INVALID_VP)
                return;

        /* Check if pool VP already active, if it is, pull it */
        if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
                in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

        /* Enable the pool VP */
        vp = xive_pool_vps + cpu;
        for (;;) {
                rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        if (rc) {
                pr_err("Failed to enable pool VP on CPU %d\n", cpu);
                return;
        }

        /* Grab its CAM value */
        rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
        if (rc) {
                pr_err("Failed to get pool VP info CPU %d\n", cpu);
                return;
        }
        vp_cam = be64_to_cpu(vp_cam_be);

        /* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
        out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
        out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2, TM_QW2W2_VP | vp_cam);
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
        s64 rc;
        u32 vp;

        if (xive_pool_vps == XIVE_INVALID_VP)
                return;

        /* Pull the pool VP from the CPU */
        in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

        /* Disable it */
        vp = xive_pool_vps + cpu;
        for (;;) {
                rc = opal_xive_set_vp_info(vp, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
}

void xive_native_sync_source(u32 hw_irq)
{
        opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

void xive_native_sync_queue(u32 hw_irq)
{
        opal_xive_sync(XIVE_SYNC_QUEUE, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);

static const struct xive_ops xive_native_ops = {
        .populate_irq_data      = xive_native_populate_irq_data,
        .configure_irq          = xive_native_configure_irq,
        .setup_queue            = xive_native_setup_queue,
        .cleanup_queue          = xive_native_cleanup_queue,
        .match                  = xive_native_match,
        .shutdown               = xive_native_shutdown,
        .update_pending         = xive_native_update_pending,
        .eoi                    = xive_native_eoi,
        .setup_cpu              = xive_native_setup_cpu,
        .teardown_cpu           = xive_native_teardown_cpu,
        .sync_source            = xive_native_sync_source,
#ifdef CONFIG_SMP
        .get_ipi                = xive_native_get_ipi,
        .put_ipi                = xive_native_put_ipi,
#endif /* CONFIG_SMP */
        .name                   = "native",
};

static bool xive_parse_provisioning(struct device_node *np)
{
        int rc;

        if (of_property_read_u32(np, "ibm,xive-provision-page-size",
                                 &xive_provision_size) < 0)
                return true;
        rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
        if (rc < 0) {
                pr_err("Error %d getting provision chips array\n", rc);
                return false;
        }
        xive_provision_chip_count = rc;
        if (rc == 0)
                return true;

        xive_provision_chips = kcalloc(4, xive_provision_chip_count,
                                       GFP_KERNEL);
        if (WARN_ON(!xive_provision_chips))
                return false;

        rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
                                        xive_provision_chips,
                                        xive_provision_chip_count);
        if (rc < 0) {
                pr_err("Error %d reading provision chips array\n", rc);
                return false;
        }

        xive_provision_cache = kmem_cache_create("xive-provision",
                                                 xive_provision_size,
                                                 xive_provision_size,
                                                 0, NULL);
        if (!xive_provision_cache) {
                pr_err("Failed to allocate provision cache\n");
                return false;
        }
        return true;
}
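
/*
 * Hypothetical device-tree fragment for the properties parsed above (the
 * values are purely illustrative):
 *
 *	ibm,xive-provision-page-size = <0x10000>;
 *	ibm,xive-provision-chips = <0x0 0x8>;
 *
 * With such a node, xive_native_provision_pages() below donates one page of
 * that size per listed chip whenever OPAL requests provisioning.
 */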

static void xive_native_setup_pools(void)
{
        /* Allocate a pool big enough */
        pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);

        xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
        if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
                pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

        pr_debug("XIVE: Pool VPs allocated at 0x%x for %u max CPUs\n",
                 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
        return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

unsigned long xive_tima_os;
EXPORT_SYMBOL_GPL(xive_tima_os);

bool __init xive_native_init(void)
{
        struct device_node *np;
        struct resource r;
        void __iomem *tima;
        struct property *prop;
        u8 max_prio = 7;
        const __be32 *p;
        u32 val, cpu;
        s64 rc;

        if (xive_cmdline_disabled)
                return false;

        pr_devel("xive_native_init()\n");
        np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
        if (!np) {
                pr_devel("not found !\n");
                return false;
        }
        pr_devel("Found %pOF\n", np);

        /* Resource 1 is HV window */
        if (of_address_to_resource(np, 1, &r)) {
                pr_err("Failed to get thread mgmnt area resource\n");
                return false;
        }
        tima = ioremap(r.start, resource_size(&r));
        if (!tima) {
                pr_err("Failed to map thread mgmnt area\n");
                return false;
        }

        /* Read number of priorities */
        if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
                max_prio = val - 1;

        /* Iterate the EQ sizes and pick one */
        of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
                xive_queue_shift = val;
                if (val == PAGE_SHIFT)
                        break;
        }

        /* Do we support single escalation? */
        if (of_get_property(np, "single-escalation-support", NULL) != NULL)
                xive_has_single_esc = true;

        /* Configure Thread Management areas for KVM */
        for_each_possible_cpu(cpu)
                kvmppc_set_xive_tima(cpu, r.start, tima);

        /* Resource 2 is OS window */
        if (of_address_to_resource(np, 2, &r)) {
                pr_err("Failed to get thread mgmnt area resource\n");
                return false;
        }

        xive_tima_os = r.start;

        /* Grab size of provisioning pages */
        xive_parse_provisioning(np);

        /* Switch the XIVE to exploitation mode */
        rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
        if (rc) {
                pr_err("Switch to exploitation mode failed with error %lld\n", rc);
                return false;
        }

        /* Setup some dummy HV pool VPs */
        xive_native_setup_pools();

        /* Initialize XIVE core with our backend */
        if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
                            max_prio)) {
                opal_xive_reset(OPAL_XIVE_MODE_EMU);
                return false;
        }
        pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
        return true;
}
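
/*
 * Example of the final message above: when the 64kB entry is picked from
 * "ibm,xive-eq-sizes", xive_queue_shift is 16 and the pr_info() prints
 * "Using 64kB queues" (1 << (16 - 10) = 64), prefixed with "xive: " by
 * pr_fmt().
 */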

static bool xive_native_provision_pages(void)
{
        u32 i;
        void *p;

        for (i = 0; i < xive_provision_chip_count; i++) {
                u32 chip = xive_provision_chips[i];

                /*
                 * XXX TODO: Try to make the allocation local to the node where
                 * the chip resides.
                 */
                p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
                if (!p) {
                        pr_err("Failed to allocate provisioning page\n");
                        return false;
                }
                opal_xive_donate_page(chip, __pa(p));
        }
        return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
        s64 rc;
        u32 order;

        order = fls(max_vcpus) - 1;
        if (max_vcpus > (1 << order))
                order++;

        pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
                 max_vcpus, order);

        for (;;) {
                rc = opal_xive_alloc_vp_block(order);
                switch (rc) {
                case OPAL_BUSY:
                        msleep(OPAL_BUSY_DELAY_MS);
                        break;
                case OPAL_XIVE_PROVISIONING:
                        if (!xive_native_provision_pages())
                                return XIVE_INVALID_VP;
                        break;
                default:
                        if (rc < 0) {
                                pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
                                       order, rc);
                                return XIVE_INVALID_VP;
                        }
                        return rc;
                }
        }
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);
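
/*
 * Worked example of the order computation above: for max_vcpus = 24,
 * fls(24) - 1 = 4, and since 24 > (1 << 4) the order is bumped to 5, so a
 * block of 32 VPs is requested from OPAL.
 */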

void xive_native_free_vp_block(u32 vp_base)
{
        s64 rc;

        if (vp_base == XIVE_INVALID_VP)
                return;

        rc = opal_xive_free_vp_block(vp_base);
        if (rc < 0)
                pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id, bool single_escalation)
{
        s64 rc;
        u64 flags = OPAL_XIVE_VP_ENABLED;

        if (single_escalation)
                flags |= OPAL_XIVE_VP_SINGLE_ESCALATION;
        for (;;) {
                rc = opal_xive_set_vp_info(vp_id, flags, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
        s64 rc;

        for (;;) {
                rc = opal_xive_set_vp_info(vp_id, 0, 0);
                if (rc != OPAL_BUSY)
                        break;
                msleep(OPAL_BUSY_DELAY_MS);
        }
        return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);
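
/*
 * Sketch of a VP block lifecycle using the helpers above, roughly as the KVM
 * XIVE device does (nr_vcpus and the per-vcpu index i are assumed
 * caller-side, error handling omitted):
 *
 *	u32 vp_base = xive_native_alloc_vp_block(nr_vcpus);
 *
 *	xive_native_enable_vp(vp_base + i, xive_native_has_single_escalation());
 *	...
 *	xive_native_disable_vp(vp_base + i);
 *	xive_native_free_vp_block(vp_base);
 */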

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
        __be64 vp_cam_be;
        __be32 vp_chip_id_be;
        s64 rc;

        rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
        if (rc)
                return -EIO;
        *out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
        *out_chip_id = be32_to_cpu(vp_chip_id_be);

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);

bool xive_native_has_single_escalation(void)
{
        return xive_has_single_esc;
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);

int xive_native_get_queue_info(u32 vp_id, u32 prio,
                               u64 *out_qpage,
                               u64 *out_qsize,
                               u64 *out_qeoi_page,
                               u32 *out_escalate_irq,
                               u64 *out_qflags)
{
        __be64 qpage;
        __be64 qsize;
        __be64 qeoi_page;
        __be32 escalate_irq;
        __be64 qflags;
        s64 rc;

        rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
                                      &qeoi_page, &escalate_irq, &qflags);
        if (rc) {
                pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
                       vp_id, prio, rc);
                return -EIO;
        }

        if (out_qpage)
                *out_qpage = be64_to_cpu(qpage);
        if (out_qsize)
                *out_qsize = be32_to_cpu(qsize);
        if (out_qeoi_page)
                *out_qeoi_page = be64_to_cpu(qeoi_page);
        if (out_escalate_irq)
                *out_escalate_irq = be32_to_cpu(escalate_irq);
        if (out_qflags)
                *out_qflags = be64_to_cpu(qflags);

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_info);

int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
{
        __be32 opal_qtoggle;
        __be32 opal_qindex;
        s64 rc;

        rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
                                       &opal_qindex);
        if (rc) {
                pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
                       vp_id, prio, rc);
                return -EIO;
        }

        if (qtoggle)
                *qtoggle = be32_to_cpu(opal_qtoggle);
        if (qindex)
                *qindex = be32_to_cpu(opal_qindex);

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_queue_state);

int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
{
        s64 rc;

        rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
        if (rc) {
                pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
                       vp_id, prio, rc);
                return -EIO;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_set_queue_state);

bool xive_native_has_queue_state_support(void)
{
        return opal_check_token(OPAL_XIVE_GET_QUEUE_STATE) &&
                opal_check_token(OPAL_XIVE_SET_QUEUE_STATE);
}
EXPORT_SYMBOL_GPL(xive_native_has_queue_state_support);
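
/*
 * Callers (the KVM XIVE device used for guest migration, for instance) are
 * expected to gate the queue state accessors on the check above, along the
 * lines of:
 *
 *	if (xive_native_has_queue_state_support())
 *		rc = xive_native_get_queue_state(vp_id, prio, &qtoggle, &qindex);
 */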

int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
{
        __be64 state;
        s64 rc;

        rc = opal_xive_get_vp_state(vp_id, &state);
        if (rc) {
                pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
                       vp_id, rc);
                return -EIO;
        }

        if (out_state)
                *out_state = be64_to_cpu(state);
        return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);