Linux-libre 5.4.47-gnu
virt/kvm/arm/vgic/vgic.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include <asm/kvm_hyp.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct vgic_global kvm_vgic_global_state __ro_after_init = {
        .gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock         must be taken with IRQs disabled
 *         kvm->lpi_list_lock           must be taken with IRQs disabled
 *           vgic_irq->irq_lock         must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
 * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */
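
/*
 * Illustrative only: a minimal sketch of the ordering rules above for a
 * caller that needs both an its_lock and an irq_lock (the "its" and
 * "irq" variables here are hypothetical):
 *
 *        mutex_lock(&its->its_lock);
 *        raw_spin_lock_irqsave(&irq->irq_lock, flags);
 *        ... modify the IRQ state ...
 *        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 *        mutex_unlock(&its->its_lock);
 *
 * Going the other way round (irq_lock held, its_lock wanted) requires
 * dropping the irq_lock first, as described above.
 */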

/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (irq->intid != intid)
                        continue;

                /*
                 * This increases the refcount, the caller is expected to
                 * call vgic_put_irq() later once it's finished with the IRQ.
                 */
                vgic_get_irq_kref(irq);
                goto out_unlock;
        }
        irq = NULL;

out_unlock:
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid)
{
        /* SGIs and PPIs */
        if (intid <= VGIC_MAX_PRIVATE) {
                intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
                return &vcpu->arch.vgic_cpu.private_irqs[intid];
        }

        /* SPIs */
        if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
                intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
        }

        /* LPIs */
        if (intid >= VGIC_MIN_LPI)
                return vgic_get_lpi(kvm, intid);

        WARN(1, "Looking up struct vgic_irq for reserved INTID");
        return NULL;
}
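
/*
 * Illustrative only: the get/put pairing every caller of vgic_get_irq()
 * is expected to follow (the intid variable is hypothetical):
 *
 *        struct vgic_irq *irq = vgic_get_irq(kvm, vcpu, intid);
 *
 *        if (irq) {
 *                ... use irq, taking its irq_lock as required ...
 *                vgic_put_irq(kvm, irq);
 *        }
 *
 * For SGIs/PPIs/SPIs the refcount has no effect (those structures are
 * never freed), but the pairing keeps LPI lifetime handling uniform.
 */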

/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

/*
 * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
 */
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        if (!kref_put(&irq->refcount, vgic_irq_release))
                return;

        list_del(&irq->lpi_list);
        dist->lpi_list_count--;

        kfree(irq);
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        unsigned long flags;

        if (irq->intid < VGIC_MIN_LPI)
                return;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
        __vgic_put_lpi_locked(kvm, irq);
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;
        unsigned long flags;

        raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                if (irq->intid >= VGIC_MIN_LPI) {
                        raw_spin_lock(&irq->irq_lock);
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        raw_spin_unlock(&irq->irq_lock);
                        vgic_put_irq(vcpu->kvm, irq);
                }
        }

        raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_PENDING,
                                      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
        bool line_level;

        BUG_ON(!irq->hw);

        if (irq->get_input_level)
                return irq->get_input_level(irq->intid);

        WARN_ON(irq_get_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_PENDING,
                                      &line_level));
        return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
        BUG_ON(!irq->hw);
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_ACTIVE,
                                      active));
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:        The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
        lockdep_assert_held(&irq->irq_lock);

        /* If the interrupt is active, it must stay on the current vcpu */
        if (irq->active)
                return irq->vcpu ? : irq->target_vcpu;

        /*
         * If the IRQ is not active but enabled and pending, we should direct
         * it to its configured target VCPU.
         * If the distributor is disabled, pending interrupts shouldn't be
         * forwarded.
         */
        if (irq->enabled && irq_is_pending(irq)) {
                if (unlikely(irq->target_vcpu &&
                             !irq->target_vcpu->kvm->arch.vgic.enabled))
                        return NULL;

                return irq->target_vcpu;
        }

        /*
         * If the IRQ is neither active nor both pending and enabled, it
         * should not be queued to any VCPU.
         */
        return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
        struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
        bool penda, pendb;
        int ret;

        /*
         * list_sort may call this function with the same element when
         * the list is fairly long.
         */
        if (unlikely(irqa == irqb))
                return 0;

        raw_spin_lock(&irqa->irq_lock);
        raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

        if (irqa->active || irqb->active) {
                ret = (int)irqb->active - (int)irqa->active;
                goto out;
        }

        penda = irqa->enabled && irq_is_pending(irqa);
        pendb = irqb->enabled && irq_is_pending(irqb);

        if (!penda || !pendb) {
                ret = (int)pendb - (int)penda;
                goto out;
        }

        /* Both pending and enabled, sort by priority */
        ret = irqa->priority - irqb->priority;
out:
        raw_spin_unlock(&irqb->irq_lock);
        raw_spin_unlock(&irqa->irq_lock);
        return ret;
}
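
/*
 * Illustrative only: a worked example of the comparator above. For two
 * pending and enabled interrupts with priority values 0x20 and 0x80
 * (lower value = higher GIC priority):
 *
 *        ret = 0x20 - 0x80;        // negative
 *
 * A negative result keeps the 0x20 interrupt first in the ap_list, so
 * it is the first one packed into an LR.
 */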

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        lockdep_assert_held(&vgic_cpu->ap_list_lock);

        list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * An injection is only valid if it changes the line level for a
 * level-triggered IRQ, or if it is a rising edge for an edge-triggered
 * IRQ. In-kernel connected IRQ lines can only be controlled by their
 * owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
        if (irq->owner != owner)
                return false;

        switch (irq->config) {
        case VGIC_CONFIG_LEVEL:
                return irq->line_level != level;
        case VGIC_CONFIG_EDGE:
                return level;
        }

        return false;
}
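
/*
 * Illustrative only: for a level-triggered line that is currently low,
 * a first call with level == true validates (the level changes), while
 * an immediate second identical call does not, since line_level has
 * been updated in the meantime. For an edge-triggered line, only
 * level == true (a rising edge) ever validates.
 */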

/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
                           unsigned long flags)
{
        struct kvm_vcpu *vcpu;

        lockdep_assert_held(&irq->irq_lock);

retry:
        vcpu = vgic_target_oracle(irq);
        if (irq->vcpu || !vcpu) {
                /*
                 * If this IRQ is already on a VCPU's ap_list, then it
                 * cannot be moved or modified and there is no more work for
                 * us to do.
                 *
                 * Otherwise, if the irq is not pending and enabled, it does
                 * not need to be inserted into an ap_list and there is also
                 * no more work for us to do.
                 */
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                /*
                 * We have to kick the VCPU here, because we could be
                 * queueing an edge-triggered interrupt for which we
                 * get no EOI maintenance interrupt. In that case,
                 * while the IRQ is already on the VCPU's AP list, the
                 * VCPU could have EOI'ed the original interrupt and
                 * won't see this one until it exits for some other
                 * reason.
                 */
                if (vcpu) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                return false;
        }

        /*
         * We must unlock the irq lock to take the ap_list_lock where
         * we are going to insert this new pending interrupt.
         */
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        /* someone can do stuff here, which we re-check below */

        raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
        raw_spin_lock(&irq->irq_lock);

        /*
         * Did something change behind our backs?
         *
         * There are two cases:
         * 1) The irq lost its pending state or was disabled behind our
         *    backs and/or it was queued to another VCPU's ap_list.
         * 2) Someone changed the affinity on this irq behind our
         *    backs and we are now holding the wrong ap_list_lock.
         *
         * In both cases, drop the locks and retry.
         */

        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
                                           flags);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                goto retry;
        }

        /*
         * Grab a reference to the irq to reflect the fact that it is
         * now in the ap_list.
         */
        vgic_get_irq_kref(irq);
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;

        raw_spin_unlock(&irq->irq_lock);
        raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);

        return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *                            false: to ignore the call
 *           Level-sensitive: true:  raise the input signal
 *                            false: lower the input signal
 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 *           that the caller is allowed to inject this IRQ.  Userspace
 *           injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
                        bool level, void *owner)
{
        struct kvm_vcpu *vcpu;
        struct vgic_irq *irq;
        unsigned long flags;
        int ret;

        trace_vgic_update_irq_pending(cpuid, intid, level);

        ret = vgic_lazy_init(kvm);
        if (ret)
                return ret;

        vcpu = kvm_get_vcpu(kvm, cpuid);
        if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
                return -EINVAL;

        irq = vgic_get_irq(kvm, vcpu, intid);
        if (!irq)
                return -EINVAL;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        if (!vgic_validate_injection(irq, level, owner)) {
                /* Nothing to see here, move along... */
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(kvm, irq);
                return 0;
        }

        if (irq->config == VGIC_CONFIG_LEVEL)
                irq->line_level = level;
        else
                irq->pending_latch = true;

        vgic_queue_irq_unlock(kvm, irq, flags);
        vgic_put_irq(kvm, irq);

        return 0;
}
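
/*
 * Illustrative only: a hypothetical in-kernel device model raising and
 * later lowering a level-sensitive SPI (MY_SPI_INTID and my_dev are
 * made-up names):
 *
 *        kvm_vgic_inject_irq(kvm, 0, MY_SPI_INTID, true, my_dev);
 *        ...
 *        kvm_vgic_inject_irq(kvm, 0, MY_SPI_INTID, false, my_dev);
 *
 * Userspace-originated injections pass owner == NULL instead. For SPIs
 * the destination comes from the interrupt's configured target_vcpu,
 * not from cpuid, which only selects the VCPU for private interrupts.
 */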

/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                            unsigned int host_irq,
                            bool (*get_input_level)(int vintid))
{
        struct irq_desc *desc;
        struct irq_data *data;

        /*
         * Find the physical IRQ number corresponding to @host_irq
         */
        desc = irq_to_desc(host_irq);
        if (!desc) {
                kvm_err("%s: no interrupt descriptor\n", __func__);
                return -EINVAL;
        }
        data = irq_desc_get_irq_data(desc);
        while (data->parent_data)
                data = data->parent_data;

        irq->hw = true;
        irq->host_irq = host_irq;
        irq->hwintid = data->hwirq;
        irq->get_input_level = get_input_level;
        return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
        irq->hw = false;
        irq->hwintid = 0;
        irq->get_input_level = NULL;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
                          u32 vintid, bool (*get_input_level)(int vintid))
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        unsigned long flags;
        int ret;

        BUG_ON(!irq);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return ret;
}
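
/*
 * Illustrative only: a forwarded-interrupt user (the arch timer is the
 * in-tree example) wires its host IRQ to the guest's view of the
 * interrupt roughly like this; host_irq, guest_intid and
 * my_get_input_level are hypothetical names:
 *
 *        ret = kvm_vgic_map_phys_irq(vcpu, host_irq, guest_intid,
 *                                    my_get_input_level);
 *        if (ret)
 *                return ret;
 *
 * The get_input_level callback is optional; when it is NULL, the line
 * level is sampled from the irqchip in vgic_get_phys_line_level().
 */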

/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt.  Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        unsigned long flags;

        if (!irq->hw)
                goto out;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->active = false;
        irq->pending_latch = false;
        irq->line_level = false;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
        vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
        struct vgic_irq *irq;
        unsigned long flags;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        BUG_ON(!irq);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        kvm_vgic_unmap_irq(irq);
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:   Pointer to the VCPU (used for PPIs)
 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:  Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
        struct vgic_irq *irq;
        unsigned long flags;
        int ret = 0;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        /* SGIs and LPIs cannot be wired up to any device */
        if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
                return -EINVAL;

        irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->owner && irq->owner != owner)
                ret = -EEXIST;
        else
                irq->owner = owner;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        return ret;
}
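
/*
 * Illustrative only: an in-kernel user typically claims its interrupt
 * once at init time and then passes the same cookie on every
 * kvm_vgic_inject_irq() call (ppi_intid and my_cookie are hypothetical):
 *
 *        ret = kvm_vgic_set_owner(vcpu, ppi_intid, my_cookie);
 *        if (ret)
 *                return ret;        // -EEXIST: another device owns it
 */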

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
        raw_spin_lock(&vgic_cpu->ap_list_lock);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
                bool target_vcpu_needs_kick = false;

                raw_spin_lock(&irq->irq_lock);

                BUG_ON(vcpu != irq->vcpu);

                target_vcpu = vgic_target_oracle(irq);

                if (!target_vcpu) {
                        /*
                         * We don't need to process this interrupt any
                         * further, move it off the list.
                         */
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        raw_spin_unlock(&irq->irq_lock);

                        /*
                         * This vgic_put_irq call matches the
                         * vgic_get_irq_kref in vgic_queue_irq_unlock,
                         * where we added the LPI to the ap_list. As
                         * we remove the irq from the list, we also
                         * drop the refcount.
                         */
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                if (target_vcpu == vcpu) {
                        /* We're on the right CPU */
                        raw_spin_unlock(&irq->irq_lock);
                        continue;
                }

                /* This interrupt looks like it has to be migrated. */

                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock(&vgic_cpu->ap_list_lock);

                /*
                 * Ensure locking order by always locking the smallest
                 * ID first.
                 */
                if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
                        vcpuA = vcpu;
                        vcpuB = target_vcpu;
                } else {
                        vcpuA = target_vcpu;
                        vcpuB = vcpu;
                }

                raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
                raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                      SINGLE_DEPTH_NESTING);
                raw_spin_lock(&irq->irq_lock);

                /*
                 * If the affinity has been preserved, move the
                 * interrupt around. Otherwise, it means things have
                 * changed while the interrupt was unlocked, and we
                 * need to replay this.
                 *
                 * In all cases, we cannot trust the list not to have
                 * changed, so we restart from the beginning.
                 */
                if (target_vcpu == vgic_target_oracle(irq)) {
                        struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

                        list_del(&irq->ap_list);
                        irq->vcpu = target_vcpu;
                        list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
                        target_vcpu_needs_kick = true;
                }

                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
                raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

                if (target_vcpu_needs_kick) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
                        kvm_vcpu_kick(target_vcpu);
                }

                goto retry;
        }

        raw_spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_fold_lr_state(vcpu);
        else
                vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
                                    struct vgic_irq *irq, int lr)
{
        lockdep_assert_held(&irq->irq_lock);

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_populate_lr(vcpu, irq, lr);
        else
                vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_clear_lr(vcpu, lr);
        else
                vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_underflow(vcpu);
        else
                vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
                                 bool *multi_sgi)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        *multi_sgi = false;

        lockdep_assert_held(&vgic_cpu->ap_list_lock);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                int w;

                raw_spin_lock(&irq->irq_lock);
                /* GICv2 SGIs can count for more than one... */
                w = vgic_irq_get_lr_count(irq);
                raw_spin_unlock(&irq->irq_lock);

                count += w;
                *multi_sgi |= (w > 1);
        }
        return count;
}
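
/*
 * Illustrative only: on GICv2 an SGI encodes its source CPU in the LR,
 * so a single SGI that is pending from three different vcpus needs
 * three LRs. vgic_irq_get_lr_count() then returns 3 for that interrupt
 * and compute_ap_list_depth() sets *multi_sgi.
 */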

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count;
        bool multi_sgi;
        u8 prio = 0xff;

        lockdep_assert_held(&vgic_cpu->ap_list_lock);

        count = compute_ap_list_depth(vcpu, &multi_sgi);
        if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
                vgic_sort_ap_list(vcpu);

        count = 0;

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                raw_spin_lock(&irq->irq_lock);

                /*
                 * If we have multi-SGIs in the pipeline, we need to
                 * guarantee that they are all seen before any IRQ of
                 * lower priority. In that case, we need to filter out
                 * these interrupts by exiting early. This is easy as
                 * the AP list has been sorted already.
                 */
                if (multi_sgi && irq->priority > prio) {
                        raw_spin_unlock(&irq->irq_lock);
                        break;
                }

                if (likely(vgic_target_oracle(irq) == vcpu)) {
                        vgic_populate_lr(vcpu, irq, count++);

                        if (irq->source)
                                prio = irq->priority;
                }

                raw_spin_unlock(&irq->irq_lock);

                if (count == kvm_vgic_global_state.nr_lr) {
                        if (!list_is_last(&irq->ap_list,
                                          &vgic_cpu->ap_list_head))
                                vgic_set_underflow(vcpu);
                        break;
                }
        }

        vcpu->arch.vgic_cpu.used_lrs = count;

        /* Nuke remaining LRs */
        for ( ; count < kvm_vgic_global_state.nr_lr; count++)
                vgic_clear_lr(vcpu, count);
}

static inline bool can_access_vgic_from_kernel(void)
{
        /*
         * GICv2 can always be accessed from the kernel because it is
         * memory-mapped, and VHE systems can access GICv3 EL2 system
         * registers.
         */
        return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vgic_v2_save_state(vcpu);
        else
                __vgic_v3_save_state(vcpu);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        WARN_ON(vgic_v4_sync_hwstate(vcpu));

        /* An empty ap_list_head implies used_lrs == 0 */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;

        if (can_access_vgic_from_kernel())
                vgic_save_state(vcpu);

        if (vgic_cpu->used_lrs)
                vgic_fold_lr_state(vcpu);
        vgic_prune_ap_list(vcpu);
}

static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vgic_v2_restore_state(vcpu);
        else
                __vgic_v3_restore_state(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        WARN_ON(vgic_v4_flush_hwstate(vcpu));

        /*
         * If there are no virtual interrupts active or pending for this
         * VCPU, then there is no work to do and we can bail out without
         * taking any lock.  There is a potential race with someone injecting
         * interrupts to the VCPU, but it is a benign race as the VCPU will
         * either observe the new interrupt before or after doing this check,
         * and introducing additional synchronization mechanism doesn't change
         * this.
         *
         * Note that we still need to go through the whole thing if anything
         * can be directly injected (GICv4).
         */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
            !vgic_supports_direct_msis(vcpu->kvm))
                return;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

        if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
                raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
                vgic_flush_lr_state(vcpu);
                raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
        }

        if (can_access_vgic_from_kernel())
                vgic_restore_state(vcpu);
}
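
/*
 * Illustrative only: the arm/arm64 KVM run loop brackets each guest
 * entry with the two hooks above, roughly:
 *
 *        kvm_vgic_flush_hwstate(vcpu);        // emulated state -> LRs
 *        ... enter and run the guest ...
 *        kvm_vgic_sync_hwstate(vcpu);         // LRs -> emulated state
 */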

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_load(vcpu);
        else
                vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_put(vcpu);
        else
                vgic_v3_put(vcpu);
}

void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
{
        if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_vmcr_sync(vcpu);
        else
                vgic_v3_vmcr_sync(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        bool pending = false;
        unsigned long flags;
        struct vgic_vmcr vmcr;

        if (!vcpu->kvm->arch.vgic.enabled)
                return false;

        if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
                return true;

        vgic_get_vmcr(vcpu, &vmcr);

        raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                raw_spin_lock(&irq->irq_lock);
                pending = irq_is_pending(irq) && irq->enabled &&
                          !irq->active &&
                          irq->priority < vmcr.pmr;
                raw_spin_unlock(&irq->irq_lock);

                if (pending)
                        break;
        }

        raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

        return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        int c;

        /*
         * We've injected an interrupt, time to find out who deserves
         * a good kick...
         */
        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (kvm_vgic_vcpu_pending_irq(vcpu)) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
        }
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
        struct vgic_irq *irq;
        bool map_is_active;
        unsigned long flags;

        if (!vgic_initialized(vcpu->kvm))
                return false;

        irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        map_is_active = irq->hw && irq->active;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return map_is_active;
}