Linux-libre 4.9.123-gnu
librecmc/linux-libre.git: arch/arm64/kvm/hyp/switch.c
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}
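
/*
 * Illustrative sketch only (the *_model name below is made up, not
 * kernel API): once the alternatives framework has patched the image,
 * the __fpsimd_is_enabled()() construct above behaves roughly like
 * this, picking the VHE helper when the CPU has
 * ARM64_HAS_VIRT_HOST_EXTN and the non-VHE one otherwise.  The real
 * selection is a patched instruction, not a runtime capability check.
 */
static bool __hyp_text __maybe_unused __fpsimd_enabled_model(void)
{
	if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
		return __fpsimd_enabled_vhe();

	return __fpsimd_enabled_nvhe();
}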

static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_FPEN;
	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
	write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1.  Therefore, for 32-bit guests, always set FPEXC.EN
	 * when setting the TFP bit, so that FP accesses trap to EL2 rather
	 * than to EL1.
	 */
	val = vcpu->arch.hcr_el2;
	if (!(val & HCR_RW)) {
		write_sysreg(1 << 30, fpexc32_el2);	/* FPEXC.EN */
		isb();
	}
	write_sysreg(val, hcr_el2);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}

static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	/* Keep MDCR_EL2.HPMN, drop all the trap/enable bits */
	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
	write_sysreg(0, pmuserenr_el0);
}

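/*
 * vttbr_el2 holds the stage-2 page table base address and the VMID of
 * the guest; __deactivate_vm() below clears it again once we are back
 * on the host.
 */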
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

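/*
 * Restore the vgic state and re-enable interrupt delivery for the
 * guest: HCR_INT_OVERRIDE (HCR_IMO | HCR_FMO) routes physical
 * IRQs/FIQs to EL2 while the guest runs, and vcpu->arch.irq_lines
 * carries any virtual IRQ/FIQ (HCR_VI/HCR_VF) pended by the injection
 * code.
 */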
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}

static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}
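
/*
 * A worked example of the conversion above (value chosen for
 * illustration): PAR_EL1 returns PA[47:12] in bits [47:12], while
 * HPFAR_EL2 wants FIPA[47:12] in bits [39:4].  For a faulting IPA of
 * 0x89765000, PAR_EL1[47:12] is 0x89765 and the resulting HPFAR_EL2
 * value is 0x89765 << 4 == 0x897650.
 */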

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries erratum 834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

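/*
 * Skip the instruction that just trapped.  AArch64 instructions are a
 * fixed 4 bytes, so the PC is simply advanced; AArch32 guests go
 * through kvm_skip_instr32() so that Thumb instruction width and the
 * IT state in SPSR are handled correctly.
 */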
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

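/*
 * The ARM_SMCCC_ARCH_WORKAROUND_2 call takes a single state argument:
 * 1 asks firmware to enable the Speculative Store Bypass Disable
 * mitigation on this CPU, 0 to disable it.  The guest-side helper
 * below drops the mitigation on entry when the guest has opted out of
 * it, and the host-side helper restores it on exit.
 */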
static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

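/*
 * The world switch: save the host's sysreg and debug state, install
 * the guest's traps, stage-2 translation, vgic and timer state, run
 * the guest via __guest_enter(), then undo all of it in the opposite
 * order before handing the exit code back to the host.
 */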
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	__set_guest_arch_workaround_state(vcpu);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				__skip_instr(vcpu);
				goto again;
			}

			if (ret == -1) {
				/* Promote an illegal access to an SError */
				__skip_instr(vcpu);
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}

	__set_host_arch_workaround_state(vcpu);

	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_vcpu *vcpu)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr,  elr,
		       read_sysreg(esr_el2),   read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
					    struct kvm_vcpu *vcpu)
{
	panic(__hyp_panic_string,
	      spsr,  elr,
	      read_sysreg_el2(esr),   read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

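/*
 * Entered from the EL2 exception vectors on an unexpected trap.  A
 * non-zero vttbr_el2 means we were running a guest, in which case
 * enough host state is restored for the panic output to make sense.
 */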
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu = NULL;

	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		vcpu = host_ctxt->__hyp_running_vcpu;
		__timer_save_state(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par, vcpu);

	unreachable();
}