target/linux/layerscape/patches-5.4/811-kvm-0003-arm-arm64-KVM-drop-qman-mmio-cacheable-mapping-hack.patch
From d637252f72998261c9d77c0be57317c73ad77f83 Mon Sep 17 00:00:00 2001
From: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Date: Tue, 26 Jul 2016 16:38:18 +0300
Subject: [PATCH] arm/arm64: KVM: drop qman mmio cacheable mapping hack

Instead of hardcoding checks for the qman cacheable
mmio region physical addresses, extract the mapping
information from the user-space mapping.
This involves several steps (sketched in code below):
 - get access to a pte that is part of the user-space mapping
   by using the get_locked_pte() / pte_unmap_unlock() APIs
 - extract the memtype (normal / device) and shareability from
   that pte
 - convert them to S2 translation bits in the newly added
   function stage1_to_stage2_pgprot()
 - finish making the S2 translation with the obtained bits
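
A condensed sketch of that flow follows; the helper name
user_mapping_s2_prot() is only for illustration and is not part of
this patch, userspace_addr stands for the memslot's user-space
address, and the authoritative code is in the hunks below:

    /* Illustrative only: condensed from the hunks below,
     * error handling omitted. */
    static pgprot_t user_mapping_s2_prot(unsigned long userspace_addr)
    {
            pte_t *pte;
            spinlock_t *ptl;
            pgprot_t prot;

            /* 1) grab the stage-1 (user-space) pte, holding its lock */
            pte = get_locked_pte(current->mm, userspace_addr, &ptl);
            /* 2)+3) read memtype/shareability, convert to S2 bits */
            prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
            pte_unmap_unlock(pte, ptl);

            /* 4) 'prot' is then used to build the stage-2 mapping */
            return prot;
    }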

Another option that was explored was using vm_area_struct::vm_page_prot,
which is set in the vfio-mc mmap code to the correct page bits.
However, experiments showed that these bits are later altered
in the generic mmap code (e.g. the shareability bit is always
set on arm64).
The only place where the original bits can still be found
is the user-space mapping, using the method described above.

Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
[Bharat - Fixed mem_type check issue]
[changed "ifdef ARM64" to CONFIG_ARM64]
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
[Ioana - added a sanity check for hugepages]
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
[Fixed format issues]
Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
---
 virt/kvm/arm/mmu.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 53 insertions(+), 2 deletions(-)

--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1375,6 +1375,30 @@ out:
        return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+       switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+       case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+       case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+       case PTE_ATTRINDX(MT_DEVICE_GRE):
+               return PAGE_S2_DEVICE;
+       case PTE_ATTRINDX(MT_NORMAL_NC):
+       case PTE_ATTRINDX(MT_NORMAL):
+               return (pgprot_val(prot) & PTE_SHARED)
+                       ? PAGE_S2
+                       : PAGE_S2_NS;
+       }
+
+       return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+       return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
        kvm_pfn_t pfn = *pfnp;
@@ -1719,8 +1743,23 @@ static int user_mem_abort(struct kvm_vcp
         * 3 levels, i.e, PMD is not folded.
         */
        if (vma_pagesize == PMD_SIZE ||
-           (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
+           (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) {
                gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
+       } else {
+               if (!is_vm_hugetlb_page(vma)) {
+                       pte_t *pte;
+                       spinlock_t *ptl;
+                       pgprot_t prot;
+
+                       pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+                       prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+                       pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+                       if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+                               mem_type = PAGE_S2_NS;
+#endif
+               }
+       }
        up_read(&current->mm->mmap_sem);
 
        /* We need minimum second+third level pages */
@@ -1749,6 +1788,11 @@ static int user_mem_abort(struct kvm_vcp
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;
 
+#ifdef CONFIG_ARM64
+       if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+               flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+       } else
+#endif
        if (kvm_is_device_pfn(pfn)) {
                mem_type = PAGE_S2_DEVICE;
                flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -2336,6 +2380,9 @@ int kvm_arch_prepare_memory_region(struc
                        gpa_t gpa = mem->guest_phys_addr +
                                    (vm_start - mem->userspace_addr);
                        phys_addr_t pa;
+                       pgprot_t prot;
+                       pte_t *pte;
+                       spinlock_t *ptl;
 
                        pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
                        pa += vm_start - vma->vm_start;
@@ -2346,9 +2393,13 @@ int kvm_arch_prepare_memory_region(struc
                                goto out;
                        }
 
+                       pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+                       prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+                       pte_unmap_unlock(pte, ptl);
+
                        ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
                                                    vm_end - vm_start,
-                                                   writable, PAGE_S2_DEVICE);
+                                                   writable, prot);
                        if (ret)
                                break;
                }