Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] / arch/s390/mm/vmem.c
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
        struct list_head list;
        unsigned long start;
        unsigned long size;
};

static LIST_HEAD(mem_segs);

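/*
 * Allocate pages from the buddy allocator once the slab allocator is
 * available; fall back to memblock during early boot.
 */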
static void __ref *vmem_alloc_pages(unsigned int order)
{
        unsigned long size = PAGE_SIZE << order;

        if (slab_is_available())
                return (void *)__get_free_pages(GFP_KERNEL, order);
        return (void *) memblock_phys_alloc(size, size);
}

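/*
 * Allocate a region/segment (CRST) table and initialize all of its
 * entries with the given value.
 */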
void *vmem_crst_alloc(unsigned long val)
{
        unsigned long *table;

        table = vmem_alloc_pages(CRST_ALLOC_ORDER);
        if (table)
                crst_table_init(table, val);
        return table;
}

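/*
 * Allocate a page table and mark all of its entries invalid.
 */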
pte_t __ref *vmem_pte_alloc(void)
{
        unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
        pte_t *pte;

        if (slab_is_available())
                pte = (pte_t *) page_table_alloc(&init_mm);
        else
                pte = (pte_t *) memblock_phys_alloc(size, size);
        if (!pte)
                return NULL;
        memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
        return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
        unsigned long pgt_prot, sgt_prot, r3_prot;
        unsigned long pages4k, pages1m, pages2g;
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;

        pgt_prot = pgprot_val(PAGE_KERNEL);
        sgt_prot = pgprot_val(SEGMENT_KERNEL);
        r3_prot = pgprot_val(REGION3_KERNEL);
        if (!MACHINE_HAS_NX) {
                pgt_prot &= ~_PAGE_NOEXEC;
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
                r3_prot &= ~_REGION_ENTRY_NOEXEC;
        }
        pages4k = pages1m = pages2g = 0;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
                        if (!p4_dir)
                                goto out;
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }
                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
                        if (!pu_dir)
                                goto out;
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }
                pu_dir = pud_offset(p4_dir, address);
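                /* Map a 2GB region third table entry directly if EDAT2
                 * is available and the range is suitably sized and
                 * aligned. */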
                if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
                    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
                    !debug_pagealloc_enabled()) {
                        pud_val(*pu_dir) = address | r3_prot;
                        address += PUD_SIZE;
                        pages2g++;
                        continue;
                }
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        if (!pm_dir)
                                goto out;
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }
                pm_dir = pmd_offset(pu_dir, address);
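                /* Map a 1MB segment table entry directly if EDAT1 is
                 * available and the range is suitably sized and
                 * aligned. */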
                if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
                    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
                    !debug_pagealloc_enabled()) {
                        pmd_val(*pm_dir) = address | sgt_prot;
                        address += PMD_SIZE;
                        pages1m++;
                        continue;
                }
                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_val(*pt_dir) = address | pgt_prot;
                address += PAGE_SIZE;
                pages4k++;
        }
        ret = 0;
out:
        update_page_count(PG_DIRECT_MAP_4K, pages4k);
        update_page_count(PG_DIRECT_MAP_1M, pages1m);
        update_page_count(PG_DIRECT_MAP_2G, pages2g);
        return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
        unsigned long pages4k, pages1m, pages2g;
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;

        pages4k = pages1m = pages2g = 0;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        address += PGDIR_SIZE;
                        continue;
                }
                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        address += P4D_SIZE;
                        continue;
                }
                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        address += PUD_SIZE;
                        continue;
                }
                if (pud_large(*pu_dir)) {
                        pud_clear(pu_dir);
                        address += PUD_SIZE;
                        pages2g++;
                        continue;
                }
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        address += PMD_SIZE;
                        continue;
                }
                if (pmd_large(*pm_dir)) {
                        pmd_clear(pm_dir);
                        address += PMD_SIZE;
                        pages1m++;
                        continue;
                }
                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_clear(&init_mm, address, pt_dir);
                address += PAGE_SIZE;
                pages4k++;
        }
        flush_tlb_kernel_range(start, end);
        update_page_count(PG_DIRECT_MAP_4K, -pages4k);
        update_page_count(PG_DIRECT_MAP_1M, -pages1m);
        update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Populate the virtual mem_map array for the given range with backing
 * storage.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
{
        unsigned long pgt_prot, sgt_prot;
        unsigned long address = start;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;

        pgt_prot = pgprot_val(PAGE_KERNEL);
        sgt_prot = pgprot_val(SEGMENT_KERNEL);
        if (!MACHINE_HAS_NX) {
                pgt_prot &= ~_PAGE_NOEXEC;
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
        }
        for (address = start; address < end;) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
                        if (!p4_dir)
                                goto out;
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }

                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
                        if (!pu_dir)
                                goto out;
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }

                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        if (!pm_dir)
                                goto out;
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }

                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        /* Use 1MB frames for vmemmap if available. We
                         * always use large frames even if they are only
                         * partially used. Otherwise we would end up with
                         * page tables as well, since vmemmap_populate()
                         * gets called for each section separately. */
                        if (MACHINE_HAS_EDAT1) {
                                void *new_page;

                                new_page = vmemmap_alloc_block(PMD_SIZE, node);
                                if (!new_page)
                                        goto out;
                                pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }

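                /* Back the remaining vmemmap range with 4K pages. */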
                pt_dir = pte_offset_kernel(pm_dir, address);
                if (pte_none(*pt_dir)) {
                        void *new_page;

                        new_page = vmemmap_alloc_block(PAGE_SIZE, node);
                        if (!new_page)
                                goto out;
                        pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
                }
                address += PAGE_SIZE;
        }
        ret = 0;
out:
        return ret;
}

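/*
 * No-op: vmemmap backing storage is not freed on s390.
 */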
void vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
        struct memory_segment *tmp;

        if (seg->start + seg->size > VMEM_MAX_PHYS ||
            seg->start + seg->size < seg->start)
                return -ERANGE;

        list_for_each_entry(tmp, &mem_segs, list) {
                if (seg->start >= tmp->start + tmp->size)
                        continue;
                if (seg->start + seg->size <= tmp->start)
                        continue;
                return -ENOSPC;
        }
        list_add(&seg->list, &mem_segs);
        return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
        list_del(&seg->list);
}

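/*
 * Remove the segment from the segment list and tear down its 1:1 mapping.
 */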
static void __remove_shared_memory(struct memory_segment *seg)
{
        remove_memory_segment(seg);
        vmem_remove_range(seg->start, seg->size);
}

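/*
 * Remove a previously added mapping. The range must exactly match a
 * segment added by vmem_add_mapping().
 */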
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);

        ret = -ENOENT;
        list_for_each_entry(seg, &mem_segs, list) {
                if (seg->start == start && seg->size == size)
                        break;
        }

        if (seg->start != start || seg->size != size)
                goto out;

        ret = 0;
        __remove_shared_memory(seg);
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}

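/*
 * Register a new memory segment and create its 1:1 mapping.
 */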
int vmem_add_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);
        ret = -ENOMEM;
        seg = kzalloc(sizeof(*seg), GFP_KERNEL);
        if (!seg)
                goto out;
        seg->start = start;
        seg->size = size;

        ret = insert_memory_segment(seg);
        if (ret)
                goto out_free;

        ret = vmem_add_mem(start, size);
        if (ret)
                goto out_remove;
        goto out;

out_remove:
        __remove_shared_memory(seg);
out_free:
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged.
 */
void __init vmem_map_init(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                vmem_add_mem(reg->base, reg->size);
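        /* Write-protect kernel text, read-only data, init text and the
         * DMA text section; the text sections stay executable. */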
        __set_memory((unsigned long)_stext,
                     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
        __set_memory((unsigned long)_etext,
                     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
                     SET_MEMORY_RO);
        __set_memory((unsigned long)_sinittext,
                     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
        __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
        pr_info("Write protected kernel read-only data: %luk\n",
                (unsigned long)(__end_rodata - _stext) >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
        struct memblock_region *reg;
        struct memory_segment *seg;

        mutex_lock(&vmem_mutex);
        for_each_memblock(memory, reg) {
                seg = kzalloc(sizeof(*seg), GFP_KERNEL);
                if (!seg)
                        panic("Out of memory...\n");
                seg->start = reg->base;
                seg->size = reg->size;
                insert_memory_segment(seg);
        }
        mutex_unlock(&vmem_mutex);
        return 0;
}

core_initcall(vmem_convert_memory_chunk);