2 * Set up the VMAs to tell the VM about the vDSO.
3 * Copyright 2007 Andi Kleen, SUSE Labs.
4 * Subject to the GPL, v.2
8 #include <linux/sched.h>
9 #include <linux/slab.h>
10 #include <linux/init.h>
11 #include <linux/random.h>
12 #include <linux/elf.h>
13 #include <asm/vsyscall.h>
14 #include <asm/vgtod.h>
15 #include <asm/proto.h>
/* Runtime knob: the "vdso=" boot parameter is parsed into this (see vdso_setup). */
19 unsigned int __read_mostly vdso_enabled = 1;
/* Bounds of the in-kernel 64-bit vDSO ELF image, defined by the vdso build. */
21 extern char vdso_start[], vdso_end[];
22 extern unsigned short vdso_sync_cpuid;
/* Page array backing the 64-bit vDSO; populated by init_vdso(). */
24 extern struct page *vdso_pages[];
/* Page-rounded size of the 64-bit vDSO image, set by init_vdso(). */
25 static unsigned vdso_size;
27 #ifdef CONFIG_X86_X32_ABI
/* Same set of symbols for the x32 vDSO image. */
28 extern char vdsox32_start[], vdsox32_end[];
29 extern struct page *vdsox32_pages[];
30 static unsigned vdsox32_size;
/*
 * patch_vdsox32 - apply alternative-instruction patching to the x32
 * vDSO image, in place, at boot.
 * @vdso: start of the in-kernel x32 vDSO ELF image
 * @len:  size of the image in bytes
 *
 * Walks the ELF section headers looking for ".altinstructions" and, if
 * found, hands that section to apply_alternatives().  BUG()s if the
 * image is too small to hold an ELF header or lacks the ELF magic.
 * NOTE(review): several lines are elided in this excerpt (declarations
 * of secstrings/i/alt_data and the loop body that sets alt_sec).
 */
32 static void __init patch_vdsox32(void *vdso, size_t len)
34 Elf32_Ehdr *hdr = vdso;
35 Elf32_Shdr *sechdrs, *alt_sec = 0;
/* Sanity-check the embedded image before dereferencing header fields. */
40 BUG_ON(len < sizeof(Elf32_Ehdr));
41 BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
/* Section header table and the section-name string table. */
43 sechdrs = (void *)hdr + hdr->e_shoff;
44 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
/* Scan sections (index 0 is the reserved NULL section) for the target. */
46 for (i = 1; i < hdr->e_shnum; i++) {
47 Elf32_Shdr *shdr = &sechdrs[i];
48 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
/* The vdso build always emits .altinstructions, so absence is unexpected. */
54 /* If we get here, it's probably a bug. */
55 pr_warning("patch_vdsox32: .altinstructions not found\n");
56 return; /* nothing to patch */
/* Patch the located alternatives section in place. */
59 alt_data = (void *)hdr + alt_sec->sh_offset;
60 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
/*
 * patch_vdso64 - apply alternative-instruction patching to the 64-bit
 * vDSO image, in place, at boot.  Mirrors patch_vdsox32 but uses the
 * Elf64 header/section types.
 * @vdso: start of the in-kernel 64-bit vDSO ELF image
 * @len:  size of the image in bytes
 *
 * NOTE(review): several lines are elided in this excerpt (declarations
 * of secstrings/i/alt_data and the loop body that sets alt_sec).
 */
64 static void __init patch_vdso64(void *vdso, size_t len)
66 Elf64_Ehdr *hdr = vdso;
67 Elf64_Shdr *sechdrs, *alt_sec = 0;
/* Sanity-check the embedded image before dereferencing header fields. */
72 BUG_ON(len < sizeof(Elf64_Ehdr));
73 BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
/* Section header table and the section-name string table. */
75 sechdrs = (void *)hdr + hdr->e_shoff;
76 secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
/* Scan sections (index 0 is the reserved NULL section) for the target. */
78 for (i = 1; i < hdr->e_shnum; i++) {
79 Elf64_Shdr *shdr = &sechdrs[i];
80 if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
/* The vdso build always emits .altinstructions, so absence is unexpected. */
86 /* If we get here, it's probably a bug. */
87 pr_warning("patch_vdso64: .altinstructions not found\n");
88 return; /* nothing to patch */
/* Patch the located alternatives section in place. */
91 alt_data = (void *)hdr + alt_sec->sh_offset;
92 apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
/*
 * init_vdso - one-time boot initialization for the vDSO images.
 *
 * Patches alternatives into the 64-bit (and, when configured, x32)
 * vDSO image and records the page-rounded size and the struct page
 * array for each, which setup_additional_pages() later maps into
 * every new process.
 */
95 static int __init init_vdso(void)
/* Round the image size up to whole pages. */
97 int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
100 patch_vdso64(vdso_start, vdso_end - vdso_start);
102 vdso_size = npages << PAGE_SHIFT;
/* Translate each page of the kernel-resident image to its struct page. */
103 for (i = 0; i < npages; i++)
104 vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
106 #ifdef CONFIG_X86_X32_ABI
/* Same steps for the x32 image. */
107 patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
108 npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
109 vdsox32_size = npages << PAGE_SHIFT;
110 for (i = 0; i < npages; i++)
111 vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
/* Run after core subsystems are up but before userspace starts. */
116 subsys_initcall(init_vdso);
121 * Put the vdso above the (randomized) stack with another randomized
122 * offset. This way there is no hole in the middle of address space.
123 * To save memory make sure it is still in the same PTE as the stack
124 * top. This doesn't give that many random bits.
126 * Note that this algorithm is imperfect: the distribution of the vdso
127 * start address within a PMD is biased toward the end.
129 * Only used for the 64-bit and x32 vdsos.
/*
 * vdso_addr - pick a randomized load address for the vDSO.
 * @start: top of the (already randomized) process stack
 * @len:   size of the vDSO mapping in bytes
 *
 * Returns a page-aligned candidate address near the stack top; the
 * caller still runs it through get_unmapped_area().  See the comment
 * block above this function for the placement rationale.
 */
131 static unsigned long vdso_addr(unsigned long start, unsigned len)
133 unsigned long addr, end;
137 * Round up the start address. It can start out unaligned as a result
138 * of stack start randomization.
140 start = PAGE_ALIGN(start);
142 /* Round the lowest possible end address up to a PMD boundary. */
143 end = (start + len + PMD_SIZE - 1) & PMD_MASK;
/* Never place the vDSO above the userspace address-space limit. */
144 if (end >= TASK_SIZE_MAX)
/* Choose a random page offset within [start, end]; note the bias
 * toward the end documented above.  (Declaration of 'offset' and the
 * clamping code are elided in this excerpt.) */
149 offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
150 addr = start + (offset << PAGE_SHIFT);
156 * Forcibly align the final address in case we have a hardware
157 * issue that requires alignment for performance reasons.
159 addr = align_vdso_addr(addr);
164 /* Setup a VMA at program startup for the vsyscall page.
165 Not called for compat tasks */
/*
 * Common worker for arch_setup_additional_pages() and
 * x32_setup_additional_pages(): maps the given vDSO page set into the
 * new process at a randomized address and records it in mm->context.vdso.
 * Returns 0 on success or a negative errno (error paths elided here).
 */
166 static int setup_additional_pages(struct linux_binprm *bprm,
171 struct mm_struct *mm = current->mm;
/* Serialize against other address-space modifications of this mm. */
178 down_write(&mm->mmap_sem);
179 addr = vdso_addr(mm->start_stack, size);
/* Let the VM validate/adjust the hint; returns an error value on failure. */
180 addr = get_unmapped_area(NULL, addr, size, 0, 0);
181 if (IS_ERR_VALUE(addr)) {
/* Publish the chosen base so the ELF loader / signal code can find it. */
186 current->mm->context.vdso = (void *)addr;
188 ret = install_special_mapping(mm, addr, size,
190 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
/* On failure, undo the published pointer. */
193 current->mm->context.vdso = NULL;
198 up_write(&mm->mmap_sem);
/*
 * Map the 64-bit vDSO into a newly exec'ed process.  Called from the
 * ELF loader; thin wrapper around setup_additional_pages().
 */
202 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
204 return setup_additional_pages(bprm, uses_interp, vdso_pages,
208 #ifdef CONFIG_X86_X32_ABI
/*
 * Map the x32 vDSO into a newly exec'ed x32 process; thin wrapper
 * around setup_additional_pages() with the x32 page set.
 */
209 int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
211 return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
/*
 * Parse the "vdso=" kernel command-line option into vdso_enabled
 * (e.g. "vdso=0" disables the vDSO mapping).
 */
216 static __init int vdso_setup(char *s)
218 vdso_enabled = simple_strtoul(s, NULL, 0);
221 __setup("vdso=", vdso_setup);