/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/mm/fault.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Modified by Cort Dougan and Paul Mackerras.
 *
 *  Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/extable.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>

#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
static inline bool notify_page_fault(struct pt_regs *regs)
{
	bool ret = false;

#ifdef CONFIG_KPROBES
	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 11))
			ret = true;
		preempt_enable();
	}
#endif /* CONFIG_KPROBES */

	if (unlikely(debugger_fault_handler(regs)))
		ret = true;

	return ret;
}
/*
 * Check whether the instruction at regs->nip is a store using
 * an update addressing form which will update r1.
 */
static bool store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

	if (get_user(inst, (unsigned int __user *)regs->nip))
		return false;
	/* check for 1 in the rA field */
	if (((inst >> 16) & 0x1f) != 1)
		return false;
	/* check major opcode */
	switch (inst >> 26) {
	case 37:	/* stwu */
	case 39:	/* stbu */
	case 45:	/* sthu */
	case 53:	/* stfsu */
	case 55:	/* stfdu */
		return true;
	case 62:	/* std or stdu */
		return (inst & 3) == 1;
	case 31:
		/* check minor opcode */
		switch ((inst >> 1) & 0x3ff) {
		case 181:	/* stdux */
		case 183:	/* stwux */
		case 247:	/* stbux */
		case 439:	/* sthux */
		case 695:	/* stfsux */
		case 759:	/* stfdux */
			return true;
		}
	}
	return false;
}
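/*
 * Worked example (illustrative): the prologue store "stwu r1,-16(r1)"
 * assembles to 0x9421fff0.  Running it through the checks above:
 *
 *	(0x9421fff0 >> 16) & 0x1f == 1	rA field names r1
 *	(0x9421fff0 >> 26) == 37	major opcode: stwu
 *
 * so store_updates_sp() returns true: had the instruction completed,
 * it would have moved the stack pointer down to the faulting address.
 */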
/*
 * do_page_fault error handling helpers
 */

static int
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long address, int si_code)
{
	/*
	 * If we are in kernel mode, bail out with a SEGV, this will
	 * be caught by the assembly which will restore the non-volatile
	 * registers before calling bad_page_fault()
	 */
	if (!user_mode(regs))
		return SIGSEGV;

	_exception(SIGSEGV, regs, si_code, address);

	return 0;
}

static noinline int bad_area_nosemaphore(struct pt_regs *regs, unsigned long address)
{
	return __bad_area_nosemaphore(regs, address, SEGV_MAPERR);
}
static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	return __bad_area_nosemaphore(regs, address, si_code);
}

static noinline int bad_area(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_MAPERR);
}

static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
	return __bad_area(regs, address, SEGV_ACCERR);
}
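/*
 * For example: a fault on an address with no vma at all (a stray
 * pointer dereference) goes through bad_area() and is reported to the
 * task as SEGV_MAPERR, while a write into a mapping created with
 * PROT_READ only does have a vma but fails the permission check, so it
 * goes through bad_access() and is reported as SEGV_ACCERR instead.
 */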
static int do_sigbus(struct pt_regs *regs, unsigned long address,
		     int fault)
{
	siginfo_t info;
	unsigned int lsb = 0;

	if (!user_mode(regs))
		return SIGBUS;

	current->thread.trap_nr = BUS_ADRERR;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void __user *)address;
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		pr_err("MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			current->comm, current->pid, address);
		info.si_code = BUS_MCEERR_AR;
	}

	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
#endif
	info.si_addr_lsb = lsb;
	force_sig_info(SIGBUS, &info, current);
	return 0;
}
static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
{
	/*
	 * Kernel page fault interrupted by SIGKILL. We have no reason to
	 * continue processing.
	 */
	if (fatal_signal_pending(current) && !user_mode(regs))
		return SIGKILL;

	/* Out of memory */
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, or some other thing happened to us that
		 * made us unable to handle the page fault gracefully.
		 */
		if (!user_mode(regs))
			return SIGSEGV;
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			return do_sigbus(regs, addr, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			return bad_area_nosemaphore(regs, addr);
		else
			BUG();
	}
	return 0;
}
/* Is this a bad kernel fault ? */
static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
			     unsigned long address)
{
	if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
		printk_ratelimited(KERN_CRIT "kernel tried to execute"
				   " exec-protected page (%lx) -"
				   " exploit attempt? (uid: %d)\n",
				   address, from_kuid(&init_user_ns,
						      current_uid()));
	}
	return is_exec || (address >= TASK_SIZE);
}
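/*
 * In other words: a kernel-mode fault is tolerated only when it is a
 * data access to a user address (e.g. copy_to_user() touching an
 * unpopulated user page).  A kernel-mode execute fault, or a data
 * fault on a kernel address (>= TASK_SIZE), has no legitimate cause,
 * so the SIGSEGV return escalates it to bad_page_fault().
 */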
static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
				struct vm_area_struct *vma,
				bool store_update_sp)
{
	/*
	 * N.B. The POWER/Open ABI allows programs to access up to
	 * 288 bytes below the stack pointer.
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1.  Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (address + 0x100000 < vma->vm_end) {
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;
		if (uregs == NULL)
			return true;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->gpr[1] && !store_update_sp)
			return true;
	}
	return false;
}
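/*
 * Example of the heuristic above: with the stack vma topping out at
 * vm_end = 0x80000000 and r1 = 0x7ffff000, a plain store to
 * 0x7fe00000 is more than 1MB below the top of the stack and more
 * than 2kB below r1, so expansion is refused unless regs->nip points
 * at an r1-updating store such as "stwu r1,-n(r1)".  A store at
 * r1 - 288, inside the POWER/Open ABI red zone, is always allowed.
 */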
static bool access_error(bool is_write, bool is_exec,
			 struct vm_area_struct *vma)
{
	/*
	 * Allow execution from readable areas if the MMU does not
	 * provide separate controls over reading and executing.
	 *
	 * Note: That code used to not be enabled for 4xx/BookE.
	 * It is now as I/D cache coherency for these is done at
	 * set_pte_at() time and I see no reason why the test
	 * below wouldn't be valid on those processors. This -may-
	 * break programs compiled with a really old ABI though.
	 */
	if (is_exec) {
		return !(vma->vm_flags & VM_EXEC) &&
			(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
			 !(vma->vm_flags & (VM_READ | VM_WRITE)));
	}

	if (is_write) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return true;
		return false;
	}

	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return true;

	return false;
}
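/*
 * For instance, on a CPU without CPU_FTR_NOEXECUTE, an instruction
 * fetch from a readable mapping that lacks VM_EXEC is let through,
 * since such MMUs cannot distinguish reads from execution anyway;
 * with per-page execute control the same fetch is refused and
 * surfaces as SEGV_ACCERR through bad_access().
 */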
#ifdef CONFIG_PPC_SMLPAR
static inline void cmo_account_page_fault(void)
{
	if (firmware_has_feature(FW_FEATURE_CMO)) {
		u32 page_ins;

		preempt_disable();
		page_ins = be32_to_cpu(get_lppaca()->page_ins);
		page_ins += 1 << PAGE_FACTOR;
		get_lppaca()->page_ins = cpu_to_be32(page_ins);
		preempt_enable();
	}
}
#else
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */
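/*
 * Note on the accounting above: the Cooperative Memory Overcommit
 * firmware counts in 4kB hardware pages, which is presumably why each
 * fault adds 1 << PAGE_FACTOR (the shift between the kernel page size
 * and 4kB) rather than 1.  preempt_disable() keeps us on one CPU while
 * we read-modify-write that CPU's lppaca field.
 */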
#ifdef CONFIG_PPC_STD_MMU
static void sanity_check_fault(bool is_write, unsigned long error_code)
{
	/*
	 * For hash translation mode, we should never get a
	 * PROTFAULT. Any update to pte to reduce access will result in us
	 * removing the hash page table entry, thus resulting in a DSISR_NOHPTE
	 * fault instead of DSISR_PROTFAULT.
	 *
	 * A pte update to relax the access will not result in a hash page table
	 * entry invalidate and hence can result in DSISR_PROTFAULT.
	 * ptep_set_access_flags() doesn't do a hpte flush. This is why we have
	 * the special !is_write in the below conditional.
	 *
	 * For platforms that don't support a coherent icache but do support
	 * a per-page noexec bit, we set things up so that the D/I cache sync
	 * happens via a fault. But that is handled by the low level hash
	 * fault code (hash_page_do_lazy_icache()) and we should not reach
	 * here in that case.
	 *
	 * For wrong accesses that can result in PROTFAULT, the above
	 * vma->vm_flags check should handle those and hence we should fall
	 * through to the bad_area handling correctly.
	 *
	 * For embedded processors with per-page exec support but without a
	 * coherent icache, we do get PROTFAULT and we handle that D/I cache
	 * sync in set_pte_at while taking the noexec/prot fault. Hence this
	 * WARN_ON is conditional on the server MMU.
	 *
	 * For radix, we can get a prot fault in the autonuma case, because
	 * the radix page table will have the pages marked noaccess for user.
	 */
	if (!radix_enabled() && !is_write)
		WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}
#else
static void sanity_check_fault(bool is_write, unsigned long error_code) { }
#endif /* CONFIG_PPC_STD_MMU */
/*
 * Define the correct "is_write" bit in error_code based
 * on the processor family
 */
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err)	((__err) & ESR_DST)
#define page_fault_is_bad(__err)	(0)
#else
#define page_fault_is_write(__err)	((__err) & DSISR_ISSTORE)
#if defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err)	((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_64S)
#else
#define page_fault_is_bad(__err)	((__err) & DSISR_BAD_FAULT_32S)
#endif
#endif
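/*
 * These macros keep the body of __do_page_fault() family-agnostic.
 * On 4xx/BookE, for example, page_fault_is_bad() expands to the
 * constant 0, so the compiler drops the corresponding SIGBUS branch
 * entirely; on the other families it tests the DSISR/ESR bits that
 * flag a fault the generic path cannot usefully service.
 */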
/*
 * For 600- and 800-family processors, the error_code parameter is DSISR
 * for a data fault, SRR1 for an instruction fault. For 400-family processors
 * the error_code parameter is ESR for a data fault, 0 for an instruction
 * fault.
 * For 64-bit processors, the error_code parameter is
 *  - DSISR for a non-SLB data access fault,
 *  - SRR1 & 0x08000000 for a non-SLB instruction access fault
 *  - 0 any SLB fault.
 *
 * The return value is 0 if the fault was handled, or the signal
 * number if this is a kernel fault that can't be handled here.
 */
static int __do_page_fault(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	int is_exec = TRAP(regs) == 0x400;
	int is_user = user_mode(regs);
	int is_write = page_fault_is_write(error_code);
	int fault, major = 0;
	bool store_update_sp = false;

	if (notify_page_fault(regs))
		return 0;

	if (unlikely(page_fault_is_bad(error_code))) {
		if (is_user) {
			_exception(SIGBUS, regs, BUS_OBJERR, address);
			return 0;
		}
		return SIGBUS;
	}
	/* Additional sanity check(s) */
	sanity_check_fault(is_write, error_code);

	/*
	 * The kernel should never take an execute fault nor should it
	 * take a page fault to a kernel address.
	 */
	if (unlikely(!is_user && bad_kernel_fault(is_exec, error_code, address)))
		return SIGSEGV;

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in a region with pagefaults disabled then we must not take the fault
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		if (is_user)
			printk_ratelimited(KERN_ERR "Page fault in user mode"
					   " with faulthandler_disabled()=%d"
					   " mm=%p\n",
					   faulthandler_disabled(), mm);
		return bad_area_nosemaphore(regs, address);
	}
	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * We want to do this outside mmap_sem, because reading code around nip
	 * can result in fault, which will cause a deadlock when called with
	 * mmap_sem held
	 */
	if (is_write && is_user)
		store_update_sp = store_updates_sp(regs);

	if (is_user)
		flags |= FAULT_FLAG_USER;
	if (is_write)
		flags |= FAULT_FLAG_WRITE;
	if (is_exec)
		flags |= FAULT_FLAG_INSTRUCTION;
	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if (!is_user && !search_exception_tables(regs->nip))
			return bad_area_nosemaphore(regs, address);

retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}
	vma = find_vma(mm, address);
	if (unlikely(!vma))
		return bad_area(regs, address);
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		return bad_area(regs, address);

	/* The stack is being expanded, check if it's valid */
	if (unlikely(bad_stack_expansion(regs, address, vma, store_update_sp)))
		return bad_area(regs, address);

	/* Try to expand it */
	if (unlikely(expand_stack(vma, address)))
		return bad_area(regs, address);

good_area:
	if (unlikely(access_error(is_write, is_exec, vma)))
		return bad_access(regs, address);
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags);
	major |= fault & VM_FAULT_MAJOR;
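	/*
	 * Reminder of the retry protocol: with FAULT_FLAG_ALLOW_RETRY set,
	 * handle_mm_fault() may drop mmap_sem while it waits (e.g. on page
	 * I/O) and report VM_FAULT_RETRY instead of blocking with the lock
	 * held.  The second pass below runs with FAULT_FLAG_TRIED instead,
	 * so it waits for the page while holding mmap_sem.
	 */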
	/*
	 * Handle the retry right now, the mmap_sem has been released in that
	 * case.
	 */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		/* We retry only once */
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			/*
			 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
			 * of starvation.
			 */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			flags |= FAULT_FLAG_TRIED;
			if (!fatal_signal_pending(current))
				goto retry;
		}

		/*
		 * User mode? Just return to handle the fatal exception otherwise
		 * return to bad_page_fault
		 */
		return is_user ? 0 : SIGBUS;
	}
	up_read(&current->mm->mmap_sem);
	if (unlikely(fault & VM_FAULT_ERROR))
		return mm_fault_error(regs, address, fault);

	/*
	 * Major/minor page fault accounting.
	 */
	if (major) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
		cmo_account_page_fault();
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
	}
	return 0;
}
NOKPROBE_SYMBOL(__do_page_fault);
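/*
 * exception_enter()/exception_exit() below inform the context tracking
 * code (CONFIG_CONTEXT_TRACKING, used by nohz_full) that the CPU has
 * left and re-entered user context, so RCU keeps watching the CPU for
 * the duration of the fault even when the tick is stopped.
 */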
int do_page_fault(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code)
{
	enum ctx_state prev_state = exception_enter();
	int rc = __do_page_fault(regs, address, error_code);
	exception_exit(prev_state);
	return rc;
}
NOKPROBE_SYMBOL(do_page_fault);
/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from the DSI and ISI handlers in head.S and from some
 * of the procedures in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this fault?  */
	if ((entry = search_exception_tables(regs->nip)) != NULL) {
		regs->nip = extable_fixup(entry);
		return;
	}

	/* kernel has accessed a bad area */
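	/*
	 * regs->trap holds the exception vector we came in through:
	 * 0x300 is a data storage interrupt (DSI), 0x380 a data SLB miss,
	 * 0x400 an instruction storage interrupt (ISI), 0x480 an
	 * instruction SLB miss, and 0x600 an alignment interrupt.
	 */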
	switch (regs->trap) {
	case 0x300:
	case 0x380:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"data at address 0x%08lx\n", regs->dar);
		break;
	case 0x400:
	case 0x480:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"instruction fetch\n");
		break;
	case 0x600:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"unaligned access at address 0x%08lx\n", regs->dar);
		break;
	default:
		printk(KERN_ALERT "Unable to handle kernel paging request for "
			"unknown fault\n");
		break;
	}
	printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
		regs->nip);

	if (task_stack_end_corrupted(current))
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	die("Kernel access of bad area", regs, sig);
}