// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Machine check exception handling.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/of.h>

#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
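
/*
 * The early (real mode) machine check handler saves each event into a
 * small per-CPU array: mce_nest_count indexes mce_event[], so nested
 * machine checks on one CPU each get their own slot until MAX_MC_EVT
 * is exceeded.
 */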
static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
					mce_ue_event_queue);
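
/*
 * The queues above are drained later: mce_event_queue is replayed via
 * irq_work once interrupts are enabled again, and mce_ue_event_queue is
 * drained from a workqueue kicked off by irq_work.
 */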
static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high level MCE information into the per-CPU buffer,
 * which is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = __this_cpu_inc_return(mce_nest_count) - 1;
	struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);

	/*
	 * Return if we don't have enough space to log mce event.
	 * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
	 * the check below will stop buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->cpu = get_paca()->paca_index;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;
	mce->sync_error = mce_err->sync_error;
	mce->error_class = mce_err->error_class;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			machine_check_ue_event(mce);
		}
	}
}

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once event has been consumed.
 *		1 <= release the slot.
 *
 *	return	1 = success, 0 = failure.
 *
 * get_mce_event() is called by platform-specific machine check handler
 * routines and by KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until the ret_from_except() routine
 * is reached.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = __this_cpu_read(mce_nest_count) - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Check if we have MCE info to process. */
	if (index >= 0 && index < MAX_MC_EVT) {
		mc_evt = this_cpu_ptr(&mce_event[index]);
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		__this_cpu_dec(mce_nest_count);

	return ret;
}

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
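
/*
 * memory_failure() may sleep, so the UE irq_work handler below only
 * schedules the workqueue item; the real work happens in
 * machine_process_ue_event().
 */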
static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE UE event which then can be handled later.
 */
void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = __this_cpu_inc_return(mce_ue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_ue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));

	/* Queue work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event which then can be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}

/*
 * Process pending MCE UE events from the MCE UE event queue. This runs
 * from the workqueue, where memory_failure() can safely be called.
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (__this_cpu_read(mce_ue_count) > 0) {
		index = __this_cpu_read(mce_ue_count) - 1;
		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
#ifdef CONFIG_MEMORY_FAILURE
		/* This should probably be queued elsewhere, but oh well. */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, 0);
			} else
				pr_warn("Failed to identify bad address from "
					"where the uncorrectable error (UE) "
					"was generated\n");
		}
#endif
		__this_cpu_dec(mce_ue_count);
	}
}

/*
 * Process pending MCE events from the MCE event queue. This is the
 * irq_work handler queued by machine_check_queue_event().
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (__this_cpu_read(mce_queue_count) > 0) {
		index = __this_cpu_read(mce_queue_count) - 1;
		evt = this_cpu_ptr(&mce_event_queue[index]);
		machine_check_print_event_info(evt, false, false);
		__this_cpu_dec(mce_queue_count);
	}
}
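
/*
 * Pretty-print a single machine check event to the console. @user_mode
 * and @in_guest select how the origin and the faulting NIP are reported
 * (PID/comm for user or guest context, kernel symbol otherwise).
 */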
void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode, bool in_guest)
{
	const char *level, *sevstr, *subtype, *err_type;
	uint64_t ea = 0, pa = 0;
	int n = 0;
	char dar_str[50];
	char pa_str[50];
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};
	static const char *mc_error_class[] = {
		"Unknown",
		"Hardware error",
		"Probable Hardware error (some chance of software cause)",
		"Software error",
		"Probable Software error (some chance of hardware cause)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "Warning";
		break;
	case MCE_SEV_SEVERE:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		err_type = "UE";
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		if (evt->u.ue_error.effective_address_provided)
			ea = evt->u.ue_error.effective_address;
		if (evt->u.ue_error.physical_address_provided)
			pa = evt->u.ue_error.physical_address;
		break;
	case MCE_ERROR_TYPE_SLB:
		err_type = "SLB";
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		if (evt->u.slb_error.effective_address_provided)
			ea = evt->u.slb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_ERAT:
		err_type = "ERAT";
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		if (evt->u.erat_error.effective_address_provided)
			ea = evt->u.erat_error.effective_address;
		break;
	case MCE_ERROR_TYPE_TLB:
		err_type = "TLB";
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		if (evt->u.tlb_error.effective_address_provided)
			ea = evt->u.tlb_error.effective_address;
		break;
	case MCE_ERROR_TYPE_USER:
		err_type = "User";
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		if (evt->u.user_error.effective_address_provided)
			ea = evt->u.user_error.effective_address;
		break;
	case MCE_ERROR_TYPE_RA:
		err_type = "Real address";
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		if (evt->u.ra_error.effective_address_provided)
			ea = evt->u.ra_error.effective_address;
		break;
	case MCE_ERROR_TYPE_LINK:
		err_type = "Link";
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		if (evt->u.link_error.effective_address_provided)
			ea = evt->u.link_error.effective_address;
		break;
	default:
	case MCE_ERROR_TYPE_UNKNOWN:
		err_type = "Unknown";
		subtype = "";
		break;
	}

	dar_str[0] = pa_str[0] = '\0';
	if (ea && evt->srr0 != ea) {
		/* Load/Store address */
		n = sprintf(dar_str, "DAR: %016llx ", ea);
		if (pa)
			sprintf(dar_str + n, "paddr: %016llx ", pa);
	} else if (pa) {
		sprintf(pa_str, " paddr: %016llx", pa);
	}

	printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
	       level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
	       err_type, subtype, dar_str,
	       evt->disposition == MCE_DISPOSITION_RECOVERED ?
	       "Recovered" : "Not recovered");

	if (in_guest || user_mode) {
		printk("%sMCE: CPU%d: PID: %d Comm: %s %sNIP: [%016llx]%s\n",
		       level, evt->cpu, current->pid, current->comm,
		       in_guest ? "Guest " : "", evt->srr0, pa_str);
	} else {
		printk("%sMCE: CPU%d: NIP: [%016llx] %pS%s\n",
		       level, evt->cpu, evt->srr0, (void *)evt->srr0, pa_str);
	}

	subtype = evt->error_class < ARRAY_SIZE(mc_error_class) ?
		mc_error_class[evt->error_class] : "Unknown";
	printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);

/*
 * This function is called in real mode. Strictly no printk's please.
 * regs->nip and regs->msr contain srr0 and srr1 respectively.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	hv_nmi_check_nonrecoverable(regs);

	/* See if platform is capable of handling machine check. */
	if (ppc_md.machine_check_early)
		handled = ppc_md.machine_check_early(regs);

	return handled;
}

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;
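
/*
 * Work out what the HMER debug trigger bit is used for on this CPU:
 * first from the "ibm,hmi-special-triggers" device tree property, and
 * failing that from the PVR (POWER9 Nimbus DD2.x revisions).
 */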
static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}

__initcall(init_debug_trig_function);

/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not a HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * Now to avoid problems with soft-disable we
		 * only do the emulation if we are coming from
		 * host user space.
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;
		break;
	default:
		break;
	}

	/* See if any other HMI causes remain to be handled */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}
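
/*
 * Early real-mode HMI handler: count the exception, deal with HMIs that
 * came from a debug trigger, and otherwise wait for subcore guest exit
 * and timebase resync before the regular HMI path continues.
 */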
long hmi_exception_realmode(struct pt_regs *regs)
{
	int ret;

	__this_cpu_inc(irq_stat.hmi_exceptions);

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}