// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *               Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers; only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include "entry.h"

enum {
        ec_schedule = 0,
        ec_call_function_single,
        ec_stop_cpu,
};

enum {
        CPU_STATE_STANDBY,
        CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
        struct lowcore *lowcore;        /* lowcore page(s) for the cpu */
        unsigned long ec_mask;          /* bit mask for ec_xxx functions */
        unsigned long ec_clk;           /* sigp timestamp for ec_xxx */
        signed char state;              /* physical cpu state */
        signed char polarization;       /* physical polarization */
        u16 address;                    /* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
        smp_max_threads = 1;
        return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
        get_option(&s, &smp_max_threads);
        return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
        int cc;

        while (1) {
                cc = __pcpu_sigp(addr, order, parm, NULL);
                if (cc != SIGP_CC_BUSY)
                        return cc;
                cpu_relax();
        }
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
        int cc, retry;

        for (retry = 0; ; retry++) {
                cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
                if (cc != SIGP_CC_BUSY)
                        break;
                if (retry >= 3)
                        udelay(10);
        }
        return cc;
}
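
/*
 * Illustrative note: both helpers retry only the busy condition; callers
 * typically just check the final condition code for acceptance, e.g.
 * __cpu_up() below treats anything other than SIGP_CC_ORDER_CODE_ACCEPTED
 * returned by pcpu_sigp_retry() as an error.
 */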

static inline int pcpu_stopped(struct pcpu *pcpu)
{
        u32 uninitialized_var(status);

        if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
                        0, &status) != SIGP_CC_STATUS_STORED)
                return 0;
        return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
        if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
                        0, NULL) != SIGP_CC_STATUS_STORED)
                return 1;
        /* Status stored condition code is equivalent to cpu not running. */
        return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
        int cpu;

        for_each_cpu(cpu, mask)
                if (pcpu_devices[cpu].address == address)
                        return pcpu_devices + cpu;
        return NULL;
}

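/*
 * Request an ec_xxx service from a remote cpu: set the request bit in its
 * ec_mask and, unless a request was already pending, kick the target.
 * A cpu found running is signalled with a sigp external call, otherwise
 * with a sigp emergency signal (presumably the better choice for waking
 * a stopped or waiting cpu).
 */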
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
        int order;

        if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
                return;
        order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
        pcpu->ec_clk = get_tod_clock_fast();
        pcpu_sigp_retry(pcpu, order, 0);
}

static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
        unsigned long async_stack, nodat_stack;
        struct lowcore *lc;

        if (pcpu != &pcpu_devices[0]) {
                pcpu->lowcore = (struct lowcore *)
                        __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
                nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
                if (!pcpu->lowcore || !nodat_stack)
                        goto out;
        } else {
                nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
        }
        async_stack = stack_alloc();
        if (!async_stack)
                goto out;
        lc = pcpu->lowcore;
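        /*
         * Seed the new lowcore from the boot cpu's: the first 512 bytes
         * hold the (largely static) fields every cpu shares, the rest is
         * cleared and filled in per cpu below.
         */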
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
        lc->async_stack = async_stack + STACK_INIT_OFFSET;
        lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        lc->br_r1_trampoline = 0x07f1;  /* br %r1 */
        if (nmi_alloc_per_cpu(lc))
                goto out_async;
        if (vdso_alloc_per_cpu(lc))
                goto out_mcesa;
        lowcore_ptr[cpu] = lc;
        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
        return 0;

out_mcesa:
        nmi_free_per_cpu(lc);
out_async:
        stack_free(async_stack);
out:
        if (pcpu != &pcpu_devices[0]) {
                free_pages(nodat_stack, THREAD_SIZE_ORDER);
                free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
        }
        return -ENOMEM;
}

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
        unsigned long async_stack, nodat_stack, lowcore;

        nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
        async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
        lowcore = (unsigned long) pcpu->lowcore;

        pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
        lowcore_ptr[pcpu - pcpu_devices] = NULL;
        vdso_free_per_cpu(pcpu->lowcore);
        nmi_free_per_cpu(pcpu->lowcore);
        stack_free(async_stack);
        if (pcpu == &pcpu_devices[0])
                return;
        free_pages(nodat_stack, THREAD_SIZE_ORDER);
        free_pages(lowcore, LC_ORDER);
}

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
        struct lowcore *lc = pcpu->lowcore;

        cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
        lc->spinlock_index = 0;
        lc->percpu_offset = __per_cpu_offset[cpu];
        lc->kernel_asce = S390_lowcore.kernel_asce;
        lc->machine_flags = S390_lowcore.machine_flags;
        lc->user_timer = lc->system_timer =
                lc->steal_timer = lc->avg_steal_timer = 0;
        __ctl_store(lc->cregs_save_area, 0, 15);
        save_access_regs((unsigned int *) lc->access_regs_save_area);
        memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
               sizeof(lc->stfle_fac_list));
        memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
               sizeof(lc->alt_stfle_fac_list));
        arch_spin_lock_setup(cpu);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
        struct lowcore *lc = pcpu->lowcore;

        lc->kernel_stack = (unsigned long) task_stack_page(tsk)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
        lc->current_task = (unsigned long) tsk;
        lc->lpp = LPP_MAGIC;
        lc->current_pid = tsk->pid;
        lc->user_timer = tsk->thread.user_timer;
        lc->guest_timer = tsk->thread.guest_timer;
        lc->system_timer = tsk->thread.system_timer;
        lc->hardirq_timer = tsk->thread.hardirq_timer;
        lc->softirq_timer = tsk->thread.softirq_timer;
        lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
        struct lowcore *lc = pcpu->lowcore;

        lc->restart_stack = lc->nodat_stack;
        lc->restart_fn = (unsigned long) func;
        lc->restart_data = (unsigned long) data;
        lc->restart_source = -1UL;
        pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
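
/*
 * Note: sigp restart makes the target cpu load its restart new PSW from
 * the lowcore, so the restart entry code picks up restart_fn, restart_data
 * and restart_stack as initialized above and branches to func.
 */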

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(void (*func)(void*), void *data)
{
        func(data);     /* should not return */
}

static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
                                                void (*func)(void *),
                                                void *data, unsigned long stack)
{
        struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
        unsigned long source_cpu = stap();

        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        if (pcpu->address == source_cpu)
                CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
        /* Stop target cpu (if func returns this stops the current cpu). */
        pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
        /* Restart func on the target cpu and stop the current cpu. */
        mem_assign_absolute(lc->restart_stack, stack);
        mem_assign_absolute(lc->restart_fn, (unsigned long) func);
        mem_assign_absolute(lc->restart_data, (unsigned long) data);
        mem_assign_absolute(lc->restart_source, source_cpu);
        __bpon();
        asm volatile(
                "0:     sigp    0,%0,%2 # sigp restart to target cpu\n"
                "       brc     2,0b    # busy, try again\n"
                "1:     sigp    0,%1,%3 # sigp stop to current cpu\n"
                "       brc     2,1b    # busy, try again\n"
                : : "d" (pcpu->address), "d" (source_cpu),
                    "K" (SIGP_RESTART), "K" (SIGP_STOP)
                : "0", "1", "cc");
        for (;;) ;
}
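
/*
 * Note on the inline assembly above: sigp sets condition code 2 when the
 * addressed cpu is busy, and "brc 2,..." branches exactly on cc 2, so both
 * orders are simply retried until the busy condition clears.
 */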

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
        int cc;

        if (smp_cpu_mtid == mtid)
                return 0;
        cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
        if (cc == 0) {
                smp_cpu_mtid = mtid;
                smp_cpu_mt_shift = 0;
                while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
                        smp_cpu_mt_shift++;
                pcpu_devices[0].address = stap();
        }
        return cc;
}
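
/*
 * Worked example (illustrative): with mtid = 1 the loop above yields
 * smp_cpu_mt_shift = 1, i.e. core id c owns the two cpu addresses
 * (c << 1) and (c << 1) + 1.
 */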

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
        struct pcpu *pcpu;

        /* Use the current cpu if it is online. */
        pcpu = pcpu_find_address(cpu_online_mask, stap());
        if (!pcpu)
                /* Use the first online cpu. */
                pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
        pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
        struct lowcore *lc = pcpu_devices->lowcore;

        if (pcpu_devices[0].address == stap())
                lc = &S390_lowcore;

        pcpu_delegate(&pcpu_devices[0], func, data,
                      lc->nodat_stack);
}

int smp_find_processor_id(u16 address)
{
        int cpu;

        for_each_present_cpu(cpu)
                if (pcpu_devices[cpu].address == address)
                        return cpu;
        return -1;
}

bool arch_vcpu_is_preempted(int cpu)
{
        if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
                return false;
        if (pcpu_running(pcpu_devices + cpu))
                return false;
        return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void smp_yield_cpu(int cpu)
{
        if (MACHINE_HAS_DIAG9C) {
                diag_stat_inc_norecursion(DIAG_STAT_X09C);
                asm volatile("diag %0,0,0x9c"
                             : : "d" (pcpu_devices[cpu].address));
        } else if (MACHINE_HAS_DIAG44 && !smp_cpu_mtid) {
                diag_stat_inc_norecursion(DIAG_STAT_X044);
                asm volatile("diag 0,0,0x44");
        }
}
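
/*
 * Background (hedged): diagnose 0x9c is a directed yield that donates the
 * caller's time slice on behalf of the named cpu, while diagnose 0x44 is
 * an undirected "yield my time slice"; the directed form is preferred
 * whenever the machine offers it.
 */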

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
        cpumask_t cpumask;
        u64 end;
        int cpu;

        cpumask_copy(&cpumask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &cpumask);

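        /* Allow roughly one second: the TOD clock advances 4096 units per
         * microsecond, so 1000000 << 12 is about one second of clock units. */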
        end = get_tod_clock() + (1000000UL << 12);
        for_each_cpu(cpu, &cpumask) {
                struct pcpu *pcpu = pcpu_devices + cpu;
                set_bit(ec_stop_cpu, &pcpu->ec_mask);
                while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
                                   0, NULL) == SIGP_CC_BUSY &&
                       get_tod_clock() < end)
                        cpu_relax();
        }
        while (get_tod_clock() < end) {
                for_each_cpu(cpu, &cpumask)
                        if (pcpu_stopped(pcpu_devices + cpu))
                                cpumask_clear_cpu(cpu, &cpumask);
                if (cpumask_empty(&cpumask))
                        break;
                cpu_relax();
        }
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
        int cpu;

        /* Disable all interrupts/machine checks */
        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        trace_hardirqs_off();

        debug_set_critical();

        if (oops_in_progress)
                smp_emergency_stop();

        /* stop all processors */
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;
                pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
                while (!pcpu_stopped(pcpu_devices + cpu))
                        cpu_relax();
        }
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
        unsigned long bits;

        /* handle bit signal external calls */
        bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
        if (test_bit(ec_stop_cpu, &bits))
                smp_stop_cpu();
        if (test_bit(ec_schedule, &bits))
                scheduler_ipi();
        if (test_bit(ec_call_function_single, &bits))
                generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
                                  unsigned int param32, unsigned long param64)
{
        inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
        smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        int cpu;

        for_each_cpu(cpu, mask)
                pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
        unsigned long orval;
        unsigned long andval;
        int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
        struct ec_creg_mask_parms *pp = info;
        unsigned long cregs[16];

        __ctl_store(cregs, 0, 15);
        cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
        __ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
        struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

        on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
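
/*
 * Usage sketch (hypothetical bit number, for illustration only):
 *
 *      smp_ctl_set_bit(0, 10);    - set bit 10 of control register 0 everywhere
 *      smp_ctl_clear_bit(0, 10);  - clear it again on all cpus
 *
 * Both helpers run smp_ctl_bit_callback() on every online cpu via
 * on_each_cpu() with wait=1, so the update has completed everywhere on
 * return.
 */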

#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
{
        struct pcpu *pcpu = pcpu_devices + cpu;
        unsigned long pa;

        pa = __pa(&pcpu->lowcore->floating_pt_save_area);
        if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
                              pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;
        if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
                return 0;
        pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
        if (MACHINE_HAS_GS)
                pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
        if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
                              pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;
        return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
                                     bool is_boot_cpu, unsigned long page)
{
        __vector128 *vxrs = (__vector128 *) page;

        if (is_boot_cpu)
                vxrs = boot_cpu_vector_save_area;
        else
                __pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
        save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
                                     bool is_boot_cpu, unsigned long page)
{
        void *regs = (void *) page;

        if (is_boot_cpu)
                copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
        else
                __pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
        save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
        int addr, boot_cpu_addr, max_cpu_addr;
        struct save_area *sa;
        unsigned long page;
        bool is_boot_cpu;

        if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
                /* No previous system present, normal boot. */
                return;
        /* Allocate a page as dumping area for the store status sigps */
        page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
        if (!page)
                panic("ERROR: Failed to allocate %lx bytes below %lx\n",
                      PAGE_SIZE, 1UL << 31);

        /* Set multi-threading state to the previous system. */
        pcpu_set_smt(sclp.mtid_prev);
        boot_cpu_addr = stap();
        max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
        for (addr = 0; addr <= max_cpu_addr; addr++) {
                if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
                    SIGP_CC_NOT_OPERATIONAL)
                        continue;
                is_boot_cpu = (addr == boot_cpu_addr);
                /* Allocate save area */
                sa = save_area_alloc(is_boot_cpu);
                if (!sa)
                        panic("could not allocate memory for save area\n");
                if (MACHINE_HAS_VX)
                        /* Get the vector registers */
                        smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
                /*
                 * For a zfcp dump OLDMEM_BASE == NULL and the registers
                 * of the boot CPU are stored in the HSA. To retrieve
                 * these registers an SCLP request is required which is
                 * done by drivers/s390/char/zcore.c:init_cpu_info()
                 */
                if (!is_boot_cpu || OLDMEM_BASE)
                        /* Get the CPU registers */
                        smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
        }
        memblock_free(page, PAGE_SIZE);
        diag_dma_ops.diag308_reset();
        pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
        pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
        return pcpu_devices[cpu].polarization;
}

static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
        static int use_sigp_detection;
        int address;

        if (use_sigp_detection || sclp_get_core_info(info, early)) {
                use_sigp_detection = 1;
                for (address = 0;
                     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
                     address += (1U << smp_cpu_mt_shift)) {
                        if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
                            SIGP_CC_NOT_OPERATIONAL)
                                continue;
                        info->core[info->configured].core_id =
                                address >> smp_cpu_mt_shift;
                        info->configured++;
                }
                info->combined = info->configured;
        }
}

static int smp_add_present_cpu(int cpu);

static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
        struct pcpu *pcpu;
        cpumask_t avail;
        int cpu, nr, i, j;
        u16 address;

        nr = 0;
        cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
        cpu = cpumask_first(&avail);
        for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
                if (sclp.has_core_type && info->core[i].type != boot_core_type)
                        continue;
                address = info->core[i].core_id << smp_cpu_mt_shift;
                for (j = 0; j <= smp_cpu_mtid; j++) {
                        if (pcpu_find_address(cpu_present_mask, address + j))
                                continue;
                        pcpu = pcpu_devices + cpu;
                        pcpu->address = address + j;
                        pcpu->state =
                                (cpu >= info->configured*(smp_cpu_mtid + 1)) ?
                                CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
                        smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
                        set_cpu_present(cpu, true);
                        if (sysfs_add && smp_add_present_cpu(cpu) != 0)
                                set_cpu_present(cpu, false);
                        else
                                nr++;
                        cpu = cpumask_next(cpu, &avail);
                        if (cpu >= nr_cpu_ids)
                                break;
                }
        }
        return nr;
}

void __init smp_detect_cpus(void)
{
        unsigned int cpu, mtid, c_cpus, s_cpus;
        struct sclp_core_info *info;
        u16 address;

        /* Get CPU information */
        info = memblock_alloc(sizeof(*info), 8);
        if (!info)
                panic("%s: Failed to allocate %zu bytes align=0x%x\n",
                      __func__, sizeof(*info), 8);
        smp_get_core_info(info, 1);
        /* Find boot CPU type */
        if (sclp.has_core_type) {
                address = stap();
                for (cpu = 0; cpu < info->combined; cpu++)
                        if (info->core[cpu].core_id == address) {
                                /* The boot cpu dictates the cpu type. */
                                boot_core_type = info->core[cpu].type;
                                break;
                        }
                if (cpu >= info->combined)
                        panic("Could not find boot CPU type");
        }

        /* Set multi-threading state for the current system */
        mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
        mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
        pcpu_set_smt(mtid);

        /* Print number of CPUs */
        c_cpus = s_cpus = 0;
        for (cpu = 0; cpu < info->combined; cpu++) {
                if (sclp.has_core_type &&
                    info->core[cpu].type != boot_core_type)
                        continue;
                if (cpu < info->configured)
                        c_cpus += smp_cpu_mtid + 1;
                else
                        s_cpus += smp_cpu_mtid + 1;
        }
        pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

        /* Add CPUs present at boot */
        get_online_cpus();
        __smp_rescan_cpus(info, 0);
        put_online_cpus();
        memblock_free_early((unsigned long)info, sizeof(*info));
}

static void smp_init_secondary(void)
{
        int cpu = smp_processor_id();

        S390_lowcore.last_update_clock = get_tod_clock();
        restore_access_regs(S390_lowcore.access_regs_save_area);
        cpu_init();
        preempt_disable();
        init_cpu_timer();
        vtime_init();
        pfault_init();
        notify_cpu_starting(smp_processor_id());
        if (topology_cpu_dedicated(cpu))
                set_cpu_flag(CIF_DEDICATED_CPU);
        else
                clear_cpu_flag(CIF_DEDICATED_CPU);
        set_cpu_online(smp_processor_id(), true);
        inc_irq_stat(CPU_RST);
        local_irq_enable();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 *      Activate a secondary processor.
 */
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
{
        S390_lowcore.restart_stack = (unsigned long) restart_stack;
        S390_lowcore.restart_fn = (unsigned long) do_restart;
        S390_lowcore.restart_data = 0;
        S390_lowcore.restart_source = -1UL;
        __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
        __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
        CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        struct pcpu *pcpu;
        int base, i, rc;

        pcpu = pcpu_devices + cpu;
        if (pcpu->state != CPU_STATE_CONFIGURED)
                return -EIO;
        base = smp_get_base_cpu(cpu);
        for (i = 0; i <= smp_cpu_mtid; i++) {
                if (base + i < nr_cpu_ids)
                        if (cpu_online(base + i))
                                break;
        }
        /*
         * If this is the first CPU of the core to get online
         * do an initial CPU reset.
         */
        if (i > smp_cpu_mtid &&
            pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
            SIGP_CC_ORDER_CODE_ACCEPTED)
                return -EIO;

        rc = pcpu_alloc_lowcore(pcpu, cpu);
        if (rc)
                return rc;
        pcpu_prepare_secondary(pcpu, cpu);
        pcpu_attach_task(pcpu, tidle);
        pcpu_start_fn(pcpu, smp_start_secondary, NULL);
        /* Wait until cpu puts itself in the online & active maps */
        while (!cpu_online(cpu))
                cpu_relax();
        return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
        get_option(&s, &setup_possible_cpus);
        return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

int __cpu_disable(void)
{
        unsigned long cregs[16];

        /* Handle possible pending IPIs */
        smp_handle_ext_call();
        set_cpu_online(smp_processor_id(), false);
        /* Disable pseudo page faults on this cpu. */
        pfault_fini();
        /* Disable interrupt sources via control register. */
        __ctl_store(cregs, 0, 15);
        cregs[0]  &= ~0x0000ee70UL;     /* disable all external interrupts */
        cregs[6]  &= ~0xff000000UL;     /* disable all I/O interrupts */
        cregs[14] &= ~0x1f000000UL;     /* disable most machine checks */
        __ctl_load(cregs, 0, 15);
        clear_cpu_flag(CIF_NOHZ_DELAY);
        return 0;
}

void __cpu_die(unsigned int cpu)
{
        struct pcpu *pcpu;

        /* Wait until target cpu is down */
        pcpu = pcpu_devices + cpu;
        while (!pcpu_stopped(pcpu))
                cpu_relax();
        pcpu_free_lowcore(pcpu);
        cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
        cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
        idle_task_exit();
        __bpon();
        pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
        for (;;) ;
}

void __init smp_fill_possible_mask(void)
{
        unsigned int possible, sclp_max, cpu;

        sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
        sclp_max = min(smp_max_threads, sclp_max);
        sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
        possible = setup_possible_cpus ?: nr_cpu_ids;
        possible = min(possible, sclp_max);
        for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
                set_cpu_possible(cpu, true);
}
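
/*
 * Worked example (illustrative numbers): with sclp.mtid_cp = 1 and
 * smp_max_threads left unlimited, sclp_max is 2 threads per core; a
 * machine reporting sclp.max_cores = 8 then allows 16 possible cpus,
 * further capped by the possible_cpus= parameter and nr_cpu_ids.
 */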

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
                panic("Couldn't request external interrupt 0x1201");
        /* request the 0x1202 external call external interrupt */
        if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
                panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
        struct pcpu *pcpu = pcpu_devices;

        WARN_ON(!cpu_present(0) || !cpu_online(0));
        pcpu->state = CPU_STATE_CONFIGURED;
        pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
        pcpu_devices[0].address = stap();
        S390_lowcore.cpu_nr = 0;
        S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
        S390_lowcore.spinlock_index = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

static ssize_t cpu_configure_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct pcpu *pcpu;
        int cpu, val, rc, i;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        /* disallow configuration changes of online cpus and cpu 0 */
        cpu = dev->id;
        cpu = smp_get_base_cpu(cpu);
        if (cpu == 0)
                goto out;
        for (i = 0; i <= smp_cpu_mtid; i++)
                if (cpu_online(cpu + i))
                        goto out;
        pcpu = pcpu_devices + cpu;
        rc = 0;
        switch (val) {
        case 0:
                if (pcpu->state != CPU_STATE_CONFIGURED)
                        break;
                rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
                if (rc)
                        break;
                for (i = 0; i <= smp_cpu_mtid; i++) {
                        if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
                                continue;
                        pcpu[i].state = CPU_STATE_STANDBY;
                        smp_cpu_set_polarization(cpu + i,
                                                 POLARIZATION_UNKNOWN);
                }
                topology_expect_change();
                break;
        case 1:
                if (pcpu->state != CPU_STATE_STANDBY)
                        break;
                rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
                if (rc)
                        break;
                for (i = 0; i <= smp_cpu_mtid; i++) {
                        if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
                                continue;
                        pcpu[i].state = CPU_STATE_CONFIGURED;
                        smp_cpu_set_polarization(cpu + i,
                                                 POLARIZATION_UNKNOWN);
                }
                topology_expect_change();
                break;
        default:
                break;
        }
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
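
/*
 * The attribute above appears as /sys/devices/system/cpu/cpuN/configure;
 * writing 1 asks the SCLP to configure a standby core, writing 0
 * deconfigures it, which is only permitted while all cpus of the core
 * (and never cpu 0) are offline.
 */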

static ssize_t show_cpu_address(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
        &dev_attr_configure.attr,
        &dev_attr_address.attr,
        NULL,
};

static struct attribute_group cpu_common_attr_group = {
        .attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
        &dev_attr_idle_count.attr,
        &dev_attr_idle_time_us.attr,
        NULL,
};

static struct attribute_group cpu_online_attr_group = {
        .attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
        struct device *s = &per_cpu(cpu_device, cpu)->dev;

        return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}
static int smp_cpu_pre_down(unsigned int cpu)
{
        struct device *s = &per_cpu(cpu_device, cpu)->dev;

        sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
        return 0;
}

static int smp_add_present_cpu(int cpu)
{
        struct device *s;
        struct cpu *c;
        int rc;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
        per_cpu(cpu_device, cpu) = c;
        s = &c->dev;
        c->hotpluggable = 1;
        rc = register_cpu(c, cpu);
        if (rc)
                goto out;
        rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
        if (rc)
                goto out_cpu;
        rc = topology_cpu_init(c);
        if (rc)
                goto out_topology;
        return 0;

out_topology:
        sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
        unregister_cpu(c);
out:
        return rc;
}

int __ref smp_rescan_cpus(void)
{
        struct sclp_core_info *info;
        int nr;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
        smp_get_core_info(info, 0);
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        nr = __smp_rescan_cpus(info, 1);
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        kfree(info);
        if (nr)
                topology_schedule_update();
        return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf,
                                  size_t count)
{
        int rc;

        rc = lock_device_hotplug_sysfs();
        if (rc)
                return rc;
        rc = smp_rescan_cpus();
        unlock_device_hotplug();
        return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);
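
/*
 * The rescan attribute is created on the cpu subsystem root in
 * s390_smp_init() below, so writing to /sys/devices/system/cpu/rescan
 * triggers a fresh SCLP core scan via smp_rescan_cpus().
 */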

static int __init s390_smp_init(void)
{
        int cpu, rc = 0;

        rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
        if (rc)
                return rc;
        for_each_present_cpu(cpu) {
                rc = smp_add_present_cpu(cpu);
                if (rc)
                        goto out;
        }

        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
                               smp_cpu_online, smp_cpu_pre_down);
        rc = rc <= 0 ? rc : 0;
out:
        return rc;
}
subsys_initcall(s390_smp_init);