arch/mips/kernel/smp.c (Linux-libre 5.3.12-gnu, librecmc/linux-libre.git)

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

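/*
 * Completions used to hand-shake with a starting secondary CPU: the
 * secondary signals cpu_starting once it is ready to synchronise counters,
 * and cpu_running once it has marked itself online (see start_secondary()
 * and __cpu_up()).
 */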
static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical CPU mask containing only one VPE per core, used to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

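/*
 * CPUs known to be coherent with the rest of the system; secondaries add
 * themselves in start_secondary(), and mips_smp_send_ipi_mask() powers up
 * IPI targets that are absent from this mask.
 */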
cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

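/*
 * Record the sibling (same-core VPE/TC) relationships of @cpu in
 * cpu_sibling_map, considering all CPUs set up so far.
 */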
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpus_are_siblings(cpu, i)) {
                                cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                                cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                }
        } else
                cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

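/*
 * Record which of the CPUs set up so far share a physical package with
 * @cpu in cpu_core_map.
 */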
static inline void set_cpu_core_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_core_setup_map);

        for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
        int i, k, core_present;
        cpumask_t temp_foreign_map;

        /* Re-calculate the mask, keeping one representative CPU per online core */
        cpumask_clear(&temp_foreign_map);
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpus_are_siblings(i, k))
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
        }

        for_each_online_cpu(i)
                cpumask_andnot(&cpu_foreign_map[i],
                               &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
        mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned long flags;
        unsigned int core;
        int cpu;

        local_irq_save(flags);

        switch (action) {
        case SMP_CALL_FUNCTION:
                __ipi_send_mask(call_desc, mask);
                break;

        case SMP_RESCHEDULE_YOURSELF:
                __ipi_send_mask(sched_desc, mask);
                break;

        default:
                BUG();
        }

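        /*
         * Targets on other cores may be non-coherent (e.g. powered down in a
         * deep idle state) and so unable to see the IPI. Where a Cluster
         * Power Controller is present, command such cores to power back up
         * and wait until they mark themselves coherent again.
         */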
        if (mips_cpc_present()) {
                for_each_cpu(cpu, mask) {
                        if (cpus_are_siblings(cpu, smp_processor_id()))
                                continue;

                        core = cpu_core(&cpu_data[cpu]);

                        while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
                                mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
                                mips_cpc_lock_other(core);
                                write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
                                mips_cpc_unlock_other();
                                mips_cm_unlock_other();
                        }
                }
        }

        local_irq_restore(flags);
}


static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();

        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();

        return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
        .handler        = ipi_resched_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI resched"
};

static struct irqaction irq_call = {
        .handler        = ipi_call_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI call"
};

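/*
 * Wire up a single IPI virq: use the per-CPU flow handler and install the
 * given irqaction.
 */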
static void smp_ipi_init_one(unsigned int virq,
                                    struct irqaction *action)
{
        int ret;

        irq_set_handler(virq, handle_percpu_irq);
        ret = setup_irq(virq, action);
        BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

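/*
 * Reserve call-function and reschedule IPIs for the CPUs in @mask from the
 * platform's IPI IRQ domain and hook up their handlers.
 */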
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
        int virq;
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms only have a partial DT setup, so if we found an irq
         * node but no ipidomain, search for one that is not described in the
         * DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        /*
         * There are systems which use IPI IRQ domains, but only have one
         * registered when some runtime condition is met. For example a Malta
         * kernel may include support for GIC & CPU interrupt controller IPI
         * IRQ domains, but if run on a system with no GIC & no MT ASE then
         * neither will be supported or registered.
         *
         * This is only a problem if we're actually using multiple CPUs, so
         * fail loudly in that case. Otherwise simply return, skipping IPI
         * setup, since a single CPU does not need it.
         */
        if (!ipidomain) {
                BUG_ON(num_present_cpus() > 1);
                return 0;
        }

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!call_virq)
                call_virq = virq;

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!sched_virq)
                sched_virq = virq;

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        smp_ipi_init_one(call_virq + cpu, &irq_call);
                        smp_ipi_init_one(sched_virq + cpu, &irq_resched);
                }
        } else {
                smp_ipi_init_one(call_virq, &irq_call);
                smp_ipi_init_one(sched_virq, &irq_resched);
        }

        return 0;
}

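/*
 * Undo mips_smp_ipi_allocate() for the CPUs in @mask: remove the IPI
 * handlers and destroy the reserved virqs.
 */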
int mips_smp_ipi_free(const struct cpumask *mask)
{
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms only have a partial DT setup, so if we found an irq
         * node but no ipidomain, search for one that is not described in the
         * DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        BUG_ON(!ipidomain);

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        remove_irq(call_virq + cpu, &irq_call);
                        remove_irq(sched_virq + cpu, &irq_resched);
                }
        }
        irq_destroy_ipi(call_virq, mask);
        irq_destroy_ipi(sched_virq, mask);
        return 0;
}


static int __init mips_smp_ipi_init(void)
{
        if (num_possible_cpus() == 1)
                return 0;

        mips_smp_ipi_allocate(cpu_possible_mask);

        call_desc = irq_to_desc(call_virq);
        sched_desc = irq_to_desc(sched_virq);

        return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        cpu_probe();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
        cpu_report();
        maar_init();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */

        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);

        /* Notify boot CPU that we're starting & ready to sync counters */
        complete(&cpu_starting);

        synchronise_count_slave(cpu);

        /* The CPU is running and counters synchronised, now mark it online */
        set_cpu_online(cpu, true);

        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);

        calculate_cpu_foreign_map();

        /*
         * Notify boot CPU that we're up & online and it can safely return
         * from __cpu_up
         */
        complete(&cpu_running);

        /*
         * irq will be enabled in ->smp_finish(), enabling it too early
         * is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */

        set_cpu_online(smp_processor_id(), false);
        calculate_cpu_foreign_map();
        local_irq_disable();
        while (1);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
        if (mp_ops->prepare_boot_cpu)
                mp_ops->prepare_boot_cpu();
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int err;

        err = mp_ops->boot_secondary(cpu, tidle);
        if (err)
                return err;

        /* Wait for CPU to start and be ready to sync counters */
        if (!wait_for_completion_timeout(&cpu_starting,
                                         msecs_to_jiffies(1000))) {
                pr_crit("CPU%u: failed to start\n", cpu);
                return -EIO;
        }

        synchronise_count_master(cpu);

        /* Wait for CPU to finish startup & mark itself online before return */
        wait_for_completion(&cpu_running);
        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
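        /*
         * With MMIDs, ginvt_full() performs a globalized TLB invalidate that
         * reaches all coherent CPUs (as with the ginvt in drop_mmu_context()),
         * so no cross-CPU IPIs are needed on this path.
         */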
        if (cpu_has_mmid) {
                htw_stop();
                ginvt_full();
                sync_ginv();
                instruction_hazard();
                htw_start();
                return;
        }

        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
        smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

/*
 * The following TLB flush calls are invoked when old translations are
 * being torn down, or PTE attributes are changing. For single-threaded
 * address spaces, a new context is obtained on the current CPU, and the
 * TLB context on other CPUs is invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other CPUs.
 * For multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another CPU (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process, etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (cpu_has_mmid) {
                /*
                 * No need to worry about other CPUs - the ginvt in
                 * drop_mmu_context() will be globalized.
                 */
        } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                set_cpu_context(cpu, mm, 0);
                }
        }
        drop_mmu_context(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr;
        u32 old_mmid;

        preempt_disable();
        if (cpu_has_mmid) {
                htw_stop();
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(cpu_asid(0, mm));
                mtc0_tlbw_hazard();
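                /*
                 * Each MIPS TLB entry maps an aligned even/odd pair of pages,
                 * so align the range and invalidate in double-page steps.
                 */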
                addr = round_down(start, PAGE_SIZE * 2);
                end = round_up(end, PAGE_SIZE * 2);
                do {
                        ginvt_va_mmid(addr);
                        sync_ginv();
                        addr += PAGE_SIZE * 2;
                } while (addr < end);
                write_c0_memorymapid(old_mmid);
                instruction_hazard();
                htw_start();
        } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
                local_flush_tlb_range(vma, start, end);
        } else {
                unsigned int cpu;
                int exec = vma->vm_flags & VM_EXEC;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_range() will only fully flush the icache
                         * if the VMA is executable; otherwise we must
                         * invalidate the ASID without making it look to
                         * has_valid_asid() as if the mm has been completely
                         * unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                set_cpu_context(cpu, mm, !exec);
                }
                local_flush_tlb_range(vma, start, end);
        }
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        u32 old_mmid;

        preempt_disable();
        if (cpu_has_mmid) {
                htw_stop();
                old_mmid = read_c0_memorymapid();
                write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
                mtc0_tlbw_hazard();
                ginvt_va_mmid(page);
                sync_ginv();
                write_c0_memorymapid(old_mmid);
                instruction_hazard();
                htw_start();
        } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
                   (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
                local_flush_tlb_page(vma, page);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_page() only does partial flushes, so
                         * invalidate the ASID without making it look to
                         * has_valid_asid() as if the mm has been completely
                         * unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                set_cpu_context(cpu, vma->vm_mm, 1);
                }
                local_flush_tlb_page(vma, page);
        }
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);

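/*
 * Ask each CPU in @mask to receive a tick broadcast. The per-CPU count
 * ensures that at most one call_single_data request is queued per CPU at a
 * time; the callee resets it once the broadcast has been handled.
 */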
void tick_broadcast(const struct cpumask *mask)
{
        atomic_t *count;
        call_single_data_t *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                count = &per_cpu(tick_broadcast_count, cpu);
                csd = &per_cpu(tick_broadcast_csd, cpu);

                if (atomic_inc_return(count) == 1)
                        smp_call_function_single_async(cpu, csd);
        }
}

static void tick_broadcast_callee(void *info)
{
        int cpu = smp_processor_id();
        tick_receive_broadcast();
        atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
        call_single_data_t *csd;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                csd = &per_cpu(tick_broadcast_csd, cpu);
                csd->func = tick_broadcast_callee;
        }

        return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */