Linux-libre 5.4.49-gnu
arch/x86/kernel/apic/x2apic_cluster.c (librecmc/linux-libre.git)
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/apic.h>

#include "local.h"

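/*
 * A cluster_mask tracks one x2APIC cluster: its cluster id, the NUMA node
 * it was allocated on and the set of CPUs belonging to it.
 */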
struct cluster_mask {
        unsigned int    clusterid;
        int             node;
        struct cpumask  mask;
};

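/*
 * Per CPU state: the logical APIC ID cached from the LDR, a scratch cpumask
 * used to assemble per cluster IPI destinations, and a pointer to the
 * cluster_mask this CPU belongs to. cluster_hotplug_mask is a preallocated
 * spare consumed when a CPU of a new cluster comes online.
 */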
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
static DEFINE_PER_CPU(struct cluster_mask *, cluster_masks);
static struct cluster_mask *cluster_hotplug_mask;

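/*
 * The MADT OEM ids are not relevant here; the driver is usable whenever
 * x2APIC is enabled.
 */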
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
        return x2apic_enabled();
}

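/* Send @vector to a single CPU via its cached logical APIC ID. */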
static void x2apic_send_IPI(int cpu, int vector)
{
        u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);

        x2apic_wrmsr_fence();
        __x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}

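/*
 * Send @vector to all CPUs in @mask using one IPI per cluster: the logical
 * APIC IDs of the targeted CPUs within a cluster are OR'ed into a single
 * logical destination so the hardware delivers the IPI to all of them at
 * once.
 */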
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
        unsigned int cpu, clustercpu;
        struct cpumask *tmpmsk;
        unsigned long flags;
        u32 dest;

        x2apic_wrmsr_fence();
        local_irq_save(flags);

        tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
        cpumask_copy(tmpmsk, mask);
        /* If IPI should not be sent to self, clear current CPU */
        if (apic_dest != APIC_DEST_ALLINC)
                __cpumask_clear_cpu(smp_processor_id(), tmpmsk);

        /* Collapse cpus in a cluster so a single IPI per cluster is sent */
        for_each_cpu(cpu, tmpmsk) {
                struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);

                dest = 0;
                for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
                        dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu);

                if (!dest)
                        continue;

                __x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
                /* Remove cluster CPUs from tmpmask */
                cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
        }

        local_irq_restore(flags);
}

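/* Wrappers wiring the mask based and shorthand variants into the apic ops. */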
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
        __x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
        __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
        __x2apic_send_IPI_shorthand(vector, APIC_DEST_ALLINC);
}

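/* The destination for @cpu is its cached logical APIC ID. */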
static u32 x2apic_calc_apicid(unsigned int cpu)
{
        return per_cpu(x86_cpu_to_logical_apicid, cpu);
}

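/*
 * Runs on the upcoming CPU: cache the LDR value and link the CPU into the
 * cluster_mask of its cluster. If no online CPU shares the cluster yet, the
 * preallocated hotplug spare is consumed. The spare is set up from the
 * hotplug prepare stage because memory allocation is not possible here.
 */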
static void init_x2apic_ldr(void)
{
        struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
        u32 cluster, apicid = apic_read(APIC_LDR);
        unsigned int cpu;

        this_cpu_write(x86_cpu_to_logical_apicid, apicid);

        if (cmsk)
                goto update;

        cluster = apicid >> 16;
        for_each_online_cpu(cpu) {
                cmsk = per_cpu(cluster_masks, cpu);
                /* Matching cluster found. Link and update it. */
                if (cmsk && cmsk->clusterid == cluster)
                        goto update;
        }
        cmsk = cluster_hotplug_mask;
        cmsk->clusterid = cluster;
        cluster_hotplug_mask = NULL;
update:
        this_cpu_write(cluster_masks, cmsk);
        cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
}

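/*
 * Ensure that a cluster_mask spare is available before the CPU is brought
 * up; init_x2apic_ldr() either joins an existing cluster or consumes it.
 */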
static int alloc_clustermask(unsigned int cpu, int node)
{
        if (per_cpu(cluster_masks, cpu))
                return 0;
        /*
         * If a hotplug spare mask exists, check whether it's on the right
         * node. If not, free it and allocate a new one.
         */
        if (cluster_hotplug_mask) {
                if (cluster_hotplug_mask->node == node)
                        return 0;
                kfree(cluster_hotplug_mask);
        }

        cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
                                            GFP_KERNEL, node);
        if (!cluster_hotplug_mask)
                return -ENOMEM;
        cluster_hotplug_mask->node = node;
        return 0;
}

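/*
 * Hotplug prepare callback: allocate the cluster spare and the per CPU IPI
 * scratch cpumask before the CPU comes online.
 */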
static int x2apic_prepare_cpu(unsigned int cpu)
{
        if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
                return -ENOMEM;
        if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
                return -ENOMEM;
        return 0;
}

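/*
 * Hotplug teardown callback: remove the dead CPU from its cluster mask and
 * free its IPI scratch cpumask.
 */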
static int x2apic_dead_cpu(unsigned int dead_cpu)
{
        struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);

        if (cmsk)
                cpumask_clear_cpu(dead_cpu, &cmsk->mask);
        free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
        return 0;
}

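/*
 * Claim the APIC when x2APIC mode is active: register the hotplug callbacks
 * and set up the logical destination for the boot CPU.
 */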
static int x2apic_cluster_probe(void)
{
        if (!x2apic_mode)
                return 0;

        if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
                              x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
                pr_err("Failed to register X2APIC_PREPARE\n");
                return 0;
        }
        init_x2apic_ldr();
        return 1;
}

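/* The apic operations for x2APIC cluster (logical destination) mode. */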
static struct apic apic_x2apic_cluster __ro_after_init = {

        .name                           = "cluster x2apic",
        .probe                          = x2apic_cluster_probe,
        .acpi_madt_oem_check            = x2apic_acpi_madt_oem_check,
        .apic_id_valid                  = x2apic_apic_id_valid,
        .apic_id_registered             = x2apic_apic_id_registered,

        .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 1, /* logical */

        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,

        .init_apic_ldr                  = init_x2apic_ldr,

        .ioapic_phys_id_map             = NULL,
        .setup_apic_routing             = NULL,
        .cpu_present_to_apicid          = default_cpu_present_to_apicid,
        .apicid_to_cpu_present          = NULL,
        .check_phys_apicid_present      = default_check_phys_apicid_present,
        .phys_pkg_id                    = x2apic_phys_pkg_id,

        .get_apic_id                    = x2apic_get_apic_id,
        .set_apic_id                    = x2apic_set_apic_id,

        .calc_dest_apicid               = x2apic_calc_apicid,

        .send_IPI                       = x2apic_send_IPI,
        .send_IPI_mask                  = x2apic_send_IPI_mask,
        .send_IPI_mask_allbutself       = x2apic_send_IPI_mask_allbutself,
        .send_IPI_allbutself            = x2apic_send_IPI_allbutself,
        .send_IPI_all                   = x2apic_send_IPI_all,
        .send_IPI_self                  = x2apic_send_IPI_self,

        .inquire_remote_apic            = NULL,

        .read                           = native_apic_msr_read,
        .write                          = native_apic_msr_write,
        .eoi_write                      = native_apic_msr_eoi_write,
        .icr_read                       = native_x2apic_icr_read,
        .icr_write                      = native_x2apic_icr_write,
        .wait_icr_idle                  = native_x2apic_wait_icr_idle,
        .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
};

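/* Make the driver visible to the APIC driver probing code. */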
apic_driver(apic_x2apic_cluster);