Linux-libre 5.3.12-gnu
[librecmc/linux-libre.git] arch/ia64/mm/tlb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *              Modified RID allocation for SMP
 *          Goutham Rao <goutham.rao@intel.com>
 *              IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 * Copyright (C) 2007 Intel Corp
 *      Fenghua Yu <fenghua.yu@intel.com>
 *      Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/tlb.h>

static struct {
        u64 mask;               /* mask of supported purge page-sizes */
        unsigned long max_bits; /* log2 of largest supported purge page-size */
} purge;

struct ia64_ctx ia64_ctx = {
        .lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
        .next = 1,
        .max_ctx = ~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* number of TR slots on this processor */
DEFINE_PER_CPU(u8, ia64_tr_used); /* max slot number used by the kernel */

struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has set up ia64_ctx.max_ctx based on the
 * maximum RID supported by the boot CPU.
 */
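/*
 * Each bitmap needs one bit per context, i.e. (max_ctx + 1) / 8 bytes:
 * if max_ctx were 0xffffff (a 24-bit RID space), each bitmap would
 * occupy 2 MB.
 */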
void __init
mmu_context_init (void)
{
        ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
                                         SMP_CACHE_BYTES);
        if (!ia64_ctx.bitmap)
                panic("%s: Failed to allocate %u bytes\n", __func__,
                      (ia64_ctx.max_ctx + 1) >> 3);
        ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
                                           SMP_CACHE_BYTES);
        if (!ia64_ctx.flushmap)
                panic("%s: Failed to allocate %u bytes\n", __func__,
                      (ia64_ctx.max_ctx + 1) >> 3);
}

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
        int i, cpu;
        unsigned long flush_bit;

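        /*
         * Bits set in flushmap mark contexts released since the last
         * wrap; xor-ing them out of bitmap makes those RIDs available
         * for allocation again once the TLBs are flushed below.
         */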
        for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
                flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
                ia64_ctx.bitmap[i] ^= flush_bit;
        }

        /* use offset at 300 to skip daemons */
        ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
                                ia64_ctx.max_ctx, 300);
        ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
                                ia64_ctx.max_ctx, ia64_ctx.next);

        /*
         * can't call flush_tlb_all() here because of race condition
         * with O(1) scheduler [EF]
         */
        cpu = get_cpu(); /* prevent preemption/migration */
        for_each_online_cpu(i)
                if (i != cpu)
                        per_cpu(ia64_need_tlb_flush, i) = 1;
        put_cpu();
        local_flush_tlb_all();
}

/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  If there are ever any other users for
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore {
        unsigned long   ticket;
        unsigned long   serve;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
        ss->ticket = 0;
        ss->serve = val;
}

static inline void down_spin(struct spinaphore *ss)
{
        unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;

        if (time_before(t, ss->serve))
                return;

        ia64_invala();

        for (;;) {
                asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
                if (time_before(t, serve))
                        return;
                cpu_relax();
        }
}

static inline void up_spin(struct spinaphore *ss)
{
        ia64_fetchadd(1, &ss->serve, rel);
}
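
/*
 * Example: after spinaphore_init(&ss, 2), tickets 0 and 1 pass the
 * time_before(t, serve) test immediately, so two CPUs may hold the
 * spinaphore concurrently; ticket 2 spins until an up_spin() advances
 * serve to 3.  time_before() keeps the comparisons correct if the
 * counters ever wrap.  While spinning, down_spin() re-reads "serve"
 * with a check load (ld8.c.nc) after invalidating the ALAT rather
 * than with an ordinary acquire load.
 */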

static struct spinaphore ptcg_sem;
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;

/*
 * Kernel parameter "nptcg=" overrides max number of concurrent global TLB
 * purges which is reported from either PAL or SAL PALO.
 *
 * We do not sanity-check the nptcg value; it is the user's responsibility
 * to supply a value that is valid for the platform.  Otherwise, the kernel
 * may hang in some cases.
 */
static int __init
set_nptcg(char *str)
{
        int value = 0;

        get_option(&str, &value);
        setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

        return 1;
}

__setup("nptcg=", set_nptcg);

/*
 * Maximum number of simultaneous ptc.g purges in the system can
 * be defined by PAL_VM_SUMMARY (in which case we should take
 * the smallest value for any cpu in the system) or by the PAL
 * override table (in which case we should ignore the value from
 * PAL_VM_SUMMARY).
 *
 * Kernel parameter "nptcg=" overrides maximum number of simultaneous ptc.g
 * purges defined in either PAL_VM_SUMMARY or PAL override table. In this case,
 * we should ignore the value from either PAL_VM_SUMMARY or PAL override table.
 *
 * Complicating the logic here is the fact that num_possible_cpus()
 * isn't fully setup until we start bringing cpus online.
 */
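/*
 * Net precedence, highest first: the "nptcg=" kernel parameter, then
 * the PALO override table, then the smallest PAL_VM_SUMMARY value
 * reported by any cpu.
 */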
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
        static int kp_override;
        static int palo_override;
        static int firstcpu = 1;

        if (toolatetochangeptcgsem) {
                if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0)
                        BUG_ON(1 < nptcg);
                else
                        BUG_ON(max_purges < nptcg);
                return;
        }

        if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
                kp_override = 1;
                nptcg = max_purges;
                goto resetsema;
        }
        if (kp_override) {
                need_ptcg_sem = num_possible_cpus() > nptcg;
                return;
        }

        if (nptcg_from == NPTCG_FROM_PALO) {
                palo_override = 1;

                /* In PALO max_purges == 0 really means it! */
                if (max_purges == 0)
                        panic("Whoa! Platform does not support global TLB purges.\n");
                nptcg = max_purges;
                if (nptcg == PALO_MAX_TLB_PURGES) {
                        need_ptcg_sem = 0;
                        return;
                }
                goto resetsema;
        }
        if (palo_override) {
                if (nptcg != PALO_MAX_TLB_PURGES)
                        need_ptcg_sem = (num_possible_cpus() > nptcg);
                return;
        }

        /* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
        if (max_purges == 0)
                max_purges = 1;

        if (firstcpu) {
                nptcg = max_purges;
                firstcpu = 0;
        }
        if (max_purges < nptcg)
                nptcg = max_purges;
        if (nptcg == PAL_MAX_PURGES) {
                need_ptcg_sem = 0;
                return;
        } else
                need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
        spinaphore_init(&ptcg_sem, max_purges);
}

void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
                       unsigned long end, unsigned long nbits)
{
        struct mm_struct *active_mm = current->active_mm;

        toolatetochangeptcgsem = 1;

        if (mm != active_mm) {
                /* Restore region IDs for mm */
                if (mm && active_mm) {
                        activate_context(mm);
                } else {
                        flush_tlb_all();
                        return;
                }
        }

        if (need_ptcg_sem)
                down_spin(&ptcg_sem);

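        /*
         * Purge the range in chunks of 2^nbits bytes; (nbits << 2)
         * places the log2 page size in the itir-style "ps" field
         * (bits 7:2) that ptc.ga expects in its second operand.
         */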
        do {
                /*
                 * Flush ALAT entries also.
                 */
                ia64_ptcga(start, (nbits << 2));
                ia64_srlz_i();
                start += (1UL << nbits);
        } while (start < end);

        if (need_ptcg_sem)
                up_spin(&ptcg_sem);

        if (mm != active_mm) {
                activate_context(active_mm);
        }
}

void
local_flush_tlb_all (void)
{
        unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

        addr    = local_cpu_data->ptce_base;
        count0  = local_cpu_data->ptce_count[0];
        count1  = local_cpu_data->ptce_count[1];
        stride0 = local_cpu_data->ptce_stride[0];
        stride1 = local_cpu_data->ptce_stride[1];

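        /*
         * Walk the ptc.e sequence described by PAL_PTCE_INFO: count0
         * outer iterations, each issuing count1 ptc.e operations that
         * step the address by stride1, with stride0 added between
         * outer iterations.
         */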
        local_irq_save(flags);
        for (i = 0; i < count0; ++i) {
                for (j = 0; j < count1; ++j) {
                        ia64_ptce(addr);
                        addr += stride1;
                }
                addr += stride0;
        }
        local_irq_restore(flags);
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}

static void
__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
                 unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long size = end - start;
        unsigned long nbits;

#ifndef CONFIG_SMP
        if (mm != current->active_mm) {
                mm->context = 0;
                return;
        }
#endif

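        /*
         * Start from floor(log2(size + 0xfff)) and bump nbits up to
         * the next purge page size supported in purge.mask, capping
         * at purge.max_bits; start is then aligned down so the purge
         * loop below covers the whole range in 2^nbits chunks.
         */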
        nbits = ia64_fls(size + 0xfff);
        while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
                        (nbits < purge.max_bits))
                ++nbits;
        if (nbits > purge.max_bits)
                nbits = purge.max_bits;
        start &= ~((1UL << nbits) - 1);

        preempt_disable();
#ifdef CONFIG_SMP
        if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
                platform_global_tlb_purge(mm, start, end, nbits);
                preempt_enable();
                return;
        }
#endif
        do {
                ia64_ptcl(start, (nbits << 2));
                start += (1UL << nbits);
        } while (start < end);
        preempt_enable();
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}

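/*
 * The range is flushed twice below: once for the pages themselves and
 * once for the virtually mapped linear page table (ia64_thash() maps
 * an address to its VHPT entry), so stale translations for the page
 * tables are dropped as well.
 */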
void flush_tlb_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        if (unlikely(end - start >= 1024*1024*1024*1024UL
                        || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
                /*
                 * If we flush more than a terabyte or across regions, we're
                 * probably better off just flushing the entire TLB(s).  This
                 * should be very rare and is not worth optimizing for.
                 */
                flush_tlb_all();
        } else {
                /* flush the address range from the tlb */
                __flush_tlb_range(vma, start, end);
                /* flush the virt. page-table area mapping the addr range */
                __flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
        }
}
EXPORT_SYMBOL(flush_tlb_range);

void ia64_tlb_init(void)
{
        ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
        u64 tr_pgbits;
        long status;
        pal_vm_info_1_u_t vm_info_1;
        pal_vm_info_2_u_t vm_info_2;
        int cpu = smp_processor_id();

        if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
                printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
                       "defaulting to architected purge page-sizes.\n", status);
                purge.mask = 0x115557000UL;
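                /*
                 * Bits 12-14, 16, 18, 20, 22, 24, 26, 28 and 32 of
                 * 0x115557000 select the architected purge sizes 4K,
                 * 8K, 16K, 64K, 256K, 1M, 4M, 16M, 64M, 256M and 4G.
                 */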
        }
        purge.max_bits = ia64_fls(purge.mask);

        ia64_get_ptce(&ptce_info);
        local_cpu_data->ptce_base = ptce_info.base;
        local_cpu_data->ptce_count[0] = ptce_info.count[0];
        local_cpu_data->ptce_count[1] = ptce_info.count[1];
        local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
        local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

        local_flush_tlb_all();  /* nuke left overs from bootstrapping... */
        status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

        if (status) {
                printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
                per_cpu(ia64_tr_num, cpu) = 8;
                return;
        }
        per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry + 1;
        if (per_cpu(ia64_tr_num, cpu) >
                                (vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1))
                per_cpu(ia64_tr_num, cpu) =
                                vm_info_1.pal_vm_info_1_s.max_dtr_entry + 1;
        if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
                static int justonce = 1;
                per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
                if (justonce) {
                        justonce = 0;
                        printk(KERN_DEBUG "TR register number exceeds "
                               "IA64_TR_ALLOC_MAX!\n");
                }
        }
}

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
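/*
 * Two mappings overlap iff they live in the same region (same RID)
 * and neither range ends before the other begins.  A TR's log2 page
 * size is kept in itir bits 7:2, hence (p->itir & 0xff) >> 2 below.
 */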
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
        u64 tr_log_size;
        u64 tr_end;
        u64 va_rr = ia64_get_rr(va);
        u64 va_rid = RR_TO_RID(va_rr);
        u64 va_end = va + (1UL << log_size) - 1;

        if (va_rid != RR_TO_RID(p->rr))
                return 0;
        tr_log_size = (p->itir & 0xff) >> 2;
        tr_end = p->ifa + (1UL << tr_log_size) - 1;

        if (va > tr_end || p->ifa > va_end)
                return 0;
        return 1;
}


/*
 * ia64_insert_tr in virtual mode. Allocate a TR slot
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 *
 * va   : virtual address.
 * pte  : pte entry to be inserted.
 * log_size: range to be covered.
 *
 * Return value:  < 0 :  error number
 *
 *                >= 0 : slot number allocated for TR.
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
        int i, r;
        unsigned long psr;
        struct ia64_tr_entry *p;
        int cpu = smp_processor_id();

        if (!ia64_idtrs[cpu]) {
                ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX,
                                                sizeof(struct ia64_tr_entry),
                                                GFP_KERNEL);
                if (!ia64_idtrs[cpu])
                        return -ENOMEM;
        }
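        /*
         * ia64_idtrs[cpu] holds IA64_TR_ALLOC_MAX itr entries followed
         * by IA64_TR_ALLOC_MAX dtr entries: slot i sits at index i for
         * itr and at IA64_TR_ALLOC_MAX + i for dtr.
         */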
        r = -EINVAL;
        /* Check overlap with existing TR entries */
        if (target_mask & 0x1) {
                p = ia64_idtrs[cpu];
                for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
                                                                i++, p++) {
                        if (p->pte & 0x1)
                                if (is_tr_overlap(p, va, log_size)) {
                                        printk(KERN_DEBUG "Overlapped Entry "
                                                "Inserted for TR Register!!\n");
                                        goto out;
                                }
                }
        }
        if (target_mask & 0x2) {
                p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
                for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
                                                                i++, p++) {
                        if (p->pte & 0x1)
                                if (is_tr_overlap(p, va, log_size)) {
                                        printk(KERN_DEBUG "Overlapped Entry "
                                                "Inserted for TR Register!!\n");
                                        goto out;
                                }
                }
        }

        for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
                switch (target_mask & 0x3) {
                case 1:
                        if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
                                goto found;
                        continue;
                case 2:
                        if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
                                goto found;
                        continue;
                case 3:
                        if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
                            !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
                                goto found;
                        continue;
                default:
                        r = -EINVAL;
                        goto out;
                }
        }
found:
        if (i >= per_cpu(ia64_tr_num, cpu))
                return -EBUSY;

        /* Record TR info for MCA handler use! */
        if (i > per_cpu(ia64_tr_used, cpu))
                per_cpu(ia64_tr_used, cpu) = i;

        psr = ia64_clear_ic();
        if (target_mask & 0x1) {
                ia64_itr(0x1, i, va, pte, log_size);
                ia64_srlz_i();
                p = ia64_idtrs[cpu] + i;
                p->ifa = va;
                p->pte = pte;
                p->itir = log_size << 2;
                p->rr = ia64_get_rr(va);
        }
        if (target_mask & 0x2) {
                ia64_itr(0x2, i, va, pte, log_size);
                ia64_srlz_i();
                p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
                p->ifa = va;
                p->pte = pte;
                p->itir = log_size << 2;
                p->rr = ia64_get_rr(va);
        }
        ia64_set_psr(psr);
        r = i;
out:
        return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);

/*
 * ia64_purge_tr
 *
 * target_mask: 0x1 : purge itr, 0x2 : purge dtr, 0x3 : purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
        int cpu = smp_processor_id();
        int i;
        struct ia64_tr_entry *p;

        if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
                return;

        if (target_mask & 0x1) {
                p = ia64_idtrs[cpu] + slot;
                if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
                        p->pte = 0;
                        ia64_ptr(0x1, p->ifa, p->itir >> 2);
                        ia64_srlz_i();
                }
        }

        if (target_mask & 0x2) {
                p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
                if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
                        p->pte = 0;
                        ia64_ptr(0x2, p->ifa, p->itir >> 2);
                        ia64_srlz_i();
                }
        }

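        /* Shrink ia64_tr_used back to the highest slot still in use. */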
        for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
                if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
                    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
                        break;
        }
        per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);