2 * arch/sparc64/mm/init.c
4 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/string.h>
12 #include <linux/init.h>
13 #include <linux/bootmem.h>
15 #include <linux/hugetlb.h>
16 #include <linux/initrd.h>
17 #include <linux/swap.h>
18 #include <linux/pagemap.h>
19 #include <linux/poison.h>
21 #include <linux/seq_file.h>
22 #include <linux/kprobes.h>
23 #include <linux/cache.h>
24 #include <linux/sort.h>
25 #include <linux/percpu.h>
26 #include <linux/memblock.h>
27 #include <linux/mmzone.h>
28 #include <linux/gfp.h>
32 #include <asm/pgalloc.h>
33 #include <asm/pgtable.h>
34 #include <asm/oplib.h>
35 #include <asm/iommu.h>
37 #include <asm/uaccess.h>
38 #include <asm/mmu_context.h>
39 #include <asm/tlbflush.h>
41 #include <asm/starfire.h>
43 #include <asm/spitfire.h>
44 #include <asm/sections.h>
46 #include <asm/hypervisor.h>
48 #include <asm/mdesc.h>
49 #include <asm/cpudata.h>
50 #include <asm/setup.h>
55 unsigned long kern_linear_pte_xor[4] __read_mostly;
57 /* A bitmap, two bits for every 256MB of physical memory. These two
58 * bits determine what page size we use for kernel linear
59 * translations. They form an index into kern_linear_pte_xor[]. The
60 * value in the indexed slot is XOR'd with the TLB miss virtual
61 * address to form the resulting TTE. The mapping is:
68 * All sun4v chips support 256MB pages. Only SPARC-T4 and later
69 * support 2GB pages, and hopefully future cpus will support the 16GB
70 * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there
71 * if these larger page sizes are not supported by the cpu.
73 * It would be nice to determine this from the machine description
74 * 'cpu' properties, but we need to have this table setup before the
75 * MDESC is initialized.
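/* A hedged illustration, not part of the kernel's actual miss path (that
 * lives in assembly): given the two-bit value taken from the bitmap for a
 * 256MB region, the TTE would be formed roughly as below.  'bitmap_bits'
 * is a hypothetical stand-in for the value the TLB miss handler extracts.
 */
static inline unsigned long __maybe_unused
example_kern_linear_tte(unsigned long miss_vaddr, unsigned int bitmap_bits)
{
	/* The indexed slot is XOR'd with the miss virtual address. */
	return miss_vaddr ^ kern_linear_pte_xor[bitmap_bits & 0x3];
}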
78 #ifndef CONFIG_DEBUG_PAGEALLOC
79 /* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
80 * Space is allocated for this right after the trap table in
81 * arch/sparc64/kernel/head.S
83 extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
85 extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
87 static unsigned long cpu_pgsz_mask;
89 #define MAX_BANKS 1024
91 static struct linux_prom64_registers pavail[MAX_BANKS];
92 static int pavail_ents;
94 static int cmp_p64(const void *a, const void *b)
96 const struct linux_prom64_registers *x = a, *y = b;
98 if (x->phys_addr > y->phys_addr)
100 if (x->phys_addr < y->phys_addr)
105 static void __init read_obp_memory(const char *property,
106 struct linux_prom64_registers *regs,
109 phandle node = prom_finddevice("/memory");
110 int prop_size = prom_getproplen(node, property);
113 ents = prop_size / sizeof(struct linux_prom64_registers);
114 if (ents > MAX_BANKS) {
115 prom_printf("The machine has more %s property entries than "
116 "this kernel can support (%d).\n",
117 property, MAX_BANKS);
121 ret = prom_getproperty(node, property, (char *) regs, prop_size);
123 prom_printf("Couldn't get %s property from /memory.\n",
128 /* Sanitize what we got from the firmware, by page aligning
131 for (i = 0; i < ents; i++) {
132 unsigned long base, size;
134 base = regs[i].phys_addr;
135 size = regs[i].reg_size;
138 if (base & ~PAGE_MASK) {
139 unsigned long new_base = PAGE_ALIGN(base);
141 size -= new_base - base;
142 if ((long) size < 0L)
147 /* If it is empty, simply get rid of it.
148 * This simplifies the logic of the other
149 * functions that process these arrays.
151 memmove(&regs[i], &regs[i + 1],
152 (ents - i - 1) * sizeof(regs[0]));
157 regs[i].phys_addr = base;
158 regs[i].reg_size = size;
163 sort(regs, ents, sizeof(struct linux_prom64_registers),
167 /* Kernel physical address base and size in bytes. */
168 unsigned long kern_base __read_mostly;
169 unsigned long kern_size __read_mostly;
171 /* Initial ramdisk setup */
172 extern unsigned long sparc_ramdisk_image64;
173 extern unsigned int sparc_ramdisk_image;
174 extern unsigned int sparc_ramdisk_size;
176 struct page *mem_map_zero __read_mostly;
177 EXPORT_SYMBOL(mem_map_zero);
179 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
181 unsigned long sparc64_kern_pri_context __read_mostly;
182 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
183 unsigned long sparc64_kern_sec_context __read_mostly;
185 int num_kernel_image_mappings;
187 #ifdef CONFIG_DEBUG_DCFLUSH
188 atomic_t dcpage_flushes = ATOMIC_INIT(0);
190 atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
194 inline void flush_dcache_page_impl(struct page *page)
196 BUG_ON(tlb_type == hypervisor);
197 #ifdef CONFIG_DEBUG_DCFLUSH
198 atomic_inc(&dcpage_flushes);
201 #ifdef DCACHE_ALIASING_POSSIBLE
202 __flush_dcache_page(page_address(page),
203 ((tlb_type == spitfire) &&
204 page_mapping(page) != NULL));
206 if (page_mapping(page) != NULL &&
207 tlb_type == spitfire)
208 __flush_icache_page(__pa(page_address(page)));
212 #define PG_dcache_dirty PG_arch_1
213 #define PG_dcache_cpu_shift 32UL
214 #define PG_dcache_cpu_mask \
215 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
217 #define dcache_dirty_cpu(page) \
218 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
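/* A small sketch of the encoding the macros above assume (illustrative
 * only): the dirty bit lives at PG_dcache_dirty and the owning cpu number
 * starts at bit PG_dcache_cpu_shift, mirroring the mask construction in
 * set_dcache_dirty() below.
 */
static inline unsigned long __maybe_unused
example_dcache_dirty_flags(unsigned long cpu)
{
	return (cpu << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
}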
220 static inline void set_dcache_dirty(struct page *page, int this_cpu)
222 unsigned long mask = this_cpu;
223 unsigned long non_cpu_bits;
225 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
226 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
228 __asm__ __volatile__("1:\n\t"
230 "and %%g7, %1, %%g1\n\t"
231 "or %%g1, %0, %%g1\n\t"
232 "casx [%2], %%g7, %%g1\n\t"
234 "bne,pn %%xcc, 1b\n\t"
237 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
241 static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
243 unsigned long mask = (1UL << PG_dcache_dirty);
245 __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
248 "srlx %%g7, %4, %%g1\n\t"
249 "and %%g1, %3, %%g1\n\t"
251 "bne,pn %%icc, 2f\n\t"
252 " andn %%g7, %1, %%g1\n\t"
253 "casx [%2], %%g7, %%g1\n\t"
255 "bne,pn %%xcc, 1b\n\t"
259 : "r" (cpu), "r" (mask), "r" (&page->flags),
260 "i" (PG_dcache_cpu_mask),
261 "i" (PG_dcache_cpu_shift)
265 static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
267 unsigned long tsb_addr = (unsigned long) ent;
269 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
270 tsb_addr = __pa(tsb_addr);
272 __tsb_insert(tsb_addr, tag, pte);
275 unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
277 static void flush_dcache(unsigned long pfn)
281 page = pfn_to_page(pfn);
283 unsigned long pg_flags;
285 pg_flags = page->flags;
286 if (pg_flags & (1UL << PG_dcache_dirty)) {
287 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
289 int this_cpu = get_cpu();
291 /* This is just to optimize away some function calls
295 flush_dcache_page_impl(page);
297 smp_flush_dcache_page_impl(page, cpu);
299 clear_dcache_dirty_cpu(page, cpu);
306 /* mm->context.lock must be held */
307 static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
308 unsigned long tsb_hash_shift, unsigned long address,
311 struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
317 tsb += ((address >> tsb_hash_shift) &
318 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
319 tag = (address >> 22UL);
320 tsb_insert(tsb, tag, tte);
323 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
324 static inline bool is_hugetlb_pte(pte_t pte)
326 if ((tlb_type == hypervisor &&
327 (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
328 (tlb_type != hypervisor &&
329 (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U))
335 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
337 struct mm_struct *mm;
341 if (tlb_type != hypervisor) {
342 unsigned long pfn = pte_pfn(pte);
350 /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
351 if (!pte_accessible(mm, pte))
354 spin_lock_irqsave(&mm->context.lock, flags);
356 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
357 if (mm->context.huge_pte_count && is_hugetlb_pte(pte))
358 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
359 address, pte_val(pte));
362 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
363 address, pte_val(pte));
365 spin_unlock_irqrestore(&mm->context.lock, flags);
368 void flush_dcache_page(struct page *page)
370 struct address_space *mapping;
373 if (tlb_type == hypervisor)
376 /* Do not bother with the expensive D-cache flush if it
377 * is merely the zero page. The 'bigcore' testcase in GDB
378 * causes this case to run millions of times.
380 if (page == ZERO_PAGE(0))
383 this_cpu = get_cpu();
385 mapping = page_mapping(page);
386 if (mapping && !mapping_mapped(mapping)) {
387 int dirty = test_bit(PG_dcache_dirty, &page->flags);
389 int dirty_cpu = dcache_dirty_cpu(page);
391 if (dirty_cpu == this_cpu)
393 smp_flush_dcache_page_impl(page, dirty_cpu);
395 set_dcache_dirty(page, this_cpu);
397 /* We could delay the flush for the !page_mapping
398 * case too. But that case is for exec env/arg
399 * pages and those are 99% certainly going to get
400 * faulted into the tlb (and thus flushed) anyways.
402 flush_dcache_page_impl(page);
408 EXPORT_SYMBOL(flush_dcache_page);
410 void __kprobes flush_icache_range(unsigned long start, unsigned long end)
412 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
413 if (tlb_type == spitfire) {
416 /* This code only runs on Spitfire cpus so this is
417 * why we can assume _PAGE_PADDR_4U.
419 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
420 unsigned long paddr, mask = _PAGE_PADDR_4U;
422 if (kaddr >= PAGE_OFFSET)
423 paddr = kaddr & mask;
425 pgd_t *pgdp = pgd_offset_k(kaddr);
426 pud_t *pudp = pud_offset(pgdp, kaddr);
427 pmd_t *pmdp = pmd_offset(pudp, kaddr);
428 pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
430 paddr = pte_val(*ptep) & mask;
432 __flush_icache_page(paddr);
436 EXPORT_SYMBOL(flush_icache_range);
438 void mmu_info(struct seq_file *m)
440 static const char *pgsz_strings[] = {
441 "8K", "64K", "512K", "4MB", "32MB",
442 "256MB", "2GB", "16GB",
446 if (tlb_type == cheetah)
447 seq_printf(m, "MMU Type\t: Cheetah\n");
448 else if (tlb_type == cheetah_plus)
449 seq_printf(m, "MMU Type\t: Cheetah+\n");
450 else if (tlb_type == spitfire)
451 seq_printf(m, "MMU Type\t: Spitfire\n");
452 else if (tlb_type == hypervisor)
453 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
455 seq_printf(m, "MMU Type\t: ???\n");
457 seq_printf(m, "MMU PGSZs\t: ");
459 for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
460 if (cpu_pgsz_mask & (1UL << i)) {
461 seq_printf(m, "%s%s",
462 printed ? "," : "", pgsz_strings[i]);
468 #ifdef CONFIG_DEBUG_DCFLUSH
469 seq_printf(m, "DCPageFlushes\t: %d\n",
470 atomic_read(&dcpage_flushes));
472 seq_printf(m, "DCPageFlushesXC\t: %d\n",
473 atomic_read(&dcpage_flushes_xcall));
474 #endif /* CONFIG_SMP */
475 #endif /* CONFIG_DEBUG_DCFLUSH */
478 struct linux_prom_translation prom_trans[512] __read_mostly;
479 unsigned int prom_trans_ents __read_mostly;
481 unsigned long kern_locked_tte_data;
483 /* The obp translations are saved based on 8k pagesize, since obp can
484 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
485 * HI_OBP_ADDRESS range are handled in ktlb.S.
487 static inline int in_obp_range(unsigned long vaddr)
489 return (vaddr >= LOW_OBP_ADDRESS &&
490 vaddr < HI_OBP_ADDRESS);
493 static int cmp_ptrans(const void *a, const void *b)
495 const struct linux_prom_translation *x = a, *y = b;
497 if (x->virt > y->virt)
499 if (x->virt < y->virt)
504 /* Read OBP translations property into 'prom_trans[]'. */
505 static void __init read_obp_translations(void)
507 int n, node, ents, first, last, i;
509 node = prom_finddevice("/virtual-memory");
510 n = prom_getproplen(node, "translations");
511 if (unlikely(n == 0 || n == -1)) {
512 prom_printf("prom_mappings: Couldn't get size.\n");
515 if (unlikely(n > sizeof(prom_trans))) {
516 prom_printf("prom_mappings: Size %d is too big.\n", n);
520 if ((n = prom_getproperty(node, "translations",
521 (char *)&prom_trans[0],
522 sizeof(prom_trans))) == -1) {
523 prom_printf("prom_mappings: Couldn't get property.\n");
527 n = n / sizeof(struct linux_prom_translation);
531 sort(prom_trans, ents, sizeof(struct linux_prom_translation),
534 /* Now kick out all the non-OBP entries. */
535 for (i = 0; i < ents; i++) {
536 if (in_obp_range(prom_trans[i].virt))
540 for (; i < ents; i++) {
541 if (!in_obp_range(prom_trans[i].virt))
546 for (i = 0; i < (last - first); i++) {
547 struct linux_prom_translation *src = &prom_trans[i + first];
548 struct linux_prom_translation *dest = &prom_trans[i];
552 for (; i < ents; i++) {
553 struct linux_prom_translation *dest = &prom_trans[i];
554 dest->virt = dest->size = dest->data = 0x0UL;
557 prom_trans_ents = last - first;
559 if (tlb_type == spitfire) {
560 /* Clear diag TTE bits. */
561 for (i = 0; i < prom_trans_ents; i++)
562 prom_trans[i].data &= ~0x0003fe0000000000UL;
565 /* Force execute bit on. */
566 for (i = 0; i < prom_trans_ents; i++)
567 prom_trans[i].data |= (tlb_type == hypervisor ?
568 _PAGE_EXEC_4V : _PAGE_EXEC_4U);
571 static void __init hypervisor_tlb_lock(unsigned long vaddr,
575 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
578 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
579 "errors with %lx\n", vaddr, 0, pte, mmu, ret);
584 static unsigned long kern_large_tte(unsigned long paddr);
586 static void __init remap_kernel(void)
588 unsigned long phys_page, tte_vaddr, tte_data;
589 int i, tlb_ent = sparc64_highest_locked_tlbent();
591 tte_vaddr = (unsigned long) KERNBASE;
592 phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
593 tte_data = kern_large_tte(phys_page);
595 kern_locked_tte_data = tte_data;
597 /* Now lock us into the TLBs via Hypervisor or OBP. */
598 if (tlb_type == hypervisor) {
599 for (i = 0; i < num_kernel_image_mappings; i++) {
600 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
601 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
602 tte_vaddr += 0x400000;
603 tte_data += 0x400000;
606 for (i = 0; i < num_kernel_image_mappings; i++) {
607 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
608 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
609 tte_vaddr += 0x400000;
610 tte_data += 0x400000;
612 sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
614 if (tlb_type == cheetah_plus) {
615 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
616 CTX_CHEETAH_PLUS_NUC);
617 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
618 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
623 static void __init inherit_prom_mappings(void)
625 /* Now fixup OBP's idea about where we really are mapped. */
626 printk("Remapping the kernel... ");
631 void prom_world(int enter)
636 __asm__ __volatile__("flushw");
639 void __flush_dcache_range(unsigned long start, unsigned long end)
643 if (tlb_type == spitfire) {
646 for (va = start; va < end; va += 32) {
647 spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
651 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
654 for (va = start; va < end; va += 32)
655 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
659 "i" (ASI_DCACHE_INVALIDATE));
662 EXPORT_SYMBOL(__flush_dcache_range);
664 /* get_new_mmu_context() uses "cache + 1". */
665 DEFINE_SPINLOCK(ctx_alloc_lock);
666 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
667 #define MAX_CTX_NR (1UL << CTX_NR_BITS)
668 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
669 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
671 /* Caller does TLB context flushing on local CPU if necessary.
672 * The caller also ensures that CTX_VALID(mm->context) is false.
674 * We must be careful about boundary cases so that we never
675 * let the user have CTX 0 (nucleus) or we ever use a CTX
676 * version of zero (and thus NO_CONTEXT would not be caught
677 * by version mis-match tests in mmu_context.h).
679 * Always invoked with interrupts disabled.
681 void get_new_mmu_context(struct mm_struct *mm)
683 unsigned long ctx, new_ctx;
684 unsigned long orig_pgsz_bits;
687 spin_lock(&ctx_alloc_lock);
688 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
689 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
690 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
692 if (new_ctx >= (1 << CTX_NR_BITS)) {
693 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
694 if (new_ctx >= ctx) {
696 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
699 new_ctx = CTX_FIRST_VERSION;
701 /* Don't call memset, for 16 entries that's just
704 mmu_context_bmap[0] = 3;
705 mmu_context_bmap[1] = 0;
706 mmu_context_bmap[2] = 0;
707 mmu_context_bmap[3] = 0;
708 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
709 mmu_context_bmap[i + 0] = 0;
710 mmu_context_bmap[i + 1] = 0;
711 mmu_context_bmap[i + 2] = 0;
712 mmu_context_bmap[i + 3] = 0;
718 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
719 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
721 tlb_context_cache = new_ctx;
722 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
723 spin_unlock(&ctx_alloc_lock);
725 if (unlikely(new_version))
726 smp_new_mmu_context_version();
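/* A hedged sketch of the context value layout that get_new_mmu_context()
 * maintains: the low CTX_NR_BITS hold the context number and the remaining
 * high bits hold the version.  Illustrative only, using the same masks as
 * the allocator above.
 */
static inline unsigned long __maybe_unused
example_ctx_compose(unsigned long version, unsigned long ctx_nr)
{
	return (version & CTX_VERSION_MASK) | (ctx_nr & CTX_NR_MASK);
}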
729 static int numa_enabled = 1;
730 static int numa_debug;
732 static int __init early_numa(char *p)
737 if (strstr(p, "off"))
740 if (strstr(p, "debug"))
745 early_param("numa", early_numa);
747 #define numadbg(f, a...) \
748 do { if (numa_debug) \
749 printk(KERN_INFO f, ## a); \
752 static void __init find_ramdisk(unsigned long phys_base)
754 #ifdef CONFIG_BLK_DEV_INITRD
755 if (sparc_ramdisk_image || sparc_ramdisk_image64) {
756 unsigned long ramdisk_image;
758 /* Older versions of the bootloader only supported a
759 * 32-bit physical address for the ramdisk image
760 * location, stored at sparc_ramdisk_image. Newer
761 * SILO versions set sparc_ramdisk_image to zero and
762 * provide a full 64-bit physical address at
763 * sparc_ramdisk_image64.
765 ramdisk_image = sparc_ramdisk_image;
767 ramdisk_image = sparc_ramdisk_image64;
769 /* Another bootloader quirk. The bootloader normalizes
770 * the physical address to KERNBASE, so we have to
771 * factor that back out and add in the lowest valid
772 * physical page address to get the true physical address.
774 ramdisk_image -= KERNBASE;
775 ramdisk_image += phys_base;
777 numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
778 ramdisk_image, sparc_ramdisk_size);
780 initrd_start = ramdisk_image;
781 initrd_end = ramdisk_image + sparc_ramdisk_size;
783 memblock_reserve(initrd_start, sparc_ramdisk_size);
785 initrd_start += PAGE_OFFSET;
786 initrd_end += PAGE_OFFSET;
791 struct node_mem_mask {
795 static struct node_mem_mask node_masks[MAX_NUMNODES];
796 static int num_node_masks;
798 #ifdef CONFIG_NEED_MULTIPLE_NODES
800 int numa_cpu_lookup_table[NR_CPUS];
801 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
803 struct mdesc_mblock {
806 u64 offset; /* RA-to-PA */
808 static struct mdesc_mblock *mblocks;
809 static int num_mblocks;
811 static unsigned long ra_to_pa(unsigned long addr)
815 for (i = 0; i < num_mblocks; i++) {
816 struct mdesc_mblock *m = &mblocks[i];
818 if (addr >= m->base &&
819 addr < (m->base + m->size)) {
827 static int find_node(unsigned long addr)
831 addr = ra_to_pa(addr);
832 for (i = 0; i < num_node_masks; i++) {
833 struct node_mem_mask *p = &node_masks[i];
835 if ((addr & p->mask) == p->val)
838 /* The following condition has been observed on LDOM guests. */
839 WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
840 " rule. Some physical memory will be owned by node 0.");
844 static u64 memblock_nid_range(u64 start, u64 end, int *nid)
846 *nid = find_node(start);
848 while (start < end) {
849 int n = find_node(start);
863 /* This must be invoked after performing all of the necessary
864 * memblock_set_node() calls for 'nid'. We need to be able to get
865 * correct data from get_pfn_range_for_nid().
867 static void __init allocate_node_data(int nid)
869 struct pglist_data *p;
870 unsigned long start_pfn, end_pfn;
871 #ifdef CONFIG_NEED_MULTIPLE_NODES
874 paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
876 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
879 NODE_DATA(nid) = __va(paddr);
880 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
882 NODE_DATA(nid)->node_id = nid;
887 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
888 p->node_start_pfn = start_pfn;
889 p->node_spanned_pages = end_pfn - start_pfn;
892 static void init_node_masks_nonnuma(void)
894 #ifdef CONFIG_NEED_MULTIPLE_NODES
898 numadbg("Initializing tables for non-numa.\n");
900 node_masks[0].mask = node_masks[0].val = 0;
903 #ifdef CONFIG_NEED_MULTIPLE_NODES
904 for (i = 0; i < NR_CPUS; i++)
905 numa_cpu_lookup_table[i] = 0;
907 cpumask_setall(&numa_cpumask_lookup_table[0]);
911 #ifdef CONFIG_NEED_MULTIPLE_NODES
912 struct pglist_data *node_data[MAX_NUMNODES];
914 EXPORT_SYMBOL(numa_cpu_lookup_table);
915 EXPORT_SYMBOL(numa_cpumask_lookup_table);
916 EXPORT_SYMBOL(node_data);
918 struct mdesc_mlgroup {
924 static struct mdesc_mlgroup *mlgroups;
925 static int num_mlgroups;
927 static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
932 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
933 u64 target = mdesc_arc_target(md, arc);
936 val = mdesc_get_property(md, target,
938 if (val && *val == cfg_handle)
944 static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
947 u64 arc, candidate, best_latency = ~(u64)0;
949 candidate = MDESC_NODE_NULL;
950 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
951 u64 target = mdesc_arc_target(md, arc);
952 const char *name = mdesc_node_name(md, target);
955 if (strcmp(name, "pio-latency-group"))
958 val = mdesc_get_property(md, target, "latency", NULL);
962 if (*val < best_latency) {
968 if (candidate == MDESC_NODE_NULL)
971 return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
974 int of_node_to_nid(struct device_node *dp)
976 const struct linux_prom64_registers *regs;
977 struct mdesc_handle *md;
982 /* This is the right thing to do on currently supported
983 * SUN4U NUMA platforms as well, as the PCI controller does
984 * not sit behind any particular memory controller.
989 regs = of_get_property(dp, "reg", NULL);
993 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
999 mdesc_for_each_node_by_name(md, grp, "group") {
1000 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1012 static void __init add_node_ranges(void)
1014 struct memblock_region *reg;
1016 for_each_memblock(memory, reg) {
1017 unsigned long size = reg->size;
1018 unsigned long start, end;
1022 while (start < end) {
1023 unsigned long this_end;
1026 this_end = memblock_nid_range(start, end, &nid);
1028 numadbg("Setting memblock NUMA node nid[%d] "
1029 "start[%lx] end[%lx]\n",
1030 nid, start, this_end);
1032 memblock_set_node(start, this_end - start,
1033 &memblock.memory, nid);
1039 static int __init grab_mlgroups(struct mdesc_handle *md)
1041 unsigned long paddr;
1045 mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1050 paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
1055 mlgroups = __va(paddr);
1056 num_mlgroups = count;
1059 mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1060 struct mdesc_mlgroup *m = &mlgroups[count++];
1065 val = mdesc_get_property(md, node, "latency", NULL);
1067 val = mdesc_get_property(md, node, "address-match", NULL);
1069 val = mdesc_get_property(md, node, "address-mask", NULL);
1072 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1073 "match[%llx] mask[%llx]\n",
1074 count - 1, m->node, m->latency, m->match, m->mask);
1080 static int __init grab_mblocks(struct mdesc_handle *md)
1082 unsigned long paddr;
1086 mdesc_for_each_node_by_name(md, node, "mblock")
1091 paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
1096 mblocks = __va(paddr);
1097 num_mblocks = count;
1100 mdesc_for_each_node_by_name(md, node, "mblock") {
1101 struct mdesc_mblock *m = &mblocks[count++];
1104 val = mdesc_get_property(md, node, "base", NULL);
1106 val = mdesc_get_property(md, node, "size", NULL);
1108 val = mdesc_get_property(md, node,
1109 "address-congruence-offset", NULL);
1111 /* The address-congruence-offset property is optional.
1112 * Explicitly zero it to identify this.
1119 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1120 count - 1, m->base, m->size, m->offset);
1126 static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1127 u64 grp, cpumask_t *mask)
1131 cpumask_clear(mask);
1133 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1134 u64 target = mdesc_arc_target(md, arc);
1135 const char *name = mdesc_node_name(md, target);
1138 if (strcmp(name, "cpu"))
1140 id = mdesc_get_property(md, target, "id", NULL);
1141 if (*id < nr_cpu_ids)
1142 cpumask_set_cpu(*id, mask);
1146 static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1150 for (i = 0; i < num_mlgroups; i++) {
1151 struct mdesc_mlgroup *m = &mlgroups[i];
1152 if (m->node == node)
1158 static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1161 struct mdesc_mlgroup *candidate = NULL;
1162 u64 arc, best_latency = ~(u64)0;
1163 struct node_mem_mask *n;
1165 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1166 u64 target = mdesc_arc_target(md, arc);
1167 struct mdesc_mlgroup *m = find_mlgroup(target);
1170 if (m->latency < best_latency) {
1172 best_latency = m->latency;
1178 if (num_node_masks != index) {
1179 printk(KERN_ERR "Inconsistent NUMA state, "
1180 "index[%d] != num_node_masks[%d]\n",
1181 index, num_node_masks);
1185 n = &node_masks[num_node_masks++];
1187 n->mask = candidate->mask;
1188 n->val = candidate->match;
1190 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
1191 index, n->mask, n->val, candidate->latency);
1196 static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1202 numa_parse_mdesc_group_cpus(md, grp, &mask);
1204 for_each_cpu(cpu, &mask)
1205 numa_cpu_lookup_table[cpu] = index;
1206 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1209 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1210 for_each_cpu(cpu, &mask)
1215 return numa_attach_mlgroup(md, grp, index);
1218 static int __init numa_parse_mdesc(void)
1220 struct mdesc_handle *md = mdesc_grab();
1224 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1225 if (node == MDESC_NODE_NULL) {
1230 err = grab_mblocks(md);
1234 err = grab_mlgroups(md);
1239 mdesc_for_each_node_by_name(md, node, "group") {
1240 err = numa_parse_mdesc_group(md, node, count);
1248 for (i = 0; i < num_node_masks; i++) {
1249 allocate_node_data(i);
1259 static int __init numa_parse_jbus(void)
1261 unsigned long cpu, index;
1263 /* NUMA node id is encoded in bits 36 and higher, and there is
1264 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1267 for_each_present_cpu(cpu) {
1268 numa_cpu_lookup_table[cpu] = index;
1269 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1270 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1271 node_masks[index].val = cpu << 36UL;
1275 num_node_masks = index;
1279 for (index = 0; index < num_node_masks; index++) {
1280 allocate_node_data(index);
1281 node_set_online(index);
1287 static int __init numa_parse_sun4u(void)
1289 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1292 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1293 if ((ver >> 32UL) == __JALAPENO_ID ||
1294 (ver >> 32UL) == __SERRANO_ID)
1295 return numa_parse_jbus();
1300 static int __init bootmem_init_numa(void)
1304 numadbg("bootmem_init_numa()\n");
1307 if (tlb_type == hypervisor)
1308 err = numa_parse_mdesc();
1310 err = numa_parse_sun4u();
1317 static int bootmem_init_numa(void)
1324 static void __init bootmem_init_nonnuma(void)
1326 unsigned long top_of_ram = memblock_end_of_DRAM();
1327 unsigned long total_ram = memblock_phys_mem_size();
1329 numadbg("bootmem_init_nonnuma()\n");
1331 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1332 top_of_ram, total_ram);
1333 printk(KERN_INFO "Memory hole size: %ldMB\n",
1334 (top_of_ram - total_ram) >> 20);
1336 init_node_masks_nonnuma();
1337 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
1338 allocate_node_data(0);
1342 static unsigned long __init bootmem_init(unsigned long phys_base)
1344 unsigned long end_pfn;
1346 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1347 max_pfn = max_low_pfn = end_pfn;
1348 min_low_pfn = (phys_base >> PAGE_SHIFT);
1350 if (bootmem_init_numa() < 0)
1351 bootmem_init_nonnuma();
1353 /* Dump memblock with node info. */
1354 memblock_dump_all();
1356 /* XXX cpu notifier XXX */
1358 sparse_memory_present_with_active_regions(MAX_NUMNODES);
1364 static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1365 static int pall_ents __initdata;
1367 static unsigned long max_phys_bits = 40;
1369 bool kern_addr_valid(unsigned long addr)
1376 if ((long)addr < 0L) {
1377 unsigned long pa = __pa(addr);
1379 if ((addr >> max_phys_bits) != 0UL)
1382 return pfn_valid(pa >> PAGE_SHIFT);
1385 if (addr >= (unsigned long) KERNBASE &&
1386 addr < (unsigned long)&_end)
1389 pgd = pgd_offset_k(addr);
1393 pud = pud_offset(pgd, addr);
1397 if (pud_large(*pud))
1398 return pfn_valid(pud_pfn(*pud));
1400 pmd = pmd_offset(pud, addr);
1404 if (pmd_large(*pmd))
1405 return pfn_valid(pmd_pfn(*pmd));
1407 pte = pte_offset_kernel(pmd, addr);
1411 return pfn_valid(pte_pfn(*pte));
1413 EXPORT_SYMBOL(kern_addr_valid);
1415 static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1419 const unsigned long mask16gb = (1UL << 34) - 1UL;
1420 u64 pte_val = vstart;
1422 /* Each PUD is 8GB */
1423 if ((vstart & mask16gb) ||
1424 (vend - vstart <= mask16gb)) {
1425 pte_val ^= kern_linear_pte_xor[2];
1426 pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1428 return vstart + PUD_SIZE;
1431 pte_val ^= kern_linear_pte_xor[3];
1432 pte_val |= _PAGE_PUD_HUGE;
1434 vend = vstart + mask16gb + 1UL;
1435 while (vstart < vend) {
1436 pud_val(*pud) = pte_val;
1438 pte_val += PUD_SIZE;
1445 static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1448 if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1454 static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1458 const unsigned long mask256mb = (1UL << 28) - 1UL;
1459 const unsigned long mask2gb = (1UL << 31) - 1UL;
1460 u64 pte_val = vstart;
1462 /* Each PMD is 8MB */
1463 if ((vstart & mask256mb) ||
1464 (vend - vstart <= mask256mb)) {
1465 pte_val ^= kern_linear_pte_xor[0];
1466 pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1468 return vstart + PMD_SIZE;
1471 if ((vstart & mask2gb) ||
1472 (vend - vstart <= mask2gb)) {
1473 pte_val ^= kern_linear_pte_xor[1];
1474 pte_val |= _PAGE_PMD_HUGE;
1475 vend = vstart + mask256mb + 1UL;
1477 pte_val ^= kern_linear_pte_xor[2];
1478 pte_val |= _PAGE_PMD_HUGE;
1479 vend = vstart + mask2gb + 1UL;
1482 while (vstart < vend) {
1483 pmd_val(*pmd) = pte_val;
1485 pte_val += PMD_SIZE;
1493 static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1496 if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1502 static unsigned long __ref kernel_map_range(unsigned long pstart,
1503 unsigned long pend, pgprot_t prot,
1506 unsigned long vstart = PAGE_OFFSET + pstart;
1507 unsigned long vend = PAGE_OFFSET + pend;
1508 unsigned long alloc_bytes = 0UL;
1510 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1511 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1516 while (vstart < vend) {
1517 unsigned long this_end, paddr = __pa(vstart);
1518 pgd_t *pgd = pgd_offset_k(vstart);
1523 if (pgd_none(*pgd)) {
1526 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1527 alloc_bytes += PAGE_SIZE;
1528 pgd_populate(&init_mm, pgd, new);
1530 pud = pud_offset(pgd, vstart);
1531 if (pud_none(*pud)) {
1534 if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1535 vstart = kernel_map_hugepud(vstart, vend, pud);
1538 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1539 alloc_bytes += PAGE_SIZE;
1540 pud_populate(&init_mm, pud, new);
1543 pmd = pmd_offset(pud, vstart);
1544 if (pmd_none(*pmd)) {
1547 if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1548 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1551 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1552 alloc_bytes += PAGE_SIZE;
1553 pmd_populate_kernel(&init_mm, pmd, new);
1556 pte = pte_offset_kernel(pmd, vstart);
1557 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1558 if (this_end > vend)
1561 while (vstart < this_end) {
1562 pte_val(*pte) = (paddr | pgprot_val(prot));
1564 vstart += PAGE_SIZE;
1573 static void __init flush_all_kernel_tsbs(void)
1577 for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1578 struct tsb *ent = &swapper_tsb[i];
1580 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1582 #ifndef CONFIG_DEBUG_PAGEALLOC
1583 for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1584 struct tsb *ent = &swapper_4m_tsb[i];
1586 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1591 extern unsigned int kvmap_linear_patch[1];
1593 static void __init kernel_physical_mapping_init(void)
1595 unsigned long i, mem_alloced = 0UL;
1596 bool use_huge = true;
1598 #ifdef CONFIG_DEBUG_PAGEALLOC
1601 for (i = 0; i < pall_ents; i++) {
1602 unsigned long phys_start, phys_end;
1604 phys_start = pall[i].phys_addr;
1605 phys_end = phys_start + pall[i].reg_size;
1607 mem_alloced += kernel_map_range(phys_start, phys_end,
1608 PAGE_KERNEL, use_huge);
1611 printk("Allocated %ld bytes for kernel page tables.\n",
1614 kvmap_linear_patch[0] = 0x01000000; /* nop */
1615 flushi(&kvmap_linear_patch[0]);
1617 flush_all_kernel_tsbs();
1622 #ifdef CONFIG_DEBUG_PAGEALLOC
1623 void kernel_map_pages(struct page *page, int numpages, int enable)
1625 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1626 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1628 kernel_map_range(phys_start, phys_end,
1629 (enable ? PAGE_KERNEL : __pgprot(0)), false);
1631 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1632 PAGE_OFFSET + phys_end);
1634 /* We should perform an IPI and flush all TLBs here,
1635 * but that can deadlock, so we only flush the current cpu.
1637 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1638 PAGE_OFFSET + phys_end);
1642 unsigned long __init find_ecache_flush_span(unsigned long size)
1646 for (i = 0; i < pavail_ents; i++) {
1647 if (pavail[i].reg_size >= size)
1648 return pavail[i].phys_addr;
1654 unsigned long PAGE_OFFSET;
1655 EXPORT_SYMBOL(PAGE_OFFSET);
1657 unsigned long VMALLOC_END = 0x0000010000000000UL;
1658 EXPORT_SYMBOL(VMALLOC_END);
1660 unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
1661 unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1663 static void __init setup_page_offset(void)
1665 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1666 /* Cheetah/Panther support a full 64-bit virtual
1667 * address, so we can use all that our page tables
1670 sparc64_va_hole_top = 0xfff0000000000000UL;
1671 sparc64_va_hole_bottom = 0x0010000000000000UL;
1674 } else if (tlb_type == hypervisor) {
1675 switch (sun4v_chip_type) {
1676 case SUN4V_CHIP_NIAGARA1:
1677 case SUN4V_CHIP_NIAGARA2:
1678 /* T1 and T2 support 48-bit virtual addresses. */
1679 sparc64_va_hole_top = 0xffff800000000000UL;
1680 sparc64_va_hole_bottom = 0x0000800000000000UL;
1684 case SUN4V_CHIP_NIAGARA3:
1685 /* T3 supports 48-bit virtual addresses. */
1686 sparc64_va_hole_top = 0xffff800000000000UL;
1687 sparc64_va_hole_bottom = 0x0000800000000000UL;
1691 case SUN4V_CHIP_NIAGARA4:
1692 case SUN4V_CHIP_NIAGARA5:
1693 case SUN4V_CHIP_SPARC64X:
1694 case SUN4V_CHIP_SPARC_M6:
1695 /* T4 and later support 52-bit virtual addresses. */
1696 sparc64_va_hole_top = 0xfff8000000000000UL;
1697 sparc64_va_hole_bottom = 0x0008000000000000UL;
1700 case SUN4V_CHIP_SPARC_M7:
1702 /* M7 and later support 52-bit virtual addresses. */
1703 sparc64_va_hole_top = 0xfff8000000000000UL;
1704 sparc64_va_hole_bottom = 0x0008000000000000UL;
1710 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1711 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1716 PAGE_OFFSET = sparc64_va_hole_top;
1717 VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
1718 (sparc64_va_hole_bottom >> 2));
1720 pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
1721 PAGE_OFFSET, max_phys_bits);
1722 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
1723 VMALLOC_START, VMALLOC_END);
1724 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
1725 VMEMMAP_BASE, VMEMMAP_BASE << 1);
1728 static void __init tsb_phys_patch(void)
1730 struct tsb_ldquad_phys_patch_entry *pquad;
1731 struct tsb_phys_patch_entry *p;
1733 pquad = &__tsb_ldquad_phys_patch;
1734 while (pquad < &__tsb_ldquad_phys_patch_end) {
1735 unsigned long addr = pquad->addr;
1737 if (tlb_type == hypervisor)
1738 *(unsigned int *) addr = pquad->sun4v_insn;
1740 *(unsigned int *) addr = pquad->sun4u_insn;
1742 __asm__ __volatile__("flush %0"
1749 p = &__tsb_phys_patch;
1750 while (p < &__tsb_phys_patch_end) {
1751 unsigned long addr = p->addr;
1753 *(unsigned int *) addr = p->insn;
1755 __asm__ __volatile__("flush %0"
1763 /* Don't mark as init, we give this to the Hypervisor. */
1764 #ifndef CONFIG_DEBUG_PAGEALLOC
1765 #define NUM_KTSB_DESCR 2
1767 #define NUM_KTSB_DESCR 1
1769 static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
1771 /* The swapper TSBs are loaded with a base sequence of:
1773 * sethi %uhi(SYMBOL), REG1
1774 * sethi %hi(SYMBOL), REG2
1775 * or REG1, %ulo(SYMBOL), REG1
1776 * or REG2, %lo(SYMBOL), REG2
1777 * sllx REG1, 32, REG1
1778 * or REG1, REG2, REG1
1780 * When we use physical addressing for the TSB accesses, we patch the
1781 * first four instructions in the above sequence.
1784 static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1786 unsigned long high_bits, low_bits;
1788 high_bits = (pa >> 32) & 0xffffffff;
1789 low_bits = (pa >> 0) & 0xffffffff;
1791 while (start < end) {
1792 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1794 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
1795 __asm__ __volatile__("flush %0" : : "r" (ia));
1797 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
1798 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
1800 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
1801 __asm__ __volatile__("flush %0" : : "r" (ia + 2));
1803 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
1804 __asm__ __volatile__("flush %0" : : "r" (ia + 3));
1810 static void ktsb_phys_patch(void)
1812 extern unsigned int __swapper_tsb_phys_patch;
1813 extern unsigned int __swapper_tsb_phys_patch_end;
1814 unsigned long ktsb_pa;
1816 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1817 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1818 &__swapper_tsb_phys_patch_end, ktsb_pa);
1819 #ifndef CONFIG_DEBUG_PAGEALLOC
1821 extern unsigned int __swapper_4m_tsb_phys_patch;
1822 extern unsigned int __swapper_4m_tsb_phys_patch_end;
1823 ktsb_pa = (kern_base +
1824 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1825 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1826 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
1831 static void __init sun4v_ktsb_init(void)
1833 unsigned long ktsb_pa;
1835 /* First KTSB for PAGE_SIZE mappings. */
1836 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1838 switch (PAGE_SIZE) {
1841 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1842 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1846 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1847 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1851 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1852 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1855 case 4 * 1024 * 1024:
1856 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1857 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1861 ktsb_descr[0].assoc = 1;
1862 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1863 ktsb_descr[0].ctx_idx = 0;
1864 ktsb_descr[0].tsb_base = ktsb_pa;
1865 ktsb_descr[0].resv = 0;
1867 #ifndef CONFIG_DEBUG_PAGEALLOC
1868 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
1869 ktsb_pa = (kern_base +
1870 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1872 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
1873 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
1874 HV_PGSZ_MASK_256MB |
1876 HV_PGSZ_MASK_16GB) &
1878 ktsb_descr[1].assoc = 1;
1879 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
1880 ktsb_descr[1].ctx_idx = 0;
1881 ktsb_descr[1].tsb_base = ktsb_pa;
1882 ktsb_descr[1].resv = 0;
1886 void sun4v_ktsb_register(void)
1888 unsigned long pa, ret;
1890 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
1892 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
1894 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
1895 "errors with %lx\n", pa, ret);
1900 static void __init sun4u_linear_pte_xor_finalize(void)
1902 #ifndef CONFIG_DEBUG_PAGEALLOC
1903 /* This is where we would add Panther support for
1904 * 32MB and 256MB pages.
1909 static void __init sun4v_linear_pte_xor_finalize(void)
1911 #ifndef CONFIG_DEBUG_PAGEALLOC
1912 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
1913 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
1915 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1916 _PAGE_P_4V | _PAGE_W_4V);
1918 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
1921 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
1922 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
1924 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1925 _PAGE_P_4V | _PAGE_W_4V);
1927 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
1930 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
1931 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
1933 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
1934 _PAGE_P_4V | _PAGE_W_4V);
1936 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
1941 /* paging_init() sets up the page tables */
1943 static unsigned long last_valid_pfn;
1945 static void sun4u_pgprot_init(void);
1946 static void sun4v_pgprot_init(void);
1948 void __init paging_init(void)
1950 unsigned long end_pfn, shift, phys_base;
1951 unsigned long real_end, i;
1954 setup_page_offset();
1956 /* These build-time checks make sure that the dcache_dirty_cpu()
1957 * page->flags usage will work.
1959 * When a page gets marked as dcache-dirty, we store the
1960 * cpu number starting at bit 32 in the page->flags. Also,
1961 * functions like clear_dcache_dirty_cpu use the cpu mask
1962 * in 13-bit signed-immediate instruction fields.
1966 * Page flags must not reach into upper 32 bits that are used
1967 * for the cpu number
1969 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
1972 * The bit fields placed in the high range must not reach below
1973 * the 32 bit boundary. Otherwise we cannot place the cpu field
1974 * at the 32 bit boundary.
1976 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
1977 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
1979 BUILD_BUG_ON(NR_CPUS > 4096);
1981 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
1982 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1984 /* Invalidate both kernel TSBs. */
1985 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
1986 #ifndef CONFIG_DEBUG_PAGEALLOC
1987 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
1990 if (tlb_type == hypervisor)
1991 sun4v_pgprot_init();
1993 sun4u_pgprot_init();
1995 if (tlb_type == cheetah_plus ||
1996 tlb_type == hypervisor) {
2001 if (tlb_type == hypervisor)
2002 sun4v_patch_tlb_handlers();
2004 /* Find available physical memory...
2006 * Read it twice in order to work around a bug in openfirmware.
2007 * The call to grab this table itself can cause openfirmware to
2008 * allocate memory, which in turn can take away some space from
2009 * the list of available memory. Reading it twice makes sure
2010 * we really do get the final value.
2012 read_obp_translations();
2013 read_obp_memory("reg", &pall[0], &pall_ents);
2014 read_obp_memory("available", &pavail[0], &pavail_ents);
2015 read_obp_memory("available", &pavail[0], &pavail_ents);
2017 phys_base = 0xffffffffffffffffUL;
2018 for (i = 0; i < pavail_ents; i++) {
2019 phys_base = min(phys_base, pavail[i].phys_addr);
2020 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
2023 memblock_reserve(kern_base, kern_size);
2025 find_ramdisk(phys_base);
2027 memblock_enforce_memory_limit(cmdline_memory_size);
2029 memblock_allow_resize();
2030 memblock_dump_all();
2032 set_bit(0, mmu_context_bmap);
2034 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2036 real_end = (unsigned long)_end;
2037 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
2038 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2039 num_kernel_image_mappings);
2041 /* Set kernel pgd to upper alias so physical page computations
2044 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2046 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
2048 inherit_prom_mappings();
2050 /* Ok, we can use our TLB miss and window trap handlers safely. */
2055 prom_build_devicetree();
2056 of_populate_present_mask();
2058 of_fill_in_cpu_data();
2061 if (tlb_type == hypervisor) {
2063 mdesc_populate_present_mask(cpu_all_mask);
2065 mdesc_fill_in_cpu_data(cpu_all_mask);
2067 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
2069 sun4v_linear_pte_xor_finalize();
2072 sun4v_ktsb_register();
2074 unsigned long impl, ver;
2076 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2077 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2079 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2080 impl = ((ver >> 32) & 0xffff);
2081 if (impl == PANTHER_IMPL)
2082 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2083 HV_PGSZ_MASK_256MB);
2085 sun4u_linear_pte_xor_finalize();
2088 /* Flush the TLBs and the 4M TSB so that the updated linear
2089 * pte XOR settings are realized for all mappings.
2092 #ifndef CONFIG_DEBUG_PAGEALLOC
2093 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2097 /* Setup bootmem... */
2098 last_valid_pfn = end_pfn = bootmem_init(phys_base);
2100 /* Once the OF device tree and MDESC have been setup, we know
2101 * the list of possible cpus. Therefore we can allocate the
2104 for_each_possible_cpu(i) {
2105 node = cpu_to_node(i);
2107 softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2110 hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
2115 kernel_physical_mapping_init();
2118 unsigned long max_zone_pfns[MAX_NR_ZONES];
2120 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2122 max_zone_pfns[ZONE_NORMAL] = end_pfn;
2124 free_area_init_nodes(max_zone_pfns);
2127 printk("Booting Linux...\n");
2130 int page_in_phys_avail(unsigned long paddr)
2136 for (i = 0; i < pavail_ents; i++) {
2137 unsigned long start, end;
2139 start = pavail[i].phys_addr;
2140 end = start + pavail[i].reg_size;
2142 if (paddr >= start && paddr < end)
2145 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2147 #ifdef CONFIG_BLK_DEV_INITRD
2148 if (paddr >= __pa(initrd_start) &&
2149 paddr < __pa(PAGE_ALIGN(initrd_end)))
2156 static void __init register_page_bootmem_info(void)
2158 #ifdef CONFIG_NEED_MULTIPLE_NODES
2161 for_each_online_node(i)
2162 if (NODE_DATA(i)->node_spanned_pages)
2163 register_page_bootmem_info_node(NODE_DATA(i));
2166 void __init mem_init(void)
2168 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2170 register_page_bootmem_info();
2174 * Set up the zero page, mark it reserved, so that page count
2175 * is not manipulated when freeing the page from user ptes.
2177 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2178 if (mem_map_zero == NULL) {
2179 prom_printf("paging_init: Cannot alloc zero page.\n");
2182 mark_page_reserved(mem_map_zero);
2184 mem_init_print_info(NULL);
2186 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2187 cheetah_ecache_flush_init();
2190 void free_initmem(void)
2192 unsigned long addr, initend;
2195 /* If the physical memory maps were trimmed by kernel command
2196 * line options, don't even try freeing this initmem stuff up.
2197 * The kernel image could have been in the trimmed out region
2198 * and if so the freeing below will free invalid page structs.
2200 if (cmdline_memory_size)
2204 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2206 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2207 initend = (unsigned long)(__init_end) & PAGE_MASK;
2208 for (; addr < initend; addr += PAGE_SIZE) {
2212 ((unsigned long) __va(kern_base)) -
2213 ((unsigned long) KERNBASE));
2214 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2217 free_reserved_page(virt_to_page(page));
2221 #ifdef CONFIG_BLK_DEV_INITRD
2222 void free_initrd_mem(unsigned long start, unsigned long end)
2224 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2229 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2230 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2231 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2232 #define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2233 #define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2234 #define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2236 pgprot_t PAGE_KERNEL __read_mostly;
2237 EXPORT_SYMBOL(PAGE_KERNEL);
2239 pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2240 pgprot_t PAGE_COPY __read_mostly;
2242 pgprot_t PAGE_SHARED __read_mostly;
2243 EXPORT_SYMBOL(PAGE_SHARED);
2245 unsigned long pg_iobits __read_mostly;
2247 unsigned long _PAGE_IE __read_mostly;
2248 EXPORT_SYMBOL(_PAGE_IE);
2250 unsigned long _PAGE_E __read_mostly;
2251 EXPORT_SYMBOL(_PAGE_E);
2253 unsigned long _PAGE_CACHE __read_mostly;
2254 EXPORT_SYMBOL(_PAGE_CACHE);
2256 #ifdef CONFIG_SPARSEMEM_VMEMMAP
2257 int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2260 unsigned long pte_base;
2262 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2263 _PAGE_CP_4U | _PAGE_CV_4U |
2264 _PAGE_P_4U | _PAGE_W_4U);
2265 if (tlb_type == hypervisor)
2266 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2267 _PAGE_CP_4V | _PAGE_CV_4V |
2268 _PAGE_P_4V | _PAGE_W_4V);
2270 pte_base |= _PAGE_PMD_HUGE;
2272 vstart = vstart & PMD_MASK;
2273 vend = ALIGN(vend, PMD_SIZE);
2274 for (; vstart < vend; vstart += PMD_SIZE) {
2275 pgd_t *pgd = pgd_offset_k(vstart);
2280 if (pgd_none(*pgd)) {
2281 pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2285 pgd_populate(&init_mm, pgd, new);
2288 pud = pud_offset(pgd, vstart);
2289 if (pud_none(*pud)) {
2290 pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2294 pud_populate(&init_mm, pud, new);
2297 pmd = pmd_offset(pud, vstart);
2299 pte = pmd_val(*pmd);
2300 if (!(pte & _PAGE_VALID)) {
2301 void *block = vmemmap_alloc_block(PMD_SIZE, node);
2306 pmd_val(*pmd) = pte_base | __pa(block);
2313 void vmemmap_free(unsigned long start, unsigned long end)
2316 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
2318 static void prot_init_common(unsigned long page_none,
2319 unsigned long page_shared,
2320 unsigned long page_copy,
2321 unsigned long page_readonly,
2322 unsigned long page_exec_bit)
2324 PAGE_COPY = __pgprot(page_copy);
2325 PAGE_SHARED = __pgprot(page_shared);
2327 protection_map[0x0] = __pgprot(page_none);
2328 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2329 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2330 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2331 protection_map[0x4] = __pgprot(page_readonly);
2332 protection_map[0x5] = __pgprot(page_readonly);
2333 protection_map[0x6] = __pgprot(page_copy);
2334 protection_map[0x7] = __pgprot(page_copy);
2335 protection_map[0x8] = __pgprot(page_none);
2336 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2337 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2338 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2339 protection_map[0xc] = __pgprot(page_readonly);
2340 protection_map[0xd] = __pgprot(page_readonly);
2341 protection_map[0xe] = __pgprot(page_shared);
2342 protection_map[0xf] = __pgprot(page_shared);
2345 static void __init sun4u_pgprot_init(void)
2347 unsigned long page_none, page_shared, page_copy, page_readonly;
2348 unsigned long page_exec_bit;
2351 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2352 _PAGE_CACHE_4U | _PAGE_P_4U |
2353 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2355 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2356 _PAGE_CACHE_4U | _PAGE_P_4U |
2357 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2358 _PAGE_EXEC_4U | _PAGE_L_4U);
2360 _PAGE_IE = _PAGE_IE_4U;
2361 _PAGE_E = _PAGE_E_4U;
2362 _PAGE_CACHE = _PAGE_CACHE_4U;
2364 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2365 __ACCESS_BITS_4U | _PAGE_E_4U);
2367 #ifdef CONFIG_DEBUG_PAGEALLOC
2368 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2370 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2373 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2374 _PAGE_P_4U | _PAGE_W_4U);
2376 for (i = 1; i < 4; i++)
2377 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2379 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2380 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2381 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2384 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2385 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2386 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2387 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2388 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2389 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2390 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2392 page_exec_bit = _PAGE_EXEC_4U;
2394 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2398 static void __init sun4v_pgprot_init(void)
2400 unsigned long page_none, page_shared, page_copy, page_readonly;
2401 unsigned long page_exec_bit;
2404 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2405 _PAGE_CACHE_4V | _PAGE_P_4V |
2406 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2408 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2410 _PAGE_IE = _PAGE_IE_4V;
2411 _PAGE_E = _PAGE_E_4V;
2412 _PAGE_CACHE = _PAGE_CACHE_4V;
2414 #ifdef CONFIG_DEBUG_PAGEALLOC
2415 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2417 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2420 kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
2421 _PAGE_P_4V | _PAGE_W_4V);
2423 for (i = 1; i < 4; i++)
2424 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2426 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2427 __ACCESS_BITS_4V | _PAGE_E_4V);
2429 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2430 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2431 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2432 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2434 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
2435 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2436 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2437 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2438 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2439 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
2440 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2442 page_exec_bit = _PAGE_EXEC_4V;
2444 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2448 unsigned long pte_sz_bits(unsigned long sz)
2450 if (tlb_type == hypervisor) {
2454 return _PAGE_SZ8K_4V;
2456 return _PAGE_SZ64K_4V;
2458 return _PAGE_SZ512K_4V;
2459 case 4 * 1024 * 1024:
2460 return _PAGE_SZ4MB_4V;
2466 return _PAGE_SZ8K_4U;
2468 return _PAGE_SZ64K_4U;
2470 return _PAGE_SZ512K_4U;
2471 case 4 * 1024 * 1024:
2472 return _PAGE_SZ4MB_4U;
2477 pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2481 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
2482 pte_val(pte) |= (((unsigned long)space) << 32);
2483 pte_val(pte) |= pte_sz_bits(page_size);
2488 static unsigned long kern_large_tte(unsigned long paddr)
2492 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2493 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2494 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2495 if (tlb_type == hypervisor)
2496 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2497 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
2498 _PAGE_EXEC_4V | _PAGE_W_4V);
2503 /* If not locked, zap it. */
2504 void __flush_tlb_all(void)
2506 unsigned long pstate;
2509 __asm__ __volatile__("flushw\n\t"
2510 "rdpr %%pstate, %0\n\t"
2511 "wrpr %0, %1, %%pstate"
2514 if (tlb_type == hypervisor) {
2515 sun4v_mmu_demap_all();
2516 } else if (tlb_type == spitfire) {
2517 for (i = 0; i < 64; i++) {
2518 /* Spitfire Errata #32 workaround */
2519 /* NOTE: Always runs on spitfire, so no
2520 * cheetah+ page size encodings.
2522 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2526 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2528 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2529 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2532 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2533 spitfire_put_dtlb_data(i, 0x0UL);
2536 /* Spitfire Errata #32 workaround */
2537 /* NOTE: Always runs on spitfire, so no
2538 * cheetah+ page size encodings.
2540 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2544 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2546 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2547 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2550 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2551 spitfire_put_itlb_data(i, 0x0UL);
2554 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2555 cheetah_flush_dtlb_all();
2556 cheetah_flush_itlb_all();
2558 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2562 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2563 unsigned long address)
2565 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2566 __GFP_REPEAT | __GFP_ZERO);
2570 pte = (pte_t *) page_address(page);
2575 pgtable_t pte_alloc_one(struct mm_struct *mm,
2576 unsigned long address)
2578 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
2579 __GFP_REPEAT | __GFP_ZERO);
2582 if (!pgtable_page_ctor(page)) {
2583 free_hot_cold_page(page, 0);
2586 return (pte_t *) page_address(page);
2589 void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2591 free_page((unsigned long)pte);
2594 static void __pte_free(pgtable_t pte)
2596 struct page *page = virt_to_page(pte);
2598 pgtable_page_dtor(page);
2602 void pte_free(struct mm_struct *mm, pgtable_t pte)
2607 void pgtable_free(void *table, bool is_page)
2612 kmem_cache_free(pgtable_cache, table);
2615 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2616 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2619 unsigned long pte, flags;
2620 struct mm_struct *mm;
2623 if (!pmd_large(entry) || !pmd_young(entry))
2626 pte = pmd_val(entry);
2628 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
2629 if (!(pte & _PAGE_VALID))
2632 /* We are fabricating 8MB pages using 4MB real hw pages. */
2633 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
2637 spin_lock_irqsave(&mm->context.lock, flags);
2639 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2640 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
2643 spin_unlock_irqrestore(&mm->context.lock, flags);
2645 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2647 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2648 static void context_reload(void *__data)
2650 struct mm_struct *mm = __data;
2652 if (mm == current->mm)
2653 load_secondary_context(mm);
2656 void hugetlb_setup(struct pt_regs *regs)
2658 struct mm_struct *mm = current->mm;
2659 struct tsb_config *tp;
2661 if (in_atomic() || !mm) {
2662 const struct exception_table_entry *entry;
2664 entry = search_exception_tables(regs->tpc);
2666 regs->tpc = entry->fixup;
2667 regs->tnpc = regs->tpc + 4;
2670 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2671 die_if_kernel("HugeTSB in atomic", regs);
2674 tp = &mm->context.tsb_block[MM_TSB_HUGE];
2675 if (likely(tp->tsb == NULL))
2676 tsb_grow(mm, MM_TSB_HUGE, 0);
2678 tsb_context_switch(mm);
2681 /* On UltraSPARC-III+ and later, configure the second half of
2682 * the Data-TLB for huge pages.
2684 if (tlb_type == cheetah_plus) {
2687 spin_lock(&ctx_alloc_lock);
2688 ctx = mm->context.sparc64_ctx_val;
2689 ctx &= ~CTX_PGSZ_MASK;
2690 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2691 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2693 if (ctx != mm->context.sparc64_ctx_val) {
2694 /* When changing the page size fields, we
2695 * must perform a context flush so that no
2696 * stale entries match. This flush must
2697 * occur with the original context register
2700 do_flush_tlb_mm(mm);
2702 /* Reload the context register of all processors
2703 * also executing in this address space.
2705 mm->context.sparc64_ctx_val = ctx;
2706 on_each_cpu(context_reload, mm, 0);
2708 spin_unlock(&ctx_alloc_lock);
2714 #define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
2716 #define do_flush_tlb_kernel_range __flush_tlb_kernel_range
2719 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
2721 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
2722 if (start < LOW_OBP_ADDRESS) {
2723 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
2724 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
2726 if (end > HI_OBP_ADDRESS) {
2727 flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
2728 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
2731 flush_tsb_kernel_range(start, end);
2732 do_flush_tlb_kernel_range(start, end);