Linux-libre 5.3.12-gnu
arch/mips/loongson64/common/mem.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/mm.h>

#include <asm/bootinfo.h>

#include <loongson.h>
#include <boot_param.h>
#include <mem.h>
#include <pci.h>

#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE

u32 memsize, highmemsize;

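/*
 * Register low memory, reserve the hole between it and the PCI window,
 * and (on 64-bit kernels) register the high memory segment reported by
 * the firmware.  memsize and highmemsize are in MiB, hence the << 20
 * conversions to byte counts.
 */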
void __init prom_init_memory(void)
{
        add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);

        add_memory_region(memsize << 20,
                          LOONGSON_PCI_MEM_START - (memsize << 20),
                          BOOT_MEM_RESERVED);

#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
        {
                int bit;

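                /*
                 * Round the total memory size (in MiB) up to the next power
                 * of two and convert it to a byte count; the result is used
                 * as the size of the CPU-to-DDR address window below.
                 */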
                bit = fls(memsize + highmemsize);
                if (bit != ffs(memsize + highmemsize))
                        bit += 20;
                else
                        bit = bit + 20 - 1;

                /* set cpu window3 to map CPU to DDR: 2G -> 2G */
                LOONGSON_ADDRWIN_CPUTODDR(ADDRWIN_WIN3, 0x80000000ul,
                                          0x80000000ul, (1UL << bit));
                mmiowb();
        }
#endif /* CONFIG_CPU_SUPPORTS_ADDRWINCFG */

#ifdef CONFIG_64BIT
        if (highmemsize > 0)
                add_memory_region(LOONGSON_HIGHMEM_START,
                                  highmemsize << 20, BOOT_MEM_RAM);

        add_memory_region(LOONGSON_PCI_MEM_END + 1,
                          LOONGSON_HIGHMEM_START - LOONGSON_PCI_MEM_END - 1,
                          BOOT_MEM_RESERVED);

#endif /* CONFIG_64BIT */
}

#else /* CONFIG_LEFI_FIRMWARE_INTERFACE */

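/*
 * With the LEFI interface the firmware hands over a memory map in
 * loongson_memmap; register each node-0 entry with add_memory_region().
 * mem_start is a byte address, mem_size is in MiB.
 */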
void __init prom_init_memory(void)
{
        int i;
        u32 node_id;
        u32 mem_type;

        /* parse memory information */
        for (i = 0; i < loongson_memmap->nr_map; i++) {
                node_id = loongson_memmap->map[i].node_id;
                mem_type = loongson_memmap->map[i].mem_type;

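                /* Only memory attached to node 0 is registered here. */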
                if (node_id == 0) {
                        switch (mem_type) {
                        case SYSTEM_RAM_LOW:
                        case SYSTEM_RAM_HIGH:
                                add_memory_region(loongson_memmap->map[i].mem_start,
                                        (u64)loongson_memmap->map[i].mem_size << 20,
                                        BOOT_MEM_RAM);
                                break;
                        case SYSTEM_RAM_RESERVED:
                                add_memory_region(loongson_memmap->map[i].mem_start,
                                        (u64)loongson_memmap->map[i].mem_size << 20,
                                        BOOT_MEM_RESERVED);
                                break;
                        }
                }
        }
}

#endif /* CONFIG_LEFI_FIRMWARE_INTERFACE */

/* override of arch/mips/mm/cache.c: __uncached_access */
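/*
 * Mappings are forced uncached for O_DSYNC opens, for physical addresses
 * at or above __pa(high_memory) and for the Loongson MMIO window.
 */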
int __uncached_access(struct file *file, unsigned long addr)
{
        if (file->f_flags & O_DSYNC)
                return 1;

        return addr >= __pa(high_memory) ||
                ((addr >= LOONGSON_MMIO_MEM_START) &&
                 (addr < LOONGSON_MMIO_MEM_END));
}

#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED

#include <linux/pci.h>
#include <linux/sched.h>
#include <asm/current.h>

static unsigned long uca_start, uca_end;

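/*
 * For mappings that must be uncached, use the Loongson "uncached
 * accelerated" (write-combining-like) attribute when the requested range
 * lies entirely inside the display memory recorded by find_vga_mem_init();
 * otherwise fall back to plain uncached.
 */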
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        unsigned long offset = pfn << PAGE_SHIFT;
        unsigned long end = offset + size;

        if (__uncached_access(file, offset)) {
                if (uca_start && (offset >= uca_start) &&
                    (end <= uca_end))
                        return __pgprot((pgprot_val(vma_prot) &
                                         ~_CACHE_MASK) |
                                        _CACHE_UNCACHED_ACCELERATED);
                else
                        return pgprot_noncached(vma_prot);
        }
        return vma_prot;
}

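/*
 * Record the first memory BAR of a PCI display controller in
 * uca_start/uca_end so that phys_mem_access_prot() can map it as
 * uncached accelerated.
 */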
static int __init find_vga_mem_init(void)
{
        struct pci_dev *dev = NULL;
        struct resource *r;
        int idx;

        if (uca_start)
                return 0;

        for_each_pci_dev(dev) {
                if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
                        for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
                                r = &dev->resource[idx];
                                if (!r->start && r->end)
                                        continue;
                                if (r->flags & IORESOURCE_IO)
                                        continue;
                                if (r->flags & IORESOURCE_MEM) {
                                        uca_start = r->start;
                                        uca_end = r->end;
                                        return 0;
                                }
                        }
                }
        }

        return 0;
}

late_initcall(find_vga_mem_init);
#endif /* CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED */