1 From 8c846a50fd244e719c7f463c38e9333c7bd95977 Mon Sep 17 00:00:00 2001
2 From: Phil Elwell <phil@raspberrypi.org>
3 Date: Tue, 19 Feb 2019 22:06:59 +0000
4 Subject: [PATCH 529/773] PCI: brcmstb: Add dma-range mapping for inbound
7 The Broadcom STB PCIe host controller is intimately related to the
8 memory subsystem. This close relationship adds complexity to how cpu
9 system memory is mapped to PCIe memory. Ideally, this mapping is an
10 identity mapping, or an identity mapping off by a constant. Not so in
13 Consider the Broadcom reference board BCM97445LCC_4X8 which has 6 GB
14 of system memory. Here is how the PCIe controller maps the
15 system memory to PCIe memory:
17 memc0-a@[ 0....3fffffff] <=> pci@[ 0....3fffffff]
18 memc0-b@[100000000...13fffffff] <=> pci@[ 40000000....7fffffff]
19 memc1-a@[ 40000000....7fffffff] <=> pci@[ 80000000....bfffffff]
20 memc1-b@[300000000...33fffffff] <=> pci@[ c0000000....ffffffff]
21 memc2-a@[ 80000000....bfffffff] <=> pci@[100000000...13fffffff]
22 memc2-b@[c00000000...c3fffffff] <=> pci@[140000000...17fffffff]
24 Although there are some "gaps" that can be added between the
25 individual mappings by software, the permutation of memory regions for
26 the most part is fixed by HW. The solution of having something close
27 to an identity mapping is not possible.
29 The idea behind this HW design is that the same PCIe module can
30 act as an RC or EP, and if it acts as an EP it concatenates all
31 of system memory into a BAR so anything can be accessed. Unfortunately,
32 when the PCIe block is in the role of an RC it also presents this
33 "BAR" to downstream PCIe devices, rather than offering an identity map
34 between its system memory and PCIe space.
36 Suppose that an endpoint driver allocs some DMA memory. Suppose this
37 memory is located at 0x6000_0000, which is in the middle of memc1-a.
38 The driver wants a dma_addr_t value that it can pass on to the EP to
39 use. Without doing any custom mapping, the EP will use this value for
40 DMA: the driver will get a dma_addr_t equal to 0x6000_0000. But this
41 won't work; the device needs a dma_addr_t that reflects the PCIe space
42 address, namely 0xa000_0000.
44 So, essentially the solution to this problem must modify the
45 dma_addr_t returned by the DMA routines. There are two
46 ways (I know of) of doing this:
48 (a) overriding/redefining the dma_to_phys() and phys_to_dma() calls
49 that are used by the dma_ops routines. This is the approach of
51 arch/mips/cavium-octeon/dma-octeon.c
53 In ARM and ARM64 these two routines are defined in asm/dma-mapping.h
54 as static inline functions.
56 (b) Subscribe to a notifier that notifies when a device is added to a
57 bus. When this happens, set_dma_ops() can be called for the device.
58 This method is mentioned in:
60 http://lxr.free-electrons.com/source/drivers/of/platform.c?v=3.16#L152
62 where it says as a comment
64 "In case if platform code need to use own special DMA
65 configuration, it can use Platform bus notifier and
66 handle BUS_NOTIFY_ADD_DEVICE event to fix up DMA
69 Solution (b) is what this commit does. It uses its own set of
70 dma_ops which are wrappers around the arch_dma_ops. The
71 wrappers translate the dma addresses before/after invoking
72 the arch_dma_ops, as appropriate.
74 Signed-off-by: Jim Quinlan <jim2101024@gmail.com>
76 drivers/pci/controller/pcie-brcmstb.c | 420 +++++++++++++++++++++++++-
77 1 file changed, 411 insertions(+), 9 deletions(-)
79 --- a/drivers/pci/controller/pcie-brcmstb.c
80 +++ b/drivers/pci/controller/pcie-brcmstb.c
82 #include <linux/clk.h>
83 #include <linux/compiler.h>
84 #include <linux/delay.h>
85 +#include <linux/dma-mapping.h>
86 #include <linux/init.h>
87 #include <linux/interrupt.h>
89 @@ -319,11 +320,307 @@ static struct pci_ops brcm_pcie_ops = {
90 ((val & ~reg##_##field##_MASK) | \
91 (reg##_##field##_MASK & (field_val << reg##_##field##_SHIFT)))
93 +static const struct dma_map_ops *arch_dma_ops;
94 +static const struct dma_map_ops *brcm_dma_ops_ptr;
95 +static struct of_pci_range *dma_ranges;
96 +static int num_dma_ranges;
98 static phys_addr_t scb_size[BRCM_MAX_SCB];
101 static DEFINE_MUTEX(brcm_pcie_lock);
103 +static dma_addr_t brcm_to_pci(dma_addr_t addr)
105 + struct of_pci_range *p;
107 + if (!num_dma_ranges)
110 + for (p = dma_ranges; p < &dma_ranges[num_dma_ranges]; p++)
111 + if (addr >= p->cpu_addr && addr < (p->cpu_addr + p->size))
112 + return addr - p->cpu_addr + p->pci_addr;
117 +static dma_addr_t brcm_to_cpu(dma_addr_t addr)
119 + struct of_pci_range *p;
121 + if (!num_dma_ranges)
124 + for (p = dma_ranges; p < &dma_ranges[num_dma_ranges]; p++)
125 + if (addr >= p->pci_addr && addr < (p->pci_addr + p->size))
126 + return addr - p->pci_addr + p->cpu_addr;
131 +static void *brcm_alloc(struct device *dev, size_t size, dma_addr_t *handle,
132 + gfp_t gfp, unsigned long attrs)
136 + ret = arch_dma_ops->alloc(dev, size, handle, gfp, attrs);
138 + *handle = brcm_to_pci(*handle);
142 +static void brcm_free(struct device *dev, size_t size, void *cpu_addr,
143 + dma_addr_t handle, unsigned long attrs)
145 + handle = brcm_to_cpu(handle);
146 + arch_dma_ops->free(dev, size, cpu_addr, handle, attrs);
149 +static int brcm_mmap(struct device *dev, struct vm_area_struct *vma,
150 + void *cpu_addr, dma_addr_t dma_addr, size_t size,
151 + unsigned long attrs)
153 + dma_addr = brcm_to_cpu(dma_addr);
154 + return arch_dma_ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
157 +static int brcm_get_sgtable(struct device *dev, struct sg_table *sgt,
158 + void *cpu_addr, dma_addr_t handle, size_t size,
159 + unsigned long attrs)
161 + handle = brcm_to_cpu(handle);
162 + return arch_dma_ops->get_sgtable(dev, sgt, cpu_addr, handle, size,
166 +static dma_addr_t brcm_map_page(struct device *dev, struct page *page,
167 + unsigned long offset, size_t size,
168 + enum dma_data_direction dir,
169 + unsigned long attrs)
171 + return brcm_to_pci(arch_dma_ops->map_page(dev, page, offset, size,
175 +static void brcm_unmap_page(struct device *dev, dma_addr_t handle,
176 + size_t size, enum dma_data_direction dir,
177 + unsigned long attrs)
179 + handle = brcm_to_cpu(handle);
180 + arch_dma_ops->unmap_page(dev, handle, size, dir, attrs);
183 +static int brcm_map_sg(struct device *dev, struct scatterlist *sgl,
184 + int nents, enum dma_data_direction dir,
185 + unsigned long attrs)
188 + struct scatterlist *sg;
190 + for_each_sg(sgl, sg, nents, i) {
191 +#ifdef CONFIG_NEED_SG_DMA_LENGTH
192 + sg->dma_length = sg->length;
195 + brcm_dma_ops_ptr->map_page(dev, sg_page(sg), sg->offset,
196 + sg->length, dir, attrs);
197 + if (dma_mapping_error(dev, sg->dma_address))
203 + for_each_sg(sgl, sg, i, j)
204 + brcm_dma_ops_ptr->unmap_page(dev, sg_dma_address(sg),
205 + sg_dma_len(sg), dir, attrs);
209 +static void brcm_unmap_sg(struct device *dev,
210 + struct scatterlist *sgl, int nents,
211 + enum dma_data_direction dir,
212 + unsigned long attrs)
215 + struct scatterlist *sg;
217 + for_each_sg(sgl, sg, nents, i)
218 + brcm_dma_ops_ptr->unmap_page(dev, sg_dma_address(sg),
219 + sg_dma_len(sg), dir, attrs);
222 +static void brcm_sync_single_for_cpu(struct device *dev,
223 + dma_addr_t handle, size_t size,
224 + enum dma_data_direction dir)
226 + handle = brcm_to_cpu(handle);
227 + arch_dma_ops->sync_single_for_cpu(dev, handle, size, dir);
230 +static void brcm_sync_single_for_device(struct device *dev,
231 + dma_addr_t handle, size_t size,
232 + enum dma_data_direction dir)
234 + handle = brcm_to_cpu(handle);
235 + arch_dma_ops->sync_single_for_device(dev, handle, size, dir);
238 +static dma_addr_t brcm_map_resource(struct device *dev, phys_addr_t phys,
240 + enum dma_data_direction dir,
241 + unsigned long attrs)
243 + if (arch_dma_ops->map_resource)
244 + return brcm_to_pci(arch_dma_ops->map_resource
245 + (dev, phys, size, dir, attrs));
246 + return brcm_to_pci((dma_addr_t)phys);
249 +static void brcm_unmap_resource(struct device *dev, dma_addr_t handle,
250 + size_t size, enum dma_data_direction dir,
251 + unsigned long attrs)
253 + if (arch_dma_ops->unmap_resource)
254 + arch_dma_ops->unmap_resource(dev, brcm_to_cpu(handle), size,
258 +void brcm_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
259 + int nents, enum dma_data_direction dir)
261 + struct scatterlist *sg;
264 + for_each_sg(sgl, sg, nents, i)
265 + brcm_dma_ops_ptr->sync_single_for_cpu(dev, sg_dma_address(sg),
269 +void brcm_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
270 + int nents, enum dma_data_direction dir)
272 + struct scatterlist *sg;
275 + for_each_sg(sgl, sg, nents, i)
276 + brcm_dma_ops_ptr->sync_single_for_device(dev,
277 + sg_dma_address(sg),
281 +static int brcm_mapping_error(struct device *dev, dma_addr_t dma_addr)
283 + return arch_dma_ops->mapping_error(dev, dma_addr);
286 +static int brcm_dma_supported(struct device *dev, u64 mask)
288 + if (num_dma_ranges) {
290 + * It is our translated addresses that the EP will "see", so
291 + * we check all of the ranges for the largest possible value.
295 + for (i = 0; i < num_dma_ranges; i++)
296 + if (dma_ranges[i].pci_addr + dma_ranges[i].size - 1
302 + return arch_dma_ops->dma_supported(dev, mask);
305 +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
306 +u64 brcm_get_required_mask(struct device *dev)
308 + return arch_dma_ops->get_required_mask(dev);
312 +static const struct dma_map_ops brcm_dma_ops = {
313 + .alloc = brcm_alloc,
316 + .get_sgtable = brcm_get_sgtable,
317 + .map_page = brcm_map_page,
318 + .unmap_page = brcm_unmap_page,
319 + .map_sg = brcm_map_sg,
320 + .unmap_sg = brcm_unmap_sg,
321 + .map_resource = brcm_map_resource,
322 + .unmap_resource = brcm_unmap_resource,
323 + .sync_single_for_cpu = brcm_sync_single_for_cpu,
324 + .sync_single_for_device = brcm_sync_single_for_device,
325 + .sync_sg_for_cpu = brcm_sync_sg_for_cpu,
326 + .sync_sg_for_device = brcm_sync_sg_for_device,
327 + .mapping_error = brcm_mapping_error,
328 + .dma_supported = brcm_dma_supported,
329 +#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
330 + .get_required_mask = brcm_get_required_mask,
334 +static void brcm_set_dma_ops(struct device *dev)
338 + if (IS_ENABLED(CONFIG_ARM64)) {
340 + * We are going to invoke get_dma_ops(). That
341 + * function, at this point in time, invokes
342 + * get_arch_dma_ops(), and for ARM64 that function
343 + * returns a pointer to dummy_dma_ops. So then we'd
344 + * like to call arch_setup_dma_ops(), but that isn't
345 + * exported. Instead, we call of_dma_configure(),
346 + * which is exported, and this calls
347 + * arch_setup_dma_ops(). Once we do this the call to
348 + * get_dma_ops() will work properly because
349 + * dev->dma_ops will be set.
351 + ret = of_dma_configure(dev, dev->of_node, true);
353 + dev_err(dev, "of_dma_configure() failed: %d\n", ret);
358 + arch_dma_ops = get_dma_ops(dev);
359 + if (!arch_dma_ops) {
360 + dev_err(dev, "failed to get arch_dma_ops\n");
364 + set_dma_ops(dev, &brcm_dma_ops);
367 +static int brcmstb_platform_notifier(struct notifier_block *nb,
368 + unsigned long event, void *__dev)
370 + struct device *dev = __dev;
372 + brcm_dma_ops_ptr = &brcm_dma_ops;
373 + if (event != BUS_NOTIFY_ADD_DEVICE)
374 + return NOTIFY_DONE;
376 + brcm_set_dma_ops(dev);
380 +static struct notifier_block brcmstb_platform_nb = {
381 + .notifier_call = brcmstb_platform_notifier,
384 +static int brcm_register_notifier(void)
386 + return bus_register_notifier(&pci_bus_type, &brcmstb_platform_nb);
389 +static int brcm_unregister_notifier(void)
391 + return bus_unregister_notifier(&pci_bus_type, &brcmstb_platform_nb);
394 static u32 rd_fld(void __iomem *p, u32 mask, int shift)
396 return (bcm_readl(p) & mask) >> shift;
397 @@ -597,9 +894,71 @@ static inline void brcm_pcie_perst_set(s
398 WR_FLD_RB(pcie->base, PCIE_MISC_PCIE_CTRL, PCIE_PERSTB, !val);
401 +static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
402 + struct device_node *node)
404 + const int na = 3, ns = 2;
407 + parser->node = node;
408 + parser->pna = of_n_addr_cells(node);
409 + parser->np = parser->pna + na + ns;
411 + parser->range = of_get_property(node, "dma-ranges", &rlen);
412 + if (!parser->range)
415 + parser->end = parser->range + rlen / sizeof(__be32);
420 +static int brcm_pcie_parse_map_dma_ranges(struct brcm_pcie *pcie)
423 + struct of_pci_range_parser parser;
424 + struct device_node *dn = pcie->dn;
427 + * Parse dma-ranges property if present. If there are multiple
428 + * PCIe controllers, we only have to parse from one of them since
429 + * the others will have an identical mapping.
431 + if (!pci_dma_range_parser_init(&parser, dn)) {
432 + unsigned int max_ranges
433 + = (parser.end - parser.range) / parser.np;
435 + dma_ranges = kcalloc(max_ranges, sizeof(struct of_pci_range),
440 + for (i = 0; of_pci_range_parser_one(&parser, dma_ranges + i);
445 + for (i = 0, num_memc = 0; i < BRCM_MAX_SCB; i++) {
446 + u64 size = brcmstb_memory_memc_size(i);
448 + if (size == (u64)-1) {
449 + dev_err(pcie->dev, "cannot get memc%d size", i);
452 + scb_size[i] = roundup_pow_of_two_64(size);
462 static int brcm_pcie_add_controller(struct brcm_pcie *pcie)
465 + struct device *dev = pcie->dev;
467 mutex_lock(&brcm_pcie_lock);
469 @@ -607,12 +966,21 @@ static int brcm_pcie_add_controller(stru
473 + ret = brcm_register_notifier();
475 + dev_err(dev, "failed to register pci bus notifier\n");
478 + ret = brcm_pcie_parse_map_dma_ranges(pcie);
482 /* Determine num_memc and their sizes */
483 for (i = 0, num_memc = 0; i < BRCM_MAX_SCB; i++) {
484 u64 size = brcmstb_memory_memc_size(i);
486 if (size == (u64)-1) {
487 - dev_err(pcie->dev, "cannot get memc%d size\n", i);
488 + dev_err(dev, "cannot get memc%d size\n", i);
492 @@ -636,8 +1004,16 @@ done:
493 static void brcm_pcie_remove_controller(struct brcm_pcie *pcie)
495 mutex_lock(&brcm_pcie_lock);
496 - if (--num_pcie == 0)
498 + if (--num_pcie > 0)
501 + if (brcm_unregister_notifier())
502 + dev_err(pcie->dev, "failed to unregister pci bus notifier\n");
505 + num_dma_ranges = 0;
508 mutex_unlock(&brcm_pcie_lock);
511 @@ -757,6 +1133,38 @@ static int brcm_pcie_setup(struct brcm_p
517 + * The best-case scenario is to place the inbound
518 + * region in the first 4GB of pci-space, as some
519 + * legacy devices can only address 32bits.
520 + * We would also like to put the MSI under 4GB
521 + * as well, since some devices require a 32bit
522 + * MSI target address.
524 + if (total_mem_size <= 0xc0000000ULL &&
525 + rc_bar2_size <= 0x100000000ULL) {
526 + rc_bar2_offset = 0;
529 + * The system memory is 4GB or larger so we
530 + * cannot start the inbound region at location
531 + * 0 (since we have to allow some space for
532 + * outbound memory @ 3GB). So instead we
533 + * start it at the 1x multiple of its size
535 + rc_bar2_offset = rc_bar2_size;
540 + * Set simple configuration based on memory sizes
541 + * only. We always start the viewport at address 0,
542 + * and set the MSI target address accordingly.
544 + rc_bar2_offset = 0;
547 tmp = lower_32_bits(rc_bar2_offset);
548 tmp = INSERT_FIELD(tmp, PCIE_MISC_RC_BAR2_CONFIG_LO, SIZE,
549 encode_ibar_size(rc_bar2_size));
550 @@ -967,7 +1375,6 @@ static int brcm_pcie_probe(struct platfo
551 struct brcm_pcie *pcie;
552 struct resource *res;
555 struct pci_host_bridge *bridge;
556 struct pci_bus *child;
558 @@ -984,11 +1391,6 @@ static int brcm_pcie_probe(struct platfo
562 - if (of_property_read_u32(dn, "dma-ranges", &tmp) == 0) {
563 - dev_err(&pdev->dev, "cannot yet handle dma-ranges\n");
568 pcie->reg_offsets = data->offsets;
569 pcie->reg_field_info = data->reg_field_info;