// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file implements the DMA operations for NVLink devices. The NPU
 * devices all point to the same iommu table as the parent PCI device.
 *
 * Copyright Alistair Popple, IBM Corporation 2015.
 */
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/sizes.h>

#include <asm/debugfs.h>
#include <asm/powernv.h>
#include <asm/opal.h>

#include "pci.h"
static struct pci_dev *get_pci_dev(struct device_node *dn)
{
	struct pci_dn *pdn = PCI_DN(dn);
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
					   pdn->busno, pdn->devfn);

	/*
	 * pci_get_domain_bus_and_slot() increased the reference count of
	 * the PCI device, but callers don't actually need that as the PE
	 * already holds a reference to the device. Since callers aren't
	 * aware of the reference count change, call pci_dev_put() now to
	 * avoid leaks.
	 */
	if (pdev)
		pci_dev_put(pdev);

	return pdev;
}
/* Given an NPU device get the associated PCI device. */
struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
{
	struct device_node *dn;
	struct pci_dev *gpdev;

	if (WARN_ON(!npdev))
		return NULL;

	if (WARN_ON(!npdev->dev.of_node))
		return NULL;

	/* Get the associated PCI device */
	dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
	if (!dn)
		return NULL;

	gpdev = get_pci_dev(dn);
	of_node_put(dn);

	return gpdev;
}
EXPORT_SYMBOL(pnv_pci_get_gpu_dev);
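/*
 * Usage sketch (illustrative only, not part of the upstream file): given
 * an emulated NPU device, resolve the real GPU behind it. The helper name
 * is hypothetical; the error handling mirrors the NULL returns above.
 */
static inline struct pci_dev *example_npu_to_gpu(struct pci_dev *npdev)
{
	struct pci_dev *gpdev = pnv_pci_get_gpu_dev(npdev);

	if (!gpdev)
		pr_debug("npu %s has no linked GPU\n", pci_name(npdev));

	return gpdev;
}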
/* Given the real PCI device get a linked NPU device. */
struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
{
	struct device_node *dn;
	struct pci_dev *npdev;

	if (WARN_ON(!gpdev))
		return NULL;

	/* Not all PCI devices have device-tree nodes */
	if (!gpdev->dev.of_node)
		return NULL;

	/* Get the associated NPU device */
	dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
	if (!dn)
		return NULL;

	npdev = get_pci_dev(dn);
	of_node_put(dn);

	return npdev;
}
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
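/*
 * Usage sketch (illustrative only): a GPU may be linked to several
 * emulated NPU devices, one per NVLink. They can be walked by index until
 * the lookup fails, which is the pattern pnv_npu_try_dma_set_bypass()
 * below relies on. The helper name is hypothetical.
 */
static inline void example_for_each_npu_link(struct pci_dev *gpdev)
{
	struct pci_dev *npdev;
	int i;

	for (i = 0; (npdev = pnv_pci_get_npu_dev(gpdev, i)); ++i)
		pr_debug("NVLink %d: %s\n", i, pci_name(npdev));
}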
/*
 * Returns the PE associated with the PCI device of the given
 * NPU. Also returns the linked GPU pci device in *gpdev if gpdev != NULL.
 */
static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
						  struct pci_dev **gpdev)
{
	struct pnv_phb *phb;
	struct pci_controller *hose;
	struct pci_dev *pdev;
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	pdev = pnv_pci_get_gpu_dev(npe->pdev);
	if (!pdev)
		return NULL;

	pdn = pci_get_pdn(pdev);
	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
		return NULL;

	hose = pci_bus_to_host(pdev->bus);
	phb = hose->private_data;
	pe = &phb->ioda.pe_array[pdn->pe_number];

	if (gpdev)
		*gpdev = pdev;

	return pe;
}
static long pnv_npu_unset_window(struct iommu_table_group *table_group,
		int num);

static long pnv_npu_set_window(struct iommu_table_group *table_group, int num,
		struct iommu_table *tbl)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	const unsigned long size = tbl->it_indirect_levels ?
		tbl->it_level_size : tbl->it_size;
	const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
	const __u64 win_size = tbl->it_size << tbl->it_page_shift;
	int num2 = (num == 0) ? 1 : 0;

	/* The NPU has just one TVE so if there is another table, remove it first */
	if (npe->table_group.tables[num2])
		pnv_npu_unset_window(&npe->table_group, num2);

	pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
			start_addr, start_addr + win_size - 1,
			IOMMU_PAGE_SIZE(tbl));

	rc = opal_pci_map_pe_dma_window(phb->opal_id,
			npe->pe_number,
			npe->pe_number,
			tbl->it_indirect_levels + 1,
			__pa(tbl->it_base),
			size << 3,
			IOMMU_PAGE_SIZE(tbl));
	if (rc) {
		pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	/* Add the table to the list so its TCE cache will get invalidated */
	pnv_pci_link_table_and_group(phb->hose->node, num,
			tbl, &npe->table_group);

	return 0;
}
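/*
 * Worked example (assumed values, for illustration only): a direct 32-bit
 * window with 4K IOMMU pages has it_offset = 0, it_page_shift = 12 and
 * it_size = 1M TCEs, giving start_addr = 0 and
 * win_size = (1 << 20) << 12 = 4GB; OPAL is then passed the table address
 * and the table size in bytes (1M entries * 8 bytes, i.e. size << 3).
 */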
static long pnv_npu_unset_window(struct iommu_table_group *table_group, int num)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;

	if (!npe->table_group.tables[num])
		return 0;

	pe_info(npe, "Removing DMA window\n");

	rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
			npe->pe_number,
			0/* levels */, 0/* table address */,
			0/* table size */, 0/* page size */);
	if (rc) {
		pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
		return rc;
	}
	pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
			&npe->table_group);

	return 0;
}
/*
 * Enables 32 bit DMA on the NPU.
 */
static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
{
	struct pci_dev *gpdev;
	struct pnv_ioda_pe *gpe;
	int64_t rc;

	/*
	 * Find the associated PCI devices and get the dma window
	 * information from there.
	 */
	if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
		return;

	gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (!gpe)
		return;

	rc = pnv_npu_set_window(&npe->table_group, 0,
			gpe->table_group.tables[0]);

	/*
	 * NVLink devices use the same TCE table configuration as
	 * their parent device so drivers shouldn't be doing DMA
	 * operations directly on these devices.
	 */
	set_dma_ops(&npe->pdev->dev, &dma_dummy_ops);
}
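/*
 * Note: with dma_dummy_ops installed, any dma_map_* attempt on the
 * emulated NPU device itself fails, so DMA has to be set up through the
 * real GPU device whose TCE table the NPU shares.
 */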
/*
 * Enables bypass mode on the NPU. The NPU only supports one
 * window per link, so bypass needs to be explicitly enabled or
 * disabled. Unlike on a PHB3, bypass and non-bypass modes can't be
 * active at the same time.
 */
static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
{
	struct pnv_phb *phb = npe->phb;
	int64_t rc = 0;
	phys_addr_t top = memblock_end_of_DRAM();

	if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev)
		return -EINVAL;

	rc = pnv_npu_unset_window(&npe->table_group, 0);
	if (rc != OPAL_SUCCESS)
		return rc;

	/* Enable the bypass window */

	top = roundup_pow_of_two(top);
	dev_info(&npe->pdev->dev, "Enabling bypass for PE %x\n",
			npe->pe_number);
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, top);

	if (rc == OPAL_SUCCESS)
		pnv_pci_ioda2_tce_invalidate_entire(phb, false);

	return rc;
}
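/*
 * Example (assumed value, for illustration): with 48GB of system RAM,
 * memblock_end_of_DRAM() is rounded up to a 64GB bypass window, so the
 * single TVE covers all of memory with a power-of-two size.
 */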
void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
{
	int i;
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	struct pnv_ioda_pe *npe;
	struct pci_dev *npdev;

	for (i = 0; ; ++i) {
		npdev = pnv_pci_get_npu_dev(gpdev, i);

		if (!npdev)
			break;

		pdn = pci_get_pdn(npdev);
		if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
			return;

		phb = pci_bus_to_host(npdev->bus)->private_data;

		/* We only do bypass if it's enabled on the linked device */
		npe = &phb->ioda.pe_array[pdn->pe_number];

		if (bypass) {
			dev_info(&npdev->dev,
					"Using 64-bit DMA iommu bypass\n");
			pnv_npu_dma_set_bypass(npe);
		} else {
			dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
			pnv_npu_dma_set_32(npe);
		}
	}
}
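/*
 * Usage sketch (illustrative): platform code would typically pick the
 * mode from the GPU's DMA mask when the mask is set, e.g.:
 *
 *	pnv_npu_try_dma_set_bypass(gpdev, dma_mask == DMA_BIT_MASK(64));
 */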
#ifdef CONFIG_IOMMU_API
/* Switch ownership from platform code to an external user (e.g. VFIO) */
static void pnv_npu_take_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pnv_phb *phb = npe->phb;
	int64_t rc;
	struct pci_dev *gpdev = NULL;

	/*
	 * Note: the NPU has just a single TVE in the hardware, which means
	 * that while used by the kernel it can have either a 32bit window or
	 * DMA bypass but never both. So we deconfigure the 32bit window only
	 * if it was enabled at the moment of ownership change.
	 */
	if (npe->table_group.tables[0]) {
		pnv_npu_unset_window(&npe->table_group, 0);
		return;
	}

	/* Disable bypass */
	rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
			npe->pe_number, npe->pe_number,
			0 /* bypass base */, 0);
	if (rc) {
		pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
		return;
	}
	pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);

	get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (gpdev)
		pnv_npu2_unmap_lpar_dev(gpdev);
}
static void pnv_npu_release_ownership(struct iommu_table_group *table_group)
{
	struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
			table_group);
	struct pci_dev *gpdev = NULL;

	get_gpu_pci_dev_and_pe(npe, &gpdev);
	if (gpdev)
		pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV);
}

static struct iommu_table_group_ops pnv_pci_npu_ops = {
	.set_window = pnv_npu_set_window,
	.unset_window = pnv_npu_unset_window,
	.take_ownership = pnv_npu_take_ownership,
	.release_ownership = pnv_npu_release_ownership,
};
#endif /* CONFIG_IOMMU_API */
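/*
 * Ownership flow sketch (illustrative): when an external user such as
 * VFIO takes the group, take_ownership() tears down whichever of the
 * kernel's 32bit window or bypass setup is active; when the group is
 * returned, release_ownership() re-establishes the kernel LPAR mapping
 * with MSR_DR | MSR_PR | MSR_HV.
 */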
/* Maximum possible number of ATSD MMIO registers per NPU */
#define NV_NMMU_ATSD_REGS	8
#define NV_NPU_MAX_PE_NUM	16

/*
 * A compound NPU IOMMU group which might consist of 1 GPU + 2xNPUs (POWER8) or
 * up to 3 x (GPU + 2xNPUs) (POWER9).
 */
struct npu_comp {
	struct iommu_table_group table_group;
	int pe_num;
	struct pnv_ioda_pe *pe[NV_NPU_MAX_PE_NUM];
};

/* An NPU descriptor, valid for POWER9 only */
struct npu {
	int index;
	struct npu_comp npucomp;
};
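/*
 * Layout sketch (as described above): on POWER9 a single struct npu hangs
 * off the PHB and its embedded npucomp aggregates up to NV_NPU_MAX_PE_NUM
 * PEs (GPU PEs plus NVLink bridge PEs) behind one iommu_table_group, so
 * the peers_* helpers below can apply every window change to all of them.
 */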
#ifdef CONFIG_IOMMU_API
static long pnv_npu_peers_create_table_userspace(
		struct iommu_table_group *table_group,
		int num, __u32 page_shift, __u64 window_size, __u32 levels,
		struct iommu_table **ptbl)
{
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	if (!npucomp->pe_num || !npucomp->pe[0] ||
			!npucomp->pe[0]->table_group.ops ||
			!npucomp->pe[0]->table_group.ops->create_table)
		return -EFAULT;

	return npucomp->pe[0]->table_group.ops->create_table(
			&npucomp->pe[0]->table_group, num, page_shift,
			window_size, levels, ptbl);
}
static long pnv_npu_peers_set_window(struct iommu_table_group *table_group,
		int num, struct iommu_table *tbl)
{
	int i, j;
	long ret = 0;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops->set_window)
			continue;

		ret = pe->table_group.ops->set_window(&pe->table_group,
				num, tbl);
		if (ret)
			break;
	}

	if (ret) {
		for (j = 0; j < i; ++j) {
			struct pnv_ioda_pe *pe = npucomp->pe[j];

			if (!pe->table_group.ops->unset_window)
				continue;

			ret = pe->table_group.ops->unset_window(
					&pe->table_group, num);
			if (ret)
				break;
		}
	} else {
		table_group->tables[num] = iommu_tce_table_get(tbl);
	}

	return ret;
}
static long pnv_npu_peers_unset_window(struct iommu_table_group *table_group,
		int num)
{
	int i, j;
	long ret = 0;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		WARN_ON(npucomp->table_group.tables[num] !=
				table_group->tables[num]);
		if (!npucomp->table_group.tables[num])
			continue;

		if (!pe->table_group.ops->unset_window)
			continue;

		ret = pe->table_group.ops->unset_window(&pe->table_group, num);
		if (ret)
			break;
	}

	if (ret) {
		for (j = 0; j < i; ++j) {
			struct pnv_ioda_pe *pe = npucomp->pe[j];

			if (!npucomp->table_group.tables[num])
				continue;

			if (!pe->table_group.ops->set_window)
				continue;

			ret = pe->table_group.ops->set_window(&pe->table_group,
					num, table_group->tables[num]);
			if (ret)
				break;
		}
	} else if (table_group->tables[num]) {
		iommu_tce_table_put(table_group->tables[num]);
		table_group->tables[num] = NULL;
	}

	return ret;
}
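/*
 * Note on the two helpers above: a window change is attempted on every
 * peer PE and, if one of them fails, the already-changed peers are rolled
 * back, so the compound group either switches windows on all peers or is
 * left unchanged; a failure during the rollback itself is simply returned
 * to the caller.
 */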
static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group)
{
	int i;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops->take_ownership)
			continue;
		pe->table_group.ops->take_ownership(&pe->table_group);
	}
}

static void pnv_npu_peers_release_ownership(
		struct iommu_table_group *table_group)
{
	int i;
	struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
			table_group);

	for (i = 0; i < npucomp->pe_num; ++i) {
		struct pnv_ioda_pe *pe = npucomp->pe[i];

		if (!pe->table_group.ops->release_ownership)
			continue;
		pe->table_group.ops->release_ownership(&pe->table_group);
	}
}
static struct iommu_table_group_ops pnv_npu_peers_ops = {
	.get_table_size = pnv_pci_ioda2_get_table_size,
	.create_table = pnv_npu_peers_create_table_userspace,
	.set_window = pnv_npu_peers_set_window,
	.unset_window = pnv_npu_peers_unset_window,
	.take_ownership = pnv_npu_peers_take_ownership,
	.release_ownership = pnv_npu_peers_release_ownership,
};

static void pnv_comp_attach_table_group(struct npu_comp *npucomp,
		struct pnv_ioda_pe *pe)
{
	if (WARN_ON(npucomp->pe_num == NV_NPU_MAX_PE_NUM))
		return;

	npucomp->pe[npucomp->pe_num] = pe;
	++npucomp->pe_num;
}
struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
{
	struct iommu_table_group *table_group;
	struct npu_comp *npucomp;
	struct pci_dev *gpdev = NULL;
	struct pci_controller *hose;
	struct pci_dev *npdev = NULL;

	list_for_each_entry(gpdev, &pe->pbus->devices, bus_list) {
		npdev = pnv_pci_get_npu_dev(gpdev, 0);
		if (npdev)
			break;
	}

	if (!npdev)
		/* It is not an NPU attached device, skip */
		return NULL;

	hose = pci_bus_to_host(npdev->bus);

	if (hose->npu) {
		table_group = &hose->npu->npucomp.table_group;

		if (!table_group->group) {
			table_group->ops = &pnv_npu_peers_ops;
			iommu_register_group(table_group,
					hose->global_number, pe->pe_number);
		}
	} else {
		/* Create a group for 1 GPU and attached NPUs for POWER8 */
		pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
		if (!pe->npucomp)
			return NULL;

		table_group = &pe->npucomp->table_group;
		table_group->ops = &pnv_npu_peers_ops;
		iommu_register_group(table_group, hose->global_number,
				pe->pe_number);
	}

	/* Steal capabilities from a GPU PE */
	table_group->max_dynamic_windows_supported =
		pe->table_group.max_dynamic_windows_supported;
	table_group->tce32_start = pe->table_group.tce32_start;
	table_group->tce32_size = pe->table_group.tce32_size;
	table_group->max_levels = pe->table_group.max_levels;
	if (!table_group->pgsizes)
		table_group->pgsizes = pe->table_group.pgsizes;

	npucomp = container_of(table_group, struct npu_comp, table_group);
	pnv_comp_attach_table_group(npucomp, pe);

	return table_group;
}
struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
{
	struct iommu_table_group *table_group;
	struct npu_comp *npucomp;
	struct pci_dev *gpdev = NULL;
	struct pci_dev *npdev;
	struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(pe, &gpdev);

	WARN_ON(!(pe->flags & PNV_IODA_PE_DEV));
	if (!gpe)
		return NULL;

	/*
	 * IODA2 bridges get this set up from pci_controller_ops::setup_bridge
	 * but NPU bridges do not have this hook defined so we do it here.
	 * We do not set up other table group parameters as they won't be used
	 * anyway - NVLink bridges are subordinate PEs.
	 */
	pe->table_group.ops = &pnv_pci_npu_ops;

	table_group = iommu_group_get_iommudata(
			iommu_group_get(&gpdev->dev));

	/*
	 * On P9 the NPU PHB and the PCI PHB support different page sizes;
	 * keep only the matching ones. We expect here that the NVLink bridge
	 * PE pgsizes have been initialized by the caller.
	 */
	table_group->pgsizes &= pe->table_group.pgsizes;
	npucomp = container_of(table_group, struct npu_comp, table_group);
	pnv_comp_attach_table_group(npucomp, pe);

	list_for_each_entry(npdev, &pe->phb->hose->bus->devices, bus_list) {
		struct pci_dev *gpdevtmp = pnv_pci_get_gpu_dev(npdev);

		if (gpdevtmp != gpdev)
			continue;

		iommu_add_device(table_group, &npdev->dev);
	}

	return table_group;
}
#endif /* CONFIG_IOMMU_API */
int pnv_npu2_init(struct pci_controller *hose)
{
	static int npu_index;
	struct npu *npu;
	int ret;

	npu = kzalloc(sizeof(*npu), GFP_KERNEL);
	if (!npu)
		return -ENOMEM;

	npu_index++;
	if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
		ret = -ENOSPC;
		goto fail_exit;
	}
	npu->index = npu_index;
	hose->npu = npu;

	return 0;

fail_exit:
	kfree(npu);
	return ret;
}
int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
		unsigned long msr)
{
	int ret;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct pci_controller *hose;
	struct pnv_phb *nphb;

	if (!npdev)
		return -ENODEV;

	hose = pci_bus_to_host(npdev->bus);
	nphb = hose->private_data;

	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n",
			nphb->opal_id, lparid);
	/*
	 * Currently we only support radix, and a non-zero LPCR only makes
	 * sense for hash tables, so skiboot expects the LPCR parameter to
	 * be zero.
	 */
	ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid,
				0 /* LPCR bits */);
	if (ret) {
		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
		return ret;
	}

	dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
			nphb->opal_id, msr);
	ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr,
				    pci_dev_id(gpdev));
	if (ret < 0)
		dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
	else
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(pnv_npu2_map_lpar_dev);
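/*
 * Usage sketch (illustrative): pnv_npu2_map_lpar_dev() and
 * pnv_npu2_unmap_lpar_dev() are used in pairs around ownership changes,
 * e.g. mapping the device into LPAR 0 with the kernel MSR bits and
 * unmapping it again when the group is handed to VFIO:
 *
 *	pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV);
 *	...
 *	pnv_npu2_unmap_lpar_dev(gpdev);
 */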
void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr)
{
	struct pci_dev *gpdev;

	list_for_each_entry(gpdev, &gpe->pbus->devices, bus_list)
		pnv_npu2_map_lpar_dev(gpdev, 0, msr);
}
int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
{
	int ret;
	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
	struct pci_controller *hose;
	struct pnv_phb *nphb;

	if (!npdev)
		return -ENODEV;

	hose = pci_bus_to_host(npdev->bus);
	nphb = hose->private_data;

	dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
			nphb->opal_id);
	ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/,
				       pci_dev_id(gpdev));
	if (ret < 0) {
		dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret);
		return ret;
	}

	/* Set LPID to 0 anyway, just to be safe */
	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id);
	ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/,
				0 /* LPCR bits */);
	if (ret)
		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(pnv_npu2_unmap_lpar_dev);