kernel: bump 4.9 to 4.9.175
[oweals/openwrt.git] target/linux/layerscape/patches-4.9/818-vfio-support-layerscape.patch
1 From 954edeee88305fecefe3f681e67a298f06e27974 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 17 Jan 2018 15:48:47 +0800
4 Subject: [PATCH 30/30] vfio: support layerscape
5
6 This is an integrated patch for layerscape vfio support.
7
8 Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
9 Signed-off-by: Eric Auger <eric.auger@redhat.com>
10 Signed-off-by: Robin Murphy <robin.murphy@arm.com>
11 Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
12 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
13 ---
14  drivers/vfio/Kconfig                      |   1 +
15  drivers/vfio/Makefile                     |   1 +
16  drivers/vfio/fsl-mc/Kconfig               |   9 +
17  drivers/vfio/fsl-mc/Makefile              |   2 +
18  drivers/vfio/fsl-mc/vfio_fsl_mc.c         | 753 ++++++++++++++++++++++++++++++
19  drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c    | 199 ++++++++
20  drivers/vfio/fsl-mc/vfio_fsl_mc_private.h |  55 +++
21  drivers/vfio/vfio_iommu_type1.c           |  39 +-
22  include/uapi/linux/vfio.h                 |   1 +
23  9 files changed, 1058 insertions(+), 2 deletions(-)
24  create mode 100644 drivers/vfio/fsl-mc/Kconfig
25  create mode 100644 drivers/vfio/fsl-mc/Makefile
26  create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c
27  create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
28  create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
29
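A minimal sketch of how userspace might exercise the uAPI this patch adds,
once a device fd for an fsl-mc object has been obtained through the usual
VFIO container/group ioctls (the fd acquisition, the region index and the
error handling are assumptions for illustration, not part of this patch):

  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <linux/vfio.h>

  static int show_fsl_mc_device(int device_fd)
  {
          struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
          struct vfio_region_info reg_info = { .argsz = sizeof(reg_info),
                                               .index = 0 };
          void *mmio;

          if (ioctl(device_fd, VFIO_DEVICE_GET_INFO, &dev_info))
                  return -1;

          /* Flag introduced by this patch for fsl-mc devices */
          if (!(dev_info.flags & VFIO_DEVICE_FLAGS_FSL_MC))
                  return -1;

          printf("fsl-mc device: %u regions, %u irqs\n",
                 dev_info.num_regions, dev_info.num_irqs);

          if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info))
                  return -1;

          /* reg_info.offset encodes the region index (index << 40, see
           * VFIO_FSL_MC_INDEX_TO_OFFSET) and is passed straight to mmap() */
          mmio = mmap(NULL, reg_info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                      device_fd, reg_info.offset);
          if (mmio == MAP_FAILED)
                  return -1;

          munmap(mmio, reg_info.size);
          return 0;
  }

The offset reported for each region encodes the region index (index shifted
left by 40 bits, matching VFIO_FSL_MC_INDEX_TO_OFFSET below), so the same
value is used directly as the mmap offset.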
30 --- a/drivers/vfio/Kconfig
31 +++ b/drivers/vfio/Kconfig
32 @@ -48,4 +48,5 @@ menuconfig VFIO_NOIOMMU
33  
34  source "drivers/vfio/pci/Kconfig"
35  source "drivers/vfio/platform/Kconfig"
36 +source "drivers/vfio/fsl-mc/Kconfig"
37  source "virt/lib/Kconfig"
38 --- a/drivers/vfio/Makefile
39 +++ b/drivers/vfio/Makefile
40 @@ -7,3 +7,4 @@ obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vf
41  obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o
42  obj-$(CONFIG_VFIO_PCI) += pci/
43  obj-$(CONFIG_VFIO_PLATFORM) += platform/
44 +obj-$(CONFIG_VFIO_FSL_MC) += fsl-mc/
45 --- /dev/null
46 +++ b/drivers/vfio/fsl-mc/Kconfig
47 @@ -0,0 +1,9 @@
48 +config VFIO_FSL_MC
49 +       tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices"
50 +       depends on VFIO && FSL_MC_BUS && EVENTFD
51 +       help
52 +         Driver to enable support for the VFIO QorIQ DPAA2 fsl-mc
53 +         (Management Complex) devices. This is required to passthrough
54 +         fsl-mc bus devices using the VFIO framework.
55 +
56 +         If you don't know what to do here, say N.
57 --- /dev/null
58 +++ b/drivers/vfio/fsl-mc/Makefile
59 @@ -0,0 +1,2 @@
60 +vfio-fsl_mc-y := vfio_fsl_mc.o
61 +obj-$(CONFIG_VFIO_FSL_MC) += vfio_fsl_mc.o vfio_fsl_mc_intr.o
62 --- /dev/null
63 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
64 @@ -0,0 +1,753 @@
65 +/*
66 + * Freescale Management Complex (MC) device passthrough using VFIO
67 + *
68 + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
69 + * Copyright 2016-2017 NXP
70 + * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
71 + *
72 + * This file is licensed under the terms of the GNU General Public
73 + * License version 2. This program is licensed "as is" without any
74 + * warranty of any kind, whether express or implied.
75 + */
76 +
77 +#include <linux/device.h>
78 +#include <linux/iommu.h>
79 +#include <linux/module.h>
80 +#include <linux/mutex.h>
81 +#include <linux/slab.h>
82 +#include <linux/types.h>
83 +#include <linux/vfio.h>
84 +#include <linux/delay.h>
85 +
86 +#include "../../staging/fsl-mc/include/mc.h"
87 +#include "../../staging/fsl-mc/include/mc-bus.h"
88 +#include "../../staging/fsl-mc/include/mc-sys.h"
89 +#include "../../staging/fsl-mc/bus/dprc-cmd.h"
90 +
91 +#include "vfio_fsl_mc_private.h"
92 +
93 +#define DRIVER_VERSION "0.10"
94 +#define DRIVER_AUTHOR  "Bharat Bhushan <bharat.bhushan@nxp.com>"
95 +#define DRIVER_DESC    "VFIO for FSL-MC devices - User Level meta-driver"
96 +
97 +static DEFINE_MUTEX(driver_lock);
98 +
99 +/* FSL-MC device regions (address and size) are aligned to 64K.
100 + * However, MC firmware reports a size smaller than 64K for some objects (the
101 + * reported size excludes the reserved space beyond the valid bytes).
102 + * Align the size to PAGE_SIZE so that userspace can mmap it.
103 + */
104 +static size_t aligned_region_size(struct fsl_mc_device *mc_dev, int index)
105 +{
106 +       size_t size;
107 +
108 +       size = resource_size(&mc_dev->regions[index]);
109 +       return PAGE_ALIGN(size);
110 +}
111 +
112 +static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
113 +{
114 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
115 +       int count = mc_dev->obj_desc.region_count;
116 +       int i;
117 +
118 +       vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
119 +                               GFP_KERNEL);
120 +       if (!vdev->regions)
121 +               return -ENOMEM;
122 +
123 +       for (i = 0; i < mc_dev->obj_desc.region_count; i++) {
124 +               vdev->regions[i].addr = mc_dev->regions[i].start;
125 +               vdev->regions[i].size = aligned_region_size(mc_dev, i);
126 +               vdev->regions[i].type = VFIO_FSL_MC_REGION_TYPE_MMIO;
127 +               if (mc_dev->regions[i].flags & IORESOURCE_CACHEABLE)
128 +                       vdev->regions[i].type |=
129 +                                       VFIO_FSL_MC_REGION_TYPE_CACHEABLE;
130 +               vdev->regions[i].flags = VFIO_REGION_INFO_FLAG_MMAP;
131 +               vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
132 +               if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
133 +                       vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
134 +       }
135 +
136 +       vdev->num_regions = mc_dev->obj_desc.region_count;
137 +       return 0;
138 +}
139 +
140 +static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
141 +{
142 +       int i;
143 +
144 +       for (i = 0; i < vdev->num_regions; i++)
145 +               iounmap(vdev->regions[i].ioaddr);
146 +
147 +       vdev->num_regions = 0;
148 +       kfree(vdev->regions);
149 +}
150 +
151 +static int vfio_fsl_mc_open(void *device_data)
152 +{
153 +       struct vfio_fsl_mc_device *vdev = device_data;
154 +       int ret;
155 +
156 +       if (!try_module_get(THIS_MODULE))
157 +               return -ENODEV;
158 +
159 +       mutex_lock(&driver_lock);
160 +       if (!vdev->refcnt) {
161 +               ret = vfio_fsl_mc_regions_init(vdev);
162 +               if (ret)
163 +                       goto error_region_init;
164 +
165 +               ret = vfio_fsl_mc_irqs_init(vdev);
166 +               if (ret)
167 +                       goto error_irq_init;
168 +       }
169 +
170 +       vdev->refcnt++;
171 +       mutex_unlock(&driver_lock);
172 +       return 0;
173 +
174 +error_irq_init:
175 +       vfio_fsl_mc_regions_cleanup(vdev);
176 +error_region_init:
177 +       mutex_unlock(&driver_lock);
178 +       if (ret)
179 +               module_put(THIS_MODULE);
180 +
181 +       return ret;
182 +}
183 +
184 +static void vfio_fsl_mc_release(void *device_data)
185 +{
186 +       struct vfio_fsl_mc_device *vdev = device_data;
187 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
188 +
189 +       mutex_lock(&driver_lock);
190 +
191 +       if (!(--vdev->refcnt)) {
192 +               vfio_fsl_mc_regions_cleanup(vdev);
193 +               vfio_fsl_mc_irqs_cleanup(vdev);
194 +       }
195 +
196 +       if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
197 +               dprc_reset_container(mc_dev->mc_io, 0, mc_dev->mc_handle,
198 +                                    mc_dev->obj_desc.id);
199 +
200 +       mutex_unlock(&driver_lock);
201 +
202 +       module_put(THIS_MODULE);
203 +}
204 +
205 +static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
206 +                             unsigned long arg)
207 +{
208 +       struct vfio_fsl_mc_device *vdev = device_data;
209 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
210 +       unsigned long minsz;
211 +
212 +       if (WARN_ON(!mc_dev))
213 +               return -ENODEV;
214 +
215 +       switch (cmd) {
216 +       case VFIO_DEVICE_GET_INFO:
217 +       {
218 +               struct vfio_device_info info;
219 +
220 +               minsz = offsetofend(struct vfio_device_info, num_irqs);
221 +
222 +               if (copy_from_user(&info, (void __user *)arg, minsz))
223 +                       return -EFAULT;
224 +
225 +               if (info.argsz < minsz)
226 +                       return -EINVAL;
227 +
228 +               info.flags = VFIO_DEVICE_FLAGS_FSL_MC;
229 +               info.num_regions = mc_dev->obj_desc.region_count;
230 +               info.num_irqs = mc_dev->obj_desc.irq_count;
231 +
232 +               return copy_to_user((void __user *)arg, &info, minsz);
233 +       }
234 +       case VFIO_DEVICE_GET_REGION_INFO:
235 +       {
236 +               struct vfio_region_info info;
237 +
238 +               minsz = offsetofend(struct vfio_region_info, offset);
239 +
240 +               if (copy_from_user(&info, (void __user *)arg, minsz))
241 +                       return -EFAULT;
242 +
243 +               if (info.argsz < minsz)
244 +                       return -EINVAL;
245 +
246 +               if (info.index >= vdev->num_regions)
247 +                       return -EINVAL;
248 +
249 +               /* encode the region index into the mmap offset */
250 +               info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
251 +               info.size = vdev->regions[info.index].size;
252 +               info.flags = vdev->regions[info.index].flags;
253 +
254 +               return copy_to_user((void __user *)arg, &info, minsz);
255 +       }
256 +       case VFIO_DEVICE_GET_IRQ_INFO:
257 +       {
258 +               struct vfio_irq_info info;
259 +
260 +               minsz = offsetofend(struct vfio_irq_info, count);
261 +               if (copy_from_user(&info, (void __user *)arg, minsz))
262 +                       return -EFAULT;
263 +
264 +               if (info.argsz < minsz)
265 +                       return -EINVAL;
266 +
267 +               if (info.index >= mc_dev->obj_desc.irq_count)
268 +                       return -EINVAL;
269 +
270 +               if (vdev->mc_irqs != NULL) {
271 +                       info.flags = vdev->mc_irqs[info.index].flags;
272 +                       info.count = vdev->mc_irqs[info.index].count;
273 +               } else {
274 +                       /*
275 +                        * If the IRQs are not initialized, they cannot be
276 +                        * configured or used by user-space.
277 +                        */
278 +                       info.flags = 0;
279 +                       info.count = 0;
280 +               }
281 +
282 +               return copy_to_user((void __user *)arg, &info, minsz);
283 +       }
284 +       case VFIO_DEVICE_SET_IRQS:
285 +       {
286 +               struct vfio_irq_set hdr;
287 +               u8 *data = NULL;
288 +               int ret = 0;
289 +
290 +               minsz = offsetofend(struct vfio_irq_set, count);
291 +
292 +               if (copy_from_user(&hdr, (void __user *)arg, minsz))
293 +                       return -EFAULT;
294 +
295 +               if (hdr.argsz < minsz)
296 +                       return -EINVAL;
297 +
298 +               if (hdr.index >= mc_dev->obj_desc.irq_count)
299 +                       return -EINVAL;
300 +
301 +               if (hdr.start != 0 || hdr.count > 1)
302 +                       return -EINVAL;
303 +
304 +               if (hdr.count == 0 &&
305 +                   (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE) ||
306 +                   !(hdr.flags & VFIO_IRQ_SET_ACTION_TRIGGER)))
307 +                       return -EINVAL;
308 +
309 +               if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
310 +                                 VFIO_IRQ_SET_ACTION_TYPE_MASK))
311 +                       return -EINVAL;
312 +
313 +               if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
314 +                       size_t size;
315 +
316 +                       if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
317 +                               size = sizeof(uint8_t);
318 +                       else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
319 +                               size = sizeof(int32_t);
320 +                       else
321 +                               return -EINVAL;
322 +
323 +                       if (hdr.argsz - minsz < hdr.count * size)
324 +                               return -EINVAL;
325 +
326 +                       data = memdup_user((void __user *)(arg + minsz),
327 +                                          hdr.count * size);
328 +                       if (IS_ERR(data))
329 +                               return PTR_ERR(data);
330 +               }
331 +
332 +               ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
333 +                                                hdr.index, hdr.start,
334 +                                                hdr.count, data);
335 +               return ret;
336 +       }
337 +       case VFIO_DEVICE_RESET:
338 +       {
339 +               return -EINVAL;
340 +       }
341 +       default:
342 +               return -EINVAL;
343 +       }
344 +}
345 +
346 +static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
347 +                               size_t count, loff_t *ppos)
348 +{
349 +       struct vfio_fsl_mc_device *vdev = device_data;
350 +       unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
351 +       loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
352 +       struct vfio_fsl_mc_region *region;
353 +       uint64_t data[8];
354 +       int i;
355 +
356 +       /* Read is supported only for the DPRC device */
357 +       if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
358 +               return -EINVAL;
359 +
360 +       if (index >= vdev->num_regions)
361 +               return -EINVAL;
362 +
363 +       region = &vdev->regions[index];
364 +
365 +       if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
366 +               return -EINVAL;
367 +
368 +       if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
369 +               return -EINVAL;
370 +
371 +       if (!region->ioaddr) {
372 +               region->ioaddr = ioremap_nocache(region->addr, region->size);
373 +               if (!region->ioaddr)
374 +                       return -ENOMEM;
375 +       }
376 +
377 +       if (count != 64 || off != 0)
378 +               return -EINVAL;
379 +
380 +       for (i = 7; i >= 0; i--)
381 +               data[i] = readq(region->ioaddr + i * sizeof(uint64_t));
382 +
383 +       if (copy_to_user(buf, data, 64))
384 +               return -EFAULT;
385 +
386 +       return count;
387 +}
388 +
389 +#define MC_CMD_COMPLETION_TIMEOUT_MS   5000
390 +#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS    500
391 +
392 +static int vfio_fsl_mc_dprc_wait_for_response(void __iomem *ioaddr)
393 +{
394 +       enum mc_cmd_status status;
395 +       unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
396 +
397 +       for (;;) {
398 +               u64 header;
399 +               struct mc_cmd_header *resp_hdr;
400 +
401 +               __iormb();
402 +               header = readq(ioaddr);
403 +               __iormb();
404 +
405 +               resp_hdr = (struct mc_cmd_header *)&header;
406 +               status = (enum mc_cmd_status)resp_hdr->status;
407 +               if (status != MC_CMD_STATUS_READY)
408 +                       break;
409 +
410 +               udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
411 +               timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
412 +               if (timeout_usecs == 0)
413 +                       return -ETIMEDOUT;
414 +       }
415 +
416 +       return 0;
417 +}
418 +
419 +static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
420 +{
421 +       int i;
422 +
423 +       /* Write the command header (at offset 0) last to trigger execution */
424 +       for (i = 7; i >= 0; i--)
425 +               writeq(cmd_data[i], ioaddr + i * sizeof(uint64_t));
426 +
427 +       /* Wait for the response before returning to user-space.
428 +        * This could be optimized in the future to also prepare the
429 +        * response before returning, avoiding the subsequent read().
430 +        */
431 +       return vfio_fsl_mc_dprc_wait_for_response(ioaddr);
432 +}
433 +
434 +static int vfio_handle_dprc_commands(void __iomem *ioaddr, uint64_t *cmd_data)
435 +{
436 +       uint64_t cmd_hdr = cmd_data[0];
437 +       int cmd = (cmd_hdr >> 52) & 0xfff;
438 +
439 +       switch (cmd) {
440 +       case DPRC_CMDID_OPEN:
441 +       default:
442 +               return vfio_fsl_mc_send_command(ioaddr, cmd_data);
443 +       }
444 +
445 +       return 0;
446 +}
447 +
448 +static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
449 +                                size_t count, loff_t *ppos)
450 +{
451 +       struct vfio_fsl_mc_device *vdev = device_data;
452 +       unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
453 +       loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
454 +       struct vfio_fsl_mc_region *region;
455 +       uint64_t data[8];
456 +       int ret;
457 +
458 +       /* Write is supported only for the DPRC device */
459 +       if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
460 +               return -EINVAL;
461 +
462 +       if (index >= vdev->num_regions)
463 +               return -EINVAL;
464 +
465 +       region = &vdev->regions[index];
466 +
467 +       if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
468 +               return -EINVAL;
469 +
470 +       if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
471 +               return -EINVAL;
472 +
473 +       if (!region->ioaddr) {
474 +               region->ioaddr = ioremap_nocache(region->addr, region->size);
475 +               if (!region->ioaddr)
476 +                       return -ENOMEM;
477 +       }
478 +
479 +       if (count != 64 || off != 0)
480 +               return -EINVAL;
481 +
482 +       if (copy_from_user(&data, buf, 64))
483 +               return -EFAULT;
484 +
485 +       ret = vfio_handle_dprc_commands(region->ioaddr, data);
486 +       if (ret)
487 +               return ret;
488 +
489 +       return count;
490 +}
491 +
492 +static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
493 +                                struct vm_area_struct *vma)
494 +{
495 +       u64 size = vma->vm_end - vma->vm_start;
496 +       u64 pgoff, base;
497 +
498 +       pgoff = vma->vm_pgoff &
499 +               ((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
500 +       base = pgoff << PAGE_SHIFT;
501 +
502 +       if (region.size < PAGE_SIZE || base + size > region.size)
503 +               return -EINVAL;
504 +       /*
505 +        * Set the REGION_TYPE_CACHEABLE (QBman CENA regs) to be the
506 +        * cache inhibited area of the portal to avoid coherency issues
507 +        * if a user migrates to another core.
508 +        */
509 +       if (region.type & VFIO_FSL_MC_REGION_TYPE_CACHEABLE)
510 +               vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);
511 +       else
512 +               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
513 +
514 +       vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
515 +
516 +       return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
517 +                              size, vma->vm_page_prot);
518 +}
519 +
520 +/* Allows mmapping fsl-mc device regions in the assigned DPRC */
521 +static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
522 +{
523 +       struct vfio_fsl_mc_device *vdev = device_data;
524 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
525 +       unsigned long size, addr;
526 +       int index;
527 +
528 +       index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
529 +
530 +       if (vma->vm_end < vma->vm_start)
531 +               return -EINVAL;
532 +       if (vma->vm_start & ~PAGE_MASK)
533 +               return -EINVAL;
534 +       if (vma->vm_end & ~PAGE_MASK)
535 +               return -EINVAL;
536 +       if (!(vma->vm_flags & VM_SHARED))
537 +               return -EINVAL;
538 +       if (index >= vdev->num_regions)
539 +               return -EINVAL;
540 +
541 +       if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
542 +               return -EINVAL;
543 +
544 +       if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
545 +                       && (vma->vm_flags & VM_READ))
546 +               return -EINVAL;
547 +
548 +       if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
549 +                       && (vma->vm_flags & VM_WRITE))
550 +               return -EINVAL;
551 +
552 +       addr = vdev->regions[index].addr;
553 +       size = vdev->regions[index].size;
554 +
555 +       vma->vm_private_data = mc_dev;
556 +
557 +       if (vdev->regions[index].type & VFIO_FSL_MC_REGION_TYPE_MMIO)
558 +               return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
559 +
560 +       return -EFAULT;
561 +}
562 +
563 +static const struct vfio_device_ops vfio_fsl_mc_ops = {
564 +       .name           = "vfio-fsl-mc",
565 +       .open           = vfio_fsl_mc_open,
566 +       .release        = vfio_fsl_mc_release,
567 +       .ioctl          = vfio_fsl_mc_ioctl,
568 +       .read           = vfio_fsl_mc_read,
569 +       .write          = vfio_fsl_mc_write,
570 +       .mmap           = vfio_fsl_mc_mmap,
571 +};
572 +
573 +static int vfio_fsl_mc_initialize_dprc(struct vfio_fsl_mc_device *vdev)
574 +{
575 +       struct device *root_dprc_dev;
576 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
577 +       struct device *dev = &mc_dev->dev;
578 +       struct fsl_mc_bus *mc_bus;
579 +       struct irq_domain *mc_msi_domain;
580 +       unsigned int irq_count;
581 +       int ret;
582 +
583 +       /* device must be DPRC */
584 +       if (strcmp(mc_dev->obj_desc.type, "dprc"))
585 +               return -EINVAL;
586 +
587 +       /* mc_io must be uninitialized */
588 +       WARN_ON(mc_dev->mc_io);
589 +
590 +       /* allocate a portal from the root DPRC for vfio use */
591 +       fsl_mc_get_root_dprc(dev, &root_dprc_dev);
592 +       if (WARN_ON(!root_dprc_dev))
593 +               return -EINVAL;
594 +
595 +       ret = fsl_mc_portal_allocate(to_fsl_mc_device(root_dprc_dev),
596 +                                    FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
597 +                                    &mc_dev->mc_io);
598 +       if (ret < 0)
599 +               goto clean_msi_domain;
600 +
601 +       /* Reset the MC portal before moving on */
602 +       ret = fsl_mc_portal_reset(mc_dev->mc_io);
603 +       if (ret < 0) {
604 +               dev_err(dev, "dprc portal reset failed: error = %d\n", ret);
605 +               goto free_mc_portal;
606 +       }
607 +
608 +       /* MSI domain set up */
609 +       ret = fsl_mc_find_msi_domain(root_dprc_dev->parent, &mc_msi_domain);
610 +       if (ret < 0)
611 +               goto free_mc_portal;
612 +
613 +       dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
614 +
615 +       ret = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
616 +                       &mc_dev->mc_handle);
617 +       if (ret) {
618 +               dev_err(dev, "dprc_open() failed: error = %d\n", ret);
619 +               goto free_mc_portal;
620 +       }
621 +
622 +       /* Initialize resource pool */
623 +       fsl_mc_init_all_resource_pools(mc_dev);
624 +
625 +       mc_bus = to_fsl_mc_bus(mc_dev);
626 +
627 +       if (!mc_bus->irq_resources) {
628 +               irq_count = FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS;
629 +               ret = fsl_mc_populate_irq_pool(mc_bus, irq_count);
630 +               if (ret < 0) {
631 +                       dev_err(dev, "%s: Failed to init irq-pool\n", __func__);
632 +                       goto clean_resource_pool;
633 +               }
634 +       }
635 +
636 +       mutex_init(&mc_bus->scan_mutex);
637 +
638 +       mutex_lock(&mc_bus->scan_mutex);
639 +       ret = dprc_scan_objects(mc_dev, mc_dev->driver_override,
640 +                               &irq_count);
641 +       mutex_unlock(&mc_bus->scan_mutex);
642 +       if (ret) {
643 +               dev_err(dev, "dprc_scan_objects() fails (%d)\n", ret);
644 +               goto clean_irq_pool;
645 +       }
646 +
647 +       if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
648 +               dev_warn(&mc_dev->dev,
649 +                        "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
650 +                        irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
651 +       }
652 +
653 +       return 0;
654 +
655 +clean_irq_pool:
656 +       fsl_mc_cleanup_irq_pool(mc_bus);
657 +
658 +clean_resource_pool:
659 +       fsl_mc_cleanup_all_resource_pools(mc_dev);
660 +       dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
661 +
662 +free_mc_portal:
663 +       fsl_mc_portal_free(mc_dev->mc_io);
664 +
665 +clean_msi_domain:
666 +       dev_set_msi_domain(&mc_dev->dev, NULL);
667 +
668 +       return ret;
669 +}
670 +
671 +static int vfio_fsl_mc_device_remove(struct device *dev, void *data)
672 +{
673 +       struct fsl_mc_device *mc_dev;
674 +
675 +       WARN_ON(dev == NULL);
676 +
677 +       mc_dev = to_fsl_mc_device(dev);
678 +       if (WARN_ON(mc_dev == NULL))
679 +               return -ENODEV;
680 +
681 +       fsl_mc_device_remove(mc_dev);
682 +       return 0;
683 +}
684 +
685 +static void vfio_fsl_mc_cleanup_dprc(struct vfio_fsl_mc_device *vdev)
686 +{
687 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
688 +       struct fsl_mc_bus *mc_bus;
689 +
690 +       /* device must be DPRC */
691 +       if (strcmp(mc_dev->obj_desc.type, "dprc"))
692 +               return;
693 +
694 +       device_for_each_child(&mc_dev->dev, NULL, vfio_fsl_mc_device_remove);
695 +
696 +       mc_bus = to_fsl_mc_bus(mc_dev);
697 +       if (dev_get_msi_domain(&mc_dev->dev))
698 +               fsl_mc_cleanup_irq_pool(mc_bus);
699 +
700 +       dev_set_msi_domain(&mc_dev->dev, NULL);
701 +
702 +       fsl_mc_cleanup_all_resource_pools(mc_dev);
703 +       dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
704 +       fsl_mc_portal_free(mc_dev->mc_io);
705 +}
706 +
707 +static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
708 +{
709 +       struct iommu_group *group;
710 +       struct vfio_fsl_mc_device *vdev;
711 +       struct device *dev = &mc_dev->dev;
712 +       int ret;
713 +
714 +       group = vfio_iommu_group_get(dev);
715 +       if (!group) {
716 +               dev_err(dev, "%s: VFIO: No IOMMU group\n", __func__);
717 +               return -EINVAL;
718 +       }
719 +
720 +       vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
721 +       if (!vdev) {
722 +               vfio_iommu_group_put(group, dev);
723 +               return -ENOMEM;
724 +       }
725 +
726 +       vdev->mc_dev = mc_dev;
727 +
728 +       ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
729 +       if (ret) {
730 +               dev_err(dev, "%s: Failed to add to vfio group\n", __func__);
731 +               goto free_vfio_device;
732 +       }
733 +
734 +       /* Scan the DPRC container and bind its children to the vfio driver */
735 +       if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
736 +               ret = vfio_fsl_mc_initialize_dprc(vdev);
737 +               if (ret) {
738 +                       vfio_del_group_dev(dev);
739 +                       goto free_vfio_device;
740 +               }
741 +       } else {
742 +               struct fsl_mc_device *mc_bus_dev;
743 +
744 +               /* Non-dprc devices share mc_io from the parent dprc */
745 +               mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
746 +               if (mc_bus_dev == NULL) {
747 +                       vfio_del_group_dev(dev);
748 +                       goto free_vfio_device;
749 +               }
750 +
751 +               mc_dev->mc_io = mc_bus_dev->mc_io;
752 +
753 +               /* Inherit parent MSI domain */
754 +               dev_set_msi_domain(&mc_dev->dev,
755 +                                  dev_get_msi_domain(mc_dev->dev.parent));
756 +       }
757 +       return 0;
758 +
759 +free_vfio_device:
760 +       kfree(vdev);
761 +       vfio_iommu_group_put(group, dev);
762 +       return ret;
763 +}
764 +
765 +static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
766 +{
767 +       struct vfio_fsl_mc_device *vdev;
768 +       struct device *dev = &mc_dev->dev;
769 +
770 +       vdev = vfio_del_group_dev(dev);
771 +       if (!vdev)
772 +               return -EINVAL;
773 +
774 +       if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
775 +               vfio_fsl_mc_cleanup_dprc(vdev);
776 +       else
777 +               dev_set_msi_domain(&mc_dev->dev, NULL);
778 +
779 +       mc_dev->mc_io = NULL;
780 +
781 +       vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
782 +       kfree(vdev);
783 +
784 +       return 0;
785 +}
786 +
787 +/*
788 + * vfio-fsl-mc is a meta-driver, so match_id_table is NULL and the
789 + * driver_override interface is used to bind fsl-mc devices to it.
790 + */
791 +static struct fsl_mc_driver vfio_fsl_mc_driver = {
792 +       .probe          = vfio_fsl_mc_probe,
793 +       .remove         = vfio_fsl_mc_remove,
794 +       .match_id_table = NULL,
795 +       .driver = {
796 +               .name   = "vfio-fsl-mc",
797 +               .owner  = THIS_MODULE,
798 +       },
799 +};
800 +
801 +static int __init vfio_fsl_mc_driver_init(void)
802 +{
803 +       return fsl_mc_driver_register(&vfio_fsl_mc_driver);
804 +}
805 +
806 +static void __exit vfio_fsl_mc_driver_exit(void)
807 +{
808 +       fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
809 +}
810 +
811 +module_init(vfio_fsl_mc_driver_init);
812 +module_exit(vfio_fsl_mc_driver_exit);
813 +
814 +MODULE_VERSION(DRIVER_VERSION);
815 +MODULE_LICENSE("GPL v2");
816 +MODULE_AUTHOR(DRIVER_AUTHOR);
817 +MODULE_DESCRIPTION(DRIVER_DESC);
818 --- /dev/null
819 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
820 @@ -0,0 +1,199 @@
821 +/*
822 + * Freescale Management Complex (MC) device passthrough using VFIO
823 + *
824 + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
825 + * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
826 + *
827 + * This file is licensed under the terms of the GNU General Public
828 + * License version 2. This program is licensed "as is" without any
829 + * warranty of any kind, whether express or implied.
830 + */
831 +
832 +#include <linux/vfio.h>
833 +#include <linux/slab.h>
834 +#include <linux/types.h>
835 +#include <linux/eventfd.h>
836 +#include <linux/msi.h>
837 +
838 +#include "../../staging/fsl-mc/include/mc.h"
839 +#include "vfio_fsl_mc_private.h"
840 +
841 +static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
842 +{
843 +       struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
844 +
845 +       eventfd_signal(mc_irq->trigger, 1);
846 +       return IRQ_HANDLED;
847 +}
848 +
849 +static int vfio_fsl_mc_irq_mask(struct vfio_fsl_mc_device *vdev,
850 +                               unsigned int index, unsigned int start,
851 +                               unsigned int count, uint32_t flags,
852 +                               void *data)
853 +{
854 +       return -EINVAL;
855 +}
856 +
857 +static int vfio_fsl_mc_irq_unmask(struct vfio_fsl_mc_device *vdev,
858 +                               unsigned int index, unsigned int start,
859 +                               unsigned int count, uint32_t flags,
860 +                               void *data)
861 +{
862 +       return -EINVAL;
863 +}
864 +
865 +static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev,
866 +                           int index, int fd)
867 +{
868 +       struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
869 +       struct eventfd_ctx *trigger;
870 +       int hwirq;
871 +       int ret;
872 +
873 +       hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
874 +       if (irq->trigger) {
875 +               free_irq(hwirq, irq);
876 +               kfree(irq->name);
877 +               eventfd_ctx_put(irq->trigger);
878 +               irq->trigger = NULL;
879 +       }
880 +
881 +       if (fd < 0) /* Disable only */
882 +               return 0;
883 +
884 +       irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
885 +                                       hwirq, dev_name(&vdev->mc_dev->dev));
886 +       if (!irq->name)
887 +               return -ENOMEM;
888 +
889 +       trigger = eventfd_ctx_fdget(fd);
890 +       if (IS_ERR(trigger)) {
891 +               kfree(irq->name);
892 +               return PTR_ERR(trigger);
893 +       }
894 +
895 +       irq->trigger = trigger;
896 +
897 +       ret = request_irq(hwirq, vfio_fsl_mc_irq_handler, 0,
898 +                         irq->name, irq);
899 +       if (ret) {
900 +               kfree(irq->name);
901 +               eventfd_ctx_put(trigger);
902 +               irq->trigger = NULL;
903 +               return ret;
904 +       }
905 +
906 +       return 0;
907 +}
908 +
909 +int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev)
910 +{
911 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
912 +       struct vfio_fsl_mc_irq *mc_irq;
913 +       int irq_count;
914 +       int ret, i;
915 +
916 +       /* Device does not support any interrupt */
917 +       if (mc_dev->obj_desc.irq_count == 0)
918 +               return 0;
919 +
920 +       irq_count = mc_dev->obj_desc.irq_count;
921 +
922 +       mc_irq = kcalloc(irq_count, sizeof(*mc_irq), GFP_KERNEL);
923 +       if (mc_irq == NULL)
924 +               return -ENOMEM;
925 +
926 +       /* Allocate IRQs */
927 +       ret = fsl_mc_allocate_irqs(mc_dev);
928 +       if  (ret) {
929 +               kfree(mc_irq);
930 +               return ret;
931 +       }
932 +
933 +       for (i = 0; i < irq_count; i++) {
934 +               mc_irq[i].count = 1;
935 +               mc_irq[i].flags = VFIO_IRQ_INFO_EVENTFD;
936 +       }
937 +
938 +       vdev->mc_irqs = mc_irq;
939 +
940 +       return 0;
941 +}
942 +
943 +/* Free all IRQs for the given MC object */
944 +void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev)
945 +{
946 +       struct fsl_mc_device *mc_dev = vdev->mc_dev;
947 +       int irq_count = mc_dev->obj_desc.irq_count;
948 +       int i;
949 +
950 +       /* Device does not support any interrupt */
951 +       if (mc_dev->obj_desc.irq_count == 0)
952 +               return;
953 +
954 +       for (i = 0; i < irq_count; i++)
955 +               vfio_set_trigger(vdev, i, -1);
956 +
957 +       fsl_mc_free_irqs(mc_dev);
958 +       kfree(vdev->mc_irqs);
959 +}
960 +
961 +static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
962 +                                      unsigned int index, unsigned int start,
963 +                                      unsigned int count, uint32_t flags,
964 +                                      void *data)
965 +{
966 +       struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
967 +       int hwirq;
968 +
969 +       if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
970 +               return vfio_set_trigger(vdev, index, -1);
971 +
972 +       if (start != 0 || count != 1)
973 +               return -EINVAL;
974 +
975 +       if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
976 +               int32_t fd = *(int32_t *)data;
977 +
978 +               return vfio_set_trigger(vdev, index, fd);
979 +       }
980 +
981 +       hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
982 +
983 +       if (flags & VFIO_IRQ_SET_DATA_NONE) {
984 +               vfio_fsl_mc_irq_handler(hwirq, irq);
985 +
986 +       } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
987 +               uint8_t trigger = *(uint8_t *)data;
988 +
989 +               if (trigger)
990 +                       vfio_fsl_mc_irq_handler(hwirq, irq);
991 +       }
992 +
993 +       return 0;
994 +}
995 +
996 +int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
997 +                              uint32_t flags, unsigned int index,
998 +                              unsigned int start, unsigned int count,
999 +                              void *data)
1000 +{
1001 +       int ret = -ENOTTY;
1002 +
1003 +       switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1004 +       case VFIO_IRQ_SET_ACTION_MASK:
1005 +               ret = vfio_fsl_mc_irq_mask(vdev, index, start, count,
1006 +                                          flags, data);
1007 +               break;
1008 +       case VFIO_IRQ_SET_ACTION_UNMASK:
1009 +               ret = vfio_fsl_mc_irq_unmask(vdev, index, start, count,
1010 +                                            flags, data);
1011 +               break;
1012 +       case VFIO_IRQ_SET_ACTION_TRIGGER:
1013 +               ret = vfio_fsl_mc_set_irq_trigger(vdev, index, start,
1014 +                                                 count, flags, data);
1015 +               break;
1016 +       }
1017 +
1018 +       return ret;
1019 +}
1020 --- /dev/null
1021 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
1022 @@ -0,0 +1,55 @@
1023 +/*
1024 + * Freescale Management Complex VFIO private declarations
1025 + *
1026 + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
1027 + * Copyright 2016 NXP
1028 + * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
1029 + *
1030 + * This file is licensed under the terms of the GNU General Public
1031 + * License version 2. This program is licensed "as is" without any
1032 + * warranty of any kind, whether express or implied.
1033 + */
1034 +
1035 +#ifndef VFIO_FSL_MC_PRIVATE_H
1036 +#define VFIO_FSL_MC_PRIVATE_H
1037 +
1038 +#define VFIO_FSL_MC_OFFSET_SHIFT    40
1039 +#define VFIO_FSL_MC_OFFSET_MASK (((u64)(1) << VFIO_FSL_MC_OFFSET_SHIFT) - 1)
1040 +
1041 +#define VFIO_FSL_MC_OFFSET_TO_INDEX(off) (off >> VFIO_FSL_MC_OFFSET_SHIFT)
1042 +
1043 +#define VFIO_FSL_MC_INDEX_TO_OFFSET(index)     \
1044 +       ((u64)(index) << VFIO_FSL_MC_OFFSET_SHIFT)
1045 +
1046 +struct vfio_fsl_mc_irq {
1047 +       u32                     flags;
1048 +       u32                     count;
1049 +       struct eventfd_ctx      *trigger;
1050 +       char                    *name;
1051 +};
1052 +
1053 +struct vfio_fsl_mc_region {
1054 +       u32                     flags;
1055 +#define VFIO_FSL_MC_REGION_TYPE_MMIO  1
1056 +#define VFIO_FSL_MC_REGION_TYPE_CACHEABLE  2
1057 +       u32                     type;
1058 +       u64                     addr;
1059 +       resource_size_t         size;
1060 +       void __iomem            *ioaddr;
1061 +};
1062 +
1063 +struct vfio_fsl_mc_device {
1064 +       struct fsl_mc_device            *mc_dev;
1065 +       int                             refcnt;
1066 +       u32                             num_regions;
1067 +       struct vfio_fsl_mc_region       *regions;
1068 +       struct vfio_fsl_mc_irq          *mc_irqs;
1069 +};
1070 +
1071 +int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev);
1072 +void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev);
1073 +int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
1074 +                              uint32_t flags, unsigned int index,
1075 +                              unsigned int start, unsigned int count,
1076 +                              void *data);
1077 +#endif /* VFIO_FSL_MC_PRIVATE_H */
1078 --- a/drivers/vfio/vfio_iommu_type1.c
1079 +++ b/drivers/vfio/vfio_iommu_type1.c
1080 @@ -36,6 +36,8 @@
1081  #include <linux/uaccess.h>
1082  #include <linux/vfio.h>
1083  #include <linux/workqueue.h>
1084 +#include <linux/dma-iommu.h>
1085 +#include <linux/irqdomain.h>
1086  
1087  #define DRIVER_VERSION  "0.2"
1088  #define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
1089 @@ -733,6 +735,27 @@ static void vfio_test_domain_fgsp(struct
1090         __free_pages(pages, order);
1091  }
1092  
1093 +static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
1094 +{
1095 +       struct list_head group_resv_regions;
1096 +       struct iommu_resv_region *region, *next;
1097 +       bool ret = false;
1098 +
1099 +       INIT_LIST_HEAD(&group_resv_regions);
1100 +       iommu_get_group_resv_regions(group, &group_resv_regions);
1101 +       list_for_each_entry(region, &group_resv_regions, list) {
1102 +               if (region->type == IOMMU_RESV_SW_MSI) {
1103 +                       *base = region->start;
1104 +                       ret = true;
1105 +                       goto out;
1106 +               }
1107 +       }
1108 +out:
1109 +       list_for_each_entry_safe(region, next, &group_resv_regions, list)
1110 +               kfree(region);
1111 +       return ret;
1112 +}
1113 +
1114  static int vfio_iommu_type1_attach_group(void *iommu_data,
1115                                          struct iommu_group *iommu_group)
1116  {
1117 @@ -741,6 +764,8 @@ static int vfio_iommu_type1_attach_group
1118         struct vfio_domain *domain, *d;
1119         struct bus_type *bus = NULL;
1120         int ret;
1121 +       bool resv_msi, msi_remap;
1122 +       phys_addr_t resv_msi_base;
1123  
1124         mutex_lock(&iommu->lock);
1125  
1126 @@ -787,11 +812,15 @@ static int vfio_iommu_type1_attach_group
1127         if (ret)
1128                 goto out_domain;
1129  
1130 +       resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
1131 +
1132         INIT_LIST_HEAD(&domain->group_list);
1133         list_add(&group->next, &domain->group_list);
1134  
1135 -       if (!allow_unsafe_interrupts &&
1136 -           !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
1137 +       msi_remap = resv_msi ? irq_domain_check_msi_remap() :
1138 +                               iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
1139 +
1140 +       if (!allow_unsafe_interrupts && !msi_remap) {
1141                 pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
1142                        __func__);
1143                 ret = -EPERM;
1144 @@ -833,6 +862,12 @@ static int vfio_iommu_type1_attach_group
1145         if (ret)
1146                 goto out_detach;
1147  
1148 +       if (resv_msi) {
1149 +               ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
1150 +               if (ret)
1151 +                       goto out_detach;
1152 +       }
1153 +
1154         list_add(&domain->next, &iommu->domain_list);
1155  
1156         mutex_unlock(&iommu->lock);
1157 --- a/include/uapi/linux/vfio.h
1158 +++ b/include/uapi/linux/vfio.h
1159 @@ -198,6 +198,7 @@ struct vfio_device_info {
1160  #define VFIO_DEVICE_FLAGS_PCI  (1 << 1)        /* vfio-pci device */
1161  #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)    /* vfio-platform device */
1162  #define VFIO_DEVICE_FLAGS_AMBA  (1 << 3)       /* vfio-amba device */
1163 +#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 5)      /* vfio-fsl-mc device */
1164         __u32   num_regions;    /* Max region index + 1 */
1165         __u32   num_irqs;       /* Max IRQ index + 1 */
1166  };