2 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/module.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/netdevice.h>
39 #include <linux/inetdevice.h>
40 #include <linux/rtnetlink.h>
41 #include <linux/if_vlan.h>
43 #include <net/addrconf.h>
45 #include <rdma/ib_smi.h>
46 #include <rdma/ib_user_verbs.h>
47 #include <rdma/ib_addr.h>
49 #include <linux/mlx4/driver.h>
50 #include <linux/mlx4/cmd.h>
51 #include <linux/mlx4/qp.h>
56 #define DRV_NAME MLX4_IB_DRV_NAME
57 #define DRV_VERSION "2.2-1"
58 #define DRV_RELDATE "Feb 2014"
60 #define MLX4_IB_FLOW_MAX_PRIO 0xFFF
61 #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
63 MODULE_AUTHOR("Roland Dreier");
64 MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
65 MODULE_LICENSE("Dual BSD/GPL");
66 MODULE_VERSION(DRV_VERSION);
68 int mlx4_ib_sm_guid_assign = 1;
69 module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
70 MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 1)");
72 static const char mlx4_ib_version[] =
73 DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
74 DRV_VERSION " (" DRV_RELDATE ")\n";
76 struct update_gid_work {
77 struct work_struct work;
78 union ib_gid gids[128];
79 struct mlx4_ib_dev *dev;
83 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
85 static struct workqueue_struct *wq;
87 static void init_query_mad(struct ib_smp *mad)
89 mad->base_version = 1;
90 mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
91 mad->class_version = 1;
92 mad->method = IB_MGMT_METHOD_GET;
95 static union ib_gid zgid;
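/*
 * Device-managed flow steering (DMFS) is usable only when the core driver
 * reports MLX4_STEERING_MODE_DEVICE_MANAGED and FS_EN is set; with IB ports
 * it additionally requires the DMFS_IPOIB capability, and it is not
 * supported for IB ports in a multifunction (SR-IOV) configuration.
 */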
97 static int check_flow_steering_support(struct mlx4_dev *dev)
99 int eth_num_ports = 0;
100 int ib_num_ports = 0;
102 int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;
106 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
108 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
110 dmfs &= (!ib_num_ports ||
111 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
113 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
114 if (ib_num_ports && mlx4_is_mfunc(dev)) {
115 pr_warn("Device managed flow steering is unavailable for IB ports in multifunction env.\n");
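/*
 * Fill struct ib_device_attr by combining the NodeInfo MAD response
 * (vendor id, hw revision, system image GUID) with the capabilities and
 * resource quotas reported by the mlx4 core driver.
 */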
122 static int mlx4_ib_query_device(struct ib_device *ibdev,
123 struct ib_device_attr *props)
125 struct mlx4_ib_dev *dev = to_mdev(ibdev);
126 struct ib_smp *in_mad = NULL;
127 struct ib_smp *out_mad = NULL;
130 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
131 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
132 if (!in_mad || !out_mad)
135 init_query_mad(in_mad);
136 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
138 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
139 1, NULL, NULL, in_mad, out_mad);
143 memset(props, 0, sizeof *props);
145 props->fw_ver = dev->dev->caps.fw_ver;
146 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
147 IB_DEVICE_PORT_ACTIVE_EVENT |
148 IB_DEVICE_SYS_IMAGE_GUID |
149 IB_DEVICE_RC_RNR_NAK_GEN |
150 IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
151 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
152 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
153 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
154 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
155 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
156 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
157 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
158 props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
159 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
160 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
161 if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
162 props->device_cap_flags |= IB_DEVICE_UD_TSO;
163 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
164 props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
165 if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
166 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
167 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
168 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
169 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
170 props->device_cap_flags |= IB_DEVICE_XRC;
171 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
172 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
173 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
174 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
175 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
177 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
178 if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
179 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
182 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
184 props->vendor_part_id = dev->dev->pdev->device;
185 props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
186 memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
188 props->max_mr_size = ~0ull;
189 props->page_size_cap = dev->dev->caps.page_size_cap;
190 props->max_qp = dev->dev->quotas.qp;
191 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
192 props->max_sge = min(dev->dev->caps.max_sq_sg,
193 dev->dev->caps.max_rq_sg);
194 props->max_cq = dev->dev->quotas.cq;
195 props->max_cqe = dev->dev->caps.max_cqes;
196 props->max_mr = dev->dev->quotas.mpt;
197 props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
198 props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
199 props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
200 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
201 props->max_srq = dev->dev->quotas.srq;
202 props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
203 props->max_srq_sge = dev->dev->caps.max_srq_sge;
204 props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
205 props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
206 props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
207 IB_ATOMIC_HCA : IB_ATOMIC_NONE;
208 props->masked_atomic_cap = props->atomic_cap;
209 props->max_pkeys = dev->dev->caps.pkey_table_len[1];
210 props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
211 props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
212 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
213 props->max_mcast_grp;
214 props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
215 props->max_ah = INT_MAX;
224 static enum rdma_link_layer
225 mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
227 struct mlx4_dev *dev = to_mdev(device)->dev;
229 return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
230 IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
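/*
 * Query an IB link port: read the PortInfo MAD and, when extended speeds
 * are supported or QDR is reported, the extended port info to distinguish
 * FDR/EDR/FDR-10. netw_view selects the network (SM) view instead of the
 * host view on multifunction devices.
 */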
233 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
234 struct ib_port_attr *props, int netw_view)
236 struct ib_smp *in_mad = NULL;
237 struct ib_smp *out_mad = NULL;
238 int ext_active_speed;
239 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
242 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
243 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
244 if (!in_mad || !out_mad)
247 init_query_mad(in_mad);
248 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
249 in_mad->attr_mod = cpu_to_be32(port);
251 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
252 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
254 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
260 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
261 props->lmc = out_mad->data[34] & 0x7;
262 props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
263 props->sm_sl = out_mad->data[36] & 0xf;
264 props->state = out_mad->data[32] & 0xf;
265 props->phys_state = out_mad->data[33] >> 4;
266 props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
268 props->gid_tbl_len = out_mad->data[50];
270 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
271 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
272 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
273 props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
274 props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
275 props->active_width = out_mad->data[31] & 0xf;
276 props->active_speed = out_mad->data[35] >> 4;
277 props->max_mtu = out_mad->data[41] & 0xf;
278 props->active_mtu = out_mad->data[36] >> 4;
279 props->subnet_timeout = out_mad->data[51] & 0x1f;
280 props->max_vl_num = out_mad->data[37] >> 4;
281 props->init_type_reply = out_mad->data[41] >> 4;
283 /* Check if extended speeds (EDR/FDR/...) are supported */
284 if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
285 ext_active_speed = out_mad->data[62] >> 4;
287 switch (ext_active_speed) {
289 props->active_speed = IB_SPEED_FDR;
292 props->active_speed = IB_SPEED_EDR;
297 /* If reported active speed is QDR, check if it is FDR-10 */
298 if (props->active_speed == IB_SPEED_QDR) {
299 init_query_mad(in_mad);
300 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
301 in_mad->attr_mod = cpu_to_be32(port);
303 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
304 NULL, NULL, in_mad, out_mad);
308 /* Checking LinkSpeedActive for FDR-10 */
309 if (out_mad->data[15] & 0x1)
310 props->active_speed = IB_SPEED_FDR10;
313 /* Avoid wrong speed value returned by FW if the IB link is down. */
314 if (props->state == IB_PORT_DOWN)
315 props->active_speed = IB_SPEED_SDR;
323 static u8 state_to_phys_state(enum ib_port_state state)
325 return state == IB_PORT_ACTIVE ? 5 : 3;
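/*
 * Query a RoCE (Ethernet link) port: width and speed come from the
 * QUERY_PORT firmware command, while state, MTU and phys_state are
 * derived from the associated net_device, if one is registered.
 */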
328 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
329 struct ib_port_attr *props, int netw_view)
332 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
333 struct mlx4_ib_iboe *iboe = &mdev->iboe;
334 struct net_device *ndev;
336 struct mlx4_cmd_mailbox *mailbox;
339 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
341 return PTR_ERR(mailbox);
343 err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
344 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
349 props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ||
350 (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
351 IB_WIDTH_4X : IB_WIDTH_1X;
352 props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
353 IB_SPEED_FDR : IB_SPEED_QDR;
354 props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
355 props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
356 props->max_msg_sz = mdev->dev->caps.max_msg_sz;
357 props->pkey_tbl_len = 1;
358 props->max_mtu = IB_MTU_4096;
359 props->max_vl_num = 2;
360 props->state = IB_PORT_DOWN;
361 props->phys_state = state_to_phys_state(props->state);
362 props->active_mtu = IB_MTU_256;
363 spin_lock(&iboe->lock);
364 ndev = iboe->netdevs[port - 1];
368 tmp = iboe_get_mtu(ndev->mtu);
369 props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
371 props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
372 IB_PORT_ACTIVE : IB_PORT_DOWN;
373 props->phys_state = state_to_phys_state(props->state);
375 spin_unlock(&iboe->lock);
377 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
381 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
382 struct ib_port_attr *props, int netw_view)
386 memset(props, 0, sizeof *props);
388 err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
389 ib_link_query_port(ibdev, port, props, netw_view) :
390 eth_link_query_port(ibdev, port, props, netw_view);
395 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
396 struct ib_port_attr *props)
398 /* returns host view */
399 return __mlx4_ib_query_port(ibdev, port, props, 0);
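/*
 * Read a GID for an IB port: the subnet prefix comes from PortInfo and
 * the GUID part from the GuidInfo MAD. In the multifunction host view,
 * only index 0 carries a valid alias GUID; higher indexes return a zero
 * GUID.
 */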
402 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
403 union ib_gid *gid, int netw_view)
405 struct ib_smp *in_mad = NULL;
406 struct ib_smp *out_mad = NULL;
408 struct mlx4_ib_dev *dev = to_mdev(ibdev);
410 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
412 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
413 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
414 if (!in_mad || !out_mad)
417 init_query_mad(in_mad);
418 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
419 in_mad->attr_mod = cpu_to_be32(port);
421 if (mlx4_is_mfunc(dev->dev) && netw_view)
422 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
424 err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
428 memcpy(gid->raw, out_mad->data + 8, 8);
430 if (mlx4_is_mfunc(dev->dev) && !netw_view) {
432 /* For any index > 0, return the null guid */
439 init_query_mad(in_mad);
440 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
441 in_mad->attr_mod = cpu_to_be32(index / 8);
443 err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
444 NULL, NULL, in_mad, out_mad);
448 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
452 memset(gid->raw + 8, 0, 8);
458 static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
461 struct mlx4_ib_dev *dev = to_mdev(ibdev);
463 *gid = dev->iboe.gid_table[port - 1][index];
468 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
471 if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
472 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
474 return iboe_query_gid(ibdev, port, index, gid);
477 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
478 u16 *pkey, int netw_view)
480 struct ib_smp *in_mad = NULL;
481 struct ib_smp *out_mad = NULL;
482 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
485 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
486 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
487 if (!in_mad || !out_mad)
490 init_query_mad(in_mad);
491 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
492 in_mad->attr_mod = cpu_to_be32(index / 32);
494 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
495 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
497 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
502 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
510 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
512 return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
515 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
516 struct ib_device_modify *props)
518 struct mlx4_cmd_mailbox *mailbox;
521 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
524 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
527 if (mlx4_is_slave(to_mdev(ibdev)->dev))
530 spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
531 memcpy(ibdev->node_desc, props->node_desc, 64);
532 spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
535 * If possible, pass the node desc to FW so that it can generate
536 * a NodeDescriptionChanged (144) trap. If the command fails, just ignore it.
538 mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
542 memcpy(mailbox->buf, props->node_desc, 64);
543 mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
544 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
546 mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
551 static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
554 struct mlx4_cmd_mailbox *mailbox;
557 mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
559 return PTR_ERR(mailbox);
561 if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
562 *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
563 ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
565 ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
566 ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
569 err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
570 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
572 mlx4_free_cmd_mailbox(dev->dev, mailbox);
576 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
577 struct ib_port_modify *props)
579 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
580 u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
581 struct ib_port_attr attr;
585 /* return OK if this is RoCE. CM calls ib_modify_port() regardless
586 * of whether port link layer is ETH or IB. For ETH ports, qkey
587 * violations and port capabilities are not meaningful.
592 mutex_lock(&mdev->cap_mask_mutex);
594 err = mlx4_ib_query_port(ibdev, port, &attr);
598 cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
599 ~props->clr_port_cap_mask;
601 err = mlx4_ib_SET_PORT(mdev, port,
602 !!(mask & IB_PORT_RESET_QKEY_CNTR),
606 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
610 static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
611 struct ib_udata *udata)
613 struct mlx4_ib_dev *dev = to_mdev(ibdev);
614 struct mlx4_ib_ucontext *context;
615 struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
616 struct mlx4_ib_alloc_ucontext_resp resp;
620 return ERR_PTR(-EAGAIN);
622 if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
623 resp_v3.qp_tab_size = dev->dev->caps.num_qps;
624 resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
625 resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
627 resp.dev_caps = dev->dev->caps.userspace_caps;
628 resp.qp_tab_size = dev->dev->caps.num_qps;
629 resp.bf_reg_size = dev->dev->caps.bf_reg_size;
630 resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
631 resp.cqe_size = dev->dev->caps.cqe_size;
634 context = kmalloc(sizeof *context, GFP_KERNEL);
636 return ERR_PTR(-ENOMEM);
638 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
644 INIT_LIST_HEAD(&context->db_page_list);
645 mutex_init(&context->db_page_mutex);
647 if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
648 err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
650 err = ib_copy_to_udata(udata, &resp, sizeof(resp));
653 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
655 return ERR_PTR(-EFAULT);
658 return &context->ibucontext;
661 static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
663 struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
665 mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
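/*
 * Map user pages: offset 0 maps the context's UAR (doorbell) page as
 * non-cached, offset 1 maps the matching blue flame page as
 * write-combining when blue flame is supported.
 */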
671 static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
673 struct mlx4_ib_dev *dev = to_mdev(context->device);
675 if (vma->vm_end - vma->vm_start != PAGE_SIZE)
678 if (vma->vm_pgoff == 0) {
679 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
681 if (io_remap_pfn_range(vma, vma->vm_start,
682 to_mucontext(context)->uar.pfn,
683 PAGE_SIZE, vma->vm_page_prot))
685 } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
686 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
688 if (io_remap_pfn_range(vma, vma->vm_start,
689 to_mucontext(context)->uar.pfn +
690 dev->dev->caps.num_uars,
691 PAGE_SIZE, vma->vm_page_prot))
699 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
700 struct ib_ucontext *context,
701 struct ib_udata *udata)
703 struct mlx4_ib_pd *pd;
706 pd = kmalloc(sizeof *pd, GFP_KERNEL);
708 return ERR_PTR(-ENOMEM);
710 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
717 if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
718 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
720 return ERR_PTR(-EFAULT);
726 static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
728 mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
734 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
735 struct ib_ucontext *context,
736 struct ib_udata *udata)
738 struct mlx4_ib_xrcd *xrcd;
741 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
742 return ERR_PTR(-ENOSYS);
744 xrcd = kmalloc(sizeof *xrcd, GFP_KERNEL);
746 return ERR_PTR(-ENOMEM);
748 err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
752 xrcd->pd = ib_alloc_pd(ibdev);
753 if (IS_ERR(xrcd->pd)) {
754 err = PTR_ERR(xrcd->pd);
758 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, 1, 0);
759 if (IS_ERR(xrcd->cq)) {
760 err = PTR_ERR(xrcd->cq);
764 return &xrcd->ibxrcd;
767 ib_dealloc_pd(xrcd->pd);
769 mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
775 static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
777 ib_destroy_cq(to_mxrcd(xrcd)->cq);
778 ib_dealloc_pd(to_mxrcd(xrcd)->pd);
779 mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
785 static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
787 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
788 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
789 struct mlx4_ib_gid_entry *ge;
791 ge = kzalloc(sizeof *ge, GFP_KERNEL);
796 if (mlx4_ib_add_mc(mdev, mqp, gid)) {
797 ge->port = mqp->port;
801 mutex_lock(&mqp->mutex);
802 list_add_tail(&ge->list, &mqp->gid_list);
803 mutex_unlock(&mqp->mutex);
808 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
811 struct net_device *ndev;
817 spin_lock(&mdev->iboe.lock);
818 ndev = mdev->iboe.netdevs[mqp->port - 1];
821 spin_unlock(&mdev->iboe.lock);
831 struct mlx4_ib_steering {
832 struct list_head list;
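/*
 * Translate a single ib_flow_spec (ETH/IB/IPV4/TCP/UDP) into the hardware
 * _rule_hw layout and return the rule size in bytes, or an error if the
 * spec type is not supported by this device.
 */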
837 static int parse_flow_attr(struct mlx4_dev *dev,
839 union ib_flow_spec *ib_spec,
840 struct _rule_hw *mlx4_spec)
842 enum mlx4_net_trans_rule_id type;
844 switch (ib_spec->type) {
845 case IB_FLOW_SPEC_ETH:
846 type = MLX4_NET_TRANS_RULE_ID_ETH;
847 memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
849 memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
851 mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
852 mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
854 case IB_FLOW_SPEC_IB:
855 type = MLX4_NET_TRANS_RULE_ID_IB;
856 mlx4_spec->ib.l3_qpn =
858 mlx4_spec->ib.qpn_mask =
859 cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
863 case IB_FLOW_SPEC_IPV4:
864 type = MLX4_NET_TRANS_RULE_ID_IPV4;
865 mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
866 mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
867 mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
868 mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
871 case IB_FLOW_SPEC_TCP:
872 case IB_FLOW_SPEC_UDP:
873 type = ib_spec->type == IB_FLOW_SPEC_TCP ?
874 MLX4_NET_TRANS_RULE_ID_TCP :
875 MLX4_NET_TRANS_RULE_ID_UDP;
876 mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
877 mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
878 mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
879 mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
885 if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
886 mlx4_hw_rule_sz(dev, type) < 0)
888 mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
889 mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
890 return mlx4_hw_rule_sz(dev, type);
893 struct default_rules {
894 __u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
895 __u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
896 __u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
899 static const struct default_rules default_table[] = {
901 .mandatory_fields = {IB_FLOW_SPEC_IPV4},
902 .mandatory_not_fields = {IB_FLOW_SPEC_ETH},
903 .rules_create_list = {IB_FLOW_SPEC_IB},
904 .link_layer = IB_LINK_LAYER_INFINIBAND
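/*
 * Match the flow attribute against default_table: the entry must be for
 * the port's link layer, every mandatory_fields entry must appear in the
 * (sorted) specs, and none of mandatory_not_fields may appear. Returns
 * the index of the matching table entry, or a negative value otherwise.
 */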
908 static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
909 struct ib_flow_attr *flow_attr)
913 const struct default_rules *pdefault_rules = default_table;
914 u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
916 for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++,
918 __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
919 memset(&field_types, 0, sizeof(field_types));
921 if (link_layer != pdefault_rules->link_layer)
924 ib_flow = flow_attr + 1;
925 /* we assume the specs are sorted */
926 for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS &&
927 j < flow_attr->num_of_specs; k++) {
928 union ib_flow_spec *current_flow =
929 (union ib_flow_spec *)ib_flow;
931 /* same layer but different type */
932 if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) ==
933 (pdefault_rules->mandatory_fields[k] &
934 IB_FLOW_SPEC_LAYER_MASK)) &&
935 (current_flow->type !=
936 pdefault_rules->mandatory_fields[k]))
939 /* same layer, try match next one */
940 if (current_flow->type ==
941 pdefault_rules->mandatory_fields[k]) {
944 ((union ib_flow_spec *)ib_flow)->size;
948 ib_flow = flow_attr + 1;
949 for (j = 0; j < flow_attr->num_of_specs;
950 j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size)
951 for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++)
952 /* same layer and same type */
953 if (((union ib_flow_spec *)ib_flow)->type ==
954 pdefault_rules->mandatory_not_fields[k])
963 static int __mlx4_ib_create_default_rules(
964 struct mlx4_ib_dev *mdev,
966 const struct default_rules *pdefault_rules,
967 struct _rule_hw *mlx4_spec) {
971 for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/
972 sizeof(pdefault_rules->rules_create_list[0]); i++) {
974 union ib_flow_spec ib_spec;
975 switch (pdefault_rules->rules_create_list[i]) {
979 case IB_FLOW_SPEC_IB:
980 ib_spec.type = IB_FLOW_SPEC_IB;
981 ib_spec.size = sizeof(struct ib_flow_spec_ib);
988 /* We must put an empty rule here; the qpn is ignored */
989 ret = parse_flow_attr(mdev->dev, 0, &ib_spec,
992 pr_info("invalid parsing\n");
996 mlx4_spec = (void *)mlx4_spec + ret;
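/*
 * Build a flow steering rule in a command mailbox: a
 * mlx4_net_trans_rule_hw_ctrl header (priority, domain, port, qpn)
 * followed by any matching default rules and the parsed user specs, then
 * attach it with MLX4_QP_FLOW_STEERING_ATTACH. On success the returned
 * registration id is kept for a later detach.
 */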
1002 static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
1004 enum mlx4_net_trans_promisc_mode flow_type,
1010 struct mlx4_ib_dev *mdev = to_mdev(qp->device);
1011 struct mlx4_cmd_mailbox *mailbox;
1012 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
1015 static const u16 __mlx4_domain[] = {
1016 [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
1017 [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
1018 [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
1019 [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
1022 if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
1025 if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
1026 pr_err("Invalid priority value %d\n", flow_attr->priority);
1030 if (domain >= IB_FLOW_DOMAIN_NUM) {
1031 pr_err("Invalid domain value %d\n", domain);
1035 if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
1038 mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
1039 if (IS_ERR(mailbox))
1040 return PTR_ERR(mailbox);
1041 ctrl = mailbox->buf;
1043 ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
1044 flow_attr->priority);
1045 ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
1046 ctrl->port = flow_attr->port;
1047 ctrl->qpn = cpu_to_be32(qp->qp_num);
1049 ib_flow = flow_attr + 1;
1050 size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
1051 /* Add default flows */
1052 default_flow = __mlx4_ib_default_rules_match(qp, flow_attr);
1053 if (default_flow >= 0) {
1054 ret = __mlx4_ib_create_default_rules(
1055 mdev, qp, default_table + default_flow,
1056 mailbox->buf + size);
1058 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1063 for (i = 0; i < flow_attr->num_of_specs; i++) {
1064 ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
1065 mailbox->buf + size);
1067 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1070 ib_flow += ((union ib_flow_spec *) ib_flow)->size;
1074 ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1075 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1078 pr_err("mcg table is full. Fail to register network rule.\n");
1079 else if (ret == -ENXIO)
1080 pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
1082 pr_err("Invalid argumant. Fail to register network rule.\n");
1084 mlx4_free_cmd_mailbox(mdev->dev, mailbox);
1088 static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1091 err = mlx4_cmd(dev, reg_id, 0, 0,
1092 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1095 pr_err("Fail to detach network rule. registration id = 0x%llx\n",
1100 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1101 struct ib_flow_attr *flow_attr,
1105 struct mlx4_ib_flow *mflow;
1106 enum mlx4_net_trans_promisc_mode type[2];
1108 memset(type, 0, sizeof(type));
1110 mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
1116 switch (flow_attr->type) {
1117 case IB_FLOW_ATTR_NORMAL:
1118 type[0] = MLX4_FS_REGULAR;
1121 case IB_FLOW_ATTR_ALL_DEFAULT:
1122 type[0] = MLX4_FS_ALL_DEFAULT;
1125 case IB_FLOW_ATTR_MC_DEFAULT:
1126 type[0] = MLX4_FS_MC_DEFAULT;
1129 case IB_FLOW_ATTR_SNIFFER:
1130 type[0] = MLX4_FS_UC_SNIFFER;
1131 type[1] = MLX4_FS_MC_SNIFFER;
1139 while (i < ARRAY_SIZE(type) && type[i]) {
1140 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
1147 return &mflow->ibflow;
1151 return ERR_PTR(err);
1154 static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
1158 struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
1159 struct mlx4_ib_flow *mflow = to_mflow(flow_id);
1161 while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
1162 err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
1172 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1175 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1176 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1178 struct mlx4_ib_steering *ib_steering = NULL;
1179 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1181 if (mdev->dev->caps.steering_mode ==
1182 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1183 ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
1188 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
1190 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
1193 pr_err("multicast attach op failed, err %d\n", err);
1197 err = add_gid_entry(ibqp, gid);
1202 memcpy(ib_steering->gid.raw, gid->raw, 16);
1203 ib_steering->reg_id = reg_id;
1204 mutex_lock(&mqp->mutex);
1205 list_add(&ib_steering->list, &mqp->steering_rules);
1206 mutex_unlock(&mqp->mutex);
1211 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1219 static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
1221 struct mlx4_ib_gid_entry *ge;
1222 struct mlx4_ib_gid_entry *tmp;
1223 struct mlx4_ib_gid_entry *ret = NULL;
1225 list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
1226 if (!memcmp(raw, ge->gid.raw, 16)) {
1235 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
1238 struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
1239 struct mlx4_ib_qp *mqp = to_mqp(ibqp);
1240 struct net_device *ndev;
1241 struct mlx4_ib_gid_entry *ge;
1243 enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
1245 if (mdev->dev->caps.steering_mode ==
1246 MLX4_STEERING_MODE_DEVICE_MANAGED) {
1247 struct mlx4_ib_steering *ib_steering;
1249 mutex_lock(&mqp->mutex);
1250 list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
1251 if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
1252 list_del(&ib_steering->list);
1256 mutex_unlock(&mqp->mutex);
1257 if (&ib_steering->list == &mqp->steering_rules) {
1258 pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
1261 reg_id = ib_steering->reg_id;
1265 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
1270 mutex_lock(&mqp->mutex);
1271 ge = find_gid_entry(mqp, gid->raw);
1273 spin_lock(&mdev->iboe.lock);
1274 ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
1277 spin_unlock(&mdev->iboe.lock);
1280 list_del(&ge->list);
1283 pr_warn("could not find mgid entry\n");
1285 mutex_unlock(&mqp->mutex);
1290 static int init_node_data(struct mlx4_ib_dev *dev)
1292 struct ib_smp *in_mad = NULL;
1293 struct ib_smp *out_mad = NULL;
1294 int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
1297 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
1298 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
1299 if (!in_mad || !out_mad)
1302 init_query_mad(in_mad);
1303 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1304 if (mlx4_is_master(dev->dev))
1305 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
1307 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1311 memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1313 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1315 err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
1319 dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
1320 memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
1328 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
1331 struct mlx4_ib_dev *dev =
1332 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1333 return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
1336 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
1339 struct mlx4_ib_dev *dev =
1340 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1341 return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
1342 (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
1343 (int) dev->dev->caps.fw_ver & 0xffff);
1346 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
1349 struct mlx4_ib_dev *dev =
1350 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1351 return sprintf(buf, "%x\n", dev->dev->rev_id);
1354 static ssize_t show_board(struct device *device, struct device_attribute *attr,
1357 struct mlx4_ib_dev *dev =
1358 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
1359 return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
1360 dev->dev->board_id);
1363 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
1364 static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
1365 static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
1366 static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
1368 static struct device_attribute *mlx4_class_attributes[] = {
1375 static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
1376 struct net_device *dev)
1378 memcpy(eui, dev->dev_addr, 3);
1379 memcpy(eui + 5, dev->dev_addr + 3, 3);
1380 if (vlan_id < 0x1000) {
1381 eui[3] = vlan_id >> 8;
1382 eui[4] = vlan_id & 0xff;
1390 static void update_gids_task(struct work_struct *work)
1392 struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
1393 struct mlx4_cmd_mailbox *mailbox;
1396 struct mlx4_dev *dev = gw->dev->dev;
1398 if (!gw->dev->ib_active)
1401 mailbox = mlx4_alloc_cmd_mailbox(dev);
1402 if (IS_ERR(mailbox)) {
1403 pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
1407 gids = mailbox->buf;
1408 memcpy(gids, gw->gids, sizeof gw->gids);
1410 err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1411 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1414 pr_warn("set port command failed\n");
1416 mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
1418 mlx4_free_cmd_mailbox(dev, mailbox);
1422 static void reset_gids_task(struct work_struct *work)
1424 struct update_gid_work *gw =
1425 container_of(work, struct update_gid_work, work);
1426 struct mlx4_cmd_mailbox *mailbox;
1429 struct mlx4_dev *dev = gw->dev->dev;
1431 if (!gw->dev->ib_active)
1434 mailbox = mlx4_alloc_cmd_mailbox(dev);
1435 if (IS_ERR(mailbox)) {
1436 pr_warn("reset gid table failed\n");
1440 gids = mailbox->buf;
1441 memcpy(gids, gw->gids, sizeof(gw->gids));
1443 if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
1444 IB_LINK_LAYER_ETHERNET) {
1445 err = mlx4_cmd(dev, mailbox->dma,
1446 MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
1447 1, MLX4_CMD_SET_PORT,
1448 MLX4_CMD_TIME_CLASS_B,
1451 pr_warn("set port %d command failed\n", gw->port);
1455 mlx4_free_cmd_mailbox(dev, mailbox);
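/*
 * Update the cached RoCE GID table for a port: add the GID to a free slot
 * (or clear it when 'clear' is set) and, when the cache changed, queue an
 * update_gids_task to push the new table to firmware via SET_PORT.
 */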
1460 static int update_gid_table(struct mlx4_ib_dev *dev, int port,
1461 union ib_gid *gid, int clear,
1464 struct update_gid_work *work;
1466 int need_update = 0;
1474 max_gids = dev->dev->caps.gid_table_len[port];
1475 for (i = 1; i < max_gids; ++i) {
1476 if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
1483 dev->iboe.gid_table[port - 1][found] =
1492 !memcmp(&dev->iboe.gid_table[port - 1][i],
1493 &zgid, sizeof(*gid)))
1499 if (found == -1 && !clear && free >= 0) {
1500 dev->iboe.gid_table[port - 1][free] = *gid;
1507 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1511 memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
1512 INIT_WORK(&work->work, update_gids_task);
1515 queue_work(wq, &work->work);
1520 static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
1522 gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
1523 mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
1527 static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
1529 struct update_gid_work *work;
1531 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1535 memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
1536 memset(work->gids, 0, sizeof(work->gids));
1537 INIT_WORK(&work->work, reset_gids_task);
1540 queue_work(wq, &work->work);
1544 static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
1545 struct mlx4_ib_dev *ibdev, union ib_gid *gid)
1547 struct mlx4_ib_iboe *iboe;
1549 struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
1550 rdma_vlan_dev_real_dev(event_netdev) :
1552 union ib_gid default_gid;
1554 mlx4_make_default_gid(real_dev, &default_gid);
1556 if (!memcmp(gid, &default_gid, sizeof(*gid)))
1559 if (event != NETDEV_DOWN && event != NETDEV_UP)
1562 if ((real_dev != event_netdev) &&
1563 (event == NETDEV_DOWN) &&
1564 rdma_link_local_addr((struct in6_addr *)gid))
1567 iboe = &ibdev->iboe;
1568 spin_lock(&iboe->lock);
1570 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
1571 if ((netif_is_bond_master(real_dev) &&
1572 (real_dev == iboe->masters[port - 1])) ||
1573 (!netif_is_bond_master(real_dev) &&
1574 (real_dev == iboe->netdevs[port - 1])))
1575 update_gid_table(ibdev, port, gid,
1576 event == NETDEV_DOWN, 0);
1578 spin_unlock(&iboe->lock);
1583 static u8 mlx4_ib_get_dev_port(struct net_device *dev,
1584 struct mlx4_ib_dev *ibdev)
1587 struct mlx4_ib_iboe *iboe;
1588 struct net_device *real_dev = rdma_vlan_dev_real_dev(dev) ?
1589 rdma_vlan_dev_real_dev(dev) : dev;
1591 iboe = &ibdev->iboe;
1593 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
1594 if ((netif_is_bond_master(real_dev) &&
1595 (real_dev == iboe->masters[port - 1])) ||
1596 (!netif_is_bond_master(real_dev) &&
1597 (real_dev == iboe->netdevs[port - 1])))
1600 if ((port == 0) || (port > ibdev->dev->caps.num_ports))
1606 static int mlx4_ib_inet_event(struct notifier_block *this, unsigned long event,
1609 struct mlx4_ib_dev *ibdev;
1610 struct in_ifaddr *ifa = ptr;
1612 struct net_device *event_netdev = ifa->ifa_dev->dev;
1614 ipv6_addr_set_v4mapped(ifa->ifa_address, (struct in6_addr *)&gid);
1616 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet);
1618 mlx4_ib_addr_event(event, event_netdev, ibdev, &gid);
1622 #if IS_ENABLED(CONFIG_IPV6)
1623 static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event,
1626 struct mlx4_ib_dev *ibdev;
1627 struct inet6_ifaddr *ifa = ptr;
1628 union ib_gid *gid = (union ib_gid *)&ifa->addr;
1629 struct net_device *event_netdev = ifa->idev->dev;
1631 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb_inet6);
1633 mlx4_ib_addr_event(event, event_netdev, ibdev, gid);
1638 #define MLX4_IB_INVALID_MAC ((u64)-1)
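/*
 * Called when the netdev MAC address changes: register the new source MAC
 * for the port's proxy QP1, update the QP's SMAC index through
 * mlx4_update_qp(), and unregister whichever MAC (old, or the newly
 * registered one on failure) is no longer in use.
 */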
1639 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
1640 struct net_device *dev,
1644 u64 release_mac = MLX4_IB_INVALID_MAC;
1645 struct mlx4_ib_qp *qp;
1647 read_lock(&dev_base_lock);
1648 new_smac = mlx4_mac_to_u64(dev->dev_addr);
1649 read_unlock(&dev_base_lock);
1651 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
1652 qp = ibdev->qp1_proxy[port - 1];
1655 u64 old_smac = qp->pri.smac;
1656 struct mlx4_update_qp_params update_params;
1658 if (new_smac == old_smac)
1661 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
1663 if (new_smac_index < 0)
1666 update_params.smac_index = new_smac_index;
1667 if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
1669 release_mac = new_smac;
1673 qp->pri.smac = new_smac;
1674 qp->pri.smac_index = new_smac_index;
1676 release_mac = old_smac;
1680 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
1681 if (release_mac != MLX4_IB_INVALID_MAC)
1682 mlx4_unregister_mac(ibdev->dev, port, release_mac);
1685 static void mlx4_ib_get_dev_addr(struct net_device *dev,
1686 struct mlx4_ib_dev *ibdev, u8 port)
1688 struct in_device *in_dev;
1689 #if IS_ENABLED(CONFIG_IPV6)
1690 struct inet6_dev *in6_dev;
1692 struct inet6_ifaddr *ifp;
1693 union ib_gid default_gid;
1698 if ((port == 0) || (port > ibdev->dev->caps.num_ports))
1702 in_dev = in_dev_get(dev);
1705 /*ifa->ifa_address;*/
1706 ipv6_addr_set_v4mapped(ifa->ifa_address,
1707 (struct in6_addr *)&gid);
1708 update_gid_table(ibdev, port, &gid, 0, 0);
1713 #if IS_ENABLED(CONFIG_IPV6)
1714 mlx4_make_default_gid(dev, &default_gid);
1716 in6_dev = in6_dev_get(dev);
1718 read_lock_bh(&in6_dev->lock);
1719 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
1720 pgid = (union ib_gid *)&ifp->addr;
1721 if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
1723 update_gid_table(ibdev, port, pgid, 0, 0);
1725 read_unlock_bh(&in6_dev->lock);
1726 in6_dev_put(in6_dev);
1731 static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
1732 struct net_device *dev, u8 port)
1735 mlx4_make_default_gid(dev, &gid);
1736 update_gid_table(ibdev, port, &gid, 0, 1);
1739 static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
1741 struct net_device *dev;
1742 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
1745 for (i = 1; i <= ibdev->num_ports; ++i)
1746 if (reset_gid_table(ibdev, i))
1749 read_lock(&dev_base_lock);
1750 spin_lock(&iboe->lock);
1752 for_each_netdev(&init_net, dev) {
1753 u8 port = mlx4_ib_get_dev_port(dev, ibdev);
1755 mlx4_ib_get_dev_addr(dev, ibdev, port);
1758 spin_unlock(&iboe->lock);
1759 read_unlock(&dev_base_lock);
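/*
 * Refresh per-port netdev state on a netdevice notifier event: re-resolve
 * the Ethernet netdev and its bond master for each port, reset or
 * repopulate the GID table when the master changes or a bonded slave goes
 * down, and remember which port needs a QP1 SMAC update.
 */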
1764 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
1765 struct net_device *dev,
1766 unsigned long event)
1769 struct mlx4_ib_iboe *iboe;
1770 int update_qps_port = -1;
1773 iboe = &ibdev->iboe;
1775 spin_lock(&iboe->lock);
1776 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
1777 enum ib_port_state port_state = IB_PORT_NOP;
1778 struct net_device *old_master = iboe->masters[port - 1];
1779 struct net_device *curr_netdev;
1780 struct net_device *curr_master;
1782 iboe->netdevs[port - 1] =
1783 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
1784 if (iboe->netdevs[port - 1])
1785 mlx4_ib_set_default_gid(ibdev,
1786 iboe->netdevs[port - 1], port);
1787 curr_netdev = iboe->netdevs[port - 1];
1789 if (iboe->netdevs[port - 1] &&
1790 netif_is_bond_slave(iboe->netdevs[port - 1])) {
1791 iboe->masters[port - 1] = netdev_master_upper_dev_get(
1792 iboe->netdevs[port - 1]);
1794 iboe->masters[port - 1] = NULL;
1796 curr_master = iboe->masters[port - 1];
1798 if (dev == iboe->netdevs[port - 1] &&
1799 (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER ||
1800 event == NETDEV_UP || event == NETDEV_CHANGE))
1801 update_qps_port = port;
1804 port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
1805 IB_PORT_ACTIVE : IB_PORT_DOWN;
1806 mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
1807 /* if using bonding/team and a slave port is down, we
1808 * don't want the bond's IP-based gids in the table, since
1809 * flows that select a port by gid may get the down port.
1811 if (curr_master && (port_state == IB_PORT_DOWN)) {
1812 reset_gid_table(ibdev, port);
1813 mlx4_ib_set_default_gid(ibdev,
1816 /* if bonding is used, it is possible that we add it to
1817 * masters only after an IP address is assigned to the
1818 * bonding net interface.
1820 if (curr_master && (old_master != curr_master)) {
1821 reset_gid_table(ibdev, port);
1822 mlx4_ib_set_default_gid(ibdev,
1824 mlx4_ib_get_dev_addr(curr_master, ibdev, port);
1827 if (!curr_master && (old_master != curr_master)) {
1828 reset_gid_table(ibdev, port);
1829 mlx4_ib_set_default_gid(ibdev,
1831 mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
1834 reset_gid_table(ibdev, port);
1838 spin_unlock(&iboe->lock);
1840 if (update_qps_port > 0)
1841 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
1844 static int mlx4_ib_netdev_event(struct notifier_block *this,
1845 unsigned long event, void *ptr)
1847 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1848 struct mlx4_ib_dev *ibdev;
1850 if (!net_eq(dev_net(dev), &init_net))
1853 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
1854 mlx4_ib_scan_netdevs(ibdev, dev, event);
1859 static void init_pkeys(struct mlx4_ib_dev *ibdev)
1865 if (mlx4_is_master(ibdev->dev)) {
1866 for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
1867 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1869 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1871 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
1872 /* master has the identity virt2phys pkey mapping */
1873 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
1874 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
1875 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
1876 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
1880 /* initialize pkey cache */
1881 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
1883 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
1885 ibdev->pkeys.phys_pkey_cache[port-1][i] =
1891 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1894 int eq_per_port = 0;
1899 /* Legacy mode or comp_pool is not large enough */
1900 if (dev->caps.comp_pool == 0 ||
1901 dev->caps.num_ports > dev->caps.comp_pool)
1904 eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
1905 dev->caps.num_ports);
1909 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
1910 added_eqs += eq_per_port;
1912 total_eqs = dev->caps.num_comp_vectors + added_eqs;
1914 ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
1915 if (!ibdev->eq_table)
1918 ibdev->eq_added = added_eqs;
1921 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
1922 for (j = 0; j < eq_per_port; j++) {
1923 snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
1924 i, j, dev->pdev->bus->name);
1925 /* Set IRQ for specific name (per ring) */
1926 if (mlx4_assign_eq(dev, name, NULL,
1927 &ibdev->eq_table[eq])) {
1928 /* Use legacy (same as mlx4_en driver) */
1929 pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
1930 ibdev->eq_table[eq] =
1931 (eq % dev->caps.num_comp_vectors);
1937 /* Fill the rest of the vector with legacy EQs */
1938 for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
1939 ibdev->eq_table[eq++] = i;
1941 /* Advertise the new number of EQs to clients */
1942 ibdev->ib_dev.num_comp_vectors = total_eqs;
1945 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1949 /* no additional eqs were added */
1950 if (!ibdev->eq_table)
1953 /* Reset the advertised EQ number */
1954 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
1956 /* Free only the added eqs */
1957 for (i = 0; i < ibdev->eq_added; i++) {
1958 /* Don't free legacy eqs if used */
1959 if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
1961 mlx4_release_eq(dev, ibdev->eq_table[i]);
1964 kfree(ibdev->eq_table);
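/*
 * mlx4 interface "add" callback: allocate the mlx4_ib_dev, set up the
 * protection domain and UAR, fill in the verbs operations and uverbs
 * command masks, reserve the steering QP range when device-managed
 * steering is enabled, then register the IB device, MAD/SR-IOV support,
 * netdev and inet notifiers, and the sysfs attributes.
 */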
1967 static void *mlx4_ib_add(struct mlx4_dev *dev)
1969 struct mlx4_ib_dev *ibdev;
1973 struct mlx4_ib_iboe *iboe;
1974 int ib_num_ports = 0;
1976 pr_info_once("%s", mlx4_ib_version);
1979 mlx4_foreach_ib_transport_port(i, dev)
1982 /* No point in registering a device with no ports... */
1986 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
1988 dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
1992 iboe = &ibdev->iboe;
1994 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
1997 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2000 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2002 if (!ibdev->uar_map)
2004 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2008 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2009 ibdev->ib_dev.owner = THIS_MODULE;
2010 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
2011 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
2012 ibdev->num_ports = num_ports;
2013 ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
2014 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
2015 ibdev->ib_dev.dma_device = &dev->pdev->dev;
2017 if (dev->caps.userspace_caps)
2018 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2020 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2022 ibdev->ib_dev.uverbs_cmd_mask =
2023 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
2024 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
2025 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
2026 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
2027 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
2028 (1ull << IB_USER_VERBS_CMD_REG_MR) |
2029 (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
2030 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
2031 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
2032 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
2033 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
2034 (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
2035 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
2036 (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
2037 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
2038 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
2039 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
2040 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
2041 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
2042 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
2043 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
2044 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
2045 (1ull << IB_USER_VERBS_CMD_OPEN_QP);
2047 ibdev->ib_dev.query_device = mlx4_ib_query_device;
2048 ibdev->ib_dev.query_port = mlx4_ib_query_port;
2049 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
2050 ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
2051 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
2052 ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
2053 ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
2054 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
2055 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
2056 ibdev->ib_dev.mmap = mlx4_ib_mmap;
2057 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
2058 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
2059 ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
2060 ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
2061 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
2062 ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
2063 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
2064 ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
2065 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
2066 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
2067 ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
2068 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
2069 ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
2070 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
2071 ibdev->ib_dev.post_send = mlx4_ib_post_send;
2072 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
2073 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
2074 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
2075 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
2076 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
2077 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
2078 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
2079 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
2080 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
2081 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
2082 ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
2083 ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
2084 ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
2085 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
2086 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
2087 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
2089 if (!mlx4_is_slave(ibdev->dev)) {
2090 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
2091 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
2092 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
2093 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
2096 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2097 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
2098 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
2099 ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw;
2100 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2102 ibdev->ib_dev.uverbs_cmd_mask |=
2103 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
2104 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
2107 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
2108 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2109 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2110 ibdev->ib_dev.uverbs_cmd_mask |=
2111 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
2112 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
2115 if (check_flow_steering_support(dev)) {
2116 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2117 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
2118 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
2120 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2121 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
2122 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
2125 mlx4_ib_alloc_eqs(dev, ibdev);
2127 spin_lock_init(&iboe->lock);
2129 if (init_node_data(ibdev))
2132 for (i = 0; i < ibdev->num_ports; ++i) {
2133 mutex_init(&ibdev->qp1_proxy_lock[i]);
2134 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2135 IB_LINK_LAYER_ETHERNET) {
2136 err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
2138 ibdev->counters[i] = -1;
2140 ibdev->counters[i] = -1;
2144 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2147 spin_lock_init(&ibdev->sm_lock);
2148 mutex_init(&ibdev->cap_mask_mutex);
2150 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2152 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2153 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2154 MLX4_IB_UC_STEER_QPN_ALIGN,
2155 &ibdev->steer_qpn_base);
2159 ibdev->ib_uc_qpns_bitmap =
2160 kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2163 if (!ibdev->ib_uc_qpns_bitmap) {
2164 dev_err(&dev->pdev->dev, "bit map alloc failed\n");
2165 goto err_steer_qp_release;
2168 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
2169 bitmap_zero(ibdev->ib_uc_qpns_bitmap,
2170 ibdev->steer_qpn_count);
2171 err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
2172 dev, ibdev->steer_qpn_base,
2173 ibdev->steer_qpn_base +
2174 ibdev->steer_qpn_count - 1);
2176 goto err_steer_free_bitmap;
2178 bitmap_fill(ibdev->ib_uc_qpns_bitmap,
2179 ibdev->steer_qpn_count);
2183 if (ib_register_device(&ibdev->ib_dev, NULL))
2184 goto err_steer_free_bitmap;
2186 if (mlx4_ib_mad_init(ibdev))
2189 if (mlx4_ib_init_sriov(ibdev))
2192 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
2193 if (!iboe->nb.notifier_call) {
2194 iboe->nb.notifier_call = mlx4_ib_netdev_event;
2195 err = register_netdevice_notifier(&iboe->nb);
2197 iboe->nb.notifier_call = NULL;
2201 if (!iboe->nb_inet.notifier_call) {
2202 iboe->nb_inet.notifier_call = mlx4_ib_inet_event;
2203 err = register_inetaddr_notifier(&iboe->nb_inet);
2205 iboe->nb_inet.notifier_call = NULL;
2209 #if IS_ENABLED(CONFIG_IPV6)
2210 if (!iboe->nb_inet6.notifier_call) {
2211 iboe->nb_inet6.notifier_call = mlx4_ib_inet6_event;
2212 err = register_inet6addr_notifier(&iboe->nb_inet6);
2214 iboe->nb_inet6.notifier_call = NULL;
2219 for (i = 1 ; i <= ibdev->num_ports ; ++i)
2220 reset_gid_table(ibdev, i);
2222 mlx4_ib_scan_netdevs(ibdev, NULL, 0);
2224 mlx4_ib_init_gid_table(ibdev);
2227 for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
2228 if (device_create_file(&ibdev->ib_dev.dev,
2229 mlx4_class_attributes[j]))
2233 ibdev->ib_active = true;
2235 if (mlx4_is_mfunc(ibdev->dev))
2238 /* create paravirt contexts for any VFs which are active */
2239 if (mlx4_is_master(ibdev->dev)) {
2240 for (j = 0; j < MLX4_MFUNC_MAX; j++) {
2241 if (j == mlx4_master_func_num(ibdev->dev))
2243 if (mlx4_is_slave_active(ibdev->dev, j))
2244 do_slave_init(ibdev, j, 1);
2250 if (ibdev->iboe.nb.notifier_call) {
2251 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2252 pr_warn("failure unregistering notifier\n");
2253 ibdev->iboe.nb.notifier_call = NULL;
2255 if (ibdev->iboe.nb_inet.notifier_call) {
2256 if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
2257 pr_warn("failure unregistering notifier\n");
2258 ibdev->iboe.nb_inet.notifier_call = NULL;
2260 #if IS_ENABLED(CONFIG_IPV6)
2261 if (ibdev->iboe.nb_inet6.notifier_call) {
2262 if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
2263 pr_warn("failure unregistering notifier\n");
2264 ibdev->iboe.nb_inet6.notifier_call = NULL;
2267 flush_workqueue(wq);
2269 mlx4_ib_close_sriov(ibdev);
2272 mlx4_ib_mad_cleanup(ibdev);
2275 ib_unregister_device(&ibdev->ib_dev);
2277 err_steer_free_bitmap:
2278 kfree(ibdev->ib_uc_qpns_bitmap);
2280 err_steer_qp_release:
2281 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2282 ibdev->steer_qpn_count);
2285 if (ibdev->counters[i - 1] != -1)
2286 mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
2289 mlx4_ib_free_eqs(dev, ibdev);
2290 iounmap(ibdev->uar_map);
2293 mlx4_uar_free(dev, &ibdev->priv_uar);
2296 mlx4_pd_free(dev, ibdev->priv_pdn);
2299 ib_dealloc_device(&ibdev->ib_dev);
2304 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn)
2308 WARN_ON(!dev->ib_uc_qpns_bitmap);
2310 offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
2311 dev->steer_qpn_count,
2312 get_count_order(count));
2316 *qpn = dev->steer_qpn_base + offset;
2320 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count)
2323 dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED)
2326 BUG_ON(qpn < dev->steer_qpn_base);
2328 bitmap_release_region(dev->ib_uc_qpns_bitmap,
2329 qpn - dev->steer_qpn_base,
2330 get_count_order(count));
2333 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
2338 struct ib_flow_attr *flow = NULL;
2339 struct ib_flow_spec_ib *ib_spec;
2342 flow_size = sizeof(struct ib_flow_attr) +
2343 sizeof(struct ib_flow_spec_ib);
2344 flow = kzalloc(flow_size, GFP_KERNEL);
2347 flow->port = mqp->port;
2348 flow->num_of_specs = 1;
2349 flow->size = flow_size;
2350 ib_spec = (struct ib_flow_spec_ib *)(flow + 1);
2351 ib_spec->type = IB_FLOW_SPEC_IB;
2352 ib_spec->size = sizeof(struct ib_flow_spec_ib);
2353 /* Add an empty rule for IB L2 */
2354 memset(&ib_spec->mask, 0, sizeof(ib_spec->mask));
2356 err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
2361 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
2367 static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
2369 struct mlx4_ib_dev *ibdev = ibdev_ptr;
2372 ibdev->ib_active = false;
2373 flush_workqueue(wq);
2375 if (ibdev->iboe.nb.notifier_call) {
2376 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2377 pr_warn("failure unregistering notifier\n");
2378 ibdev->iboe.nb.notifier_call = NULL;
2381 mlx4_ib_close_sriov(ibdev);
2382 mlx4_ib_mad_cleanup(ibdev);
2383 ib_unregister_device(&ibdev->ib_dev);
2385 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2386 ibdev->steer_qpn_count);
2387 kfree(ibdev->ib_uc_qpns_bitmap);
2389 if (ibdev->iboe.nb_inet.notifier_call) {
2390 if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
2391 pr_warn("failure unregistering notifier\n");
2392 ibdev->iboe.nb_inet.notifier_call = NULL;
2394 #if IS_ENABLED(CONFIG_IPV6)
2395 if (ibdev->iboe.nb_inet6.notifier_call) {
2396 if (unregister_inet6addr_notifier(&ibdev->iboe.nb_inet6))
2397 pr_warn("failure unregistering notifier\n");
2398 ibdev->iboe.nb_inet6.notifier_call = NULL;
2402 iounmap(ibdev->uar_map);
2403 for (p = 0; p < ibdev->num_ports; ++p)
2404 if (ibdev->counters[p] != -1)
2405 mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
2406 mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
2407 mlx4_CLOSE_PORT(dev, p);
2409 mlx4_ib_free_eqs(dev, ibdev);
2411 mlx4_uar_free(dev, &ibdev->priv_uar);
2412 mlx4_pd_free(dev, ibdev->priv_pdn);
2413 ib_dealloc_device(&ibdev->ib_dev);
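/*
 * For each active port of the given slave, queue a
 * mlx4_ib_tunnels_update_work item that initializes (do_init != 0) or
 * tears down the paravirtualized tunnel QPs, unless SR-IOV is already
 * going down.
 */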
2416 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2418 struct mlx4_ib_demux_work **dm = NULL;
2419 struct mlx4_dev *dev = ibdev->dev;
2421 unsigned long flags;
2422 struct mlx4_active_ports actv_ports;
2424 unsigned int first_port;
2426 if (!mlx4_is_master(dev))
2429 actv_ports = mlx4_get_active_ports(dev, slave);
2430 ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2431 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2433 dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC);
2435 pr_err("failed to allocate memory for tunneling qp update\n");
2439 for (i = 0; i < ports; i++) {
2440 dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC);
2442 pr_err("failed to allocate memory for tunneling qp update work struct\n");
2443 for (i = 0; i < dev->caps.num_ports; i++) {
2450 /* initialize or tear down tunnel QPs for the slave */
2451 for (i = 0; i < ports; i++) {
2452 INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work);
2453 dm[i]->port = first_port + i + 1;
2454 dm[i]->slave = slave;
2455 dm[i]->do_init = do_init;
2457 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
2458 if (!ibdev->sriov.is_going_down)
2459 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
2460 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
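/*
 * Translate mlx4 core events into IB events: port up/down, catastrophic
 * error, port management change (queued to a work item on the master),
 * and slave init/shutdown, which (re)build the slave's tunnel QPs.
 */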
2467 static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
2468 enum mlx4_dev_event event, unsigned long param)
2470 struct ib_event ibev;
2471 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
2472 struct mlx4_eqe *eqe = NULL;
2473 struct ib_event_work *ew;
2476 if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
2477 eqe = (struct mlx4_eqe *)param;
2482 case MLX4_DEV_EVENT_PORT_UP:
2483 if (p > ibdev->num_ports)
2485 if (mlx4_is_master(dev) &&
2486 rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
2487 IB_LINK_LAYER_INFINIBAND) {
2488 mlx4_ib_invalidate_all_guid_record(ibdev, p);
2490 ibev.event = IB_EVENT_PORT_ACTIVE;
2493 case MLX4_DEV_EVENT_PORT_DOWN:
2494 if (p > ibdev->num_ports)
2496 ibev.event = IB_EVENT_PORT_ERR;
2499 case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
2500 ibdev->ib_active = false;
2501 ibev.event = IB_EVENT_DEVICE_FATAL;
2504 case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
2505 ew = kmalloc(sizeof *ew, GFP_ATOMIC);
2507 pr_err("failed to allocate memory for events work\n");
2511 INIT_WORK(&ew->work, handle_port_mgmt_change_event);
2512 memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
2514 /* need to queue only for port owner, which uses GEN_EQE */
2515 if (mlx4_is_master(dev))
2516 queue_work(wq, &ew->work);
2518 handle_port_mgmt_change_event(&ew->work);
2521 case MLX4_DEV_EVENT_SLAVE_INIT:
2522 /* here, p is the slave id */
2523 do_slave_init(ibdev, p, 1);
2526 case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
2527 /* here, p is the slave id */
2528 do_slave_init(ibdev, p, 0);
2535 ibev.device = ibdev_ptr;
2536 ibev.element.port_num = (u8) p;
2538 ib_dispatch_event(&ibev);
2541 static struct mlx4_interface mlx4_ib_interface = {
2543 .remove = mlx4_ib_remove,
2544 .event = mlx4_ib_event,
2545 .protocol = MLX4_PROT_IB_IPV6
2548 static int __init mlx4_ib_init(void)
2552 wq = create_singlethread_workqueue("mlx4_ib");
2556 err = mlx4_ib_mcg_init();
2560 err = mlx4_register_interface(&mlx4_ib_interface);
2567 mlx4_ib_mcg_destroy();
2570 destroy_workqueue(wq);
2574 static void __exit mlx4_ib_cleanup(void)
2576 mlx4_unregister_interface(&mlx4_ib_interface);
2577 mlx4_ib_mcg_destroy();
2578 destroy_workqueue(wq);
2581 module_init(mlx4_ib_init);
2582 module_exit(mlx4_ib_cleanup);