/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/rdma_netlink.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
/* free resources for all ports on a device */
static void rxe_cleanup_ports(struct rxe_dev *rxe)
{
	kfree(rxe->port.pkey_tbl);
	rxe->port.pkey_tbl = NULL;
}
/* free resources for a rxe device; all objects created for this device
 * must have been destroyed
 */
void rxe_dealloc(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_pool_cleanup(&rxe->uc_pool);
	rxe_pool_cleanup(&rxe->pd_pool);
	rxe_pool_cleanup(&rxe->ah_pool);
	rxe_pool_cleanup(&rxe->srq_pool);
	rxe_pool_cleanup(&rxe->qp_pool);
	rxe_pool_cleanup(&rxe->cq_pool);
	rxe_pool_cleanup(&rxe->mr_pool);
	rxe_pool_cleanup(&rxe->mw_pool);
	rxe_pool_cleanup(&rxe->mc_grp_pool);
	rxe_pool_cleanup(&rxe->mc_elem_pool);

	rxe_cleanup_ports(rxe);

	if (rxe->tfm)
		crypto_free_shash(rxe->tfm);
}
/* initialize rxe device parameters */
static void rxe_init_device_param(struct rxe_dev *rxe)
{
	rxe->max_inline_data = RXE_MAX_INLINE_DATA;

	rxe->attr.fw_ver = RXE_FW_VER;
	rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
	rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
	rxe->attr.vendor_id = RXE_VENDOR_ID;
	rxe->attr.vendor_part_id = RXE_VENDOR_PART_ID;
	rxe->attr.hw_ver = RXE_HW_VER;
	rxe->attr.max_qp = RXE_MAX_QP;
	rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
	rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
	rxe->attr.max_send_sge = RXE_MAX_SGE;
	rxe->attr.max_recv_sge = RXE_MAX_SGE;
	rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;
	rxe->attr.max_cq = RXE_MAX_CQ;
	rxe->attr.max_cqe = (1 << RXE_MAX_LOG_CQE) - 1;
	rxe->attr.max_mr = RXE_MAX_MR;
	rxe->attr.max_pd = RXE_MAX_PD;
	rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
	rxe->attr.max_ee_rd_atom = RXE_MAX_EE_RD_ATOM;
	rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
	rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
	rxe->attr.max_ee_init_rd_atom = RXE_MAX_EE_INIT_RD_ATOM;
	rxe->attr.atomic_cap = IB_ATOMIC_HCA;
	rxe->attr.max_ee = RXE_MAX_EE;
	rxe->attr.max_rdd = RXE_MAX_RDD;
	rxe->attr.max_mw = RXE_MAX_MW;
	rxe->attr.max_raw_ipv6_qp = RXE_MAX_RAW_IPV6_QP;
	rxe->attr.max_raw_ethy_qp = RXE_MAX_RAW_ETHY_QP;
	rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
	rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
	rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
	rxe->attr.max_ah = RXE_MAX_AH;
	rxe->attr.max_fmr = RXE_MAX_FMR;
	rxe->attr.max_map_per_fmr = RXE_MAX_MAP_PER_FMR;
	rxe->attr.max_srq = RXE_MAX_SRQ;
	rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
	rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
	rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
	rxe->attr.max_pkeys = RXE_MAX_PKEYS;
	rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;

	rxe->max_ucontext = RXE_MAX_UCONTEXT;
}
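
/*
 * Informational sketch (not driver code): a userspace verbs consumer sees
 * the limits set above through a standard libibverbs query, e.g.:
 *
 *	struct ibv_device_attr attr;
 *
 *	if (!ibv_query_device(ctx, &attr))
 *		printf("max_qp=%d max_cqe=%d max_mr=%d\n",
 *		       attr.max_qp, attr.max_cqe, attr.max_mr);
 *
 * Here "ctx" is assumed to be an ibv_context already opened on the rxe
 * device.
 */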
/* initialize port attributes */
static int rxe_init_port_param(struct rxe_port *port)
{
	port->attr.state = IB_PORT_DOWN;
	port->attr.max_mtu = IB_MTU_4096;
	port->attr.active_mtu = IB_MTU_256;
	port->attr.gid_tbl_len = RXE_PORT_GID_TBL_LEN;
	port->attr.port_cap_flags = RXE_PORT_PORT_CAP_FLAGS;
	port->attr.max_msg_sz = RXE_PORT_MAX_MSG_SZ;
	port->attr.bad_pkey_cntr = RXE_PORT_BAD_PKEY_CNTR;
	port->attr.qkey_viol_cntr = RXE_PORT_QKEY_VIOL_CNTR;
	port->attr.pkey_tbl_len = RXE_PORT_PKEY_TBL_LEN;
	port->attr.lid = RXE_PORT_LID;
	port->attr.sm_lid = RXE_PORT_SM_LID;
	port->attr.lmc = RXE_PORT_LMC;
	port->attr.max_vl_num = RXE_PORT_MAX_VL_NUM;
	port->attr.sm_sl = RXE_PORT_SM_SL;
	port->attr.subnet_timeout = RXE_PORT_SUBNET_TIMEOUT;
	port->attr.init_type_reply = RXE_PORT_INIT_TYPE_REPLY;
	port->attr.active_width = RXE_PORT_ACTIVE_WIDTH;
	port->attr.active_speed = RXE_PORT_ACTIVE_SPEED;
	port->attr.phys_state = RXE_PORT_PHYS_STATE;
	port->mtu_cap = ib_mtu_enum_to_int(IB_MTU_256);
	port->subnet_prefix = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);

	return 0;
}
/* initialize port state, note IB convention that HCA ports are always
 * numbered from 1
 */
static int rxe_init_ports(struct rxe_dev *rxe)
{
	struct rxe_port *port = &rxe->port;

	rxe_init_port_param(port);

	if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len)
		return -EINVAL;

	port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len,
				 sizeof(*port->pkey_tbl), GFP_KERNEL);
	if (!port->pkey_tbl)
		return -ENOMEM;

	/* entry 0 is the default full-membership P_Key */
	port->pkey_tbl[0] = 0xffff;
	addrconf_addr_eui48((unsigned char *)&port->port_guid,
			    rxe->ndev->dev_addr);

	spin_lock_init(&port->port_lock);

	return 0;
}
/* init pools of managed objects */
static int rxe_init_pools(struct rxe_dev *rxe)
{
	int err;

	err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
			    rxe->max_ucontext);
	if (err)
		goto err1;

	err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD,
			    rxe->attr.max_pd);
	if (err)
		goto err2;

	err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH,
			    rxe->attr.max_ah);
	if (err)
		goto err3;

	err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ,
			    rxe->attr.max_srq);
	if (err)
		goto err4;

	err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
			    rxe->attr.max_qp);
	if (err)
		goto err5;

	err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ,
			    rxe->attr.max_cq);
	if (err)
		goto err6;

	err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR,
			    rxe->attr.max_mr);
	if (err)
		goto err7;

	err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW,
			    rxe->attr.max_mw);
	if (err)
		goto err8;

	err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
			    rxe->attr.max_mcast_grp);
	if (err)
		goto err9;

	err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
			    rxe->attr.max_total_mcast_qp_attach);
	if (err)
		goto err10;

	return 0;

	/* unwind in reverse order of creation */
err10:
	rxe_pool_cleanup(&rxe->mc_grp_pool);
err9:
	rxe_pool_cleanup(&rxe->mw_pool);
err8:
	rxe_pool_cleanup(&rxe->mr_pool);
err7:
	rxe_pool_cleanup(&rxe->cq_pool);
err6:
	rxe_pool_cleanup(&rxe->qp_pool);
err5:
	rxe_pool_cleanup(&rxe->srq_pool);
err4:
	rxe_pool_cleanup(&rxe->ah_pool);
err3:
	rxe_pool_cleanup(&rxe->pd_pool);
err2:
	rxe_pool_cleanup(&rxe->uc_pool);
err1:
	return err;
}
/* initialize rxe device state */
static int rxe_init(struct rxe_dev *rxe)
{
	int err;

	/* init default device parameters */
	rxe_init_device_param(rxe);

	err = rxe_init_ports(rxe);
	if (err)
		goto err1;

	err = rxe_init_pools(rxe);
	if (err)
		goto err2;

	/* init pending mmap list */
	spin_lock_init(&rxe->mmap_offset_lock);
	spin_lock_init(&rxe->pending_lock);
	INIT_LIST_HEAD(&rxe->pending_mmaps);

	mutex_init(&rxe->usdev_lock);

	return 0;

err2:
	rxe_cleanup_ports(rxe);
err1:
	return err;
}
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
{
	struct rxe_port *port = &rxe->port;
	enum ib_mtu mtu;

	mtu = eth_mtu_int_to_enum(ndev_mtu);

	/* make sure the new MTU is in range */
	mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256;

	port->attr.active_mtu = mtu;
	port->mtu_cap = ib_mtu_enum_to_int(mtu);
}
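
/*
 * Worked example (informational, assuming typical RoCE header overhead):
 * an ndev_mtu of 1500 leaves room for a 1024-byte IB payload after the
 * headers, so it maps to IB_MTU_1024; a 9000-byte jumbo frame clamps at
 * IB_MTU_4096; an MTU too small for any IB MTU falls back to the
 * IB_MTU_256 floor.
 */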
/* called by the ifc layer to create a new rxe device.
 * The caller should allocate memory for rxe by calling ib_alloc_device.
 */
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
{
	int err;

	err = rxe_init(rxe);
	if (err)
		return err;

	rxe_set_mtu(rxe, mtu);

	return rxe_register_device(rxe, ibdev_name);
}
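
/*
 * Sketch of a typical caller, modeled on rxe_net_add() in rxe_net.c
 * (illustrative only; the real implementation may differ):
 *
 *	struct rxe_dev *rxe;
 *	int err;
 *
 *	rxe = ib_alloc_device(rxe_dev, ib_dev);
 *	if (!rxe)
 *		return -ENOMEM;
 *
 *	rxe->ndev = ndev;
 *	err = rxe_add(rxe, ndev->mtu, ibdev_name);
 *	if (err)
 *		ib_dealloc_device(&rxe->ib_dev);
 */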
static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
{
	struct rxe_dev *exists;
	int err = 0;

	exists = rxe_get_dev_from_net(ndev);
	if (exists) {
		ib_device_put(&exists->ib_dev);
		pr_err("already configured on %s\n", ndev->name);
		err = -EEXIST;
		goto err;
	}

	err = rxe_net_add(ibdev_name, ndev);
	if (err)
		pr_err("failed to add %s\n", ndev->name);
err:
	return err;
}
static struct rdma_link_ops rxe_link_ops = {
	.type = "rxe",
	.newlink = rxe_newlink,
};
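
/*
 * With these link ops registered (see MODULE_ALIAS_RDMA_LINK below),
 * userspace can instantiate a rxe device over a netdev through the rdma
 * netlink interface, e.g. with iproute2:
 *
 *	# rdma link add rxe0 type rxe netdev eth0
 */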
static int __init rxe_module_init(void)
{
	int err;

	/* initialize slab caches for managed objects */
	err = rxe_cache_init();
	if (err) {
		pr_err("unable to init object pools\n");
		return err;
	}

	err = rxe_net_init();
	if (err) {
		/* don't leak the slab caches on failure */
		rxe_cache_exit();
		return err;
	}

	rdma_link_register(&rxe_link_ops);
	pr_info("loaded\n");
	return 0;
}
static void __exit rxe_module_exit(void)
{
	rdma_link_unregister(&rxe_link_ops);
	ib_unregister_driver(RDMA_DRIVER_RXE);
	rxe_net_exit();
	rxe_cache_exit();

	pr_info("unloaded\n");
}
late_initcall(rxe_module_init);
module_exit(rxe_module_exit);

MODULE_ALIAS_RDMA_LINK("rxe");